diff --git a/awx/lib/site-packages/redis/CHANGES b/awx/lib/site-packages/redis/CHANGES deleted file mode 100644 index c455f05a94..0000000000 --- a/awx/lib/site-packages/redis/CHANGES +++ /dev/null @@ -1,426 +0,0 @@ -* 2.10.3 - * Fixed a bug with the bytearray support introduced in 2.10.2. Thanks - Josh Owen. -* 2.10.2 - * Added support for Hiredis's new bytearray support. Thanks - https://github.com/tzickel - * POSSIBLE BACKWARDS INCOMPATBLE CHANGE: Fixed a possible race condition - when multiple threads share the same Lock instance with a timeout. Lock - tokens are now stored in thread local storage by default. If you have - code that acquires a lock in one thread and passes that lock instance to - another thread to release it, you need to disable thread local storage. - Refer to the doc strings on the Lock class about the thread_local - argument information. - * Fixed a regression in from_url where "charset" and "errors" weren't - valid options. "encoding" and "encoding_errors" are still accepted - and preferred. - * The "charset" and "errors" options have been deprecated. Passing - either to StrictRedis.__init__ or from_url will still work but will - also emit a DeprecationWarning. Instead use the "encoding" and - "encoding_errors" options. - * Fixed a compatability bug with Python 3 when the server closes a - connection. - * Added BITPOS command. Thanks https://github.com/jettify. - * Fixed a bug when attempting to send large values to Redis in a Pipeline. -* 2.10.1 - * Fixed a bug where Sentinel connections to a server that's no longer a - master and receives a READONLY error will disconnect and reconnect to - the master. -* 2.10.0 - * Discontinuted support for Python 2.5. Upgrade. You'll be happier. - * The HiRedis parser will now properly raise ConnectionErrors. - * Completely refactored PubSub support. Fixes all known PubSub bugs and - adds a bunch of new features. Docs can be found in the README under the - new "Publish / Subscribe" section. 
- * Added the new HyperLogLog commanads (PFADD, PFCOUNT, PFMERGE). Thanks - Pepijn de Vos and Vincent Ohprecio. - * Updated TTL and PTTL commands with Redis 2.8+ semantics. Thanks Markus - Kaiserswerth. - * *SCAN commands now return a long (int on Python3) cursor value rather - than the string representation. This might be slightly backwards - incompatible in code using *SCAN commands loops such as - "while cursor != '0':". - * Added extra *SCAN commands that return iterators instead of the normal - [cursor, data] type. Use scan_iter, hscan_iter, sscan_iter, and - zscan_iter for iterators. Thanks Mathieu Longtin. - * Added support for SLOWLOG commands. Thanks Rick van Hattem. - * Added lexicographical commands ZRANGEBYLEX, ZREMRANGEBYLEX, and ZLEXCOUNT - for sorted sets. - * Connection objects now support an optional argument, socket_read_size, - indicating how much data to read during each socket.recv() call. After - benchmarking, increased the default size to 64k, which dramatically - improves performance when fetching large values, such as many results - in a pipeline or a large (>1MB) string value. - * Improved the pack_command and send_packed_command functions to increase - performance when sending large (>1MB) values. - * Sentinel Connections to master servers now detect when a READONLY error - is encountered and disconnect themselves and all other active connections - to the same master so that the new master can be discovered. - * Fixed Sentinel state parsing on Python 3. - * Added support for SENTINEL MONITOR, SENTINEL REMOVE, and SENTINEL SET - commands. Thanks Greg Murphy. - * INFO ouput that doesn't follow the "key:value" format will now be - appended to a key named "__raw__" in the INFO dictionary. Thanks Pedro - Larroy. - * The "vagrant" directory contains a complete vagrant environment for - redis-py developers. The environment runs a Redis master, a Redis slave, - and 3 Sentinels. 
Future iterations of the test sutie will incorporate - more integration style tests, ensuring things like failover happen - correctly. - * It's now possible to create connection pool instances from a URL. - StrictRedis.from_url() now uses this feature to create a connection pool - instance and use that when creating a new client instance. Thanks - https://github.com/chillipino - * When creating client instances or connection pool instances from an URL, - it's now possible to pass additional options to the connection pool with - querystring arguments. - * Fixed a bug where some encodings (like utf-16) were unusable on Python 3 - as command names and literals would get encoded. - * Added an SSLConnection class that allows for secure connections through - stunnel or other means. Construct and SSL connection with the sll=True - option on client classes, using the rediss:// scheme from an URL, or - by passing the SSLConnection class to a connection pool's - connection_class argument. Thanks https://github.com/oranagra. - * Added a socket_connect_timeout option to control how long to wait while - establishing a TCP connection before timing out. This lets the client - fail fast when attempting to connect to a downed server while keeping - a more lenient timeout for all other socket operations. - * Added TCP Keep-alive support by passing use the socket_keepalive=True - option. Finer grain control can be achieved using the - socket_keepalive_options option which expects a dictionary with any of - the keys (socket.TCP_KEEPIDLE, socket.TCP_KEEPCNT, socket.TCP_KEEPINTVL) - and integers for values. Thanks Yossi Gottlieb. - * Added a `retry_on_timeout` option that controls how socket.timeout errors - are handled. By default it is set to False and will cause the client to - raise a TimeoutError anytime a socket.timeout is encountered. If - `retry_on_timeout` is set to True, the client will retry a command that - timed out once like other `socket.error`s. 
- * Completely refactored the Lock system. There is now a LuaLock class - that's used when the Redis server is capable of running Lua scripts along - with a fallback class for Redis servers < 2.6. The new locks fix several - subtle race consider that the old lock could face. In additional, a - new method, "extend" is available on lock instances that all a lock - owner to extend the amount of time they have the lock for. Thanks to - Eli Finkelshteyn and https://github.com/chillipino for contributions. -* 2.9.1 - * IPv6 support. Thanks https://github.com/amashinchi -* 2.9.0 - * Performance improvement for packing commands when using the PythonParser. - Thanks Guillaume Viot. - * Executing an empty pipeline transaction no longer sends MULTI/EXEC to - the server. Thanks EliFinkelshteyn. - * Errors when authenticating (incorrect password) and selecting a database - now close the socket. - * Full Sentinel support thanks to Vitja Makarov. Thanks! - * Better repr support for client and connection pool instances. Thanks - Mark Roberts. - * Error messages that the server sends to the client are now included - in the client error message. Thanks Sangjin Lim. - * Added the SCAN, SSCAN, HSCAN, and ZSCAN commands. Thanks Jingchao Hu. - * ResponseErrors generated by pipeline execution provide addition context - including the position of the command in the pipeline and the actual - command text generated the error. - * ConnectionPools now play nicer in threaded environments that fork. Thanks - Christian Joergensen. -* 2.8.0 - * redis-py should play better with gevent when a gevent Timeout is raised. - Thanks leifkb. - * Added SENTINEL command. Thanks Anna Janackova. - * Fixed a bug where pipelines could potentially corrupt a connection - if the MULTI command generated a ResponseError. Thanks EliFinkelshteyn - for the report. 
- * Connections now call socket.shutdown() prior to socket.close() to - ensure communication ends immediately per the note at - http://docs.python.org/2/library/socket.html#socket.socket.close - Thanks to David Martin for pointing this out. - * Lock checks are now based on floats rather than ints. Thanks - Vitja Makarov. -* 2.7.6 - * Added CONFIG RESETSTAT command. Thanks Yossi Gottlieb. - * Fixed a bug introduced in 2.7.3 that caused issues with script objects - and pipelines. Thanks Carpentier Pierre-Francois. - * Converted redis-py's test suite to use the awesome py.test library. - * Fixed a bug introduced in 2.7.5 that prevented a ConnectionError from - being raised when the Redis server is LOADING data. - * Added a BusyLoadingError exception that's raised when the Redis server - is starting up and not accepting commands yet. BusyLoadingError - subclasses ConnectionError, which this state previously returned. - Thanks Yossi Gottlieb. -* 2.7.5 - * DEL, HDEL and ZREM commands now return the numbers of keys deleted - instead of just True/False. - * from_url now supports URIs with a port number. Thanks Aaron Westendorf. -* 2.7.4 - * Added missing INCRBY method. Thanks Krzysztof Dorosz. - * SET now accepts the EX, PX, NX and XX options from Redis 2.6.12. These - options will generate errors if these options are used when connected - to a Redis server < 2.6.12. Thanks George Yoshida. -* 2.7.3 - * Fixed a bug with BRPOPLPUSH and lists with empty strings. - * All empty except: clauses have been replaced to only catch Exception - subclasses. This prevents a KeyboardInterrupt from triggering exception - handlers. Thanks Lucian Branescu Mihaila. - * All exceptions that are the result of redis server errors now share a - command Exception subclass, ServerError. Thanks Matt Robenolt. - * Prevent DISCARD from being called if MULTI wasn't also called. Thanks - Pete Aykroyd. - * SREM now returns an integer indicating the number of items removed from - the set. 
Thanks http://github.com/ronniekk. - * Fixed a bug with BGSAVE and BGREWRITEAOF response callbacks with Python3. - Thanks Nathan Wan. - * Added CLIENT GETNAME and CLIENT SETNAME commands. - Thanks http://github.com/bitterb. - * It's now possible to use len() on a pipeline instance to determine the - number of commands that will be executed. Thanks Jon Parise. - * Fixed a bug in INFO's parse routine with floating point numbers. Thanks - Ali Onur Uyar. - * Fixed a bug with BITCOUNT to allow `start` and `end` to both be zero. - Thanks Tim Bart. - * The transaction() method now accepts a boolean keyword argument, - value_from_callable. By default, or if False is passes, the transaction() - method will return the value of the pipelines execution. Otherwise, it - will return whatever func() returns. - * Python3 compatibility fix ensuring we're not already bytes(). Thanks - Salimane Adjao Moustapha. - * Added PSETEX. Thanks YAMAMOTO Takashi. - * Added a BlockingConnectionPool to limit the number of connections that - can be created. Thanks James Arthur. - * SORT now accepts a `groups` option that if specified, will return - tuples of n-length, where n is the number of keys specified in the GET - argument. This allows for convenient row-based iteration. Thanks - Ionuț Arțăriși. -* 2.7.2 - * Parse errors are now *always* raised on multi/exec pipelines, regardless - of the `raise_on_error` flag. See - https://groups.google.com/forum/?hl=en&fromgroups=#!topic/redis-db/VUiEFT8U8U0 - for more info. -* 2.7.1 - * Packaged tests with source code -* 2.7.0 - * Added BITOP and BITCOUNT commands. Thanks Mark Tozzi. - * Added the TIME command. Thanks Jason Knight. - * Added support for LUA scripting. Thanks to Angus Peart, Drew Smathers, - Issac Kelly, Louis-Philippe Perron, Sean Bleier, Jeffrey Kaditz, and - Dvir Volk for various patches and contributions to this feature. - * Changed the default error handling in pipelines. 
By default, the first - error in a pipeline will now be raised. A new parameter to the - pipeline's execute, `raise_on_error`, can be set to False to keep the - old behavior of embeedding the exception instances in the result. - * Fixed a bug with pipelines where parse errors won't corrupt the - socket. - * Added the optional `number` argument to SRANDMEMBER for use with - Redis 2.6+ servers. - * Added PEXPIRE/PEXPIREAT/PTTL commands. Thanks Luper Rouch. - * Added INCRBYFLOAT/HINCRBYFLOAT commands. Thanks Nikita Uvarov. - * High precision floating point values won't lose their precision when - being sent to the Redis server. Thanks Jason Oster and Oleg Pudeyev. - * Added CLIENT LIST/CLIENT KILL commands -* 2.6.2 - * `from_url` is now available as a classmethod on client classes. Thanks - Jon Parise for the patch. - * Fixed several encoding errors resulting from the Python 3.x support. -* 2.6.1 - * Python 3.x support! Big thanks to Alex Grönholm. - * Fixed a bug in the PythonParser's read_response that could hide an error - from the client (#251). -* 2.6.0 - * Changed (p)subscribe and (p)unsubscribe to no longer return messages - indicating the channel was subscribed/unsubscribed to. These messages - are available in the listen() loop instead. This is to prevent the - following scenario: - * Client A is subscribed to "foo" - * Client B publishes message to "foo" - * Client A subscribes to channel "bar" at the same time. - Prior to this change, the subscribe() call would return the published - messages on "foo" rather than the subscription confirmation to "bar". - * Added support for GETRANGE, thanks Jean-Philippe Caruana - * A new setting "decode_responses" specifies whether return values from - Redis commands get decoded automatically using the client's charset - value. Thanks to Frankie Dintino for the patch. -* 2.4.13 - * redis.from_url() can take an URL representing a Redis connection string - and return a client object. Thanks Kenneth Reitz for the patch. 
-* 2.4.12 - * ConnectionPool is now fork-safe. Thanks Josiah Carson for the patch. -* 2.4.11 - * AuthenticationError will now be correctly raised if an invalid password - is supplied. - * If Hiredis is unavailable, the HiredisParser will raise a RedisError - if selected manually. - * Made the INFO command more tolerant of Redis changes formatting. Fix - for #217. -* 2.4.10 - * Buffer reads from socket in the PythonParser. Fix for a Windows-specific - bug (#205). - * Added the OBJECT and DEBUG OBJECT commands. - * Added __del__ methods for classes that hold on to resources that need to - be cleaned up. This should prevent resource leakage when these objects - leave scope due to misuse or unhandled exceptions. Thanks David Wolever - for the suggestion. - * Added the ECHO command for completeness. - * Fixed a bug where attempting to subscribe to a PubSub channel of a Redis - server that's down would blow out the stack. Fixes #179 and #195. Thanks - Ovidiu Predescu for the test case. - * StrictRedis's TTL command now returns a -1 when querying a key with no - expiration. The Redis class continues to return None. - * ZADD and SADD now return integer values indicating the number of items - added. Thanks Homer Strong. - * Renamed the base client class to StrictRedis, replacing ZADD and LREM in - favor of their official argument order. The Redis class is now a subclass - of StrictRedis, implementing the legacy redis-py implementations of ZADD - and LREM. Docs have been updated to suggesting the use of StrictRedis. - * SETEX in StrictRedis is now compliant with official Redis SETEX command. - the name, value, time implementation moved to "Redis" for backwards - compatability. -* 2.4.9 - * Removed socket retry logic in Connection. This is the responsbility of - the caller to determine if the command is safe and can be retried. Thanks - David Wolver. - * Added some extra guards around various types of exceptions being raised - when sending or parsing data. 
Thanks David Wolver and Denis Bilenko. -* 2.4.8 - * Imported with_statement from __future__ for Python 2.5 compatability. -* 2.4.7 - * Fixed a bug where some connections were not getting released back to the - connection pool after pipeline execution. - * Pipelines can now be used as context managers. This is the preferred way - of use to ensure that connections get cleaned up properly. Thanks - David Wolever. - * Added a convenience method called transaction() on the base Redis class. - This method eliminates much of the boilerplate used when using pipelines - to watch Redis keys. See the documentation for details on usage. -* 2.4.6 - * Variadic arguments for SADD, SREM, ZREN, HDEL, LPUSH, and RPUSH. Thanks - Raphaël Vinot. - * (CRITICAL) Fixed an error in the Hiredis parser that occasionally caused - the socket connection to become corrupted and unusable. This became - noticeable once connection pools started to be used. - * ZRANGE, ZREVRANGE, ZRANGEBYSCORE, and ZREVRANGEBYSCORE now take an - additional optional argument, score_cast_func, which is a callable used - to cast the score value in the return type. The default is float. - * Removed the PUBLISH method from the PubSub class. Connections that are - [P]SUBSCRIBEd cannot issue PUBLISH commands, so it doesn't make sense - to have it here. - * Pipelines now contain WATCH and UNWATCH. Calling WATCH or UNWATCH from - the base client class will result in a deprecation warning. After - WATCHing one or more keys, the pipeline will be placed in immediate - execution mode until UNWATCH or MULTI are called. Refer to the new - pipeline docs in the README for more information. Thanks to David Wolever - and Randall Leeds for greatly helping with this. -* 2.4.5 - * The PythonParser now works better when reading zero length strings. -* 2.4.4 - * Fixed a typo introduced in 2.4.3 -* 2.4.3 - * Fixed a bug in the UnixDomainSocketConnection caused when trying to - form an error message after a socket error. 
-* 2.4.2 - * Fixed a bug in pipeline that caused an exception while trying to - reconnect after a connection timeout. -* 2.4.1 - * Fixed a bug in the PythonParser if disconnect is called before connect. -* 2.4.0 - * WARNING: 2.4 contains several backwards incompatible changes. - * Completely refactored Connection objects. Moved much of the Redis - protocol packing for requests here, and eliminated the nasty dependencies - it had on the client to do AUTH and SELECT commands on connect. - * Connection objects now have a parser attribute. Parsers are responsible - for reading data Redis sends. Two parsers ship with redis-py: a - PythonParser and the HiRedis parser. redis-py will automatically use the - HiRedis parser if you have the Python hiredis module installed, otherwise - it will fall back to the PythonParser. You can force or the other, or even - an external one by passing the `parser_class` argument to ConnectionPool. - * Added a UnixDomainSocketConnection for users wanting to talk to the Redis - instance running on a local machine only. You can use this connection - by passing it to the `connection_class` argument of the ConnectionPool. - * Connections no longer derive from threading.local. See threading.local - note below. - * ConnectionPool has been comletely refactored. The ConnectionPool now - maintains a list of connections. The redis-py client only hangs on to - a ConnectionPool instance, calling get_connection() anytime it needs to - send a command. When get_connection() is called, the command name and - any keys involved in the command are passed as arguments. Subclasses of - ConnectionPool could use this information to identify the shard the keys - belong to and return a connection to it. ConnectionPool also implements - disconnect() to force all connections in the pool to disconnect from - the Redis server. - * redis-py no longer support the SELECT command. 
You can still connect to - a specific database by specifing it when instantiating a client instance - or by creating a connection pool. If you need to talk to multiplate - databases within your application, you should use a separate client - instance for each database you want to talk to. - * Completely refactored Publish/Subscribe support. The subscribe and listen - commands are no longer available on the redis-py Client class. Instead, - the `pubsub` method returns an instance of the PubSub class which contains - all publish/subscribe support. Note, you can still PUBLISH from the - redis-py client class if you desire. - * Removed support for all previously deprecated commands or options. - * redis-py no longer uses threading.local in any way. Since the Client - class no longer holds on to a connection, it's no longer needed. You can - now pass client instances between threads, and commands run on those - threads will retrieve an available connection from the pool, use it and - release it. It should now be trivial to use redis-py with eventlet or - greenlet. - * ZADD now accepts pairs of value=score keyword arguments. This should help - resolve the long standing #72. The older value and score arguments have - been deprecated in favor of the keyword argument style. - * Client instances now get their own copy of RESPONSE_CALLBACKS. The new - set_response_callback method adds a user defined callback to the instance. - * Support Jython, fixing #97. Thanks to Adam Vandenberg for the patch. - * Using __getitem__ now properly raises a KeyError when the key is not - found. Thanks Ionuț Arțăriși for the patch. - * Newer Redis versions return a LOADING message for some commands while - the database is loading from disk during server start. This could cause - problems with SELECT. We now force a socket disconnection prior to - raising a ResponseError so subsuquent connections have to reconnect and - re-select the appropriate database. 
Thanks to Benjamin Anderson for - finding this and fixing. -* 2.2.4 - * WARNING: Potential backwards incompatible change - Changed order of - parameters of ZREVRANGEBYSCORE to match those of the actual Redis command. - This is only backwards-incompatible if you were passing max and min via - keyword args. If passing by normal args, nothing in user code should have - to change. Thanks Stéphane Angel for the fix. - * Fixed INFO to properly parse the Redis data correctly for both 2.2.x and - 2.3+. Thanks Stéphane Angel for the fix. - * Lock objects now store their timeout value as a float. This allows floats - to be used as timeout values. No changes to existing code required. - * WATCH now supports multiple keys. Thanks Rich Schumacher. - * Broke out some code that was Python 2.4 incompatible. redis-py should - now be useable on 2.4, but this hasn't actually been tested. Thanks - Dan Colish for the patch. - * Optimized some code using izip and islice. Should have a pretty good - speed up on larger data sets. Thanks Dan Colish. - * Better error handling when submitting an empty mapping to HMSET. Thanks - Dan Colish. - * Subscription status is now reset after every (re)connection. -* 2.2.3 - * Added support for Hiredis. To use, simply "pip install hiredis" or - "easy_install hiredis". Thanks for Pieter Noordhuis for the hiredis-py - bindings and the patch to redis-py. - * The connection class is chosen based on whether hiredis is installed - or not. To force the use of the PythonConnection, simply create - your own ConnectionPool instance with the connection_class argument - assigned to to PythonConnection class. - * Added missing command ZREVRANGEBYSCORE. Thanks Jay Baird for the patch. - * The INFO command should be parsed correctly on 2.2.x server versions - and is backwards compatible with older versions. Thanks Brett Hoerner. -* 2.2.2 - * Fixed a bug in ZREVRANK where retriving the rank of a value not in - the zset would raise an error. 
- * Fixed a bug in Connection.send where the errno import was getting - overwritten by a local variable. - * Fixed a bug in SLAVEOF when promoting an existing slave to a master. - * Reverted change of download URL back to redis-VERSION.tar.gz. 2.2.1's - change of this actually broke Pypi for Pip installs. Sorry! -* 2.2.1 - * Changed archive name to redis-py-VERSION.tar.gz to not conflict - with the Redis server archive. -* 2.2.0 - * Implemented SLAVEOF - * Implemented CONFIG as config_get and config_set - * Implemented GETBIT/SETBIT - * Implemented BRPOPLPUSH - * Implemented STRLEN - * Implemented PERSIST - * Implemented SETRANGE diff --git a/awx/lib/site-packages/redis/INSTALL b/awx/lib/site-packages/redis/INSTALL deleted file mode 100644 index 951f7dea8a..0000000000 --- a/awx/lib/site-packages/redis/INSTALL +++ /dev/null @@ -1,6 +0,0 @@ - -Please use - python setup.py install - -and report errors to Andy McCurdy (sedrik@gmail.com) - diff --git a/awx/lib/site-packages/redis/LICENSE b/awx/lib/site-packages/redis/LICENSE deleted file mode 100644 index 29a3fe3845..0000000000 --- a/awx/lib/site-packages/redis/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -Copyright (c) 2012 Andy McCurdy - - Permission is hereby granted, free of charge, to any person - obtaining a copy of this software and associated documentation - files (the "Software"), to deal in the Software without - restriction, including without limitation the rights to use, - copy, modify, merge, publish, distribute, sublicense, and/or sell - copies of the Software, and to permit persons to whom the - Software is furnished to do so, subject to the following - conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES - OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT - HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, - WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - OTHER DEALINGS IN THE SOFTWARE. diff --git a/awx/lib/site-packages/redis/MANIFEST.in b/awx/lib/site-packages/redis/MANIFEST.in deleted file mode 100644 index 7aaee12a1d..0000000000 --- a/awx/lib/site-packages/redis/MANIFEST.in +++ /dev/null @@ -1,7 +0,0 @@ -include CHANGES -include INSTALL -include LICENSE -include README.rst -exclude __pycache__ -recursive-include tests * -recursive-exclude tests *.pyc diff --git a/awx/lib/site-packages/redis/PKG-INFO b/awx/lib/site-packages/redis/PKG-INFO deleted file mode 100644 index adde85d9eb..0000000000 --- a/awx/lib/site-packages/redis/PKG-INFO +++ /dev/null @@ -1,696 +0,0 @@ -Metadata-Version: 1.1 -Name: redis -Version: 2.10.3 -Summary: Python client for Redis key-value store -Home-page: http://github.com/andymccurdy/redis-py -Author: Andy McCurdy -Author-email: sedrik@gmail.com -License: MIT -Description: redis-py - ======== - - The Python interface to the Redis key-value store. - - .. image:: https://secure.travis-ci.org/andymccurdy/redis-py.png?branch=master - :target: http://travis-ci.org/andymccurdy/redis-py - - Installation - ------------ - - redis-py requires a running Redis server. See `Redis's quickstart - `_ for installation instructions. - - To install redis-py, simply: - - .. code-block:: bash - - $ sudo pip install redis - - or alternatively (you really should be using pip though): - - .. code-block:: bash - - $ sudo easy_install redis - - or from source: - - .. code-block:: bash - - $ sudo python setup.py install - - - Getting Started - --------------- - - .. 
code-block:: pycon - - >>> import redis - >>> r = redis.StrictRedis(host='localhost', port=6379, db=0) - >>> r.set('foo', 'bar') - True - >>> r.get('foo') - 'bar' - - API Reference - ------------- - - The `official Redis command documentation `_ does a - great job of explaining each command in detail. redis-py exposes two client - classes that implement these commands. The StrictRedis class attempts to adhere - to the official command syntax. There are a few exceptions: - - * **SELECT**: Not implemented. See the explanation in the Thread Safety section - below. - * **DEL**: 'del' is a reserved keyword in the Python syntax. Therefore redis-py - uses 'delete' instead. - * **CONFIG GET|SET**: These are implemented separately as config_get or config_set. - * **MULTI/EXEC**: These are implemented as part of the Pipeline class. The - pipeline is wrapped with the MULTI and EXEC statements by default when it - is executed, which can be disabled by specifying transaction=False. - See more about Pipelines below. - * **SUBSCRIBE/LISTEN**: Similar to pipelines, PubSub is implemented as a separate - class as it places the underlying connection in a state where it can't - execute non-pubsub commands. Calling the pubsub method from the Redis client - will return a PubSub instance where you can subscribe to channels and listen - for messages. You can only call PUBLISH from the Redis client (see - `this comment on issue #151 - `_ - for details). - * **SCAN/SSCAN/HSCAN/ZSCAN**: The *SCAN commands are implemented as they - exist in the Redis documentation. In addition, each command has an equivilant - iterator method. These are purely for convenience so the user doesn't have - to keep track of the cursor while iterating. Use the - scan_iter/sscan_iter/hscan_iter/zscan_iter methods for this behavior. 
- - In addition to the changes above, the Redis class, a subclass of StrictRedis, - overrides several other commands to provide backwards compatibility with older - versions of redis-py: - - * **LREM**: Order of 'num' and 'value' arguments reversed such that 'num' can - provide a default value of zero. - * **ZADD**: Redis specifies the 'score' argument before 'value'. These were swapped - accidentally when being implemented and not discovered until after people - were already using it. The Redis class expects \*args in the form of: - `name1, score1, name2, score2, ...` - * **SETEX**: Order of 'time' and 'value' arguments reversed. - - - More Detail - ----------- - - Connection Pools - ^^^^^^^^^^^^^^^^ - - Behind the scenes, redis-py uses a connection pool to manage connections to - a Redis server. By default, each Redis instance you create will in turn create - its own connection pool. You can override this behavior and use an existing - connection pool by passing an already created connection pool instance to the - connection_pool argument of the Redis class. You may choose to do this in order - to implement client side sharding or have finer grain control of how - connections are managed. - - .. code-block:: pycon - - >>> pool = redis.ConnectionPool(host='localhost', port=6379, db=0) - >>> r = redis.Redis(connection_pool=pool) - - Connections - ^^^^^^^^^^^ - - ConnectionPools manage a set of Connection instances. redis-py ships with two - types of Connections. The default, Connection, is a normal TCP socket based - connection. The UnixDomainSocketConnection allows for clients running on the - same device as the server to connect via a unix domain socket. To use a - UnixDomainSocketConnection connection, simply pass the unix_socket_path - argument, which is a string to the unix domain socket file. Additionally, make - sure the unixsocket parameter is defined in your redis.conf file. It's - commented out by default. - - .. 
code-block:: pycon - - >>> r = redis.Redis(unix_socket_path='/tmp/redis.sock') - - You can create your own Connection subclasses as well. This may be useful if - you want to control the socket behavior within an async framework. To - instantiate a client class using your own connection, you need to create - a connection pool, passing your class to the connection_class argument. - Other keyword parameters your pass to the pool will be passed to the class - specified during initialization. - - .. code-block:: pycon - - >>> pool = redis.ConnectionPool(connection_class=YourConnectionClass, - your_arg='...', ...) - - Parsers - ^^^^^^^ - - Parser classes provide a way to control how responses from the Redis server - are parsed. redis-py ships with two parser classes, the PythonParser and the - HiredisParser. By default, redis-py will attempt to use the HiredisParser if - you have the hiredis module installed and will fallback to the PythonParser - otherwise. - - Hiredis is a C library maintained by the core Redis team. Pieter Noordhuis was - kind enough to create Python bindings. Using Hiredis can provide up to a - 10x speed improvement in parsing responses from the Redis server. The - performance increase is most noticeable when retrieving many pieces of data, - such as from LRANGE or SMEMBERS operations. - - Hiredis is available on PyPI, and can be installed via pip or easy_install - just like redis-py. - - .. code-block:: bash - - $ pip install hiredis - - or - - .. code-block:: bash - - $ easy_install hiredis - - Response Callbacks - ^^^^^^^^^^^^^^^^^^ - - The client class uses a set of callbacks to cast Redis responses to the - appropriate Python type. There are a number of these callbacks defined on - the Redis client class in a dictionary called RESPONSE_CALLBACKS. - - Custom callbacks can be added on a per-instance basis using the - set_response_callback method. This method accepts two arguments: a command - name and the callback. 
Callbacks added in this manner are only valid on the - instance the callback is added to. If you want to define or override a callback - globally, you should make a subclass of the Redis client and add your callback - to its REDIS_CALLBACKS class dictionary. - - Response callbacks take at least one parameter: the response from the Redis - server. Keyword arguments may also be accepted in order to further control - how to interpret the response. These keyword arguments are specified during the - command's call to execute_command. The ZRANGE implementation demonstrates the - use of response callback keyword arguments with its "withscores" argument. - - Thread Safety - ^^^^^^^^^^^^^ - - Redis client instances can safely be shared between threads. Internally, - connection instances are only retrieved from the connection pool during - command execution, and returned to the pool directly after. Command execution - never modifies state on the client instance. - - However, there is one caveat: the Redis SELECT command. The SELECT command - allows you to switch the database currently in use by the connection. That - database remains selected until another is selected or until the connection is - closed. This creates an issue in that connections could be returned to the pool - that are connected to a different database. - - As a result, redis-py does not implement the SELECT command on client - instances. If you use multiple Redis databases within the same application, you - should create a separate client instance (and possibly a separate connection - pool) for each database. - - It is not safe to pass PubSub or Pipeline objects between threads. - - Pipelines - ^^^^^^^^^ - - Pipelines are a subclass of the base Redis class that provide support for - buffering multiple commands to the server in a single request. 
They can be used - to dramatically increase the performance of groups of commands by reducing the - number of back-and-forth TCP packets between the client and server. - - Pipelines are quite simple to use: - - .. code-block:: pycon - - >>> r = redis.Redis(...) - >>> r.set('bing', 'baz') - >>> # Use the pipeline() method to create a pipeline instance - >>> pipe = r.pipeline() - >>> # The following SET commands are buffered - >>> pipe.set('foo', 'bar') - >>> pipe.get('bing') - >>> # the EXECUTE call sends all buffered commands to the server, returning - >>> # a list of responses, one for each command. - >>> pipe.execute() - [True, 'baz'] - - For ease of use, all commands being buffered into the pipeline return the - pipeline object itself. Therefore calls can be chained like: - - .. code-block:: pycon - - >>> pipe.set('foo', 'bar').sadd('faz', 'baz').incr('auto_number').execute() - [True, True, 6] - - In addition, pipelines can also ensure the buffered commands are executed - atomically as a group. This happens by default. If you want to disable the - atomic nature of a pipeline but still want to buffer commands, you can turn - off transactions. - - .. code-block:: pycon - - >>> pipe = r.pipeline(transaction=False) - - A common issue occurs when requiring atomic transactions but needing to - retrieve values in Redis prior for use within the transaction. For instance, - let's assume that the INCR command didn't exist and we need to build an atomic - version of INCR in Python. - - The completely naive implementation could GET the value, increment it in - Python, and SET the new value back. However, this is not atomic because - multiple clients could be doing this at the same time, each getting the same - value from GET. - - Enter the WATCH command. WATCH provides the ability to monitor one or more keys - prior to starting a transaction. 
If any of those keys change prior the - execution of that transaction, the entire transaction will be canceled and a - WatchError will be raised. To implement our own client-side INCR command, we - could do something like this: - - .. code-block:: pycon - - >>> with r.pipeline() as pipe: - ... while 1: - ... try: - ... # put a WATCH on the key that holds our sequence value - ... pipe.watch('OUR-SEQUENCE-KEY') - ... # after WATCHing, the pipeline is put into immediate execution - ... # mode until we tell it to start buffering commands again. - ... # this allows us to get the current value of our sequence - ... current_value = pipe.get('OUR-SEQUENCE-KEY') - ... next_value = int(current_value) + 1 - ... # now we can put the pipeline back into buffered mode with MULTI - ... pipe.multi() - ... pipe.set('OUR-SEQUENCE-KEY', next_value) - ... # and finally, execute the pipeline (the set command) - ... pipe.execute() - ... # if a WatchError wasn't raised during execution, everything - ... # we just did happened atomically. - ... break - ... except WatchError: - ... # another client must have changed 'OUR-SEQUENCE-KEY' between - ... # the time we started WATCHing it and the pipeline's execution. - ... # our best bet is to just retry. - ... continue - - Note that, because the Pipeline must bind to a single connection for the - duration of a WATCH, care must be taken to ensure that the connection is - returned to the connection pool by calling the reset() method. If the - Pipeline is used as a context manager (as in the example above) reset() - will be called automatically. Of course you can do this the manual way by - explicity calling reset(): - - .. code-block:: pycon - - >>> pipe = r.pipeline() - >>> while 1: - ... try: - ... pipe.watch('OUR-SEQUENCE-KEY') - ... ... - ... pipe.execute() - ... break - ... except WatchError: - ... continue - ... finally: - ... 
pipe.reset() - - A convenience method named "transaction" exists for handling all the - boilerplate of handling and retrying watch errors. It takes a callable that - should expect a single parameter, a pipeline object, and any number of keys to - be WATCHed. Our client-side INCR command above can be written like this, - which is much easier to read: - - .. code-block:: pycon - - >>> def client_side_incr(pipe): - ... current_value = pipe.get('OUR-SEQUENCE-KEY') - ... next_value = int(current_value) + 1 - ... pipe.multi() - ... pipe.set('OUR-SEQUENCE-KEY', next_value) - >>> - >>> r.transaction(client_side_incr, 'OUR-SEQUENCE-KEY') - [True] - - Publish / Subscribe - ^^^^^^^^^^^^^^^^^^^ - - redis-py includes a `PubSub` object that subscribes to channels and listens - for new messages. Creating a `PubSub` object is easy. - - .. code-block:: pycon - - >>> r = redis.StrictRedis(...) - >>> p = r.pubsub() - - Once a `PubSub` instance is created, channels and patterns can be subscribed - to. - - .. code-block:: pycon - - >>> p.subscribe('my-first-channel', 'my-second-channel', ...) - >>> p.psubscribe('my-*', ...) - - The `PubSub` instance is now subscribed to those channels/patterns. The - subscription confirmations can be seen by reading messages from the `PubSub` - instance. - - .. code-block:: pycon - - >>> p.get_message() - {'pattern': None, 'type': 'subscribe', 'channel': 'my-second-channel', 'data': 1L} - >>> p.get_message() - {'pattern': None, 'type': 'subscribe', 'channel': 'my-first-channel', 'data': 2L} - >>> p.get_message() - {'pattern': None, 'type': 'psubscribe', 'channel': 'my-*', 'data': 3L} - - Every message read from a `PubSub` instance will be a dictionary with the - following keys. 
- - * **type**: One of the following: 'subscribe', 'unsubscribe', 'psubscribe', - 'punsubscribe', 'message', 'pmessage' - * **channel**: The channel [un]subscribed to or the channel a message was - published to - * **pattern**: The pattern that matched a published message's channel. Will be - `None` in all cases except for 'pmessage' types. - * **data**: The message data. With [un]subscribe messages, this value will be - the number of channels and patterns the connection is currently subscribed - to. With [p]message messages, this value will be the actual published - message. - - Let's send a message now. - - .. code-block:: pycon - - # the publish method returns the number matching channel and pattern - # subscriptions. 'my-first-channel' matches both the 'my-first-channel' - # subscription and the 'my-*' pattern subscription, so this message will - # be delivered to 2 channels/patterns - >>> r.publish('my-first-channel', 'some data') - 2 - >>> p.get_message() - {'channel': 'my-first-channel', 'data': 'some data', 'pattern': None, 'type': 'message'} - >>> p.get_message() - {'channel': 'my-first-channel', 'data': 'some data', 'pattern': 'my-*', 'type': 'pmessage'} - - Unsubscribing works just like subscribing. If no arguments are passed to - [p]unsubscribe, all channels or patterns will be unsubscribed from. - - .. code-block:: pycon - - >>> p.unsubscribe() - >>> p.punsubscribe('my-*') - >>> p.get_message() - {'channel': 'my-second-channel', 'data': 2L, 'pattern': None, 'type': 'unsubscribe'} - >>> p.get_message() - {'channel': 'my-first-channel', 'data': 1L, 'pattern': None, 'type': 'unsubscribe'} - >>> p.get_message() - {'channel': 'my-*', 'data': 0L, 'pattern': None, 'type': 'punsubscribe'} - - redis-py also allows you to register callback functions to handle published - messages. Message handlers take a single argument, the message, which is a - dictionary just like the examples above. 
To subscribe to a channel or pattern - with a message handler, pass the channel or pattern name as a keyword argument - with its value being the callback function. - - When a message is read on a channel or pattern with a message handler, the - message dictionary is created and passed to the message handler. In this case, - a `None` value is returned from get_message() since the message was already - handled. - - .. code-block:: pycon - - >>> def my_handler(message): - ... print 'MY HANDLER: ', message['data'] - >>> p.subscribe(**{'my-channel': my_handler}) - # read the subscribe confirmation message - >>> p.get_message() - {'pattern': None, 'type': 'subscribe', 'channel': 'my-channel', 'data': 1L} - >>> r.publish('my-channel', 'awesome data') - 1 - # for the message handler to work, we need tell the instance to read data. - # this can be done in several ways (read more below). we'll just use - # the familiar get_message() function for now - >>> message = p.get_message() - MY HANDLER: awesome data - # note here that the my_handler callback printed the string above. - # `message` is None because the message was handled by our handler. - >>> print message - None - - If your application is not interested in the (sometimes noisy) - subscribe/unsubscribe confirmation messages, you can ignore them by passing - `ignore_subscribe_messages=True` to `r.pubsub()`. This will cause all - subscribe/unsubscribe messages to be read, but they won't bubble up to your - application. - - .. code-block:: pycon - - >>> p = r.pubsub(ignore_subscribe_messages=True) - >>> p.subscribe('my-channel') - >>> p.get_message() # hides the subscribe message and returns None - >>> r.publish('my-channel') - 1 - >>> p.get_message() - {'channel': 'my-channel', data': 'my data', 'pattern': None, 'type': 'message'} - - There are three different strategies for reading messages. - - The examples above have been using `pubsub.get_message()`. 
Behind the scenes, - `get_message()` uses the system's 'select' module to quickly poll the - connection's socket. If there's data available to be read, `get_message()` will - read it, format the message and return it or pass it to a message handler. If - there's no data to be read, `get_message()` will immediately return None. This - makes it trivial to integrate into an existing event loop inside your - application. - - .. code-block:: pycon - - >>> while True: - >>> message = p.get_message() - >>> if message: - >>> # do something with the message - >>> time.sleep(0.001) # be nice to the system :) - - Older versions of redis-py only read messages with `pubsub.listen()`. listen() - is a generator that blocks until a message is available. If your application - doesn't need to do anything else but receive and act on messages received from - redis, listen() is an easy way to get up an running. - - .. code-block:: pycon - - >>> for message in p.listen(): - ... # do something with the message - - The third option runs an event loop in a separate thread. - `pubsub.run_in_thread()` creates a new thread and starts the event loop. The - thread object is returned to the caller of `run_in_thread()`. The caller can - use the `thread.stop()` method to shut down the event loop and thread. Behind - the scenes, this is simply a wrapper around `get_message()` that runs in a - separate thread, essentially creating a tiny non-blocking event loop for you. - `run_in_thread()` takes an optional `sleep_time` argument. If specified, the - event loop will call `time.sleep()` with the value in each iteration of the - loop. - - Note: Since we're running in a separate thread, there's no way to handle - messages that aren't automatically handled with registered message handlers. - Therefore, redis-py prevents you from calling `run_in_thread()` if you're - subscribed to patterns or channels that don't have message handlers attached. - - .. 
code-block:: pycon - - >>> p.subscribe(**{'my-channel': my_handler}) - >>> thread = p.run_in_thread(sleep_time=0.001) - # the event loop is now running in the background processing messages - # when it's time to shut it down... - >>> thread.stop() - - A PubSub object adheres to the same encoding semantics as the client instance - it was created from. Any channel or pattern that's unicode will be encoded - using the `charset` specified on the client before being sent to Redis. If the - client's `decode_responses` flag is set the False (the default), the - 'channel', 'pattern' and 'data' values in message dictionaries will be byte - strings (str on Python 2, bytes on Python 3). If the client's - `decode_responses` is True, then the 'channel', 'pattern' and 'data' values - will be automatically decoded to unicode strings using the client's `charset`. - - PubSub objects remember what channels and patterns they are subscribed to. In - the event of a disconnection such as a network error or timeout, the - PubSub object will re-subscribe to all prior channels and patterns when - reconnecting. Messages that were published while the client was disconnected - cannot be delivered. When you're finished with a PubSub object, call its - `.close()` method to shutdown the connection. - - .. code-block:: pycon - - >>> p = r.pubsub() - >>> ... - >>> p.close() - - LUA Scripting - ^^^^^^^^^^^^^ - - redis-py supports the EVAL, EVALSHA, and SCRIPT commands. However, there are - a number of edge cases that make these commands tedious to use in real world - scenarios. Therefore, redis-py exposes a Script object that makes scripting - much easier to use. - - To create a Script instance, use the `register_script` function on a client - instance passing the LUA code as the first argument. `register_script` returns - a Script instance that you can use throughout your code. - - The following trivial LUA script accepts two parameters: the name of a key and - a multiplier value. 
The script fetches the value stored in the key, multiplies - it with the multiplier value and returns the result. - - .. code-block:: pycon - - >>> r = redis.StrictRedis() - >>> lua = """ - ... local value = redis.call('GET', KEYS[1]) - ... value = tonumber(value) - ... return value * ARGV[1]""" - >>> multiply = r.register_script(lua) - - `multiply` is now a Script instance that is invoked by calling it like a - function. Script instances accept the following optional arguments: - - * **keys**: A list of key names that the script will access. This becomes the - KEYS list in LUA. - * **args**: A list of argument values. This becomes the ARGV list in LUA. - * **client**: A redis-py Client or Pipeline instance that will invoke the - script. If client isn't specified, the client that intiially - created the Script instance (the one that `register_script` was - invoked from) will be used. - - Continuing the example from above: - - .. code-block:: pycon - - >>> r.set('foo', 2) - >>> multiply(keys=['foo'], args=[5]) - 10 - - The value of key 'foo' is set to 2. When multiply is invoked, the 'foo' key is - passed to the script along with the multiplier value of 5. LUA executes the - script and returns the result, 10. - - Script instances can be executed using a different client instance, even one - that points to a completely different Redis server. - - .. code-block:: pycon - - >>> r2 = redis.StrictRedis('redis2.example.com') - >>> r2.set('foo', 3) - >>> multiply(keys=['foo'], args=[5], client=r2) - 15 - - The Script object ensures that the LUA script is loaded into Redis's script - cache. In the event of a NOSCRIPT error, it will load the script and retry - executing it. - - Script objects can also be used in pipelines. The pipeline instance should be - passed as the client argument when calling the script. Care is taken to ensure - that the script is registered in Redis's script cache just prior to pipeline - execution. - - .. 
code-block:: pycon - - >>> pipe = r.pipeline() - >>> pipe.set('foo', 5) - >>> multiply(keys=['foo'], args=[5], client=pipe) - >>> pipe.execute() - [True, 25] - - Sentinel support - ^^^^^^^^^^^^^^^^ - - redis-py can be used together with `Redis Sentinel `_ - to discover Redis nodes. You need to have at least one Sentinel daemon running - in order to use redis-py's Sentinel support. - - Connecting redis-py to the Sentinel instance(s) is easy. You can use a - Sentinel connection to discover the master and slaves network addresses: - - .. code-block:: pycon - - >>> from redis.sentinel import Sentinel - >>> sentinel = Sentinel([('localhost', 26379)], socket_timeout=0.1) - >>> sentinel.discover_master('mymaster') - ('127.0.0.1', 6379) - >>> sentinel.discover_slaves('mymaster') - [('127.0.0.1', 6380)] - - You can also create Redis client connections from a Sentinel instnace. You can - connect to either the master (for write operations) or a slave (for read-only - operations). - - .. code-block:: pycon - - >>> master = sentinel.master_for('mymaster', socket_timeout=0.1) - >>> slave = sentinel.slave_for('mymaster', socket_timeout=0.1) - >>> master.set('foo', 'bar') - >>> slave.get('foo') - 'bar' - - The master and slave objects are normal StrictRedis instances with their - connection pool bound to the Sentinel instance. When a Sentinel backed client - attempts to establish a connection, it first queries the Sentinel servers to - determine an appropriate host to connect to. If no server is found, - a MasterNotFoundError or SlaveNotFoundError is raised. Both exceptions are - subclasses of ConnectionError. - - When trying to connect to a slave client, the Sentinel connection pool will - iterate over the list of slaves until it finds one that can be connected to. - If no slaves can be connected to, a connection will be established with the - master. - - See `Guidelines for Redis clients with support for Redis Sentinel - `_ to learn more about Redis Sentinel. 
- - Scan Iterators - ^^^^^^^^^^^^^^ - - The *SCAN commands introduced in Redis 2.8 can be cumbersome to use. While - these commands are fully supported, redis-py also exposes the following methods - that return Python iterators for convenience: `scan_iter`, `hscan_iter`, - `sscan_iter` and `zscan_iter`. - - .. code-block:: pycon - - >>> for key, value in (('A', '1'), ('B', '2'), ('C', '3')): - ... r.set(key, value) - >>> for key in r.scan_iter(): - ... print key, r.get(key) - A 1 - B 2 - C 3 - - Author - ^^^^^^ - - redis-py is developed and maintained by Andy McCurdy (sedrik@gmail.com). - It can be found here: http://github.com/andymccurdy/redis-py - - Special thanks to: - - * Ludovico Magnocavallo, author of the original Python Redis client, from - which some of the socket code is still used. - * Alexander Solovyov for ideas on the generic response callback system. - * Paul Hubbard for initial packaging support. - - -Keywords: Redis,key-value store -Platform: UNKNOWN -Classifier: Development Status :: 5 - Production/Stable -Classifier: Environment :: Console -Classifier: Intended Audience :: Developers -Classifier: License :: OSI Approved :: MIT License -Classifier: Operating System :: OS Independent -Classifier: Programming Language :: Python -Classifier: Programming Language :: Python :: 2.6 -Classifier: Programming Language :: Python :: 2.7 -Classifier: Programming Language :: Python :: 3 -Classifier: Programming Language :: Python :: 3.2 -Classifier: Programming Language :: Python :: 3.3 -Classifier: Programming Language :: Python :: 3.4 diff --git a/awx/lib/site-packages/redis/README.rst b/awx/lib/site-packages/redis/README.rst deleted file mode 100644 index 41bb92e822..0000000000 --- a/awx/lib/site-packages/redis/README.rst +++ /dev/null @@ -1,673 +0,0 @@ -redis-py -======== - -The Python interface to the Redis key-value store. - -.. 
image:: https://secure.travis-ci.org/andymccurdy/redis-py.png?branch=master - :target: http://travis-ci.org/andymccurdy/redis-py - -Installation ------------- - -redis-py requires a running Redis server. See `Redis's quickstart -`_ for installation instructions. - -To install redis-py, simply: - -.. code-block:: bash - - $ sudo pip install redis - -or alternatively (you really should be using pip though): - -.. code-block:: bash - - $ sudo easy_install redis - -or from source: - -.. code-block:: bash - - $ sudo python setup.py install - - -Getting Started ---------------- - -.. code-block:: pycon - - >>> import redis - >>> r = redis.StrictRedis(host='localhost', port=6379, db=0) - >>> r.set('foo', 'bar') - True - >>> r.get('foo') - 'bar' - -API Reference -------------- - -The `official Redis command documentation `_ does a -great job of explaining each command in detail. redis-py exposes two client -classes that implement these commands. The StrictRedis class attempts to adhere -to the official command syntax. There are a few exceptions: - -* **SELECT**: Not implemented. See the explanation in the Thread Safety section - below. -* **DEL**: 'del' is a reserved keyword in the Python syntax. Therefore redis-py - uses 'delete' instead. -* **CONFIG GET|SET**: These are implemented separately as config_get or config_set. -* **MULTI/EXEC**: These are implemented as part of the Pipeline class. The - pipeline is wrapped with the MULTI and EXEC statements by default when it - is executed, which can be disabled by specifying transaction=False. - See more about Pipelines below. -* **SUBSCRIBE/LISTEN**: Similar to pipelines, PubSub is implemented as a separate - class as it places the underlying connection in a state where it can't - execute non-pubsub commands. Calling the pubsub method from the Redis client - will return a PubSub instance where you can subscribe to channels and listen - for messages. 
You can only call PUBLISH from the Redis client (see - `this comment on issue #151 - `_ - for details). -* **SCAN/SSCAN/HSCAN/ZSCAN**: The *SCAN commands are implemented as they - exist in the Redis documentation. In addition, each command has an equivilant - iterator method. These are purely for convenience so the user doesn't have - to keep track of the cursor while iterating. Use the - scan_iter/sscan_iter/hscan_iter/zscan_iter methods for this behavior. - -In addition to the changes above, the Redis class, a subclass of StrictRedis, -overrides several other commands to provide backwards compatibility with older -versions of redis-py: - -* **LREM**: Order of 'num' and 'value' arguments reversed such that 'num' can - provide a default value of zero. -* **ZADD**: Redis specifies the 'score' argument before 'value'. These were swapped - accidentally when being implemented and not discovered until after people - were already using it. The Redis class expects \*args in the form of: - `name1, score1, name2, score2, ...` -* **SETEX**: Order of 'time' and 'value' arguments reversed. - - -More Detail ------------ - -Connection Pools -^^^^^^^^^^^^^^^^ - -Behind the scenes, redis-py uses a connection pool to manage connections to -a Redis server. By default, each Redis instance you create will in turn create -its own connection pool. You can override this behavior and use an existing -connection pool by passing an already created connection pool instance to the -connection_pool argument of the Redis class. You may choose to do this in order -to implement client side sharding or have finer grain control of how -connections are managed. - -.. code-block:: pycon - - >>> pool = redis.ConnectionPool(host='localhost', port=6379, db=0) - >>> r = redis.Redis(connection_pool=pool) - -Connections -^^^^^^^^^^^ - -ConnectionPools manage a set of Connection instances. redis-py ships with two -types of Connections. The default, Connection, is a normal TCP socket based -connection. 
The UnixDomainSocketConnection allows for clients running on the -same device as the server to connect via a unix domain socket. To use a -UnixDomainSocketConnection connection, simply pass the unix_socket_path -argument, which is a string to the unix domain socket file. Additionally, make -sure the unixsocket parameter is defined in your redis.conf file. It's -commented out by default. - -.. code-block:: pycon - - >>> r = redis.Redis(unix_socket_path='/tmp/redis.sock') - -You can create your own Connection subclasses as well. This may be useful if -you want to control the socket behavior within an async framework. To -instantiate a client class using your own connection, you need to create -a connection pool, passing your class to the connection_class argument. -Other keyword parameters your pass to the pool will be passed to the class -specified during initialization. - -.. code-block:: pycon - - >>> pool = redis.ConnectionPool(connection_class=YourConnectionClass, - your_arg='...', ...) - -Parsers -^^^^^^^ - -Parser classes provide a way to control how responses from the Redis server -are parsed. redis-py ships with two parser classes, the PythonParser and the -HiredisParser. By default, redis-py will attempt to use the HiredisParser if -you have the hiredis module installed and will fallback to the PythonParser -otherwise. - -Hiredis is a C library maintained by the core Redis team. Pieter Noordhuis was -kind enough to create Python bindings. Using Hiredis can provide up to a -10x speed improvement in parsing responses from the Redis server. The -performance increase is most noticeable when retrieving many pieces of data, -such as from LRANGE or SMEMBERS operations. - -Hiredis is available on PyPI, and can be installed via pip or easy_install -just like redis-py. - -.. code-block:: bash - - $ pip install hiredis - -or - -.. 
code-block:: bash - - $ easy_install hiredis - -Response Callbacks -^^^^^^^^^^^^^^^^^^ - -The client class uses a set of callbacks to cast Redis responses to the -appropriate Python type. There are a number of these callbacks defined on -the Redis client class in a dictionary called RESPONSE_CALLBACKS. - -Custom callbacks can be added on a per-instance basis using the -set_response_callback method. This method accepts two arguments: a command -name and the callback. Callbacks added in this manner are only valid on the -instance the callback is added to. If you want to define or override a callback -globally, you should make a subclass of the Redis client and add your callback -to its REDIS_CALLBACKS class dictionary. - -Response callbacks take at least one parameter: the response from the Redis -server. Keyword arguments may also be accepted in order to further control -how to interpret the response. These keyword arguments are specified during the -command's call to execute_command. The ZRANGE implementation demonstrates the -use of response callback keyword arguments with its "withscores" argument. - -Thread Safety -^^^^^^^^^^^^^ - -Redis client instances can safely be shared between threads. Internally, -connection instances are only retrieved from the connection pool during -command execution, and returned to the pool directly after. Command execution -never modifies state on the client instance. - -However, there is one caveat: the Redis SELECT command. The SELECT command -allows you to switch the database currently in use by the connection. That -database remains selected until another is selected or until the connection is -closed. This creates an issue in that connections could be returned to the pool -that are connected to a different database. - -As a result, redis-py does not implement the SELECT command on client -instances. 
If you use multiple Redis databases within the same application, you -should create a separate client instance (and possibly a separate connection -pool) for each database. - -It is not safe to pass PubSub or Pipeline objects between threads. - -Pipelines -^^^^^^^^^ - -Pipelines are a subclass of the base Redis class that provide support for -buffering multiple commands to the server in a single request. They can be used -to dramatically increase the performance of groups of commands by reducing the -number of back-and-forth TCP packets between the client and server. - -Pipelines are quite simple to use: - -.. code-block:: pycon - - >>> r = redis.Redis(...) - >>> r.set('bing', 'baz') - >>> # Use the pipeline() method to create a pipeline instance - >>> pipe = r.pipeline() - >>> # The following SET commands are buffered - >>> pipe.set('foo', 'bar') - >>> pipe.get('bing') - >>> # the EXECUTE call sends all buffered commands to the server, returning - >>> # a list of responses, one for each command. - >>> pipe.execute() - [True, 'baz'] - -For ease of use, all commands being buffered into the pipeline return the -pipeline object itself. Therefore calls can be chained like: - -.. code-block:: pycon - - >>> pipe.set('foo', 'bar').sadd('faz', 'baz').incr('auto_number').execute() - [True, True, 6] - -In addition, pipelines can also ensure the buffered commands are executed -atomically as a group. This happens by default. If you want to disable the -atomic nature of a pipeline but still want to buffer commands, you can turn -off transactions. - -.. code-block:: pycon - - >>> pipe = r.pipeline(transaction=False) - -A common issue occurs when requiring atomic transactions but needing to -retrieve values in Redis prior for use within the transaction. For instance, -let's assume that the INCR command didn't exist and we need to build an atomic -version of INCR in Python. 
- -The completely naive implementation could GET the value, increment it in -Python, and SET the new value back. However, this is not atomic because -multiple clients could be doing this at the same time, each getting the same -value from GET. - -Enter the WATCH command. WATCH provides the ability to monitor one or more keys -prior to starting a transaction. If any of those keys change prior the -execution of that transaction, the entire transaction will be canceled and a -WatchError will be raised. To implement our own client-side INCR command, we -could do something like this: - -.. code-block:: pycon - - >>> with r.pipeline() as pipe: - ... while 1: - ... try: - ... # put a WATCH on the key that holds our sequence value - ... pipe.watch('OUR-SEQUENCE-KEY') - ... # after WATCHing, the pipeline is put into immediate execution - ... # mode until we tell it to start buffering commands again. - ... # this allows us to get the current value of our sequence - ... current_value = pipe.get('OUR-SEQUENCE-KEY') - ... next_value = int(current_value) + 1 - ... # now we can put the pipeline back into buffered mode with MULTI - ... pipe.multi() - ... pipe.set('OUR-SEQUENCE-KEY', next_value) - ... # and finally, execute the pipeline (the set command) - ... pipe.execute() - ... # if a WatchError wasn't raised during execution, everything - ... # we just did happened atomically. - ... break - ... except WatchError: - ... # another client must have changed 'OUR-SEQUENCE-KEY' between - ... # the time we started WATCHing it and the pipeline's execution. - ... # our best bet is to just retry. - ... continue - -Note that, because the Pipeline must bind to a single connection for the -duration of a WATCH, care must be taken to ensure that the connection is -returned to the connection pool by calling the reset() method. If the -Pipeline is used as a context manager (as in the example above) reset() -will be called automatically. 
Of course you can do this the manual way by -explicitly calling reset(): - -.. code-block:: pycon - - >>> pipe = r.pipeline() - >>> while 1: - ... try: - ... pipe.watch('OUR-SEQUENCE-KEY') - ... ... - ... pipe.execute() - ... break - ... except WatchError: - ... continue - ... finally: - ... pipe.reset() - -A convenience method named "transaction" exists for handling all the -boilerplate of handling and retrying watch errors. It takes a callable that -should expect a single parameter, a pipeline object, and any number of keys to -be WATCHed. Our client-side INCR command above can be written like this, -which is much easier to read: - -.. code-block:: pycon - - >>> def client_side_incr(pipe): - ... current_value = pipe.get('OUR-SEQUENCE-KEY') - ... next_value = int(current_value) + 1 - ... pipe.multi() - ... pipe.set('OUR-SEQUENCE-KEY', next_value) - >>> - >>> r.transaction(client_side_incr, 'OUR-SEQUENCE-KEY') - [True] - -Publish / Subscribe -^^^^^^^^^^^^^^^^^^^ - -redis-py includes a `PubSub` object that subscribes to channels and listens -for new messages. Creating a `PubSub` object is easy. - -.. code-block:: pycon - - >>> r = redis.StrictRedis(...) - >>> p = r.pubsub() - -Once a `PubSub` instance is created, channels and patterns can be subscribed -to. - -.. code-block:: pycon - - >>> p.subscribe('my-first-channel', 'my-second-channel', ...) - >>> p.psubscribe('my-*', ...) - -The `PubSub` instance is now subscribed to those channels/patterns. The -subscription confirmations can be seen by reading messages from the `PubSub` -instance. - -.. code-block:: pycon - - >>> p.get_message() - {'pattern': None, 'type': 'subscribe', 'channel': 'my-second-channel', 'data': 1L} - >>> p.get_message() - {'pattern': None, 'type': 'subscribe', 'channel': 'my-first-channel', 'data': 2L} - >>> p.get_message() - {'pattern': None, 'type': 'psubscribe', 'channel': 'my-*', 'data': 3L} - -Every message read from a `PubSub` instance will be a dictionary with the -following keys. 
- -* **type**: One of the following: 'subscribe', 'unsubscribe', 'psubscribe', - 'punsubscribe', 'message', 'pmessage' -* **channel**: The channel [un]subscribed to or the channel a message was - published to -* **pattern**: The pattern that matched a published message's channel. Will be - `None` in all cases except for 'pmessage' types. -* **data**: The message data. With [un]subscribe messages, this value will be - the number of channels and patterns the connection is currently subscribed - to. With [p]message messages, this value will be the actual published - message. - -Let's send a message now. - -.. code-block:: pycon - - # the publish method returns the number of matching channel and pattern - # subscriptions. 'my-first-channel' matches both the 'my-first-channel' - # subscription and the 'my-*' pattern subscription, so this message will - # be delivered to 2 channels/patterns - >>> r.publish('my-first-channel', 'some data') - 2 - >>> p.get_message() - {'channel': 'my-first-channel', 'data': 'some data', 'pattern': None, 'type': 'message'} - >>> p.get_message() - {'channel': 'my-first-channel', 'data': 'some data', 'pattern': 'my-*', 'type': 'pmessage'} - -Unsubscribing works just like subscribing. If no arguments are passed to -[p]unsubscribe, all channels or patterns will be unsubscribed from. - -.. code-block:: pycon - - >>> p.unsubscribe() - >>> p.punsubscribe('my-*') - >>> p.get_message() - {'channel': 'my-second-channel', 'data': 2L, 'pattern': None, 'type': 'unsubscribe'} - >>> p.get_message() - {'channel': 'my-first-channel', 'data': 1L, 'pattern': None, 'type': 'unsubscribe'} - >>> p.get_message() - {'channel': 'my-*', 'data': 0L, 'pattern': None, 'type': 'punsubscribe'} - -redis-py also allows you to register callback functions to handle published -messages. Message handlers take a single argument, the message, which is a -dictionary just like the examples above. 
To subscribe to a channel or pattern -with a message handler, pass the channel or pattern name as a keyword argument -with its value being the callback function. - -When a message is read on a channel or pattern with a message handler, the -message dictionary is created and passed to the message handler. In this case, -a `None` value is returned from get_message() since the message was already -handled. - -.. code-block:: pycon - - >>> def my_handler(message): - ... print 'MY HANDLER: ', message['data'] - >>> p.subscribe(**{'my-channel': my_handler}) - # read the subscribe confirmation message - >>> p.get_message() - {'pattern': None, 'type': 'subscribe', 'channel': 'my-channel', 'data': 1L} - >>> r.publish('my-channel', 'awesome data') - 1 - # for the message handler to work, we need to tell the instance to read data. - # this can be done in several ways (read more below). we'll just use - # the familiar get_message() function for now - >>> message = p.get_message() - MY HANDLER: awesome data - # note here that the my_handler callback printed the string above. - # `message` is None because the message was handled by our handler. - >>> print message - None - -If your application is not interested in the (sometimes noisy) -subscribe/unsubscribe confirmation messages, you can ignore them by passing -`ignore_subscribe_messages=True` to `r.pubsub()`. This will cause all -subscribe/unsubscribe messages to be read, but they won't bubble up to your -application. - -.. code-block:: pycon - - >>> p = r.pubsub(ignore_subscribe_messages=True) - >>> p.subscribe('my-channel') - >>> p.get_message() # hides the subscribe message and returns None - >>> r.publish('my-channel', 'my data') - 1 - >>> p.get_message() - {'channel': 'my-channel', 'data': 'my data', 'pattern': None, 'type': 'message'} - -There are three different strategies for reading messages. - -The examples above have been using `pubsub.get_message()`. 
Behind the scenes, -`get_message()` uses the system's 'select' module to quickly poll the -connection's socket. If there's data available to be read, `get_message()` will -read it, format the message and return it or pass it to a message handler. If -there's no data to be read, `get_message()` will immediately return None. This -makes it trivial to integrate into an existing event loop inside your -application. - -.. code-block:: pycon - - >>> while True: - >>> message = p.get_message() - >>> if message: - >>> # do something with the message - >>> time.sleep(0.001) # be nice to the system :) - -Older versions of redis-py only read messages with `pubsub.listen()`. listen() -is a generator that blocks until a message is available. If your application -doesn't need to do anything else but receive and act on messages received from -redis, listen() is an easy way to get up and running. - -.. code-block:: pycon - - >>> for message in p.listen(): - ... # do something with the message - -The third option runs an event loop in a separate thread. -`pubsub.run_in_thread()` creates a new thread and starts the event loop. The -thread object is returned to the caller of `run_in_thread()`. The caller can -use the `thread.stop()` method to shut down the event loop and thread. Behind -the scenes, this is simply a wrapper around `get_message()` that runs in a -separate thread, essentially creating a tiny non-blocking event loop for you. -`run_in_thread()` takes an optional `sleep_time` argument. If specified, the -event loop will call `time.sleep()` with the value in each iteration of the -loop. - -Note: Since we're running in a separate thread, there's no way to handle -messages that aren't automatically handled with registered message handlers. -Therefore, redis-py prevents you from calling `run_in_thread()` if you're -subscribed to patterns or channels that don't have message handlers attached. - -.. 
code-block:: pycon - - >>> p.subscribe(**{'my-channel': my_handler}) - >>> thread = p.run_in_thread(sleep_time=0.001) - # the event loop is now running in the background processing messages - # when it's time to shut it down... - >>> thread.stop() - -A PubSub object adheres to the same encoding semantics as the client instance -it was created from. Any channel or pattern that's unicode will be encoded -using the `charset` specified on the client before being sent to Redis. If the -client's `decode_responses` flag is set to False (the default), the -'channel', 'pattern' and 'data' values in message dictionaries will be byte -strings (str on Python 2, bytes on Python 3). If the client's -`decode_responses` is True, then the 'channel', 'pattern' and 'data' values -will be automatically decoded to unicode strings using the client's `charset`. - -PubSub objects remember what channels and patterns they are subscribed to. In -the event of a disconnection such as a network error or timeout, the -PubSub object will re-subscribe to all prior channels and patterns when -reconnecting. Messages that were published while the client was disconnected -cannot be delivered. When you're finished with a PubSub object, call its -`.close()` method to shutdown the connection. - -.. code-block:: pycon - - >>> p = r.pubsub() - >>> ... - >>> p.close() - -LUA Scripting -^^^^^^^^^^^^^ - -redis-py supports the EVAL, EVALSHA, and SCRIPT commands. However, there are -a number of edge cases that make these commands tedious to use in real world -scenarios. Therefore, redis-py exposes a Script object that makes scripting -much easier to use. - -To create a Script instance, use the `register_script` function on a client -instance passing the LUA code as the first argument. `register_script` returns -a Script instance that you can use throughout your code. - -The following trivial LUA script accepts two parameters: the name of a key and -a multiplier value. 
The script fetches the value stored in the key, multiplies -it with the multiplier value and returns the result. - -.. code-block:: pycon - - >>> r = redis.StrictRedis() - >>> lua = """ - ... local value = redis.call('GET', KEYS[1]) - ... value = tonumber(value) - ... return value * ARGV[1]""" - >>> multiply = r.register_script(lua) - -`multiply` is now a Script instance that is invoked by calling it like a -function. Script instances accept the following optional arguments: - -* **keys**: A list of key names that the script will access. This becomes the - KEYS list in LUA. -* **args**: A list of argument values. This becomes the ARGV list in LUA. -* **client**: A redis-py Client or Pipeline instance that will invoke the - script. If client isn't specified, the client that initially - created the Script instance (the one that `register_script` was - invoked from) will be used. - -Continuing the example from above: - -.. code-block:: pycon - - >>> r.set('foo', 2) - >>> multiply(keys=['foo'], args=[5]) - 10 - -The value of key 'foo' is set to 2. When multiply is invoked, the 'foo' key is -passed to the script along with the multiplier value of 5. LUA executes the -script and returns the result, 10. - -Script instances can be executed using a different client instance, even one -that points to a completely different Redis server. - -.. code-block:: pycon - - >>> r2 = redis.StrictRedis('redis2.example.com') - >>> r2.set('foo', 3) - >>> multiply(keys=['foo'], args=[5], client=r2) - 15 - -The Script object ensures that the LUA script is loaded into Redis's script -cache. In the event of a NOSCRIPT error, it will load the script and retry -executing it. - -Script objects can also be used in pipelines. The pipeline instance should be -passed as the client argument when calling the script. Care is taken to ensure -that the script is registered in Redis's script cache just prior to pipeline -execution. - -.. 
code-block:: pycon - - >>> pipe = r.pipeline() - >>> pipe.set('foo', 5) - >>> multiply(keys=['foo'], args=[5], client=pipe) - >>> pipe.execute() - [True, 25] - -Sentinel support -^^^^^^^^^^^^^^^^ - -redis-py can be used together with `Redis Sentinel <http://redis.io/topics/sentinel>`_ -to discover Redis nodes. You need to have at least one Sentinel daemon running -in order to use redis-py's Sentinel support. - -Connecting redis-py to the Sentinel instance(s) is easy. You can use a -Sentinel connection to discover the master and slaves network addresses: - -.. code-block:: pycon - - >>> from redis.sentinel import Sentinel - >>> sentinel = Sentinel([('localhost', 26379)], socket_timeout=0.1) - >>> sentinel.discover_master('mymaster') - ('127.0.0.1', 6379) - >>> sentinel.discover_slaves('mymaster') - [('127.0.0.1', 6380)] - -You can also create Redis client connections from a Sentinel instance. You can -connect to either the master (for write operations) or a slave (for read-only -operations). - -.. code-block:: pycon - - >>> master = sentinel.master_for('mymaster', socket_timeout=0.1) - >>> slave = sentinel.slave_for('mymaster', socket_timeout=0.1) - >>> master.set('foo', 'bar') - >>> slave.get('foo') - 'bar' - -The master and slave objects are normal StrictRedis instances with their -connection pool bound to the Sentinel instance. When a Sentinel backed client -attempts to establish a connection, it first queries the Sentinel servers to -determine an appropriate host to connect to. If no server is found, -a MasterNotFoundError or SlaveNotFoundError is raised. Both exceptions are -subclasses of ConnectionError. - -When trying to connect to a slave client, the Sentinel connection pool will -iterate over the list of slaves until it finds one that can be connected to. -If no slaves can be connected to, a connection will be established with the -master. - -See `Guidelines for Redis clients with support for Redis Sentinel -<http://redis.io/topics/sentinel-clients>`_ to learn more about Redis Sentinel. 
- -Scan Iterators -^^^^^^^^^^^^^^ - -The *SCAN commands introduced in Redis 2.8 can be cumbersome to use. While -these commands are fully supported, redis-py also exposes the following methods -that return Python iterators for convenience: `scan_iter`, `hscan_iter`, -`sscan_iter` and `zscan_iter`. - -.. code-block:: pycon - - >>> for key, value in (('A', '1'), ('B', '2'), ('C', '3')): - ... r.set(key, value) - >>> for key in r.scan_iter(): - ... print key, r.get(key) - A 1 - B 2 - C 3 - -Author -^^^^^^ - -redis-py is developed and maintained by Andy McCurdy (sedrik@gmail.com). -It can be found here: http://github.com/andymccurdy/redis-py - -Special thanks to: - -* Ludovico Magnocavallo, author of the original Python Redis client, from - which some of the socket code is still used. -* Alexander Solovyov for ideas on the generic response callback system. -* Paul Hubbard for initial packaging support. - diff --git a/awx/lib/site-packages/redis/redis/__init__.py b/awx/lib/site-packages/redis/__init__.py similarity index 100% rename from awx/lib/site-packages/redis/redis/__init__.py rename to awx/lib/site-packages/redis/__init__.py diff --git a/awx/lib/site-packages/redis/redis/_compat.py b/awx/lib/site-packages/redis/_compat.py similarity index 100% rename from awx/lib/site-packages/redis/redis/_compat.py rename to awx/lib/site-packages/redis/_compat.py diff --git a/awx/lib/site-packages/redis/redis/client.py b/awx/lib/site-packages/redis/client.py similarity index 100% rename from awx/lib/site-packages/redis/redis/client.py rename to awx/lib/site-packages/redis/client.py diff --git a/awx/lib/site-packages/redis/redis/connection.py b/awx/lib/site-packages/redis/connection.py similarity index 100% rename from awx/lib/site-packages/redis/redis/connection.py rename to awx/lib/site-packages/redis/connection.py diff --git a/awx/lib/site-packages/redis/redis/exceptions.py b/awx/lib/site-packages/redis/exceptions.py similarity index 100% rename from 
awx/lib/site-packages/redis/redis/exceptions.py rename to awx/lib/site-packages/redis/exceptions.py diff --git a/awx/lib/site-packages/redis/redis/lock.py b/awx/lib/site-packages/redis/lock.py similarity index 100% rename from awx/lib/site-packages/redis/redis/lock.py rename to awx/lib/site-packages/redis/lock.py diff --git a/awx/lib/site-packages/redis/redis/sentinel.py b/awx/lib/site-packages/redis/sentinel.py similarity index 100% rename from awx/lib/site-packages/redis/redis/sentinel.py rename to awx/lib/site-packages/redis/sentinel.py diff --git a/awx/lib/site-packages/redis/setup.cfg b/awx/lib/site-packages/redis/setup.cfg deleted file mode 100644 index 861a9f5542..0000000000 --- a/awx/lib/site-packages/redis/setup.cfg +++ /dev/null @@ -1,5 +0,0 @@ -[egg_info] -tag_build = -tag_date = 0 -tag_svn_revision = 0 - diff --git a/awx/lib/site-packages/redis/setup.py b/awx/lib/site-packages/redis/setup.py deleted file mode 100644 index 67706f15ab..0000000000 --- a/awx/lib/site-packages/redis/setup.py +++ /dev/null @@ -1,61 +0,0 @@ -#!/usr/bin/env python -import os -import sys - -from redis import __version__ - -try: - from setuptools import setup - from setuptools.command.test import test as TestCommand - - class PyTest(TestCommand): - def finalize_options(self): - TestCommand.finalize_options(self) - self.test_args = [] - self.test_suite = True - - def run_tests(self): - # import here, because outside the eggs aren't loaded - import pytest - errno = pytest.main(self.test_args) - sys.exit(errno) - -except ImportError: - - from distutils.core import setup - PyTest = lambda x: x - -f = open(os.path.join(os.path.dirname(__file__), 'README.rst')) -long_description = f.read() -f.close() - -setup( - name='redis', - version=__version__, - description='Python client for Redis key-value store', - long_description=long_description, - url='http://github.com/andymccurdy/redis-py', - author='Andy McCurdy', - author_email='sedrik@gmail.com', - maintainer='Andy McCurdy', - 
maintainer_email='sedrik@gmail.com', - keywords=['Redis', 'key-value store'], - license='MIT', - packages=['redis'], - tests_require=['pytest>=2.5.0'], - cmdclass={'test': PyTest}, - classifiers=[ - 'Development Status :: 5 - Production/Stable', - 'Environment :: Console', - 'Intended Audience :: Developers', - 'License :: OSI Approved :: MIT License', - 'Operating System :: OS Independent', - 'Programming Language :: Python', - 'Programming Language :: Python :: 2.6', - 'Programming Language :: Python :: 2.7', - 'Programming Language :: Python :: 3', - 'Programming Language :: Python :: 3.2', - 'Programming Language :: Python :: 3.3', - 'Programming Language :: Python :: 3.4', - ] -) diff --git a/awx/lib/site-packages/redis/tests/__init__.py b/awx/lib/site-packages/redis/tests/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/awx/lib/site-packages/redis/tests/conftest.py b/awx/lib/site-packages/redis/tests/conftest.py deleted file mode 100644 index bd0116bc5e..0000000000 --- a/awx/lib/site-packages/redis/tests/conftest.py +++ /dev/null @@ -1,46 +0,0 @@ -import pytest -import redis - -from distutils.version import StrictVersion - - -_REDIS_VERSIONS = {} - - -def get_version(**kwargs): - params = {'host': 'localhost', 'port': 6379, 'db': 9} - params.update(kwargs) - key = '%s:%s' % (params['host'], params['port']) - if key not in _REDIS_VERSIONS: - client = redis.Redis(**params) - _REDIS_VERSIONS[key] = client.info()['redis_version'] - client.connection_pool.disconnect() - return _REDIS_VERSIONS[key] - - -def _get_client(cls, request=None, **kwargs): - params = {'host': 'localhost', 'port': 6379, 'db': 9} - params.update(kwargs) - client = cls(**params) - client.flushdb() - if request: - def teardown(): - client.flushdb() - client.connection_pool.disconnect() - request.addfinalizer(teardown) - return client - - -def skip_if_server_version_lt(min_version): - check = StrictVersion(get_version()) < StrictVersion(min_version) - return 
pytest.mark.skipif(check, reason="") - - -@pytest.fixture() -def r(request, **kwargs): - return _get_client(redis.Redis, request, **kwargs) - - -@pytest.fixture() -def sr(request, **kwargs): - return _get_client(redis.StrictRedis, request, **kwargs) diff --git a/awx/lib/site-packages/redis/tests/test_commands.py b/awx/lib/site-packages/redis/tests/test_commands.py deleted file mode 100644 index 286ea04b25..0000000000 --- a/awx/lib/site-packages/redis/tests/test_commands.py +++ /dev/null @@ -1,1441 +0,0 @@ -from __future__ import with_statement -import binascii -import datetime -import pytest -import redis -import time - -from redis._compat import (unichr, u, b, ascii_letters, iteritems, iterkeys, - itervalues) -from redis.client import parse_info -from redis import exceptions - -from .conftest import skip_if_server_version_lt - - -@pytest.fixture() -def slowlog(request, r): - current_config = r.config_get() - old_slower_than_value = current_config['slowlog-log-slower-than'] - old_max_legnth_value = current_config['slowlog-max-len'] - - def cleanup(): - r.config_set('slowlog-log-slower-than', old_slower_than_value) - r.config_set('slowlog-max-len', old_max_legnth_value) - request.addfinalizer(cleanup) - - r.config_set('slowlog-log-slower-than', 0) - r.config_set('slowlog-max-len', 128) - - -def redis_server_time(client): - seconds, milliseconds = client.time() - timestamp = float('%s.%s' % (seconds, milliseconds)) - return datetime.datetime.fromtimestamp(timestamp) - - -# RESPONSE CALLBACKS -class TestResponseCallbacks(object): - "Tests for the response callback system" - - def test_response_callbacks(self, r): - assert r.response_callbacks == redis.Redis.RESPONSE_CALLBACKS - assert id(r.response_callbacks) != id(redis.Redis.RESPONSE_CALLBACKS) - r.set_response_callback('GET', lambda x: 'static') - r['a'] = 'foo' - assert r['a'] == 'static' - - -class TestRedisCommands(object): - - def test_command_on_invalid_key_type(self, r): - r.lpush('a', '1') - with 
pytest.raises(redis.ResponseError): - r['a'] - - # SERVER INFORMATION - def test_client_list(self, r): - clients = r.client_list() - assert isinstance(clients[0], dict) - assert 'addr' in clients[0] - - @skip_if_server_version_lt('2.6.9') - def test_client_getname(self, r): - assert r.client_getname() is None - - @skip_if_server_version_lt('2.6.9') - def test_client_setname(self, r): - assert r.client_setname('redis_py_test') - assert r.client_getname() == 'redis_py_test' - - def test_config_get(self, r): - data = r.config_get() - assert 'maxmemory' in data - assert data['maxmemory'].isdigit() - - def test_config_resetstat(self, r): - r.ping() - prior_commands_processed = int(r.info()['total_commands_processed']) - assert prior_commands_processed >= 1 - r.config_resetstat() - reset_commands_processed = int(r.info()['total_commands_processed']) - assert reset_commands_processed < prior_commands_processed - - def test_config_set(self, r): - data = r.config_get() - rdbname = data['dbfilename'] - try: - assert r.config_set('dbfilename', 'redis_py_test.rdb') - assert r.config_get()['dbfilename'] == 'redis_py_test.rdb' - finally: - assert r.config_set('dbfilename', rdbname) - - def test_dbsize(self, r): - r['a'] = 'foo' - r['b'] = 'bar' - assert r.dbsize() == 2 - - def test_echo(self, r): - assert r.echo('foo bar') == b('foo bar') - - def test_info(self, r): - r['a'] = 'foo' - r['b'] = 'bar' - info = r.info() - assert isinstance(info, dict) - assert info['db9']['keys'] == 2 - - def test_lastsave(self, r): - assert isinstance(r.lastsave(), datetime.datetime) - - def test_object(self, r): - r['a'] = 'foo' - assert isinstance(r.object('refcount', 'a'), int) - assert isinstance(r.object('idletime', 'a'), int) - assert r.object('encoding', 'a') == b('raw') - assert r.object('idletime', 'invalid-key') is None - - def test_ping(self, r): - assert r.ping() - - def test_slowlog_get(self, r, slowlog): - assert r.slowlog_reset() - unicode_string = unichr(3456) + u('abcd') + 
unichr(3421) - r.get(unicode_string) - slowlog = r.slowlog_get() - assert isinstance(slowlog, list) - commands = [log['command'] for log in slowlog] - - get_command = b(' ').join((b('GET'), unicode_string.encode('utf-8'))) - assert get_command in commands - assert b('SLOWLOG RESET') in commands - # the order should be ['GET ', 'SLOWLOG RESET'], - # but if other clients are executing commands at the same time, there - # could be commands, before, between, or after, so just check that - # the two we care about are in the appropriate order. - assert commands.index(get_command) < commands.index(b('SLOWLOG RESET')) - - # make sure other attributes are typed correctly - assert isinstance(slowlog[0]['start_time'], int) - assert isinstance(slowlog[0]['duration'], int) - - def test_slowlog_get_limit(self, r, slowlog): - assert r.slowlog_reset() - r.get('foo') - r.get('bar') - slowlog = r.slowlog_get(1) - assert isinstance(slowlog, list) - commands = [log['command'] for log in slowlog] - assert b('GET foo') not in commands - assert b('GET bar') in commands - - def test_slowlog_length(self, r, slowlog): - r.get('foo') - assert isinstance(r.slowlog_len(), int) - - @skip_if_server_version_lt('2.6.0') - def test_time(self, r): - t = r.time() - assert len(t) == 2 - assert isinstance(t[0], int) - assert isinstance(t[1], int) - - # BASIC KEY COMMANDS - def test_append(self, r): - assert r.append('a', 'a1') == 2 - assert r['a'] == b('a1') - assert r.append('a', 'a2') == 4 - assert r['a'] == b('a1a2') - - @skip_if_server_version_lt('2.6.0') - def test_bitcount(self, r): - r.setbit('a', 5, True) - assert r.bitcount('a') == 1 - r.setbit('a', 6, True) - assert r.bitcount('a') == 2 - r.setbit('a', 5, False) - assert r.bitcount('a') == 1 - r.setbit('a', 9, True) - r.setbit('a', 17, True) - r.setbit('a', 25, True) - r.setbit('a', 33, True) - assert r.bitcount('a') == 5 - assert r.bitcount('a', 0, -1) == 5 - assert r.bitcount('a', 2, 3) == 2 - assert r.bitcount('a', 2, -1) == 3 - assert 
r.bitcount('a', -2, -1) == 2 - assert r.bitcount('a', 1, 1) == 1 - - @skip_if_server_version_lt('2.6.0') - def test_bitop_not_empty_string(self, r): - r['a'] = '' - r.bitop('not', 'r', 'a') - assert r.get('r') is None - - @skip_if_server_version_lt('2.6.0') - def test_bitop_not(self, r): - test_str = b('\xAA\x00\xFF\x55') - correct = ~0xAA00FF55 & 0xFFFFFFFF - r['a'] = test_str - r.bitop('not', 'r', 'a') - assert int(binascii.hexlify(r['r']), 16) == correct - - @skip_if_server_version_lt('2.6.0') - def test_bitop_not_in_place(self, r): - test_str = b('\xAA\x00\xFF\x55') - correct = ~0xAA00FF55 & 0xFFFFFFFF - r['a'] = test_str - r.bitop('not', 'a', 'a') - assert int(binascii.hexlify(r['a']), 16) == correct - - @skip_if_server_version_lt('2.6.0') - def test_bitop_single_string(self, r): - test_str = b('\x01\x02\xFF') - r['a'] = test_str - r.bitop('and', 'res1', 'a') - r.bitop('or', 'res2', 'a') - r.bitop('xor', 'res3', 'a') - assert r['res1'] == test_str - assert r['res2'] == test_str - assert r['res3'] == test_str - - @skip_if_server_version_lt('2.6.0') - def test_bitop_string_operands(self, r): - r['a'] = b('\x01\x02\xFF\xFF') - r['b'] = b('\x01\x02\xFF') - r.bitop('and', 'res1', 'a', 'b') - r.bitop('or', 'res2', 'a', 'b') - r.bitop('xor', 'res3', 'a', 'b') - assert int(binascii.hexlify(r['res1']), 16) == 0x0102FF00 - assert int(binascii.hexlify(r['res2']), 16) == 0x0102FFFF - assert int(binascii.hexlify(r['res3']), 16) == 0x000000FF - - @skip_if_server_version_lt('2.8.7') - def test_bitpos(self, r): - key = 'key:bitpos' - r.set(key, b('\xff\xf0\x00')) - assert r.bitpos(key, 0) == 12 - assert r.bitpos(key, 0, 2, -1) == 16 - assert r.bitpos(key, 0, -2, -1) == 12 - r.set(key, b('\x00\xff\xf0')) - assert r.bitpos(key, 1, 0) == 8 - assert r.bitpos(key, 1, 1) == 8 - r.set(key, b('\x00\x00\x00')) - assert r.bitpos(key, 1) == -1 - - @skip_if_server_version_lt('2.8.7') - def test_bitpos_wrong_arguments(self, r): - key = 'key:bitpos:wrong:args' - r.set(key, 
b('\xff\xf0\x00')) - with pytest.raises(exceptions.RedisError): - r.bitpos(key, 0, end=1) == 12 - with pytest.raises(exceptions.RedisError): - r.bitpos(key, 7) == 12 - - def test_decr(self, r): - assert r.decr('a') == -1 - assert r['a'] == b('-1') - assert r.decr('a') == -2 - assert r['a'] == b('-2') - assert r.decr('a', amount=5) == -7 - assert r['a'] == b('-7') - - def test_delete(self, r): - assert r.delete('a') == 0 - r['a'] = 'foo' - assert r.delete('a') == 1 - - def test_delete_with_multiple_keys(self, r): - r['a'] = 'foo' - r['b'] = 'bar' - assert r.delete('a', 'b') == 2 - assert r.get('a') is None - assert r.get('b') is None - - def test_delitem(self, r): - r['a'] = 'foo' - del r['a'] - assert r.get('a') is None - - @skip_if_server_version_lt('2.6.0') - def test_dump_and_restore(self, r): - r['a'] = 'foo' - dumped = r.dump('a') - del r['a'] - r.restore('a', 0, dumped) - assert r['a'] == b('foo') - - def test_exists(self, r): - assert not r.exists('a') - r['a'] = 'foo' - assert r.exists('a') - - def test_exists_contains(self, r): - assert 'a' not in r - r['a'] = 'foo' - assert 'a' in r - - def test_expire(self, r): - assert not r.expire('a', 10) - r['a'] = 'foo' - assert r.expire('a', 10) - assert 0 < r.ttl('a') <= 10 - assert r.persist('a') - assert not r.ttl('a') - - def test_expireat_datetime(self, r): - expire_at = redis_server_time(r) + datetime.timedelta(minutes=1) - r['a'] = 'foo' - assert r.expireat('a', expire_at) - assert 0 < r.ttl('a') <= 61 - - def test_expireat_no_key(self, r): - expire_at = redis_server_time(r) + datetime.timedelta(minutes=1) - assert not r.expireat('a', expire_at) - - def test_expireat_unixtime(self, r): - expire_at = redis_server_time(r) + datetime.timedelta(minutes=1) - r['a'] = 'foo' - expire_at_seconds = int(time.mktime(expire_at.timetuple())) - assert r.expireat('a', expire_at_seconds) - assert 0 < r.ttl('a') <= 61 - - def test_get_and_set(self, r): - # get and set can't be tested independently of each other - assert 
r.get('a') is None - byte_string = b('value') - integer = 5 - unicode_string = unichr(3456) + u('abcd') + unichr(3421) - assert r.set('byte_string', byte_string) - assert r.set('integer', 5) - assert r.set('unicode_string', unicode_string) - assert r.get('byte_string') == byte_string - assert r.get('integer') == b(str(integer)) - assert r.get('unicode_string').decode('utf-8') == unicode_string - - def test_getitem_and_setitem(self, r): - r['a'] = 'bar' - assert r['a'] == b('bar') - - def test_getitem_raises_keyerror_for_missing_key(self, r): - with pytest.raises(KeyError): - r['a'] - - def test_get_set_bit(self, r): - # no value - assert not r.getbit('a', 5) - # set bit 5 - assert not r.setbit('a', 5, True) - assert r.getbit('a', 5) - # unset bit 4 - assert not r.setbit('a', 4, False) - assert not r.getbit('a', 4) - # set bit 4 - assert not r.setbit('a', 4, True) - assert r.getbit('a', 4) - # set bit 5 again - assert r.setbit('a', 5, True) - assert r.getbit('a', 5) - - def test_getrange(self, r): - r['a'] = 'foo' - assert r.getrange('a', 0, 0) == b('f') - assert r.getrange('a', 0, 2) == b('foo') - assert r.getrange('a', 3, 4) == b('') - - def test_getset(self, r): - assert r.getset('a', 'foo') is None - assert r.getset('a', 'bar') == b('foo') - assert r.get('a') == b('bar') - - def test_incr(self, r): - assert r.incr('a') == 1 - assert r['a'] == b('1') - assert r.incr('a') == 2 - assert r['a'] == b('2') - assert r.incr('a', amount=5) == 7 - assert r['a'] == b('7') - - def test_incrby(self, r): - assert r.incrby('a') == 1 - assert r.incrby('a', 4) == 5 - assert r['a'] == b('5') - - @skip_if_server_version_lt('2.6.0') - def test_incrbyfloat(self, r): - assert r.incrbyfloat('a') == 1.0 - assert r['a'] == b('1') - assert r.incrbyfloat('a', 1.1) == 2.1 - assert float(r['a']) == float(2.1) - - def test_keys(self, r): - assert r.keys() == [] - keys_with_underscores = set([b('test_a'), b('test_b')]) - keys = keys_with_underscores.union(set([b('testc')])) - for key in keys: 
- r[key] = 1 - assert set(r.keys(pattern='test_*')) == keys_with_underscores - assert set(r.keys(pattern='test*')) == keys - - def test_mget(self, r): - assert r.mget(['a', 'b']) == [None, None] - r['a'] = '1' - r['b'] = '2' - r['c'] = '3' - assert r.mget('a', 'other', 'b', 'c') == [b('1'), None, b('2'), b('3')] - - def test_mset(self, r): - d = {'a': b('1'), 'b': b('2'), 'c': b('3')} - assert r.mset(d) - for k, v in iteritems(d): - assert r[k] == v - - def test_mset_kwargs(self, r): - d = {'a': b('1'), 'b': b('2'), 'c': b('3')} - assert r.mset(**d) - for k, v in iteritems(d): - assert r[k] == v - - def test_msetnx(self, r): - d = {'a': b('1'), 'b': b('2'), 'c': b('3')} - assert r.msetnx(d) - d2 = {'a': b('x'), 'd': b('4')} - assert not r.msetnx(d2) - for k, v in iteritems(d): - assert r[k] == v - assert r.get('d') is None - - def test_msetnx_kwargs(self, r): - d = {'a': b('1'), 'b': b('2'), 'c': b('3')} - assert r.msetnx(**d) - d2 = {'a': b('x'), 'd': b('4')} - assert not r.msetnx(**d2) - for k, v in iteritems(d): - assert r[k] == v - assert r.get('d') is None - - @skip_if_server_version_lt('2.6.0') - def test_pexpire(self, r): - assert not r.pexpire('a', 60000) - r['a'] = 'foo' - assert r.pexpire('a', 60000) - assert 0 < r.pttl('a') <= 60000 - assert r.persist('a') - assert r.pttl('a') is None - - @skip_if_server_version_lt('2.6.0') - def test_pexpireat_datetime(self, r): - expire_at = redis_server_time(r) + datetime.timedelta(minutes=1) - r['a'] = 'foo' - assert r.pexpireat('a', expire_at) - assert 0 < r.pttl('a') <= 61000 - - @skip_if_server_version_lt('2.6.0') - def test_pexpireat_no_key(self, r): - expire_at = redis_server_time(r) + datetime.timedelta(minutes=1) - assert not r.pexpireat('a', expire_at) - - @skip_if_server_version_lt('2.6.0') - def test_pexpireat_unixtime(self, r): - expire_at = redis_server_time(r) + datetime.timedelta(minutes=1) - r['a'] = 'foo' - expire_at_seconds = int(time.mktime(expire_at.timetuple())) * 1000 - assert r.pexpireat('a', 
expire_at_seconds) - assert 0 < r.pttl('a') <= 61000 - - @skip_if_server_version_lt('2.6.0') - def test_psetex(self, r): - assert r.psetex('a', 1000, 'value') - assert r['a'] == b('value') - assert 0 < r.pttl('a') <= 1000 - - @skip_if_server_version_lt('2.6.0') - def test_psetex_timedelta(self, r): - expire_at = datetime.timedelta(milliseconds=1000) - assert r.psetex('a', expire_at, 'value') - assert r['a'] == b('value') - assert 0 < r.pttl('a') <= 1000 - - def test_randomkey(self, r): - assert r.randomkey() is None - for key in ('a', 'b', 'c'): - r[key] = 1 - assert r.randomkey() in (b('a'), b('b'), b('c')) - - def test_rename(self, r): - r['a'] = '1' - assert r.rename('a', 'b') - assert r.get('a') is None - assert r['b'] == b('1') - - def test_renamenx(self, r): - r['a'] = '1' - r['b'] = '2' - assert not r.renamenx('a', 'b') - assert r['a'] == b('1') - assert r['b'] == b('2') - - @skip_if_server_version_lt('2.6.0') - def test_set_nx(self, r): - assert r.set('a', '1', nx=True) - assert not r.set('a', '2', nx=True) - assert r['a'] == b('1') - - @skip_if_server_version_lt('2.6.0') - def test_set_xx(self, r): - assert not r.set('a', '1', xx=True) - assert r.get('a') is None - r['a'] = 'bar' - assert r.set('a', '2', xx=True) - assert r.get('a') == b('2') - - @skip_if_server_version_lt('2.6.0') - def test_set_px(self, r): - assert r.set('a', '1', px=10000) - assert r['a'] == b('1') - assert 0 < r.pttl('a') <= 10000 - assert 0 < r.ttl('a') <= 10 - - @skip_if_server_version_lt('2.6.0') - def test_set_px_timedelta(self, r): - expire_at = datetime.timedelta(milliseconds=1000) - assert r.set('a', '1', px=expire_at) - assert 0 < r.pttl('a') <= 1000 - assert 0 < r.ttl('a') <= 1 - - @skip_if_server_version_lt('2.6.0') - def test_set_ex(self, r): - assert r.set('a', '1', ex=10) - assert 0 < r.ttl('a') <= 10 - - @skip_if_server_version_lt('2.6.0') - def test_set_ex_timedelta(self, r): - expire_at = datetime.timedelta(seconds=60) - assert r.set('a', '1', ex=expire_at) - assert 0 
< r.ttl('a') <= 60 - - @skip_if_server_version_lt('2.6.0') - def test_set_multipleoptions(self, r): - r['a'] = 'val' - assert r.set('a', '1', xx=True, px=10000) - assert 0 < r.ttl('a') <= 10 - - def test_setex(self, r): - assert r.setex('a', '1', 60) - assert r['a'] == b('1') - assert 0 < r.ttl('a') <= 60 - - def test_setnx(self, r): - assert r.setnx('a', '1') - assert r['a'] == b('1') - assert not r.setnx('a', '2') - assert r['a'] == b('1') - - def test_setrange(self, r): - assert r.setrange('a', 5, 'foo') == 8 - assert r['a'] == b('\0\0\0\0\0foo') - r['a'] = 'abcdefghijh' - assert r.setrange('a', 6, '12345') == 11 - assert r['a'] == b('abcdef12345') - - def test_strlen(self, r): - r['a'] = 'foo' - assert r.strlen('a') == 3 - - def test_substr(self, r): - r['a'] = '0123456789' - assert r.substr('a', 0) == b('0123456789') - assert r.substr('a', 2) == b('23456789') - assert r.substr('a', 3, 5) == b('345') - assert r.substr('a', 3, -2) == b('345678') - - def test_type(self, r): - assert r.type('a') == b('none') - r['a'] = '1' - assert r.type('a') == b('string') - del r['a'] - r.lpush('a', '1') - assert r.type('a') == b('list') - del r['a'] - r.sadd('a', '1') - assert r.type('a') == b('set') - del r['a'] - r.zadd('a', **{'1': 1}) - assert r.type('a') == b('zset') - - # LIST COMMANDS - def test_blpop(self, r): - r.rpush('a', '1', '2') - r.rpush('b', '3', '4') - assert r.blpop(['b', 'a'], timeout=1) == (b('b'), b('3')) - assert r.blpop(['b', 'a'], timeout=1) == (b('b'), b('4')) - assert r.blpop(['b', 'a'], timeout=1) == (b('a'), b('1')) - assert r.blpop(['b', 'a'], timeout=1) == (b('a'), b('2')) - assert r.blpop(['b', 'a'], timeout=1) is None - r.rpush('c', '1') - assert r.blpop('c', timeout=1) == (b('c'), b('1')) - - def test_brpop(self, r): - r.rpush('a', '1', '2') - r.rpush('b', '3', '4') - assert r.brpop(['b', 'a'], timeout=1) == (b('b'), b('4')) - assert r.brpop(['b', 'a'], timeout=1) == (b('b'), b('3')) - assert r.brpop(['b', 'a'], timeout=1) == (b('a'), b('2')) - 
assert r.brpop(['b', 'a'], timeout=1) == (b('a'), b('1')) - assert r.brpop(['b', 'a'], timeout=1) is None - r.rpush('c', '1') - assert r.brpop('c', timeout=1) == (b('c'), b('1')) - - def test_brpoplpush(self, r): - r.rpush('a', '1', '2') - r.rpush('b', '3', '4') - assert r.brpoplpush('a', 'b') == b('2') - assert r.brpoplpush('a', 'b') == b('1') - assert r.brpoplpush('a', 'b', timeout=1) is None - assert r.lrange('a', 0, -1) == [] - assert r.lrange('b', 0, -1) == [b('1'), b('2'), b('3'), b('4')] - - def test_brpoplpush_empty_string(self, r): - r.rpush('a', '') - assert r.brpoplpush('a', 'b') == b('') - - def test_lindex(self, r): - r.rpush('a', '1', '2', '3') - assert r.lindex('a', '0') == b('1') - assert r.lindex('a', '1') == b('2') - assert r.lindex('a', '2') == b('3') - - def test_linsert(self, r): - r.rpush('a', '1', '2', '3') - assert r.linsert('a', 'after', '2', '2.5') == 4 - assert r.lrange('a', 0, -1) == [b('1'), b('2'), b('2.5'), b('3')] - assert r.linsert('a', 'before', '2', '1.5') == 5 - assert r.lrange('a', 0, -1) == \ - [b('1'), b('1.5'), b('2'), b('2.5'), b('3')] - - def test_llen(self, r): - r.rpush('a', '1', '2', '3') - assert r.llen('a') == 3 - - def test_lpop(self, r): - r.rpush('a', '1', '2', '3') - assert r.lpop('a') == b('1') - assert r.lpop('a') == b('2') - assert r.lpop('a') == b('3') - assert r.lpop('a') is None - - def test_lpush(self, r): - assert r.lpush('a', '1') == 1 - assert r.lpush('a', '2') == 2 - assert r.lpush('a', '3', '4') == 4 - assert r.lrange('a', 0, -1) == [b('4'), b('3'), b('2'), b('1')] - - def test_lpushx(self, r): - assert r.lpushx('a', '1') == 0 - assert r.lrange('a', 0, -1) == [] - r.rpush('a', '1', '2', '3') - assert r.lpushx('a', '4') == 4 - assert r.lrange('a', 0, -1) == [b('4'), b('1'), b('2'), b('3')] - - def test_lrange(self, r): - r.rpush('a', '1', '2', '3', '4', '5') - assert r.lrange('a', 0, 2) == [b('1'), b('2'), b('3')] - assert r.lrange('a', 2, 10) == [b('3'), b('4'), b('5')] - assert r.lrange('a', 0, -1) == 
[b('1'), b('2'), b('3'), b('4'), b('5')] - - def test_lrem(self, r): - r.rpush('a', '1', '1', '1', '1') - assert r.lrem('a', '1', 1) == 1 - assert r.lrange('a', 0, -1) == [b('1'), b('1'), b('1')] - assert r.lrem('a', '1') == 3 - assert r.lrange('a', 0, -1) == [] - - def test_lset(self, r): - r.rpush('a', '1', '2', '3') - assert r.lrange('a', 0, -1) == [b('1'), b('2'), b('3')] - assert r.lset('a', 1, '4') - assert r.lrange('a', 0, 2) == [b('1'), b('4'), b('3')] - - def test_ltrim(self, r): - r.rpush('a', '1', '2', '3') - assert r.ltrim('a', 0, 1) - assert r.lrange('a', 0, -1) == [b('1'), b('2')] - - def test_rpop(self, r): - r.rpush('a', '1', '2', '3') - assert r.rpop('a') == b('3') - assert r.rpop('a') == b('2') - assert r.rpop('a') == b('1') - assert r.rpop('a') is None - - def test_rpoplpush(self, r): - r.rpush('a', 'a1', 'a2', 'a3') - r.rpush('b', 'b1', 'b2', 'b3') - assert r.rpoplpush('a', 'b') == b('a3') - assert r.lrange('a', 0, -1) == [b('a1'), b('a2')] - assert r.lrange('b', 0, -1) == [b('a3'), b('b1'), b('b2'), b('b3')] - - def test_rpush(self, r): - assert r.rpush('a', '1') == 1 - assert r.rpush('a', '2') == 2 - assert r.rpush('a', '3', '4') == 4 - assert r.lrange('a', 0, -1) == [b('1'), b('2'), b('3'), b('4')] - - def test_rpushx(self, r): - assert r.rpushx('a', 'b') == 0 - assert r.lrange('a', 0, -1) == [] - r.rpush('a', '1', '2', '3') - assert r.rpushx('a', '4') == 4 - assert r.lrange('a', 0, -1) == [b('1'), b('2'), b('3'), b('4')] - - # SCAN COMMANDS - @skip_if_server_version_lt('2.8.0') - def test_scan(self, r): - r.set('a', 1) - r.set('b', 2) - r.set('c', 3) - cursor, keys = r.scan() - assert cursor == 0 - assert set(keys) == set([b('a'), b('b'), b('c')]) - _, keys = r.scan(match='a') - assert set(keys) == set([b('a')]) - - @skip_if_server_version_lt('2.8.0') - def test_scan_iter(self, r): - r.set('a', 1) - r.set('b', 2) - r.set('c', 3) - keys = list(r.scan_iter()) - assert set(keys) == set([b('a'), b('b'), b('c')]) - keys = 
list(r.scan_iter(match='a')) - assert set(keys) == set([b('a')]) - - @skip_if_server_version_lt('2.8.0') - def test_sscan(self, r): - r.sadd('a', 1, 2, 3) - cursor, members = r.sscan('a') - assert cursor == 0 - assert set(members) == set([b('1'), b('2'), b('3')]) - _, members = r.sscan('a', match=b('1')) - assert set(members) == set([b('1')]) - - @skip_if_server_version_lt('2.8.0') - def test_sscan_iter(self, r): - r.sadd('a', 1, 2, 3) - members = list(r.sscan_iter('a')) - assert set(members) == set([b('1'), b('2'), b('3')]) - members = list(r.sscan_iter('a', match=b('1'))) - assert set(members) == set([b('1')]) - - @skip_if_server_version_lt('2.8.0') - def test_hscan(self, r): - r.hmset('a', {'a': 1, 'b': 2, 'c': 3}) - cursor, dic = r.hscan('a') - assert cursor == 0 - assert dic == {b('a'): b('1'), b('b'): b('2'), b('c'): b('3')} - _, dic = r.hscan('a', match='a') - assert dic == {b('a'): b('1')} - - @skip_if_server_version_lt('2.8.0') - def test_hscan_iter(self, r): - r.hmset('a', {'a': 1, 'b': 2, 'c': 3}) - dic = dict(r.hscan_iter('a')) - assert dic == {b('a'): b('1'), b('b'): b('2'), b('c'): b('3')} - dic = dict(r.hscan_iter('a', match='a')) - assert dic == {b('a'): b('1')} - - @skip_if_server_version_lt('2.8.0') - def test_zscan(self, r): - r.zadd('a', 'a', 1, 'b', 2, 'c', 3) - cursor, pairs = r.zscan('a') - assert cursor == 0 - assert set(pairs) == set([(b('a'), 1), (b('b'), 2), (b('c'), 3)]) - _, pairs = r.zscan('a', match='a') - assert set(pairs) == set([(b('a'), 1)]) - - @skip_if_server_version_lt('2.8.0') - def test_zscan_iter(self, r): - r.zadd('a', 'a', 1, 'b', 2, 'c', 3) - pairs = list(r.zscan_iter('a')) - assert set(pairs) == set([(b('a'), 1), (b('b'), 2), (b('c'), 3)]) - pairs = list(r.zscan_iter('a', match='a')) - assert set(pairs) == set([(b('a'), 1)]) - - # SET COMMANDS - def test_sadd(self, r): - members = set([b('1'), b('2'), b('3')]) - r.sadd('a', *members) - assert r.smembers('a') == members - - def test_scard(self, r): - r.sadd('a', '1', '2', 
'3') - assert r.scard('a') == 3 - - def test_sdiff(self, r): - r.sadd('a', '1', '2', '3') - assert r.sdiff('a', 'b') == set([b('1'), b('2'), b('3')]) - r.sadd('b', '2', '3') - assert r.sdiff('a', 'b') == set([b('1')]) - - def test_sdiffstore(self, r): - r.sadd('a', '1', '2', '3') - assert r.sdiffstore('c', 'a', 'b') == 3 - assert r.smembers('c') == set([b('1'), b('2'), b('3')]) - r.sadd('b', '2', '3') - assert r.sdiffstore('c', 'a', 'b') == 1 - assert r.smembers('c') == set([b('1')]) - - def test_sinter(self, r): - r.sadd('a', '1', '2', '3') - assert r.sinter('a', 'b') == set() - r.sadd('b', '2', '3') - assert r.sinter('a', 'b') == set([b('2'), b('3')]) - - def test_sinterstore(self, r): - r.sadd('a', '1', '2', '3') - assert r.sinterstore('c', 'a', 'b') == 0 - assert r.smembers('c') == set() - r.sadd('b', '2', '3') - assert r.sinterstore('c', 'a', 'b') == 2 - assert r.smembers('c') == set([b('2'), b('3')]) - - def test_sismember(self, r): - r.sadd('a', '1', '2', '3') - assert r.sismember('a', '1') - assert r.sismember('a', '2') - assert r.sismember('a', '3') - assert not r.sismember('a', '4') - - def test_smembers(self, r): - r.sadd('a', '1', '2', '3') - assert r.smembers('a') == set([b('1'), b('2'), b('3')]) - - def test_smove(self, r): - r.sadd('a', 'a1', 'a2') - r.sadd('b', 'b1', 'b2') - assert r.smove('a', 'b', 'a1') - assert r.smembers('a') == set([b('a2')]) - assert r.smembers('b') == set([b('b1'), b('b2'), b('a1')]) - - def test_spop(self, r): - s = [b('1'), b('2'), b('3')] - r.sadd('a', *s) - value = r.spop('a') - assert value in s - assert r.smembers('a') == set(s) - set([value]) - - def test_srandmember(self, r): - s = [b('1'), b('2'), b('3')] - r.sadd('a', *s) - assert r.srandmember('a') in s - - @skip_if_server_version_lt('2.6.0') - def test_srandmember_multi_value(self, r): - s = [b('1'), b('2'), b('3')] - r.sadd('a', *s) - randoms = r.srandmember('a', number=2) - assert len(randoms) == 2 - assert set(randoms).intersection(s) == set(randoms) - - def 
test_srem(self, r): - r.sadd('a', '1', '2', '3', '4') - assert r.srem('a', '5') == 0 - assert r.srem('a', '2', '4') == 2 - assert r.smembers('a') == set([b('1'), b('3')]) - - def test_sunion(self, r): - r.sadd('a', '1', '2') - r.sadd('b', '2', '3') - assert r.sunion('a', 'b') == set([b('1'), b('2'), b('3')]) - - def test_sunionstore(self, r): - r.sadd('a', '1', '2') - r.sadd('b', '2', '3') - assert r.sunionstore('c', 'a', 'b') == 3 - assert r.smembers('c') == set([b('1'), b('2'), b('3')]) - - # SORTED SET COMMANDS - def test_zadd(self, r): - r.zadd('a', a1=1, a2=2, a3=3) - assert r.zrange('a', 0, -1) == [b('a1'), b('a2'), b('a3')] - - def test_zcard(self, r): - r.zadd('a', a1=1, a2=2, a3=3) - assert r.zcard('a') == 3 - - def test_zcount(self, r): - r.zadd('a', a1=1, a2=2, a3=3) - assert r.zcount('a', '-inf', '+inf') == 3 - assert r.zcount('a', 1, 2) == 2 - assert r.zcount('a', 10, 20) == 0 - - def test_zincrby(self, r): - r.zadd('a', a1=1, a2=2, a3=3) - assert r.zincrby('a', 'a2') == 3.0 - assert r.zincrby('a', 'a3', amount=5) == 8.0 - assert r.zscore('a', 'a2') == 3.0 - assert r.zscore('a', 'a3') == 8.0 - - @skip_if_server_version_lt('2.8.9') - def test_zlexcount(self, r): - r.zadd('a', a=0, b=0, c=0, d=0, e=0, f=0, g=0) - assert r.zlexcount('a', '-', '+') == 7 - assert r.zlexcount('a', '[b', '[f') == 5 - - def test_zinterstore_sum(self, r): - r.zadd('a', a1=1, a2=1, a3=1) - r.zadd('b', a1=2, a2=2, a3=2) - r.zadd('c', a1=6, a3=5, a4=4) - assert r.zinterstore('d', ['a', 'b', 'c']) == 2 - assert r.zrange('d', 0, -1, withscores=True) == \ - [(b('a3'), 8), (b('a1'), 9)] - - def test_zinterstore_max(self, r): - r.zadd('a', a1=1, a2=1, a3=1) - r.zadd('b', a1=2, a2=2, a3=2) - r.zadd('c', a1=6, a3=5, a4=4) - assert r.zinterstore('d', ['a', 'b', 'c'], aggregate='MAX') == 2 - assert r.zrange('d', 0, -1, withscores=True) == \ - [(b('a3'), 5), (b('a1'), 6)] - - def test_zinterstore_min(self, r): - r.zadd('a', a1=1, a2=2, a3=3) - r.zadd('b', a1=2, a2=3, a3=5) - r.zadd('c', 
a1=6, a3=5, a4=4) - assert r.zinterstore('d', ['a', 'b', 'c'], aggregate='MIN') == 2 - assert r.zrange('d', 0, -1, withscores=True) == \ - [(b('a1'), 1), (b('a3'), 3)] - - def test_zinterstore_with_weight(self, r): - r.zadd('a', a1=1, a2=1, a3=1) - r.zadd('b', a1=2, a2=2, a3=2) - r.zadd('c', a1=6, a3=5, a4=4) - assert r.zinterstore('d', {'a': 1, 'b': 2, 'c': 3}) == 2 - assert r.zrange('d', 0, -1, withscores=True) == \ - [(b('a3'), 20), (b('a1'), 23)] - - def test_zrange(self, r): - r.zadd('a', a1=1, a2=2, a3=3) - assert r.zrange('a', 0, 1) == [b('a1'), b('a2')] - assert r.zrange('a', 1, 2) == [b('a2'), b('a3')] - - # withscores - assert r.zrange('a', 0, 1, withscores=True) == \ - [(b('a1'), 1.0), (b('a2'), 2.0)] - assert r.zrange('a', 1, 2, withscores=True) == \ - [(b('a2'), 2.0), (b('a3'), 3.0)] - - # custom score function - assert r.zrange('a', 0, 1, withscores=True, score_cast_func=int) == \ - [(b('a1'), 1), (b('a2'), 2)] - - @skip_if_server_version_lt('2.8.9') - def test_zrangebylex(self, r): - r.zadd('a', a=0, b=0, c=0, d=0, e=0, f=0, g=0) - assert r.zrangebylex('a', '-', '[c') == [b('a'), b('b'), b('c')] - assert r.zrangebylex('a', '-', '(c') == [b('a'), b('b')] - assert r.zrangebylex('a', '[aaa', '(g') == \ - [b('b'), b('c'), b('d'), b('e'), b('f')] - assert r.zrangebylex('a', '[f', '+') == [b('f'), b('g')] - assert r.zrangebylex('a', '-', '+', start=3, num=2) == [b('d'), b('e')] - - def test_zrangebyscore(self, r): - r.zadd('a', a1=1, a2=2, a3=3, a4=4, a5=5) - assert r.zrangebyscore('a', 2, 4) == [b('a2'), b('a3'), b('a4')] - - # slicing with start/num - assert r.zrangebyscore('a', 2, 4, start=1, num=2) == \ - [b('a3'), b('a4')] - - # withscores - assert r.zrangebyscore('a', 2, 4, withscores=True) == \ - [(b('a2'), 2.0), (b('a3'), 3.0), (b('a4'), 4.0)] - - # custom score function - assert r.zrangebyscore('a', 2, 4, withscores=True, - score_cast_func=int) == \ - [(b('a2'), 2), (b('a3'), 3), (b('a4'), 4)] - - def test_zrank(self, r): - r.zadd('a', a1=1, a2=2, 
a3=3, a4=4, a5=5) - assert r.zrank('a', 'a1') == 0 - assert r.zrank('a', 'a2') == 1 - assert r.zrank('a', 'a6') is None - - def test_zrem(self, r): - r.zadd('a', a1=1, a2=2, a3=3) - assert r.zrem('a', 'a2') == 1 - assert r.zrange('a', 0, -1) == [b('a1'), b('a3')] - assert r.zrem('a', 'b') == 0 - assert r.zrange('a', 0, -1) == [b('a1'), b('a3')] - - def test_zrem_multiple_keys(self, r): - r.zadd('a', a1=1, a2=2, a3=3) - assert r.zrem('a', 'a1', 'a2') == 2 - assert r.zrange('a', 0, 5) == [b('a3')] - - @skip_if_server_version_lt('2.8.9') - def test_zremrangebylex(self, r): - r.zadd('a', a=0, b=0, c=0, d=0, e=0, f=0, g=0) - assert r.zremrangebylex('a', '-', '[c') == 3 - assert r.zrange('a', 0, -1) == [b('d'), b('e'), b('f'), b('g')] - assert r.zremrangebylex('a', '[f', '+') == 2 - assert r.zrange('a', 0, -1) == [b('d'), b('e')] - assert r.zremrangebylex('a', '[h', '+') == 0 - assert r.zrange('a', 0, -1) == [b('d'), b('e')] - - def test_zremrangebyrank(self, r): - r.zadd('a', a1=1, a2=2, a3=3, a4=4, a5=5) - assert r.zremrangebyrank('a', 1, 3) == 3 - assert r.zrange('a', 0, 5) == [b('a1'), b('a5')] - - def test_zremrangebyscore(self, r): - r.zadd('a', a1=1, a2=2, a3=3, a4=4, a5=5) - assert r.zremrangebyscore('a', 2, 4) == 3 - assert r.zrange('a', 0, -1) == [b('a1'), b('a5')] - assert r.zremrangebyscore('a', 2, 4) == 0 - assert r.zrange('a', 0, -1) == [b('a1'), b('a5')] - - def test_zrevrange(self, r): - r.zadd('a', a1=1, a2=2, a3=3) - assert r.zrevrange('a', 0, 1) == [b('a3'), b('a2')] - assert r.zrevrange('a', 1, 2) == [b('a2'), b('a1')] - - # withscores - assert r.zrevrange('a', 0, 1, withscores=True) == \ - [(b('a3'), 3.0), (b('a2'), 2.0)] - assert r.zrevrange('a', 1, 2, withscores=True) == \ - [(b('a2'), 2.0), (b('a1'), 1.0)] - - # custom score function - assert r.zrevrange('a', 0, 1, withscores=True, - score_cast_func=int) == \ - [(b('a3'), 3.0), (b('a2'), 2.0)] - - def test_zrevrangebyscore(self, r): - r.zadd('a', a1=1, a2=2, a3=3, a4=4, a5=5) - assert 
r.zrevrangebyscore('a', 4, 2) == [b('a4'), b('a3'), b('a2')] - - # slicing with start/num - assert r.zrevrangebyscore('a', 4, 2, start=1, num=2) == \ - [b('a3'), b('a2')] - - # withscores - assert r.zrevrangebyscore('a', 4, 2, withscores=True) == \ - [(b('a4'), 4.0), (b('a3'), 3.0), (b('a2'), 2.0)] - - # custom score function - assert r.zrevrangebyscore('a', 4, 2, withscores=True, - score_cast_func=int) == \ - [(b('a4'), 4), (b('a3'), 3), (b('a2'), 2)] - - def test_zrevrank(self, r): - r.zadd('a', a1=1, a2=2, a3=3, a4=4, a5=5) - assert r.zrevrank('a', 'a1') == 4 - assert r.zrevrank('a', 'a2') == 3 - assert r.zrevrank('a', 'a6') is None - - def test_zscore(self, r): - r.zadd('a', a1=1, a2=2, a3=3) - assert r.zscore('a', 'a1') == 1.0 - assert r.zscore('a', 'a2') == 2.0 - assert r.zscore('a', 'a4') is None - - def test_zunionstore_sum(self, r): - r.zadd('a', a1=1, a2=1, a3=1) - r.zadd('b', a1=2, a2=2, a3=2) - r.zadd('c', a1=6, a3=5, a4=4) - assert r.zunionstore('d', ['a', 'b', 'c']) == 4 - assert r.zrange('d', 0, -1, withscores=True) == \ - [(b('a2'), 3), (b('a4'), 4), (b('a3'), 8), (b('a1'), 9)] - - def test_zunionstore_max(self, r): - r.zadd('a', a1=1, a2=1, a3=1) - r.zadd('b', a1=2, a2=2, a3=2) - r.zadd('c', a1=6, a3=5, a4=4) - assert r.zunionstore('d', ['a', 'b', 'c'], aggregate='MAX') == 4 - assert r.zrange('d', 0, -1, withscores=True) == \ - [(b('a2'), 2), (b('a4'), 4), (b('a3'), 5), (b('a1'), 6)] - - def test_zunionstore_min(self, r): - r.zadd('a', a1=1, a2=2, a3=3) - r.zadd('b', a1=2, a2=2, a3=4) - r.zadd('c', a1=6, a3=5, a4=4) - assert r.zunionstore('d', ['a', 'b', 'c'], aggregate='MIN') == 4 - assert r.zrange('d', 0, -1, withscores=True) == \ - [(b('a1'), 1), (b('a2'), 2), (b('a3'), 3), (b('a4'), 4)] - - def test_zunionstore_with_weight(self, r): - r.zadd('a', a1=1, a2=1, a3=1) - r.zadd('b', a1=2, a2=2, a3=2) - r.zadd('c', a1=6, a3=5, a4=4) - assert r.zunionstore('d', {'a': 1, 'b': 2, 'c': 3}) == 4 - assert r.zrange('d', 0, -1, withscores=True) == \ - 
[(b('a2'), 5), (b('a4'), 12), (b('a3'), 20), (b('a1'), 23)] - - # HYPERLOGLOG TESTS - @skip_if_server_version_lt('2.8.9') - def test_pfadd(self, r): - members = set([b('1'), b('2'), b('3')]) - assert r.pfadd('a', *members) == 1 - assert r.pfadd('a', *members) == 0 - assert r.pfcount('a') == len(members) - - @skip_if_server_version_lt('2.8.9') - def test_pfcount(self, r): - members = set([b('1'), b('2'), b('3')]) - r.pfadd('a', *members) - assert r.pfcount('a') == len(members) - - @skip_if_server_version_lt('2.8.9') - def test_pfmerge(self, r): - mema = set([b('1'), b('2'), b('3')]) - memb = set([b('2'), b('3'), b('4')]) - memc = set([b('5'), b('6'), b('7')]) - r.pfadd('a', *mema) - r.pfadd('b', *memb) - r.pfadd('c', *memc) - r.pfmerge('d', 'c', 'a') - assert r.pfcount('d') == 6 - r.pfmerge('d', 'b') - assert r.pfcount('d') == 7 - - # HASH COMMANDS - def test_hget_and_hset(self, r): - r.hmset('a', {'1': 1, '2': 2, '3': 3}) - assert r.hget('a', '1') == b('1') - assert r.hget('a', '2') == b('2') - assert r.hget('a', '3') == b('3') - - # field was updated, redis returns 0 - assert r.hset('a', '2', 5) == 0 - assert r.hget('a', '2') == b('5') - - # field is new, redis returns 1 - assert r.hset('a', '4', 4) == 1 - assert r.hget('a', '4') == b('4') - - # key inside of hash that doesn't exist returns null value - assert r.hget('a', 'b') is None - - def test_hdel(self, r): - r.hmset('a', {'1': 1, '2': 2, '3': 3}) - assert r.hdel('a', '2') == 1 - assert r.hget('a', '2') is None - assert r.hdel('a', '1', '3') == 2 - assert r.hlen('a') == 0 - - def test_hexists(self, r): - r.hmset('a', {'1': 1, '2': 2, '3': 3}) - assert r.hexists('a', '1') - assert not r.hexists('a', '4') - - def test_hgetall(self, r): - h = {b('a1'): b('1'), b('a2'): b('2'), b('a3'): b('3')} - r.hmset('a', h) - assert r.hgetall('a') == h - - def test_hincrby(self, r): - assert r.hincrby('a', '1') == 1 - assert r.hincrby('a', '1', amount=2) == 3 - assert r.hincrby('a', '1', amount=-2) == 1 - - 
@skip_if_server_version_lt('2.6.0') - def test_hincrbyfloat(self, r): - assert r.hincrbyfloat('a', '1') == 1.0 - assert r.hincrbyfloat('a', '1') == 2.0 - assert r.hincrbyfloat('a', '1', 1.2) == 3.2 - - def test_hkeys(self, r): - h = {b('a1'): b('1'), b('a2'): b('2'), b('a3'): b('3')} - r.hmset('a', h) - local_keys = list(iterkeys(h)) - remote_keys = r.hkeys('a') - assert (sorted(local_keys) == sorted(remote_keys)) - - def test_hlen(self, r): - r.hmset('a', {'1': 1, '2': 2, '3': 3}) - assert r.hlen('a') == 3 - - def test_hmget(self, r): - assert r.hmset('a', {'a': 1, 'b': 2, 'c': 3}) - assert r.hmget('a', 'a', 'b', 'c') == [b('1'), b('2'), b('3')] - - def test_hmset(self, r): - h = {b('a'): b('1'), b('b'): b('2'), b('c'): b('3')} - assert r.hmset('a', h) - assert r.hgetall('a') == h - - def test_hsetnx(self, r): - # Initially set the hash field - assert r.hsetnx('a', '1', 1) - assert r.hget('a', '1') == b('1') - assert not r.hsetnx('a', '1', 2) - assert r.hget('a', '1') == b('1') - - def test_hvals(self, r): - h = {b('a1'): b('1'), b('a2'): b('2'), b('a3'): b('3')} - r.hmset('a', h) - local_vals = list(itervalues(h)) - remote_vals = r.hvals('a') - assert sorted(local_vals) == sorted(remote_vals) - - # SORT - def test_sort_basic(self, r): - r.rpush('a', '3', '2', '1', '4') - assert r.sort('a') == [b('1'), b('2'), b('3'), b('4')] - - def test_sort_limited(self, r): - r.rpush('a', '3', '2', '1', '4') - assert r.sort('a', start=1, num=2) == [b('2'), b('3')] - - def test_sort_by(self, r): - r['score:1'] = 8 - r['score:2'] = 3 - r['score:3'] = 5 - r.rpush('a', '3', '2', '1') - assert r.sort('a', by='score:*') == [b('2'), b('3'), b('1')] - - def test_sort_get(self, r): - r['user:1'] = 'u1' - r['user:2'] = 'u2' - r['user:3'] = 'u3' - r.rpush('a', '2', '3', '1') - assert r.sort('a', get='user:*') == [b('u1'), b('u2'), b('u3')] - - def test_sort_get_multi(self, r): - r['user:1'] = 'u1' - r['user:2'] = 'u2' - r['user:3'] = 'u3' - r.rpush('a', '2', '3', '1') - assert 
r.sort('a', get=('user:*', '#')) == \ - [b('u1'), b('1'), b('u2'), b('2'), b('u3'), b('3')] - - def test_sort_get_groups_two(self, r): - r['user:1'] = 'u1' - r['user:2'] = 'u2' - r['user:3'] = 'u3' - r.rpush('a', '2', '3', '1') - assert r.sort('a', get=('user:*', '#'), groups=True) == \ - [(b('u1'), b('1')), (b('u2'), b('2')), (b('u3'), b('3'))] - - def test_sort_groups_string_get(self, r): - r['user:1'] = 'u1' - r['user:2'] = 'u2' - r['user:3'] = 'u3' - r.rpush('a', '2', '3', '1') - with pytest.raises(exceptions.DataError): - r.sort('a', get='user:*', groups=True) - - def test_sort_groups_just_one_get(self, r): - r['user:1'] = 'u1' - r['user:2'] = 'u2' - r['user:3'] = 'u3' - r.rpush('a', '2', '3', '1') - with pytest.raises(exceptions.DataError): - r.sort('a', get=['user:*'], groups=True) - - def test_sort_groups_no_get(self, r): - r['user:1'] = 'u1' - r['user:2'] = 'u2' - r['user:3'] = 'u3' - r.rpush('a', '2', '3', '1') - with pytest.raises(exceptions.DataError): - r.sort('a', groups=True) - - def test_sort_groups_three_gets(self, r): - r['user:1'] = 'u1' - r['user:2'] = 'u2' - r['user:3'] = 'u3' - r['door:1'] = 'd1' - r['door:2'] = 'd2' - r['door:3'] = 'd3' - r.rpush('a', '2', '3', '1') - assert r.sort('a', get=('user:*', 'door:*', '#'), groups=True) == \ - [ - (b('u1'), b('d1'), b('1')), - (b('u2'), b('d2'), b('2')), - (b('u3'), b('d3'), b('3')) - ] - - def test_sort_desc(self, r): - r.rpush('a', '2', '3', '1') - assert r.sort('a', desc=True) == [b('3'), b('2'), b('1')] - - def test_sort_alpha(self, r): - r.rpush('a', 'e', 'c', 'b', 'd', 'a') - assert r.sort('a', alpha=True) == \ - [b('a'), b('b'), b('c'), b('d'), b('e')] - - def test_sort_store(self, r): - r.rpush('a', '2', '3', '1') - assert r.sort('a', store='sorted_values') == 3 - assert r.lrange('sorted_values', 0, -1) == [b('1'), b('2'), b('3')] - - def test_sort_all_options(self, r): - r['user:1:username'] = 'zeus' - r['user:2:username'] = 'titan' - r['user:3:username'] = 'hermes' - r['user:4:username'] = 
'hercules' - r['user:5:username'] = 'apollo' - r['user:6:username'] = 'athena' - r['user:7:username'] = 'hades' - r['user:8:username'] = 'dionysus' - - r['user:1:favorite_drink'] = 'yuengling' - r['user:2:favorite_drink'] = 'rum' - r['user:3:favorite_drink'] = 'vodka' - r['user:4:favorite_drink'] = 'milk' - r['user:5:favorite_drink'] = 'pinot noir' - r['user:6:favorite_drink'] = 'water' - r['user:7:favorite_drink'] = 'gin' - r['user:8:favorite_drink'] = 'apple juice' - - r.rpush('gods', '5', '8', '3', '1', '2', '7', '6', '4') - num = r.sort('gods', start=2, num=4, by='user:*:username', - get='user:*:favorite_drink', desc=True, alpha=True, - store='sorted') - assert num == 4 - assert r.lrange('sorted', 0, 10) == \ - [b('vodka'), b('milk'), b('gin'), b('apple juice')] - - -class TestStrictCommands(object): - - def test_strict_zadd(self, sr): - sr.zadd('a', 1.0, 'a1', 2.0, 'a2', a3=3.0) - assert sr.zrange('a', 0, -1, withscores=True) == \ - [(b('a1'), 1.0), (b('a2'), 2.0), (b('a3'), 3.0)] - - def test_strict_lrem(self, sr): - sr.rpush('a', 'a1', 'a2', 'a3', 'a1') - sr.lrem('a', 0, 'a1') - assert sr.lrange('a', 0, -1) == [b('a2'), b('a3')] - - def test_strict_setex(self, sr): - assert sr.setex('a', 60, '1') - assert sr['a'] == b('1') - assert 0 < sr.ttl('a') <= 60 - - def test_strict_ttl(self, sr): - assert not sr.expire('a', 10) - sr['a'] = '1' - assert sr.expire('a', 10) - assert 0 < sr.ttl('a') <= 10 - assert sr.persist('a') - assert sr.ttl('a') == -1 - - @skip_if_server_version_lt('2.6.0') - def test_strict_pttl(self, sr): - assert not sr.pexpire('a', 10000) - sr['a'] = '1' - assert sr.pexpire('a', 10000) - assert 0 < sr.pttl('a') <= 10000 - assert sr.persist('a') - assert sr.pttl('a') == -1 - - -class TestBinarySave(object): - def test_binary_get_set(self, r): - assert r.set(' foo bar ', '123') - assert r.get(' foo bar ') == b('123') - - assert r.set(' foo\r\nbar\r\n ', '456') - assert r.get(' foo\r\nbar\r\n ') == b('456') - - assert r.set(' \r\n\t\x07\x13 ', 
'789') - assert r.get(' \r\n\t\x07\x13 ') == b('789') - - assert sorted(r.keys('*')) == \ - [b(' \r\n\t\x07\x13 '), b(' foo\r\nbar\r\n '), b(' foo bar ')] - - assert r.delete(' foo bar ') - assert r.delete(' foo\r\nbar\r\n ') - assert r.delete(' \r\n\t\x07\x13 ') - - def test_binary_lists(self, r): - mapping = { - b('foo bar'): [b('1'), b('2'), b('3')], - b('foo\r\nbar\r\n'): [b('4'), b('5'), b('6')], - b('foo\tbar\x07'): [b('7'), b('8'), b('9')], - } - # fill in lists - for key, value in iteritems(mapping): - r.rpush(key, *value) - - # check that KEYS returns all the keys as they are - assert sorted(r.keys('*')) == sorted(list(iterkeys(mapping))) - - # check that it is possible to get list content by key name - for key, value in iteritems(mapping): - assert r.lrange(key, 0, -1) == value - - def test_22_info(self, r): - """ - Older Redis versions contained 'allocation_stats' in INFO that - was the cause of a number of bugs when parsing. - """ - info = "allocation_stats:6=1,7=1,8=7141,9=180,10=92,11=116,12=5330," \ - "13=123,14=3091,15=11048,16=225842,17=1784,18=814,19=12020," \ - "20=2530,21=645,22=15113,23=8695,24=142860,25=318,26=3303," \ - "27=20561,28=54042,29=37390,30=1884,31=18071,32=31367,33=160," \ - "34=169,35=201,36=10155,37=1045,38=15078,39=22985,40=12523," \ - "41=15588,42=265,43=1287,44=142,45=382,46=945,47=426,48=171," \ - "49=56,50=516,51=43,52=41,53=46,54=54,55=75,56=647,57=332," \ - "58=32,59=39,60=48,61=35,62=62,63=32,64=221,65=26,66=30," \ - "67=36,68=41,69=44,70=26,71=144,72=169,73=24,74=37,75=25," \ - "76=42,77=21,78=126,79=374,80=27,81=40,82=43,83=47,84=46," \ - "85=114,86=34,87=37,88=7240,89=34,90=38,91=18,92=99,93=20," \ - "94=18,95=17,96=15,97=22,98=18,99=69,100=17,101=22,102=15," \ - "103=29,104=39,105=30,106=70,107=22,108=21,109=26,110=52," \ - "111=45,112=33,113=67,114=41,115=44,116=48,117=53,118=54," \ - "119=51,120=75,121=44,122=57,123=44,124=66,125=56,126=52," \ - "127=81,128=108,129=70,130=50,131=51,132=53,133=45,134=62," \ - 
"135=12,136=13,137=7,138=15,139=21,140=11,141=20,142=6,143=7," \ - "144=11,145=6,146=16,147=19,148=1112,149=1,151=83,154=1," \ - "155=1,156=1,157=1,160=1,161=1,162=2,166=1,169=1,170=1,171=2," \ - "172=1,174=1,176=2,177=9,178=34,179=73,180=30,181=1,185=3," \ - "187=1,188=1,189=1,192=1,196=1,198=1,200=1,201=1,204=1,205=1," \ - "207=1,208=1,209=1,214=2,215=31,216=78,217=28,218=5,219=2," \ - "220=1,222=1,225=1,227=1,234=1,242=1,250=1,252=1,253=1," \ - ">=256=203" - parsed = parse_info(info) - assert 'allocation_stats' in parsed - assert '6' in parsed['allocation_stats'] - assert '>=256' in parsed['allocation_stats'] - - def test_large_responses(self, r): - "The PythonParser has some special cases for return values > 1MB" - # load up 5MB of data into a key - data = ''.join([ascii_letters] * (5000000 // len(ascii_letters))) - r['a'] = data - assert r['a'] == b(data) - - def test_floating_point_encoding(self, r): - """ - High precision floating point values sent to the server should keep - precision. 
- """ - timestamp = 1349673917.939762 - r.zadd('a', 'a1', timestamp) - assert r.zscore('a', 'a1') == timestamp diff --git a/awx/lib/site-packages/redis/tests/test_connection_pool.py b/awx/lib/site-packages/redis/tests/test_connection_pool.py deleted file mode 100644 index 55ccce19ff..0000000000 --- a/awx/lib/site-packages/redis/tests/test_connection_pool.py +++ /dev/null @@ -1,402 +0,0 @@ -from __future__ import with_statement -import os -import pytest -import redis -import time -import re - -from threading import Thread -from redis.connection import ssl_available -from .conftest import skip_if_server_version_lt - - -class DummyConnection(object): - description_format = "DummyConnection<>" - - def __init__(self, **kwargs): - self.kwargs = kwargs - self.pid = os.getpid() - - -class TestConnectionPool(object): - def get_pool(self, connection_kwargs=None, max_connections=None, - connection_class=DummyConnection): - connection_kwargs = connection_kwargs or {} - pool = redis.ConnectionPool( - connection_class=connection_class, - max_connections=max_connections, - **connection_kwargs) - return pool - - def test_connection_creation(self): - connection_kwargs = {'foo': 'bar', 'biz': 'baz'} - pool = self.get_pool(connection_kwargs=connection_kwargs) - connection = pool.get_connection('_') - assert isinstance(connection, DummyConnection) - assert connection.kwargs == connection_kwargs - - def test_multiple_connections(self): - pool = self.get_pool() - c1 = pool.get_connection('_') - c2 = pool.get_connection('_') - assert c1 != c2 - - def test_max_connections(self): - pool = self.get_pool(max_connections=2) - pool.get_connection('_') - pool.get_connection('_') - with pytest.raises(redis.ConnectionError): - pool.get_connection('_') - - def test_reuse_previously_released_connection(self): - pool = self.get_pool() - c1 = pool.get_connection('_') - pool.release(c1) - c2 = pool.get_connection('_') - assert c1 == c2 - - def test_repr_contains_db_info_tcp(self): - connection_kwargs 
= {'host': 'localhost', 'port': 6379, 'db': 1} - pool = self.get_pool(connection_kwargs=connection_kwargs, - connection_class=redis.Connection) - expected = 'ConnectionPool>' - assert repr(pool) == expected - - def test_repr_contains_db_info_unix(self): - connection_kwargs = {'path': '/abc', 'db': 1} - pool = self.get_pool(connection_kwargs=connection_kwargs, - connection_class=redis.UnixDomainSocketConnection) - expected = 'ConnectionPool>' - assert repr(pool) == expected - - -class TestBlockingConnectionPool(object): - def get_pool(self, connection_kwargs=None, max_connections=10, timeout=20): - connection_kwargs = connection_kwargs or {} - pool = redis.BlockingConnectionPool(connection_class=DummyConnection, - max_connections=max_connections, - timeout=timeout, - **connection_kwargs) - return pool - - def test_connection_creation(self): - connection_kwargs = {'foo': 'bar', 'biz': 'baz'} - pool = self.get_pool(connection_kwargs=connection_kwargs) - connection = pool.get_connection('_') - assert isinstance(connection, DummyConnection) - assert connection.kwargs == connection_kwargs - - def test_multiple_connections(self): - pool = self.get_pool() - c1 = pool.get_connection('_') - c2 = pool.get_connection('_') - assert c1 != c2 - - def test_connection_pool_blocks_until_timeout(self): - "When out of connections, block for timeout seconds, then raise" - pool = self.get_pool(max_connections=1, timeout=0.1) - pool.get_connection('_') - - start = time.time() - with pytest.raises(redis.ConnectionError): - pool.get_connection('_') - # we should have waited at least 0.1 seconds - assert time.time() - start >= 0.1 - - def connection_pool_blocks_until_another_connection_released(self): - """ - When out of connections, block until another connection is released - to the pool - """ - pool = self.get_pool(max_connections=1, timeout=2) - c1 = pool.get_connection('_') - - def target(): - time.sleep(0.1) - pool.release(c1) - - Thread(target=target).start() - start = time.time() - 
pool.get_connection('_') - assert time.time() - start >= 0.1 - - def test_reuse_previously_released_connection(self): - pool = self.get_pool() - c1 = pool.get_connection('_') - pool.release(c1) - c2 = pool.get_connection('_') - assert c1 == c2 - - def test_repr_contains_db_info_tcp(self): - pool = redis.ConnectionPool(host='localhost', port=6379, db=0) - expected = 'ConnectionPool>' - assert repr(pool) == expected - - def test_repr_contains_db_info_unix(self): - pool = redis.ConnectionPool( - connection_class=redis.UnixDomainSocketConnection, - path='abc', - db=0, - ) - expected = 'ConnectionPool>' - assert repr(pool) == expected - - -class TestConnectionPoolURLParsing(object): - def test_defaults(self): - pool = redis.ConnectionPool.from_url('redis://localhost') - assert pool.connection_class == redis.Connection - assert pool.connection_kwargs == { - 'host': 'localhost', - 'port': 6379, - 'db': 0, - 'password': None, - } - - def test_hostname(self): - pool = redis.ConnectionPool.from_url('redis://myhost') - assert pool.connection_class == redis.Connection - assert pool.connection_kwargs == { - 'host': 'myhost', - 'port': 6379, - 'db': 0, - 'password': None, - } - - def test_port(self): - pool = redis.ConnectionPool.from_url('redis://localhost:6380') - assert pool.connection_class == redis.Connection - assert pool.connection_kwargs == { - 'host': 'localhost', - 'port': 6380, - 'db': 0, - 'password': None, - } - - def test_password(self): - pool = redis.ConnectionPool.from_url('redis://:mypassword@localhost') - assert pool.connection_class == redis.Connection - assert pool.connection_kwargs == { - 'host': 'localhost', - 'port': 6379, - 'db': 0, - 'password': 'mypassword', - } - - def test_db_as_argument(self): - pool = redis.ConnectionPool.from_url('redis://localhost', db='1') - assert pool.connection_class == redis.Connection - assert pool.connection_kwargs == { - 'host': 'localhost', - 'port': 6379, - 'db': 1, - 'password': None, - } - - def test_db_in_path(self): 
- pool = redis.ConnectionPool.from_url('redis://localhost/2', db='1') - assert pool.connection_class == redis.Connection - assert pool.connection_kwargs == { - 'host': 'localhost', - 'port': 6379, - 'db': 2, - 'password': None, - } - - def test_db_in_querystring(self): - pool = redis.ConnectionPool.from_url('redis://localhost/2?db=3', - db='1') - assert pool.connection_class == redis.Connection - assert pool.connection_kwargs == { - 'host': 'localhost', - 'port': 6379, - 'db': 3, - 'password': None, - } - - def test_extra_querystring_options(self): - pool = redis.ConnectionPool.from_url('redis://localhost?a=1&b=2') - assert pool.connection_class == redis.Connection - assert pool.connection_kwargs == { - 'host': 'localhost', - 'port': 6379, - 'db': 0, - 'password': None, - 'a': '1', - 'b': '2' - } - - def test_calling_from_subclass_returns_correct_instance(self): - pool = redis.BlockingConnectionPool.from_url('redis://localhost') - assert isinstance(pool, redis.BlockingConnectionPool) - - def test_client_creates_connection_pool(self): - r = redis.StrictRedis.from_url('redis://myhost') - assert r.connection_pool.connection_class == redis.Connection - assert r.connection_pool.connection_kwargs == { - 'host': 'myhost', - 'port': 6379, - 'db': 0, - 'password': None, - } - - -class TestConnectionPoolUnixSocketURLParsing(object): - def test_defaults(self): - pool = redis.ConnectionPool.from_url('unix:///socket') - assert pool.connection_class == redis.UnixDomainSocketConnection - assert pool.connection_kwargs == { - 'path': '/socket', - 'db': 0, - 'password': None, - } - - def test_password(self): - pool = redis.ConnectionPool.from_url('unix://:mypassword@/socket') - assert pool.connection_class == redis.UnixDomainSocketConnection - assert pool.connection_kwargs == { - 'path': '/socket', - 'db': 0, - 'password': 'mypassword', - } - - def test_db_as_argument(self): - pool = redis.ConnectionPool.from_url('unix:///socket', db=1) - assert pool.connection_class == 
redis.UnixDomainSocketConnection - assert pool.connection_kwargs == { - 'path': '/socket', - 'db': 1, - 'password': None, - } - - def test_db_in_querystring(self): - pool = redis.ConnectionPool.from_url('unix:///socket?db=2', db=1) - assert pool.connection_class == redis.UnixDomainSocketConnection - assert pool.connection_kwargs == { - 'path': '/socket', - 'db': 2, - 'password': None, - } - - def test_extra_querystring_options(self): - pool = redis.ConnectionPool.from_url('unix:///socket?a=1&b=2') - assert pool.connection_class == redis.UnixDomainSocketConnection - assert pool.connection_kwargs == { - 'path': '/socket', - 'db': 0, - 'password': None, - 'a': '1', - 'b': '2' - } - - -class TestSSLConnectionURLParsing(object): - @pytest.mark.skipif(not ssl_available, reason="SSL not installed") - def test_defaults(self): - pool = redis.ConnectionPool.from_url('rediss://localhost') - assert pool.connection_class == redis.SSLConnection - assert pool.connection_kwargs == { - 'host': 'localhost', - 'port': 6379, - 'db': 0, - 'password': None, - } - - @pytest.mark.skipif(not ssl_available, reason="SSL not installed") - def test_cert_reqs_options(self): - import ssl - pool = redis.ConnectionPool.from_url('rediss://?ssl_cert_reqs=none') - assert pool.get_connection('_').cert_reqs == ssl.CERT_NONE - - pool = redis.ConnectionPool.from_url( - 'rediss://?ssl_cert_reqs=optional') - assert pool.get_connection('_').cert_reqs == ssl.CERT_OPTIONAL - - pool = redis.ConnectionPool.from_url( - 'rediss://?ssl_cert_reqs=required') - assert pool.get_connection('_').cert_reqs == ssl.CERT_REQUIRED - - -class TestConnection(object): - def test_on_connect_error(self): - """ - An error in Connection.on_connect should disconnect from the server - see for details: https://github.com/andymccurdy/redis-py/issues/368 - """ - # this assumes the Redis server being tested against doesn't have - # 9999 databases ;) - bad_connection = redis.Redis(db=9999) - # an error should be raised on connect - with 
pytest.raises(redis.RedisError): - bad_connection.info() - pool = bad_connection.connection_pool - assert len(pool._available_connections) == 1 - assert not pool._available_connections[0]._sock - - @skip_if_server_version_lt('2.8.8') - def test_busy_loading_disconnects_socket(self, r): - """ - If Redis raises a LOADING error, the connection should be - disconnected and a BusyLoadingError raised - """ - with pytest.raises(redis.BusyLoadingError): - r.execute_command('DEBUG', 'ERROR', 'LOADING fake message') - pool = r.connection_pool - assert len(pool._available_connections) == 1 - assert not pool._available_connections[0]._sock - - @skip_if_server_version_lt('2.8.8') - def test_busy_loading_from_pipeline_immediate_command(self, r): - """ - BusyLoadingErrors should raise from Pipelines that execute a - command immediately, like WATCH does. - """ - pipe = r.pipeline() - with pytest.raises(redis.BusyLoadingError): - pipe.immediate_execute_command('DEBUG', 'ERROR', - 'LOADING fake message') - pool = r.connection_pool - assert not pipe.connection - assert len(pool._available_connections) == 1 - assert not pool._available_connections[0]._sock - - @skip_if_server_version_lt('2.8.8') - def test_busy_loading_from_pipeline(self, r): - """ - BusyLoadingErrors should be raised from a pipeline execution - regardless of the raise_on_error flag. 
- """ - pipe = r.pipeline() - pipe.execute_command('DEBUG', 'ERROR', 'LOADING fake message') - with pytest.raises(redis.BusyLoadingError): - pipe.execute() - pool = r.connection_pool - assert not pipe.connection - assert len(pool._available_connections) == 1 - assert not pool._available_connections[0]._sock - - @skip_if_server_version_lt('2.8.8') - def test_read_only_error(self, r): - "READONLY errors get turned in ReadOnlyError exceptions" - with pytest.raises(redis.ReadOnlyError): - r.execute_command('DEBUG', 'ERROR', 'READONLY blah blah') - - def test_connect_from_url_tcp(self): - connection = redis.Redis.from_url('redis://localhost') - pool = connection.connection_pool - - assert re.match('(.*)<(.*)<(.*)>>', repr(pool)).groups() == ( - 'ConnectionPool', - 'Connection', - 'host=localhost,port=6379,db=0', - ) - - def test_connect_from_url_unix(self): - connection = redis.Redis.from_url('unix:///path/to/socket') - pool = connection.connection_pool - - assert re.match('(.*)<(.*)<(.*)>>', repr(pool)).groups() == ( - 'ConnectionPool', - 'UnixDomainSocketConnection', - 'path=/path/to/socket,db=0', - ) diff --git a/awx/lib/site-packages/redis/tests/test_encoding.py b/awx/lib/site-packages/redis/tests/test_encoding.py deleted file mode 100644 index b1df0a56c2..0000000000 --- a/awx/lib/site-packages/redis/tests/test_encoding.py +++ /dev/null @@ -1,33 +0,0 @@ -from __future__ import with_statement -import pytest - -from redis._compat import unichr, u, unicode -from .conftest import r as _redis_client - - -class TestEncoding(object): - @pytest.fixture() - def r(self, request): - return _redis_client(request=request, decode_responses=True) - - def test_simple_encoding(self, r): - unicode_string = unichr(3456) + u('abcd') + unichr(3421) - r['unicode-string'] = unicode_string - cached_val = r['unicode-string'] - assert isinstance(cached_val, unicode) - assert unicode_string == cached_val - - def test_list_encoding(self, r): - unicode_string = unichr(3456) + u('abcd') + 
unichr(3421) - result = [unicode_string, unicode_string, unicode_string] - r.rpush('a', *result) - assert r.lrange('a', 0, -1) == result - - -class TestCommandsAndTokensArentEncoded(object): - @pytest.fixture() - def r(self, request): - return _redis_client(request=request, charset='utf-16') - - def test_basic_command(self, r): - r.set('hello', 'world') diff --git a/awx/lib/site-packages/redis/tests/test_lock.py b/awx/lib/site-packages/redis/tests/test_lock.py deleted file mode 100644 index d732ae1f1e..0000000000 --- a/awx/lib/site-packages/redis/tests/test_lock.py +++ /dev/null @@ -1,167 +0,0 @@ -from __future__ import with_statement -import pytest -import time - -from redis.exceptions import LockError, ResponseError -from redis.lock import Lock, LuaLock - - -class TestLock(object): - lock_class = Lock - - def get_lock(self, redis, *args, **kwargs): - kwargs['lock_class'] = self.lock_class - return redis.lock(*args, **kwargs) - - def test_lock(self, sr): - lock = self.get_lock(sr, 'foo') - assert lock.acquire(blocking=False) - assert sr.get('foo') == lock.local.token - assert sr.ttl('foo') == -1 - lock.release() - assert sr.get('foo') is None - - def test_competing_locks(self, sr): - lock1 = self.get_lock(sr, 'foo') - lock2 = self.get_lock(sr, 'foo') - assert lock1.acquire(blocking=False) - assert not lock2.acquire(blocking=False) - lock1.release() - assert lock2.acquire(blocking=False) - assert not lock1.acquire(blocking=False) - lock2.release() - - def test_timeout(self, sr): - lock = self.get_lock(sr, 'foo', timeout=10) - assert lock.acquire(blocking=False) - assert 8 < sr.ttl('foo') <= 10 - lock.release() - - def test_float_timeout(self, sr): - lock = self.get_lock(sr, 'foo', timeout=9.5) - assert lock.acquire(blocking=False) - assert 8 < sr.pttl('foo') <= 9500 - lock.release() - - def test_blocking_timeout(self, sr): - lock1 = self.get_lock(sr, 'foo') - assert lock1.acquire(blocking=False) - lock2 = self.get_lock(sr, 'foo', blocking_timeout=0.2) - start = 
time.time() - assert not lock2.acquire() - assert (time.time() - start) > 0.2 - lock1.release() - - def test_context_manager(self, sr): - # blocking_timeout prevents a deadlock if the lock can't be acquired - # for some reason - with self.get_lock(sr, 'foo', blocking_timeout=0.2) as lock: - assert sr.get('foo') == lock.local.token - assert sr.get('foo') is None - - def test_high_sleep_raises_error(self, sr): - "If sleep is higher than timeout, it should raise an error" - with pytest.raises(LockError): - self.get_lock(sr, 'foo', timeout=1, sleep=2) - - def test_releasing_unlocked_lock_raises_error(self, sr): - lock = self.get_lock(sr, 'foo') - with pytest.raises(LockError): - lock.release() - - def test_releasing_lock_no_longer_owned_raises_error(self, sr): - lock = self.get_lock(sr, 'foo') - lock.acquire(blocking=False) - # manually change the token - sr.set('foo', 'a') - with pytest.raises(LockError): - lock.release() - # even though we errored, the token is still cleared - assert lock.local.token is None - - def test_extend_lock(self, sr): - lock = self.get_lock(sr, 'foo', timeout=10) - assert lock.acquire(blocking=False) - assert 8000 < sr.pttl('foo') <= 10000 - assert lock.extend(10) - assert 16000 < sr.pttl('foo') <= 20000 - lock.release() - - def test_extend_lock_float(self, sr): - lock = self.get_lock(sr, 'foo', timeout=10.0) - assert lock.acquire(blocking=False) - assert 8000 < sr.pttl('foo') <= 10000 - assert lock.extend(10.0) - assert 16000 < sr.pttl('foo') <= 20000 - lock.release() - - def test_extending_unlocked_lock_raises_error(self, sr): - lock = self.get_lock(sr, 'foo', timeout=10) - with pytest.raises(LockError): - lock.extend(10) - - def test_extending_lock_with_no_timeout_raises_error(self, sr): - lock = self.get_lock(sr, 'foo') - assert lock.acquire(blocking=False) - with pytest.raises(LockError): - lock.extend(10) - lock.release() - - def test_extending_lock_no_longer_owned_raises_error(self, sr): - lock = self.get_lock(sr, 'foo') - assert 
lock.acquire(blocking=False) - sr.set('foo', 'a') - with pytest.raises(LockError): - lock.extend(10) - - -class TestLuaLock(TestLock): - lock_class = LuaLock - - -class TestLockClassSelection(object): - def test_lock_class_argument(self, sr): - lock = sr.lock('foo', lock_class=Lock) - assert type(lock) == Lock - lock = sr.lock('foo', lock_class=LuaLock) - assert type(lock) == LuaLock - - def test_cached_lualock_flag(self, sr): - try: - sr._use_lua_lock = True - lock = sr.lock('foo') - assert type(lock) == LuaLock - finally: - sr._use_lua_lock = None - - def test_cached_lock_flag(self, sr): - try: - sr._use_lua_lock = False - lock = sr.lock('foo') - assert type(lock) == Lock - finally: - sr._use_lua_lock = None - - def test_lua_compatible_server(self, sr, monkeypatch): - @classmethod - def mock_register(cls, redis): - return - monkeypatch.setattr(LuaLock, 'register_scripts', mock_register) - try: - lock = sr.lock('foo') - assert type(lock) == LuaLock - assert sr._use_lua_lock is True - finally: - sr._use_lua_lock = None - - def test_lua_unavailable(self, sr, monkeypatch): - @classmethod - def mock_register(cls, redis): - raise ResponseError() - monkeypatch.setattr(LuaLock, 'register_scripts', mock_register) - try: - lock = sr.lock('foo') - assert type(lock) == Lock - assert sr._use_lua_lock is False - finally: - sr._use_lua_lock = None diff --git a/awx/lib/site-packages/redis/tests/test_pipeline.py b/awx/lib/site-packages/redis/tests/test_pipeline.py deleted file mode 100644 index 46fc994e4d..0000000000 --- a/awx/lib/site-packages/redis/tests/test_pipeline.py +++ /dev/null @@ -1,226 +0,0 @@ -from __future__ import with_statement -import pytest - -import redis -from redis._compat import b, u, unichr, unicode - - -class TestPipeline(object): - def test_pipeline(self, r): - with r.pipeline() as pipe: - pipe.set('a', 'a1').get('a').zadd('z', z1=1).zadd('z', z2=4) - pipe.zincrby('z', 'z1').zrange('z', 0, 5, withscores=True) - assert pipe.execute() == \ - [ - True, - 
b('a1'), - True, - True, - 2.0, - [(b('z1'), 2.0), (b('z2'), 4)], - ] - - def test_pipeline_length(self, r): - with r.pipeline() as pipe: - # Initially empty. - assert len(pipe) == 0 - assert not pipe - - # Fill 'er up! - pipe.set('a', 'a1').set('b', 'b1').set('c', 'c1') - assert len(pipe) == 3 - assert pipe - - # Execute calls reset(), so empty once again. - pipe.execute() - assert len(pipe) == 0 - assert not pipe - - def test_pipeline_no_transaction(self, r): - with r.pipeline(transaction=False) as pipe: - pipe.set('a', 'a1').set('b', 'b1').set('c', 'c1') - assert pipe.execute() == [True, True, True] - assert r['a'] == b('a1') - assert r['b'] == b('b1') - assert r['c'] == b('c1') - - def test_pipeline_no_transaction_watch(self, r): - r['a'] = 0 - - with r.pipeline(transaction=False) as pipe: - pipe.watch('a') - a = pipe.get('a') - - pipe.multi() - pipe.set('a', int(a) + 1) - assert pipe.execute() == [True] - - def test_pipeline_no_transaction_watch_failure(self, r): - r['a'] = 0 - - with r.pipeline(transaction=False) as pipe: - pipe.watch('a') - a = pipe.get('a') - - r['a'] = 'bad' - - pipe.multi() - pipe.set('a', int(a) + 1) - - with pytest.raises(redis.WatchError): - pipe.execute() - - assert r['a'] == b('bad') - - def test_exec_error_in_response(self, r): - """ - an invalid pipeline command at exec time adds the exception instance - to the list of returned values - """ - r['c'] = 'a' - with r.pipeline() as pipe: - pipe.set('a', 1).set('b', 2).lpush('c', 3).set('d', 4) - result = pipe.execute(raise_on_error=False) - - assert result[0] - assert r['a'] == b('1') - assert result[1] - assert r['b'] == b('2') - - # we can't lpush to a key that's a string value, so this should - # be a ResponseError exception - assert isinstance(result[2], redis.ResponseError) - assert r['c'] == b('a') - - # since this isn't a transaction, the other commands after the - # error are still executed - assert result[3] - assert r['d'] == b('4') - - # make sure the pipe was restored to a 
working state - assert pipe.set('z', 'zzz').execute() == [True] - assert r['z'] == b('zzz') - - def test_exec_error_raised(self, r): - r['c'] = 'a' - with r.pipeline() as pipe: - pipe.set('a', 1).set('b', 2).lpush('c', 3).set('d', 4) - with pytest.raises(redis.ResponseError) as ex: - pipe.execute() - assert unicode(ex.value).startswith('Command # 3 (LPUSH c 3) of ' - 'pipeline caused error: ') - - # make sure the pipe was restored to a working state - assert pipe.set('z', 'zzz').execute() == [True] - assert r['z'] == b('zzz') - - def test_parse_error_raised(self, r): - with r.pipeline() as pipe: - # the zrem is invalid because we don't pass any keys to it - pipe.set('a', 1).zrem('b').set('b', 2) - with pytest.raises(redis.ResponseError) as ex: - pipe.execute() - - assert unicode(ex.value).startswith('Command # 2 (ZREM b) of ' - 'pipeline caused error: ') - - # make sure the pipe was restored to a working state - assert pipe.set('z', 'zzz').execute() == [True] - assert r['z'] == b('zzz') - - def test_watch_succeed(self, r): - r['a'] = 1 - r['b'] = 2 - - with r.pipeline() as pipe: - pipe.watch('a', 'b') - assert pipe.watching - a_value = pipe.get('a') - b_value = pipe.get('b') - assert a_value == b('1') - assert b_value == b('2') - pipe.multi() - - pipe.set('c', 3) - assert pipe.execute() == [True] - assert not pipe.watching - - def test_watch_failure(self, r): - r['a'] = 1 - r['b'] = 2 - - with r.pipeline() as pipe: - pipe.watch('a', 'b') - r['b'] = 3 - pipe.multi() - pipe.get('a') - with pytest.raises(redis.WatchError): - pipe.execute() - - assert not pipe.watching - - def test_unwatch(self, r): - r['a'] = 1 - r['b'] = 2 - - with r.pipeline() as pipe: - pipe.watch('a', 'b') - r['b'] = 3 - pipe.unwatch() - assert not pipe.watching - pipe.get('a') - assert pipe.execute() == [b('1')] - - def test_transaction_callable(self, r): - r['a'] = 1 - r['b'] = 2 - has_run = [] - - def my_transaction(pipe): - a_value = pipe.get('a') - assert a_value in (b('1'), b('2')) - b_value 
= pipe.get('b') - assert b_value == b('2') - - # silly run-once code... incr's "a" so WatchError should be raised - # forcing this all to run again. this should incr "a" once to "2" - if not has_run: - r.incr('a') - has_run.append('it has') - - pipe.multi() - pipe.set('c', int(a_value) + int(b_value)) - - result = r.transaction(my_transaction, 'a', 'b') - assert result == [True] - assert r['c'] == b('4') - - def test_exec_error_in_no_transaction_pipeline(self, r): - r['a'] = 1 - with r.pipeline(transaction=False) as pipe: - pipe.llen('a') - pipe.expire('a', 100) - - with pytest.raises(redis.ResponseError) as ex: - pipe.execute() - - assert unicode(ex.value).startswith('Command # 1 (LLEN a) of ' - 'pipeline caused error: ') - - assert r['a'] == b('1') - - def test_exec_error_in_no_transaction_pipeline_unicode_command(self, r): - key = unichr(3456) + u('abcd') + unichr(3421) - r[key] = 1 - with r.pipeline(transaction=False) as pipe: - pipe.llen(key) - pipe.expire(key, 100) - - with pytest.raises(redis.ResponseError) as ex: - pipe.execute() - - expected = unicode('Command # 1 (LLEN %s) of pipeline caused ' - 'error: ') % key - assert unicode(ex.value).startswith(expected) - - assert r[key] == b('1') diff --git a/awx/lib/site-packages/redis/tests/test_pubsub.py b/awx/lib/site-packages/redis/tests/test_pubsub.py deleted file mode 100644 index 5486b75a86..0000000000 --- a/awx/lib/site-packages/redis/tests/test_pubsub.py +++ /dev/null @@ -1,392 +0,0 @@ -from __future__ import with_statement -import pytest -import time - -import redis -from redis.exceptions import ConnectionError -from redis._compat import basestring, u, unichr - -from .conftest import r as _redis_client - - -def wait_for_message(pubsub, timeout=0.1, ignore_subscribe_messages=False): - now = time.time() - timeout = now + timeout - while now < timeout: - message = pubsub.get_message( - ignore_subscribe_messages=ignore_subscribe_messages) - if message is not None: - return message - time.sleep(0.01) - now = 
time.time() - return None - - -def make_message(type, channel, data, pattern=None): - return { - 'type': type, - 'pattern': pattern and pattern.encode('utf-8') or None, - 'channel': channel.encode('utf-8'), - 'data': data.encode('utf-8') if isinstance(data, basestring) else data - } - - -def make_subscribe_test_data(pubsub, type): - if type == 'channel': - return { - 'p': pubsub, - 'sub_type': 'subscribe', - 'unsub_type': 'unsubscribe', - 'sub_func': pubsub.subscribe, - 'unsub_func': pubsub.unsubscribe, - 'keys': ['foo', 'bar', u('uni') + unichr(4456) + u('code')] - } - elif type == 'pattern': - return { - 'p': pubsub, - 'sub_type': 'psubscribe', - 'unsub_type': 'punsubscribe', - 'sub_func': pubsub.psubscribe, - 'unsub_func': pubsub.punsubscribe, - 'keys': ['f*', 'b*', u('uni') + unichr(4456) + u('*')] - } - assert False, 'invalid subscribe type: %s' % type - - -class TestPubSubSubscribeUnsubscribe(object): - - def _test_subscribe_unsubscribe(self, p, sub_type, unsub_type, sub_func, - unsub_func, keys): - for key in keys: - assert sub_func(key) is None - - # should be a message for each channel/pattern we just subscribed to - for i, key in enumerate(keys): - assert wait_for_message(p) == make_message(sub_type, key, i + 1) - - for key in keys: - assert unsub_func(key) is None - - # should be a message for each channel/pattern we just unsubscribed - # from - for i, key in enumerate(keys): - i = len(keys) - 1 - i - assert wait_for_message(p) == make_message(unsub_type, key, i) - - def test_channel_subscribe_unsubscribe(self, r): - kwargs = make_subscribe_test_data(r.pubsub(), 'channel') - self._test_subscribe_unsubscribe(**kwargs) - - def test_pattern_subscribe_unsubscribe(self, r): - kwargs = make_subscribe_test_data(r.pubsub(), 'pattern') - self._test_subscribe_unsubscribe(**kwargs) - - def _test_resubscribe_on_reconnection(self, p, sub_type, unsub_type, - sub_func, unsub_func, keys): - - for key in keys: - assert sub_func(key) is None - - # should be a message for 
each channel/pattern we just subscribed to - for i, key in enumerate(keys): - assert wait_for_message(p) == make_message(sub_type, key, i + 1) - - # manually disconnect - p.connection.disconnect() - - # calling get_message again reconnects and resubscribes - # note, we may not re-subscribe to channels in exactly the same order - # so we have to do some extra checks to make sure we got them all - messages = [] - for i in range(len(keys)): - messages.append(wait_for_message(p)) - - unique_channels = set() - assert len(messages) == len(keys) - for i, message in enumerate(messages): - assert message['type'] == sub_type - assert message['data'] == i + 1 - assert isinstance(message['channel'], bytes) - channel = message['channel'].decode('utf-8') - unique_channels.add(channel) - - assert len(unique_channels) == len(keys) - for channel in unique_channels: - assert channel in keys - - def test_resubscribe_to_channels_on_reconnection(self, r): - kwargs = make_subscribe_test_data(r.pubsub(), 'channel') - self._test_resubscribe_on_reconnection(**kwargs) - - def test_resubscribe_to_patterns_on_reconnection(self, r): - kwargs = make_subscribe_test_data(r.pubsub(), 'pattern') - self._test_resubscribe_on_reconnection(**kwargs) - - def _test_subscribed_property(self, p, sub_type, unsub_type, sub_func, - unsub_func, keys): - - assert p.subscribed is False - sub_func(keys[0]) - # we're now subscribed even though we haven't processed the - # reply from the server just yet - assert p.subscribed is True - assert wait_for_message(p) == make_message(sub_type, keys[0], 1) - # we're still subscribed - assert p.subscribed is True - - # unsubscribe from all channels - unsub_func() - # we're still technically subscribed until we process the - # response messages from the server - assert p.subscribed is True - assert wait_for_message(p) == make_message(unsub_type, keys[0], 0) - # now we're no longer subscribed as no more messages can be delivered - # to any channels we were listening to - 
assert p.subscribed is False - - # subscribing again flips the flag back - sub_func(keys[0]) - assert p.subscribed is True - assert wait_for_message(p) == make_message(sub_type, keys[0], 1) - - # unsubscribe again - unsub_func() - assert p.subscribed is True - # subscribe to another channel before reading the unsubscribe response - sub_func(keys[1]) - assert p.subscribed is True - # read the unsubscribe for key1 - assert wait_for_message(p) == make_message(unsub_type, keys[0], 0) - # we're still subscribed to key2, so subscribed should still be True - assert p.subscribed is True - # read the key2 subscribe message - assert wait_for_message(p) == make_message(sub_type, keys[1], 1) - unsub_func() - # haven't read the message yet, so we're still subscribed - assert p.subscribed is True - assert wait_for_message(p) == make_message(unsub_type, keys[1], 0) - # now we're finally unsubscribed - assert p.subscribed is False - - def test_subscribe_property_with_channels(self, r): - kwargs = make_subscribe_test_data(r.pubsub(), 'channel') - self._test_subscribed_property(**kwargs) - - def test_subscribe_property_with_patterns(self, r): - kwargs = make_subscribe_test_data(r.pubsub(), 'pattern') - self._test_subscribed_property(**kwargs) - - def test_ignore_all_subscribe_messages(self, r): - p = r.pubsub(ignore_subscribe_messages=True) - - checks = ( - (p.subscribe, 'foo'), - (p.unsubscribe, 'foo'), - (p.psubscribe, 'f*'), - (p.punsubscribe, 'f*'), - ) - - assert p.subscribed is False - for func, channel in checks: - assert func(channel) is None - assert p.subscribed is True - assert wait_for_message(p) is None - assert p.subscribed is False - - def test_ignore_individual_subscribe_messages(self, r): - p = r.pubsub() - - checks = ( - (p.subscribe, 'foo'), - (p.unsubscribe, 'foo'), - (p.psubscribe, 'f*'), - (p.punsubscribe, 'f*'), - ) - - assert p.subscribed is False - for func, channel in checks: - assert func(channel) is None - assert p.subscribed is True - message = 
wait_for_message(p, ignore_subscribe_messages=True) - assert message is None - assert p.subscribed is False - - -class TestPubSubMessages(object): - def setup_method(self, method): - self.message = None - - def message_handler(self, message): - self.message = message - - def test_published_message_to_channel(self, r): - p = r.pubsub(ignore_subscribe_messages=True) - p.subscribe('foo') - assert r.publish('foo', 'test message') == 1 - - message = wait_for_message(p) - assert isinstance(message, dict) - assert message == make_message('message', 'foo', 'test message') - - def test_published_message_to_pattern(self, r): - p = r.pubsub(ignore_subscribe_messages=True) - p.subscribe('foo') - p.psubscribe('f*') - # 1 to pattern, 1 to channel - assert r.publish('foo', 'test message') == 2 - - message1 = wait_for_message(p) - message2 = wait_for_message(p) - assert isinstance(message1, dict) - assert isinstance(message2, dict) - - expected = [ - make_message('message', 'foo', 'test message'), - make_message('pmessage', 'foo', 'test message', pattern='f*') - ] - - assert message1 in expected - assert message2 in expected - assert message1 != message2 - - def test_channel_message_handler(self, r): - p = r.pubsub(ignore_subscribe_messages=True) - p.subscribe(foo=self.message_handler) - assert r.publish('foo', 'test message') == 1 - assert wait_for_message(p) is None - assert self.message == make_message('message', 'foo', 'test message') - - def test_pattern_message_handler(self, r): - p = r.pubsub(ignore_subscribe_messages=True) - p.psubscribe(**{'f*': self.message_handler}) - assert r.publish('foo', 'test message') == 1 - assert wait_for_message(p) is None - assert self.message == make_message('pmessage', 'foo', 'test message', - pattern='f*') - - def test_unicode_channel_message_handler(self, r): - p = r.pubsub(ignore_subscribe_messages=True) - channel = u('uni') + unichr(4456) + u('code') - channels = {channel: self.message_handler} - p.subscribe(**channels) - assert 
r.publish(channel, 'test message') == 1 - assert wait_for_message(p) is None - assert self.message == make_message('message', channel, 'test message') - - def test_unicode_pattern_message_handler(self, r): - p = r.pubsub(ignore_subscribe_messages=True) - pattern = u('uni') + unichr(4456) + u('*') - channel = u('uni') + unichr(4456) + u('code') - p.psubscribe(**{pattern: self.message_handler}) - assert r.publish(channel, 'test message') == 1 - assert wait_for_message(p) is None - assert self.message == make_message('pmessage', channel, - 'test message', pattern=pattern) - - -class TestPubSubAutoDecoding(object): - "These tests only validate that we get unicode values back" - - channel = u('uni') + unichr(4456) + u('code') - pattern = u('uni') + unichr(4456) + u('*') - data = u('abc') + unichr(4458) + u('123') - - def make_message(self, type, channel, data, pattern=None): - return { - 'type': type, - 'channel': channel, - 'pattern': pattern, - 'data': data - } - - def setup_method(self, method): - self.message = None - - def message_handler(self, message): - self.message = message - - @pytest.fixture() - def r(self, request): - return _redis_client(request=request, decode_responses=True) - - def test_channel_subscribe_unsubscribe(self, r): - p = r.pubsub() - p.subscribe(self.channel) - assert wait_for_message(p) == self.make_message('subscribe', - self.channel, 1) - - p.unsubscribe(self.channel) - assert wait_for_message(p) == self.make_message('unsubscribe', - self.channel, 0) - - def test_pattern_subscribe_unsubscribe(self, r): - p = r.pubsub() - p.psubscribe(self.pattern) - assert wait_for_message(p) == self.make_message('psubscribe', - self.pattern, 1) - - p.punsubscribe(self.pattern) - assert wait_for_message(p) == self.make_message('punsubscribe', - self.pattern, 0) - - def test_channel_publish(self, r): - p = r.pubsub(ignore_subscribe_messages=True) - p.subscribe(self.channel) - r.publish(self.channel, self.data) - assert wait_for_message(p) == 
self.make_message('message', - self.channel, - self.data) - - def test_pattern_publish(self, r): - p = r.pubsub(ignore_subscribe_messages=True) - p.psubscribe(self.pattern) - r.publish(self.channel, self.data) - assert wait_for_message(p) == self.make_message('pmessage', - self.channel, - self.data, - pattern=self.pattern) - - def test_channel_message_handler(self, r): - p = r.pubsub(ignore_subscribe_messages=True) - p.subscribe(**{self.channel: self.message_handler}) - r.publish(self.channel, self.data) - assert wait_for_message(p) is None - assert self.message == self.make_message('message', self.channel, - self.data) - - # test that we reconnected to the correct channel - p.connection.disconnect() - assert wait_for_message(p) is None # should reconnect - new_data = self.data + u('new data') - r.publish(self.channel, new_data) - assert wait_for_message(p) is None - assert self.message == self.make_message('message', self.channel, - new_data) - - def test_pattern_message_handler(self, r): - p = r.pubsub(ignore_subscribe_messages=True) - p.psubscribe(**{self.pattern: self.message_handler}) - r.publish(self.channel, self.data) - assert wait_for_message(p) is None - assert self.message == self.make_message('pmessage', self.channel, - self.data, - pattern=self.pattern) - - # test that we reconnected to the correct pattern - p.connection.disconnect() - assert wait_for_message(p) is None # should reconnect - new_data = self.data + u('new data') - r.publish(self.channel, new_data) - assert wait_for_message(p) is None - assert self.message == self.make_message('pmessage', self.channel, - new_data, - pattern=self.pattern) - - -class TestPubSubRedisDown(object): - - def test_channel_subscribe(self, r): - r = redis.Redis(host='localhost', port=6390) - p = r.pubsub() - with pytest.raises(ConnectionError): - p.subscribe('foo') diff --git a/awx/lib/site-packages/redis/tests/test_scripting.py b/awx/lib/site-packages/redis/tests/test_scripting.py deleted file mode 100644 index 
4849c81b7c..0000000000 --- a/awx/lib/site-packages/redis/tests/test_scripting.py +++ /dev/null @@ -1,82 +0,0 @@ -from __future__ import with_statement -import pytest - -from redis import exceptions -from redis._compat import b - - -multiply_script = """ -local value = redis.call('GET', KEYS[1]) -value = tonumber(value) -return value * ARGV[1]""" - - -class TestScripting(object): - @pytest.fixture(autouse=True) - def reset_scripts(self, r): - r.script_flush() - - def test_eval(self, r): - r.set('a', 2) - # 2 * 3 == 6 - assert r.eval(multiply_script, 1, 'a', 3) == 6 - - def test_evalsha(self, r): - r.set('a', 2) - sha = r.script_load(multiply_script) - # 2 * 3 == 6 - assert r.evalsha(sha, 1, 'a', 3) == 6 - - def test_evalsha_script_not_loaded(self, r): - r.set('a', 2) - sha = r.script_load(multiply_script) - # remove the script from Redis's cache - r.script_flush() - with pytest.raises(exceptions.NoScriptError): - r.evalsha(sha, 1, 'a', 3) - - def test_script_loading(self, r): - # get the sha, then clear the cache - sha = r.script_load(multiply_script) - r.script_flush() - assert r.script_exists(sha) == [False] - r.script_load(multiply_script) - assert r.script_exists(sha) == [True] - - def test_script_object(self, r): - r.set('a', 2) - multiply = r.register_script(multiply_script) - assert not multiply.sha - # test evalsha fail -> script load + retry - assert multiply(keys=['a'], args=[3]) == 6 - assert multiply.sha - assert r.script_exists(multiply.sha) == [True] - # test first evalsha - assert multiply(keys=['a'], args=[3]) == 6 - - def test_script_object_in_pipeline(self, r): - multiply = r.register_script(multiply_script) - assert not multiply.sha - pipe = r.pipeline() - pipe.set('a', 2) - pipe.get('a') - multiply(keys=['a'], args=[3], client=pipe) - # even though the pipeline wasn't executed yet, we made sure the - # script was loaded and got a valid sha - assert multiply.sha - assert r.script_exists(multiply.sha) == [True] - # [SET worked, GET 'a', result of 
multiple script] - assert pipe.execute() == [True, b('2'), 6] - - # purge the script from redis's cache and re-run the pipeline - # the multiply script object knows it's sha, so it shouldn't get - # reloaded until pipe.execute() - r.script_flush() - pipe = r.pipeline() - pipe.set('a', 2) - pipe.get('a') - assert multiply.sha - multiply(keys=['a'], args=[3], client=pipe) - assert r.script_exists(multiply.sha) == [False] - # [SET worked, GET 'a', result of multiple script] - assert pipe.execute() == [True, b('2'), 6] diff --git a/awx/lib/site-packages/redis/tests/test_sentinel.py b/awx/lib/site-packages/redis/tests/test_sentinel.py deleted file mode 100644 index 0a6e98b273..0000000000 --- a/awx/lib/site-packages/redis/tests/test_sentinel.py +++ /dev/null @@ -1,173 +0,0 @@ -from __future__ import with_statement -import pytest - -from redis import exceptions -from redis.sentinel import (Sentinel, SentinelConnectionPool, - MasterNotFoundError, SlaveNotFoundError) -from redis._compat import next -import redis.sentinel - - -class SentinelTestClient(object): - def __init__(self, cluster, id): - self.cluster = cluster - self.id = id - - def sentinel_masters(self): - self.cluster.connection_error_if_down(self) - return {self.cluster.service_name: self.cluster.master} - - def sentinel_slaves(self, master_name): - self.cluster.connection_error_if_down(self) - if master_name != self.cluster.service_name: - return [] - return self.cluster.slaves - - -class SentinelTestCluster(object): - def __init__(self, service_name='mymaster', ip='127.0.0.1', port=6379): - self.clients = {} - self.master = { - 'ip': ip, - 'port': port, - 'is_master': True, - 'is_sdown': False, - 'is_odown': False, - 'num-other-sentinels': 0, - } - self.service_name = service_name - self.slaves = [] - self.nodes_down = set() - - def connection_error_if_down(self, node): - if node.id in self.nodes_down: - raise exceptions.ConnectionError - - def client(self, host, port, **kwargs): - return 
SentinelTestClient(self, (host, port)) - - -@pytest.fixture() -def cluster(request): - def teardown(): - redis.sentinel.StrictRedis = saved_StrictRedis - cluster = SentinelTestCluster() - saved_StrictRedis = redis.sentinel.StrictRedis - redis.sentinel.StrictRedis = cluster.client - request.addfinalizer(teardown) - return cluster - - -@pytest.fixture() -def sentinel(request, cluster): - return Sentinel([('foo', 26379), ('bar', 26379)]) - - -def test_discover_master(sentinel): - address = sentinel.discover_master('mymaster') - assert address == ('127.0.0.1', 6379) - - -def test_discover_master_error(sentinel): - with pytest.raises(MasterNotFoundError): - sentinel.discover_master('xxx') - - -def test_discover_master_sentinel_down(cluster, sentinel): - # Put first sentinel 'foo' down - cluster.nodes_down.add(('foo', 26379)) - address = sentinel.discover_master('mymaster') - assert address == ('127.0.0.1', 6379) - # 'bar' is now first sentinel - assert sentinel.sentinels[0].id == ('bar', 26379) - - -def test_master_min_other_sentinels(cluster): - sentinel = Sentinel([('foo', 26379)], min_other_sentinels=1) - # min_other_sentinels - with pytest.raises(MasterNotFoundError): - sentinel.discover_master('mymaster') - cluster.master['num-other-sentinels'] = 2 - address = sentinel.discover_master('mymaster') - assert address == ('127.0.0.1', 6379) - - -def test_master_odown(cluster, sentinel): - cluster.master['is_odown'] = True - with pytest.raises(MasterNotFoundError): - sentinel.discover_master('mymaster') - - -def test_master_sdown(cluster, sentinel): - cluster.master['is_sdown'] = True - with pytest.raises(MasterNotFoundError): - sentinel.discover_master('mymaster') - - -def test_discover_slaves(cluster, sentinel): - assert sentinel.discover_slaves('mymaster') == [] - - cluster.slaves = [ - {'ip': 'slave0', 'port': 1234, 'is_odown': False, 'is_sdown': False}, - {'ip': 'slave1', 'port': 1234, 'is_odown': False, 'is_sdown': False}, - ] - assert 
sentinel.discover_slaves('mymaster') == [ - ('slave0', 1234), ('slave1', 1234)] - - # slave0 -> ODOWN - cluster.slaves[0]['is_odown'] = True - assert sentinel.discover_slaves('mymaster') == [ - ('slave1', 1234)] - - # slave1 -> SDOWN - cluster.slaves[1]['is_sdown'] = True - assert sentinel.discover_slaves('mymaster') == [] - - cluster.slaves[0]['is_odown'] = False - cluster.slaves[1]['is_sdown'] = False - - # node0 -> DOWN - cluster.nodes_down.add(('foo', 26379)) - assert sentinel.discover_slaves('mymaster') == [ - ('slave0', 1234), ('slave1', 1234)] - - -def test_master_for(cluster, sentinel): - master = sentinel.master_for('mymaster', db=9) - assert master.ping() - assert master.connection_pool.master_address == ('127.0.0.1', 6379) - - # Use internal connection check - master = sentinel.master_for('mymaster', db=9, check_connection=True) - assert master.ping() - - -def test_slave_for(cluster, sentinel): - cluster.slaves = [ - {'ip': '127.0.0.1', 'port': 6379, - 'is_odown': False, 'is_sdown': False}, - ] - slave = sentinel.slave_for('mymaster', db=9) - assert slave.ping() - - -def test_slave_for_slave_not_found_error(cluster, sentinel): - cluster.master['is_odown'] = True - slave = sentinel.slave_for('mymaster', db=9) - with pytest.raises(SlaveNotFoundError): - slave.ping() - - -def test_slave_round_robin(cluster, sentinel): - cluster.slaves = [ - {'ip': 'slave0', 'port': 6379, 'is_odown': False, 'is_sdown': False}, - {'ip': 'slave1', 'port': 6379, 'is_odown': False, 'is_sdown': False}, - ] - pool = SentinelConnectionPool('mymaster', sentinel) - rotator = pool.rotate_slaves() - assert next(rotator) in (('slave0', 6379), ('slave1', 6379)) - assert next(rotator) in (('slave0', 6379), ('slave1', 6379)) - # Fallback to master - assert next(rotator) == ('127.0.0.1', 6379) - with pytest.raises(SlaveNotFoundError): - next(rotator) diff --git a/awx/lib/site-packages/redis/redis/utils.py b/awx/lib/site-packages/redis/utils.py similarity index 100% rename from 
awx/lib/site-packages/redis/redis/utils.py rename to awx/lib/site-packages/redis/utils.py