From f1c860416a743e7f1180ee430660d3da7e736116 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Fri, 9 Dec 2016 11:54:17 +0100 Subject: [PATCH 001/263] Remove print statement from node manager class that cause alot of unwanted log messages --- docs/release-notes.rst | 5 +++++ rediscluster/connection.py | 2 -- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/docs/release-notes.rst b/docs/release-notes.rst index 9bf53d9e..f52b66b8 100644 --- a/docs/release-notes.rst +++ b/docs/release-notes.rst @@ -1,6 +1,11 @@ Release Notes ============= +Next release (??? ??, 2016) +--------------------------- + + * Remove print statement that was faulty commited into release 1.3.2 that case logs to fill up with unwanted data. + 1.3.2 (Nov 27, 2016) -------------------- diff --git a/rediscluster/connection.py b/rediscluster/connection.py index da21c97b..f105ef23 100644 --- a/rediscluster/connection.py +++ b/rediscluster/connection.py @@ -89,8 +89,6 @@ def __init__(self, startup_nodes=None, init_slot_cache=True, connection_class=Cl 'port': str(connection_kwargs.pop('port')), }] - print(startup_nodes) - self.max_connections = max_connections or 2 ** 31 self.max_connections_per_node = max_connections_per_node From da7315713deedff84d5d50fcf98fe11977f3364f Mon Sep 17 00:00:00 2001 From: AngusP Date: Wed, 25 Jan 2017 20:33:50 +0000 Subject: [PATCH 002/263] Add PUBSUB * commands to `random' node allocation PUBSUB commands added to redis-py in 3f4ac6, which according to Redis docs (https://redis.io/commands/pubsub, https://redis.io/topics/pubsub) can be sent to any node --- rediscluster/client.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/rediscluster/client.py b/rediscluster/client.py index a4e388d5..d0d814f1 100644 --- a/rediscluster/client.py +++ b/rediscluster/client.py @@ -56,7 +56,8 @@ class StrictRedisCluster(StrictRedis): "FLUSHALL", "FLUSHDB", "SCRIPT LOAD", "SCRIPT FLUSH", "SCRIPT EXISTS", "SCAN", ], 'all-masters'), string_keys_to_dict([ - "RANDOMKEY", "CLUSTER NODES", 'CLUSTER SLOTS', + "RANDOMKEY", "CLUSTER NODES", "CLUSTER SLOTS", "PUBSUB CHANNELS", + "PUBSUB NUMSUB", "PUBSUB NUMPAT", ], 'random'), string_keys_to_dict([ "CLUSTER COUNTKEYSINSLOT", From a4653061e7e2fd926fd5941b30ff597f68d6cbf9 Mon Sep 17 00:00:00 2001 From: AngusP Date: Wed, 25 Jan 2017 21:09:17 +0000 Subject: [PATCH 003/263] (fix) Send PUBSUB * commands to _all nodes_ instead of a random one --- rediscluster/client.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/rediscluster/client.py b/rediscluster/client.py index d0d814f1..266843a1 100644 --- a/rediscluster/client.py +++ b/rediscluster/client.py @@ -50,14 +50,14 @@ class StrictRedisCluster(StrictRedis): "ECHO", "CONFIG GET", "CONFIG SET", "SLOWLOG GET", "CLIENT KILL", "INFO", "BGREWRITEAOF", "BGSAVE", "CLIENT LIST", "CLIENT GETNAME", "CONFIG RESETSTAT", "CONFIG REWRITE", "DBSIZE", "LASTSAVE", "PING", "SAVE", "SLOWLOG LEN", "SLOWLOG RESET", - "TIME", "KEYS", "CLUSTER INFO", + "TIME", "KEYS", "CLUSTER INFO", "PUBSUB CHANNELS", + "PUBSUB NUMSUB", "PUBSUB NUMPAT", ], 'all-nodes'), string_keys_to_dict([ "FLUSHALL", "FLUSHDB", "SCRIPT LOAD", "SCRIPT FLUSH", "SCRIPT EXISTS", "SCAN", ], 'all-masters'), string_keys_to_dict([ - "RANDOMKEY", "CLUSTER NODES", "CLUSTER SLOTS", "PUBSUB CHANNELS", - "PUBSUB NUMSUB", "PUBSUB NUMPAT", + "RANDOMKEY", "CLUSTER NODES", "CLUSTER SLOTS", ], 'random'), string_keys_to_dict([ "CLUSTER COUNTKEYSINSLOT", @@ -71,6 +71,7 @@ class StrictRedisCluster(StrictRedis): "CONFIG REWRITE", "DBSIZE", "LASTSAVE", 
"PING", "SAVE", "SLOWLOG LEN", "SLOWLOG RESET", "TIME", "SCAN", "CLUSTER INFO", 'CLUSTER ADDSLOTS', 'CLUSTER COUNT-FAILURE-REPORTS', 'CLUSTER DELSLOTS', 'CLUSTER FAILOVER', 'CLUSTER FORGET', "FLUSHALL", "FLUSHDB", + "PUBSUB CHANNELS", "PUBSUB NUMSUB", "PUBSUB NUMPAT", ], lambda command, res: res), string_keys_to_dict([ "SCRIPT LOAD", From 3954a00a2e1eae5839da99ce07fe907fa2d91a46 Mon Sep 17 00:00:00 2001 From: AngusP Date: Sat, 4 Feb 2017 15:59:26 +0000 Subject: [PATCH 004/263] Switch to emulating `redis-py` behaviour by default Added parsing callbacks for all three `PUBSUB` sub-commands, which give these commands the same behaviour as in single-node Redis & `redis-py`. **Note** that the switches on each method to *not* aggregate responses into one appear to not be bubbling through to the callback. --- docs/authors.rst | 1 + rediscluster/client.py | 39 +++++++++++++++++++++++++++++++++++++-- rediscluster/utils.py | 42 ++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 80 insertions(+), 2 deletions(-) diff --git a/docs/authors.rst b/docs/authors.rst index 35087e31..bd99c6c4 100644 --- a/docs/authors.rst +++ b/docs/authors.rst @@ -21,3 +21,4 @@ Authors who contributed code or testing: - baranbartu - https://github.com/baranbartu - monklof - https://github.com/monklof - dutradda - https://github.com/dutradda + - AngusP - https://github.com/AngusP diff --git a/rediscluster/client.py b/rediscluster/client.py index 266843a1..60c240d5 100644 --- a/rediscluster/client.py +++ b/rediscluster/client.py @@ -23,6 +23,9 @@ clusterdown_wrapper, parse_cluster_slots, parse_cluster_nodes, + parse_pubsub_channels, + parse_pubsub_numsub, + parse_pubsub_numpat, ) # 3rd party imports from redis import StrictRedis @@ -71,7 +74,6 @@ class StrictRedisCluster(StrictRedis): "CONFIG REWRITE", "DBSIZE", "LASTSAVE", "PING", "SAVE", "SLOWLOG LEN", "SLOWLOG RESET", "TIME", "SCAN", "CLUSTER INFO", 'CLUSTER ADDSLOTS', 'CLUSTER COUNT-FAILURE-REPORTS', 'CLUSTER DELSLOTS', 'CLUSTER FAILOVER', 'CLUSTER FORGET', "FLUSHALL", "FLUSHDB", - "PUBSUB CHANNELS", "PUBSUB NUMSUB", "PUBSUB NUMPAT", ], lambda command, res: res), string_keys_to_dict([ "SCRIPT LOAD", @@ -88,6 +90,11 @@ class StrictRedisCluster(StrictRedis): string_keys_to_dict([ "SSCAN", "HSCAN", "ZSCAN", "RANDOMKEY", ], first_key), + { + "PUBSUB CHANNELS" : parse_pubsub_channels, + "PUBSUB NUMSUB" : parse_pubsub_numsub, + "PUBSUB NUMPAT" : parse_pubsub_numpat, + }, ) CLUSTER_COMMANDS_RESPONSE_CALLBACKS = { @@ -738,6 +745,34 @@ def renamenx(self, src, dst): return False + + def pubsub_channels(self, pattern='*', aggregate=True): + """ + Return a list of channels that have at least one subscriber. + Aggregate toggles merging of response. + """ + options = { 'aggregate': aggregate } + return self.execute_command('PUBSUB CHANNELS', pattern, **options) + + + def pubsub_numpat(self, aggregate=True): + """ + Returns the number of subscriptions to patterns. + Aggregate toggles merging of response. + """ + options = { 'aggregate': aggregate } + return self.execute_command('PUBSUB NUMPAT', **options) + + + def pubsub_numsub(self, *args, aggregate=True): + """ + Return a list of (channel, number of subscribers) tuples + for each channel given in ``*args``. + Aggregate toggles merging of response. 
+ """ + options = { 'aggregate': aggregate } + return self.execute_command('PUBSUB NUMSUB', *args, **options) + #### # List commands @@ -1106,7 +1141,7 @@ def _random_good_hashslot_key(self, hashslot): Generate a good random key with a low probability of collision between any other key. """ # TODO: Check if the key exists or not. continue to randomize until a empty key is found - random_id = "{{0}}{1}".format(hashslot, self._random_id()) + random_id = "{{{0}}}{1}".format(hashslot, self._random_id()) return random_id def _random_id(self, size=16, chars=string.ascii_uppercase + string.digits): diff --git a/rediscluster/utils.py b/rediscluster/utils.py index f5475018..a4bfcba8 100644 --- a/rediscluster/utils.py +++ b/rediscluster/utils.py @@ -203,3 +203,45 @@ def parse_slots(s): nodes.append(node) return nodes + + +def parse_pubsub_channels(command, resp, **options): + aggregate = options.get('aggregate', True) + if not aggregate: + return resp + + nodes = resp.keys() + channels = set() + for node in nodes: + channels.update(resp[node]) + return list(channels) + + +def parse_pubsub_numpat(command, resp, **options): + aggregate = options.get('aggregate', True) + if not aggregate: + return resp + + numpat = 0 + for node, node_numpat in resp.items(): + numpat += node_numpat + return numpat + + +def parse_pubsub_numsub(command, resp, **options): + aggregate = options.get('aggregate', True) + if not aggregate: + return resp + + numsub_d = dict() + for _, numsub_tups in resp.items(): + for channel, numsubbed in numsub_tups: + try: + numsub_d[channel] += numsubbed + except KeyError: + numsub_d[channel] = numsubbed + + ret_numsub = [] + for channel, numsub in numsub_d.items(): + ret_numsub.append((channel, numsub)) + return ret_numsub From 3f30849c3cb657477667d3be27b577571dd3a9d2 Mon Sep 17 00:00:00 2001 From: AngusP Date: Sat, 4 Feb 2017 17:25:54 +0000 Subject: [PATCH 005/263] Add tests for pubsub_ methods, return sorted list from pubsub_channels --- rediscluster/utils.py | 2 +- tests/test_pubsub.py | 31 ++++++++++++++++++++++++++++++- 2 files changed, 31 insertions(+), 2 deletions(-) diff --git a/rediscluster/utils.py b/rediscluster/utils.py index a4bfcba8..e88d7e9a 100644 --- a/rediscluster/utils.py +++ b/rediscluster/utils.py @@ -214,7 +214,7 @@ def parse_pubsub_channels(command, resp, **options): channels = set() for node in nodes: channels.update(resp[node]) - return list(channels) + return sorted(list(channels)) def parse_pubsub_numpat(command, resp, **options): diff --git a/tests/test_pubsub.py b/tests/test_pubsub.py index d0d6012a..f76901f7 100644 --- a/tests/test_pubsub.py +++ b/tests/test_pubsub.py @@ -14,8 +14,9 @@ # import redis from redis import StrictRedis, Redis from redis.exceptions import ConnectionError -from redis._compat import basestring, u, unichr +from redis._compat import basestring, u, unichr, b +from .conftest import skip_if_server_version_lt def wait_for_message(pubsub, timeout=0.5, ignore_subscribe_messages=False): now = time.time() @@ -474,3 +475,31 @@ def t_run(rc): t.start() except Exception: print("Error: unable to start thread") + + +class TestPubSubPubSubSubcommands(object): + + @skip_if_server_version_lt('2.8.0') + def test_pubsub_channels(self, r): + p = r.pubsub(ignore_subscribe_messages=True) + p.subscribe('foo', 'bar', 'baz', 'quux') + channels = sorted(r.pubsub_channels()) + assert channels == [b('bar'), b('baz'), b('foo'), b('quux')] + + @skip_if_server_version_lt('2.8.0') + def test_pubsub_numsub(self, r): + p1 = r.pubsub(ignore_subscribe_messages=True) + 
p1.subscribe('foo', 'bar', 'baz') + p2 = r.pubsub(ignore_subscribe_messages=True) + p2.subscribe('bar', 'baz') + p3 = r.pubsub(ignore_subscribe_messages=True) + p3.subscribe('baz') + + channels = [(b('foo'), 1), (b('bar'), 2), (b('baz'), 3)] + assert channels == r.pubsub_numsub('foo', 'bar', 'baz') + + @skip_if_server_version_lt('2.8.0') + def test_pubsub_numpat(self, r): + p = r.pubsub(ignore_subscribe_messages=True) + p.psubscribe('*oo', '*ar', 'b*z') + assert r.pubsub_numpat() == 3 From b6713d757d703117bd77b307c81d80d910b3789e Mon Sep 17 00:00:00 2001 From: AngusP Date: Sun, 5 Feb 2017 15:27:19 +0000 Subject: [PATCH 006/263] Provide ``aggregate`` keyword arg to toggle merging of results for PUBSUB commands ``**kwargs`` is not bubbled through to all result callbacks to enable the toggle to reach the PUBSUB parsing methods in `utils.py`. This adds `**kwargs` to the `StrictRedisCluster._merge_result` method's signature. --- rediscluster/client.py | 27 +++++++++++++-------------- rediscluster/utils.py | 36 ++++++++++++++++++++++-------------- 2 files changed, 35 insertions(+), 28 deletions(-) diff --git a/rediscluster/client.py b/rediscluster/client.py index 60c240d5..426422f5 100644 --- a/rediscluster/client.py +++ b/rediscluster/client.py @@ -91,10 +91,10 @@ class StrictRedisCluster(StrictRedis): "SSCAN", "HSCAN", "ZSCAN", "RANDOMKEY", ], first_key), { - "PUBSUB CHANNELS" : parse_pubsub_channels, - "PUBSUB NUMSUB" : parse_pubsub_numsub, - "PUBSUB NUMPAT" : parse_pubsub_numpat, - }, + "PUBSUB CHANNELS": parse_pubsub_channels, + "PUBSUB NUMSUB": parse_pubsub_numsub, + "PUBSUB NUMPAT": parse_pubsub_numpat, + } ) CLUSTER_COMMANDS_RESPONSE_CALLBACKS = { @@ -262,12 +262,12 @@ def _determine_slot(self, *args): return self.connection_pool.nodes.keyslot(key) - def _merge_result(self, command, res): + def _merge_result(self, command, res, **kwargs): """ `res` is a dict with the following structure Dict(NodeName, CommandResult) """ if command in self.result_callbacks: - return self.result_callbacks[command](command, res) + return self.result_callbacks[command](command, res, **kwargs) # Default way to handle result return first_key(command, res) @@ -399,7 +399,7 @@ def _execute_command_on_nodes(self, nodes, *args, **kwargs): finally: self.connection_pool.release(connection) - return self._merge_result(command, res) + return self._merge_result(command, res, **kwargs) ########## # Cluster management commands @@ -751,8 +751,7 @@ def pubsub_channels(self, pattern='*', aggregate=True): Return a list of channels that have at least one subscriber. Aggregate toggles merging of response. """ - options = { 'aggregate': aggregate } - return self.execute_command('PUBSUB CHANNELS', pattern, **options) + return self.execute_command('PUBSUB CHANNELS', pattern, aggregate=aggregate) def pubsub_numpat(self, aggregate=True): @@ -760,17 +759,17 @@ def pubsub_numpat(self, aggregate=True): Returns the number of subscriptions to patterns. Aggregate toggles merging of response. """ - options = { 'aggregate': aggregate } - return self.execute_command('PUBSUB NUMPAT', **options) + return self.execute_command('PUBSUB NUMPAT', aggregate=aggregate) - def pubsub_numsub(self, *args, aggregate=True): + def pubsub_numsub(self, *args, **kwargs): """ Return a list of (channel, number of subscribers) tuples for each channel given in ``*args``. - Aggregate toggles merging of response. + + ``aggregate`` keyword argument toggles merging of response. 
""" - options = { 'aggregate': aggregate } + options = { 'aggregate': kwargs.get('aggregate', True) } return self.execute_command('PUBSUB NUMSUB', *args, **options) #### diff --git a/rediscluster/utils.py b/rediscluster/utils.py index e88d7e9a..9cf5f26b 100644 --- a/rediscluster/utils.py +++ b/rediscluster/utils.py @@ -205,36 +205,43 @@ def parse_slots(s): return nodes -def parse_pubsub_channels(command, resp, **options): +def parse_pubsub_channels(command, res, **options): + """ + Result callback, handles different return types + switchable by the `aggregate` flag. + """ aggregate = options.get('aggregate', True) if not aggregate: - return resp + return res + return merge_result(command, res) - nodes = resp.keys() - channels = set() - for node in nodes: - channels.update(resp[node]) - return sorted(list(channels)) - -def parse_pubsub_numpat(command, resp, **options): +def parse_pubsub_numpat(command, res, **options): + """ + Result callback, handles different return types + switchable by the `aggregate` flag. + """ aggregate = options.get('aggregate', True) if not aggregate: - return resp + return res numpat = 0 - for node, node_numpat in resp.items(): + for node, node_numpat in res.items(): numpat += node_numpat return numpat -def parse_pubsub_numsub(command, resp, **options): +def parse_pubsub_numsub(command, res, **options): + """ + Result callback, handles different return types + switchable by the `aggregate` flag. + """ aggregate = options.get('aggregate', True) if not aggregate: - return resp + return res numsub_d = dict() - for _, numsub_tups in resp.items(): + for _, numsub_tups in res.items(): for channel, numsubbed in numsub_tups: try: numsub_d[channel] += numsubbed @@ -245,3 +252,4 @@ def parse_pubsub_numsub(command, resp, **options): for channel, numsub in numsub_d.items(): ret_numsub.append((channel, numsub)) return ret_numsub + From c12b23dc0aedcf12efea8ba9fe3d280825d3e2b4 Mon Sep 17 00:00:00 2001 From: AngusP Date: Sun, 5 Feb 2017 15:32:45 +0000 Subject: [PATCH 007/263] Test `TestPubSubPubSubSubCommands` in `test_pubsub` poorly defined Now tests on ordered list --- tests/test_pubsub.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/test_pubsub.py b/tests/test_pubsub.py index f76901f7..fd7effa8 100644 --- a/tests/test_pubsub.py +++ b/tests/test_pubsub.py @@ -495,8 +495,8 @@ def test_pubsub_numsub(self, r): p3 = r.pubsub(ignore_subscribe_messages=True) p3.subscribe('baz') - channels = [(b('foo'), 1), (b('bar'), 2), (b('baz'), 3)] - assert channels == r.pubsub_numsub('foo', 'bar', 'baz') + channels = [(b('bar'), 2), (b('baz'), 3), (b('foo'), 1)] + assert channels == sorted(r.pubsub_numsub('foo', 'bar', 'baz')) @skip_if_server_version_lt('2.8.0') def test_pubsub_numpat(self, r): From 3b296d80056a4c20fdb332d7ffe8f6f4da23a85d Mon Sep 17 00:00:00 2001 From: AngusP Date: Sun, 5 Feb 2017 17:43:52 +0000 Subject: [PATCH 008/263] Code Review Changes * Using `string_keys_to_dict` for all RESULT_CALLBACKS * Simplified `subscribe` calls in test cases to remove unnecessary variable declarations * Remove change to `_random_good_hashslot_key` which was not related to PR * Add `set_result_callback(command, callback)` to match Redis-Py feature `set_response_callback` * Skipping redis-py versions below `2.10.6` for `PUBSUB` tests * Add docstring to `TestPubSubPubSubSubcommands` class * Adding docs on pubsub subcommands, and to release notes. 
--- docs/commands.rst | 6 ++++++ docs/release-notes.rst | 6 ++++++ rediscluster/client.py | 20 ++++++++++++++------ tests/test_pubsub.py | 26 +++++++++++++------------- 4 files changed, 39 insertions(+), 19 deletions(-) diff --git a/docs/commands.rst b/docs/commands.rst index c906054d..57332057 100644 --- a/docs/commands.rst +++ b/docs/commands.rst @@ -33,6 +33,12 @@ The following commands will send the same request to all nodes in the cluster. R - slowlog_reset - time +The pubsub commands are sent to all nodes, and the resulting replies are merged together. They have an optional keyword argument `aggregate` which when set to `False` will return a dict with k,v pair (NodeID, Result) instead of the merged result. + + - pubsub_channels + - pubsub_numsub + - pubsub_numpat + This command will send the same request to all nodes in the cluster in sequence. Results is appended to a unified list. - keys diff --git a/docs/release-notes.rst b/docs/release-notes.rst index 8cd18b29..7af8862b 100644 --- a/docs/release-notes.rst +++ b/docs/release-notes.rst @@ -1,6 +1,12 @@ Release Notes ============= +Unstable +-------- + + * Add support for `PUBSUB` subcommands `CHANNELS`, `NUMSUB [arg] [args...]` and `NUMPAT`. + * Add method `set_result_callback(command, callback)` allowing the default reply callbacks to be changed, in the same way `set_response_callback(command, callback)` inherited from Redis-Py does for responses. + 1.3.3 (Dec 15, 2016) -------------------- diff --git a/rediscluster/client.py b/rediscluster/client.py index 426422f5..ccc2e134 100644 --- a/rediscluster/client.py +++ b/rediscluster/client.py @@ -90,11 +90,15 @@ class StrictRedisCluster(StrictRedis): string_keys_to_dict([ "SSCAN", "HSCAN", "ZSCAN", "RANDOMKEY", ], first_key), - { - "PUBSUB CHANNELS": parse_pubsub_channels, - "PUBSUB NUMSUB": parse_pubsub_numsub, - "PUBSUB NUMPAT": parse_pubsub_numpat, - } + string_keys_to_dict([ + "PUBSUB CHANNELS", + ], parse_pubsub_channels), + string_keys_to_dict([ + "PUBSUB NUMSUB", + ], parse_pubsub_numsub), + string_keys_to_dict([ + "PUBSUB NUMPAT", + ], parse_pubsub_numpat), ) CLUSTER_COMMANDS_RESPONSE_CALLBACKS = { @@ -211,6 +215,10 @@ def __repr__(self): servers.sort() return "{0}<{1}>".format(type(self).__name__, ', '.join(servers)) + def set_result_callback(self, command, callback): + "Set a custom Result Callback" + self.result_callbacks[command] = callback + def pubsub(self, **kwargs): """ """ @@ -1140,7 +1148,7 @@ def _random_good_hashslot_key(self, hashslot): Generate a good random key with a low probability of collision between any other key. """ # TODO: Check if the key exists or not. 
continue to randomize until a empty key is found - random_id = "{{{0}}}{1}".format(hashslot, self._random_id()) + random_id = "{{0}}{1}".format(hashslot, self._random_id()) return random_id def _random_id(self, size=16, chars=string.ascii_uppercase + string.digits): diff --git a/tests/test_pubsub.py b/tests/test_pubsub.py index fd7effa8..6c90bfc4 100644 --- a/tests/test_pubsub.py +++ b/tests/test_pubsub.py @@ -478,28 +478,28 @@ def t_run(rc): class TestPubSubPubSubSubcommands(object): + """ + Test Pub/Sub subcommands of PUBSUB + @see https://redis.io/commands/pubsub + """ - @skip_if_server_version_lt('2.8.0') + @skip_if_redis_py_version_lt('2.10.6') def test_pubsub_channels(self, r): - p = r.pubsub(ignore_subscribe_messages=True) - p.subscribe('foo', 'bar', 'baz', 'quux') + r.pubsub(ignore_subscribe_messages=True).subscribe('foo', 'bar', 'baz', 'quux') channels = sorted(r.pubsub_channels()) assert channels == [b('bar'), b('baz'), b('foo'), b('quux')] - @skip_if_server_version_lt('2.8.0') + @skip_if_redis_py_version_lt('2.10.6') def test_pubsub_numsub(self, r): - p1 = r.pubsub(ignore_subscribe_messages=True) - p1.subscribe('foo', 'bar', 'baz') - p2 = r.pubsub(ignore_subscribe_messages=True) - p2.subscribe('bar', 'baz') - p3 = r.pubsub(ignore_subscribe_messages=True) - p3.subscribe('baz') + r.pubsub(ignore_subscribe_messages=True).subscribe('foo', 'bar', 'baz') + r.pubsub(ignore_subscribe_messages=True).subscribe('bar', 'baz') + r.pubsub(ignore_subscribe_messages=True).subscribe('baz') channels = [(b('bar'), 2), (b('baz'), 3), (b('foo'), 1)] assert channels == sorted(r.pubsub_numsub('foo', 'bar', 'baz')) - @skip_if_server_version_lt('2.8.0') + @skip_if_redis_py_version_lt('2.10.6') def test_pubsub_numpat(self, r): - p = r.pubsub(ignore_subscribe_messages=True) - p.psubscribe('*oo', '*ar', 'b*z') + r.pubsub(ignore_subscribe_messages=True).psubscribe('*oo', '*ar', 'b*z') assert r.pubsub_numpat() == 3 + From 5b6b9f33fe16df1f99162887c2fb264567f4f317 Mon Sep 17 00:00:00 2001 From: AngusP Date: Sun, 5 Feb 2017 17:47:46 +0000 Subject: [PATCH 009/263] Missed wrapper in `test_pubsub.py` --- tests/test_pubsub.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_pubsub.py b/tests/test_pubsub.py index 6c90bfc4..686b55a3 100644 --- a/tests/test_pubsub.py +++ b/tests/test_pubsub.py @@ -16,7 +16,7 @@ from redis.exceptions import ConnectionError from redis._compat import basestring, u, unichr, b -from .conftest import skip_if_server_version_lt +from .conftest import skip_if_server_version_lt, skip_if_redis_py_version_lt def wait_for_message(pubsub, timeout=0.5, ignore_subscribe_messages=False): now = time.time() From 68bdc18828b537d196aa9b9538e85e0935725837 Mon Sep 17 00:00:00 2001 From: "Ernest W. Durbin III" Date: Tue, 5 Sep 2017 11:07:50 -0400 Subject: [PATCH 010/263] fix link in documentation --- docs/index.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/index.rst b/docs/index.rst index b90dc393..04c460d9 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -11,7 +11,7 @@ This project is a port of `redis-rb-cluster` by antirez, with alot of added func The source code is `available on github`_. -.. _available on github: http://github.com/grokzen/pykwalify +.. 
_available on github: http://github.com/grokzen/redis-py-cluster From 038ff1edc4d7297869f77f4f1574d7abd2cc5ba6 Mon Sep 17 00:00:00 2001 From: Mrn Om <31620258+mrnom@users.noreply.github.com> Date: Tue, 12 Dec 2017 12:30:30 +0200 Subject: [PATCH 011/263] Fix some typos --- docs/pipelines.rst | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/docs/pipelines.rst b/docs/pipelines.rst index 68686866..32c53067 100644 --- a/docs/pipelines.rst +++ b/docs/pipelines.rst @@ -44,23 +44,23 @@ Why can't we reuse the pipeline code in `redis-py`? In short it is almost the sa In the normal pipeline implementation in `redis-py` we can batch send all the commands and send them to the server at once, thus speeding up the code by not issuing many requests one after another. We can say that we have defined and guaranteed execution order becuase of this. -One problem that appears when you want to do pipelines in a cluster environment is that you can't have guaranteed execution order in the same way as a single server pipeline. The problem is that because you can queue an command to any key, we will end up in most of the cases having to talk to 2 or more nodes in the cluster to execute the pipeline. The problem with that is that there is no single place/node/way to send the pipeline and redis will sort everything out by itself via some internal mechanisms. Because of that when we build a pipeline for a cluster we have to build several smaller pipelines that we each send to the desegnated node in the cluster. +One problem that appears when you want to do pipelines in a cluster environment is that you can't have guaranteed execution order in the same way as a single server pipeline. The problem is that because you can queue a command to any key, we will end up in most of the cases having to talk to 2 or more nodes in the cluster to execute the pipeline. The problem with that is that there is no single place/node/way to send the pipeline and redis will sort everything out by itself via some internal mechanisms. Because of that when we build a pipeline for a cluster we have to build several smaller pipelines that we each send to the designated node in the cluster. -When the pipeline is executed in the client each key is checked to what slot it shold be sent to and the pipelines is built up based on that information. One thing to note here is that there will be partial correct execution order if you look over the entire cluster because for each pipeline the ordering will be correct. It can also be argued that the correct execution order is applied/valid for each slot in the cluster. +When the pipeline is executed in the client each key is checked to what slot it should be sent to and the pipeline is built up based on that information. One thing to note here is that there will be partial correct execution order if you look over the entire cluster because for each pipeline the ordering will be correct. It can also be argued that the correct execution order is applied/valid for each slot in the cluster. The next thing to take into consideration is what commands should be available and which should be blocked/locked. -In most cases and in almost all solutions multi key commands have to be blocked hard from beeing execute inside a pipeline. This would only be possible in the case you have a pipeline implementation that allways executes immeditally each command is queued up. 
That solution would only give the interface of working like a pipeline to ensure old code will still work, but it would not give any benefits or advantages other than all commands would work and old code would work. +In most cases and in almost all solutions multi key commands have to be blocked hard from being executed inside a pipeline. This would only be possible in the case you have a pipeline implementation that always executes immediately each command is queued up. That solution would only give the interface of working like a pipeline to ensure old code will still work, but it would not give any benefits or advantages other than all commands would work and old code would work. -In the solution for this lib multikey commands is blocked hard and will probably not be enabled in pipelines. If you really need to use them you need to execute them through the normal cluster client if they are implemented and works in there. Why can't multi key commands work? In short again it is because they keys can live in different slots on different nodes in the cluster. It is possible in theory to have any command work in a cluster, but only if the keys operated on belongs to the same cluster slot. This lib have decided that currently no serious support for that will be attempted. +In the solution for this lib multikey commands are blocked hard and will probably not be enabled in pipelines. If you really need to use them you need to execute them through the normal cluster client if they are implemented and work in there. Why can't multi key commands work? In short again it is because the keys can live in different slots on different nodes in the cluster. It is possible in theory to have any command work in a cluster, but only if the keys operated on belong to the same cluster slot. This lib have decided that currently no serious support for that will be attempted. Examples on commands that do not work is `MGET`, `MSET`, `MOVE`. One good thing that comes out of blocking multi key commands is that correct execution order is less of a problem and as long as it applies to each slot in the cluster we shold be fine. -Consider the following example. Create a pipeline and issue 6 commands `A`, `B`, `C`, `D`, `E`, `F` and then execute it. The pipeline is calculated and 2 sub pipelines is created with `A`, `C`, `D`, `F` in the first and `B`, `E` in the second. Both pipelines is then sent to each node in the cluster and a response is sent back. For the first node `[True, MovedException(12345), MovedException(12345), True]` and from the second node [`True`, `True`]. After this response is parsed we see that 2 commands in the first pipeline did not work and must be sent to another node. This case happens if the client slots cache is wrong because a slot was migrated to another node in the cluster. After parsing the response we then build a third pipeline object with commands [`C`, `D`] to the second node. The third object is executed and passes and from the client perspective the entire pipeline was executed. +Consider the following example. Create a pipeline and issue 6 commands `A`, `B`, `C`, `D`, `E`, `F` and then execute it. The pipeline is calculated and 2 sub pipelines is created with `A`, `C`, `D`, `F` in the first and `B`, `E` in the second. Both pipelines are then sent to each node in the cluster and a response is sent back. For the first node `[True, MovedException(12345), MovedException(12345), True]` and from the second node [`True`, `True`]. 
After this response is parsed we see that 2 commands in the first pipeline did not work and must be sent to another node. This case happens if the client slots cache is wrong because a slot was migrated to another node in the cluster. After parsing the response we then build a third pipeline object with commands [`C`, `D`] to the second node. The third object is executed and passes and from the client perspective the entire pipeline was executed. -If we look back at the order we executed the commands we get `[A, F]` for the first node and `[B, E, C, D]` for the second node. At first glance this looks like it is out of order because command `E` is executed before `C` & `D`. Why do this not matter? Because no multi key operations can be done in a pipeline we only have to care the execution order is correct for each slot and in this case it was because `B` & `E` belongs to the same slot and `C` & `D` belongs to the same slot. There should be no possible way to corrupt any data between slots if multi key commands is blocked by the code. +If we look back at the order we executed the commands we get `[A, F]` for the first node and `[B, E, C, D]` for the second node. At first glance this looks like it is out of order because command `E` is executed before `C` & `D`. Why is this not matter? Because no multi key operations can be done in a pipeline, we only have to care the execution order is correct for each slot and in this case it was because `B` & `E` belongs to the same slot and `C` & `D` belongs to the same slot. There should be no possible way to corrupt any data between slots if multi key commands are blocked by the code. What is good with this pipeline solution? First we can actually have a pipeline solution that will work in most cases with few commands blocked (only multi key commands). Secondly we can run it in parralel to increase the performance of the pipeline even further, making the benefits even greater. @@ -69,22 +69,22 @@ What is good with this pipeline solution? First we can actually have a pipeline Transactions and WATCH ---------------------- -Support for transactions and WATCH:es in pipelines. If we look on the entire pipeline across all nodes in the cluster there is no possible way to have a complete transaction across all nodes because if we need to issue commands to 3 servers, each server is handled by its own and there is no way to tell other nodes to abort a transaction if only one of the nodes fail but not the others. A possible solution for that could be to implement a 2 step commit process. The 2 steps would consist of building 2 batches of commands for each node where the first batch would consist of validating the state of each slot that the pipeline wants to operate on. If any of the slots is migrating or moved then the client can correct its slots cache and issue a more correct pipeline batch. The second step would be to issue the acctuall commands and the data would be commited to redis. The big problem with this is that 99% of the time this would work really well if you have a very stable cluster with no migrations/resharding/servers down. But there can be times where a slot has begun migration in between the 2 steps of the pipeline and that would cause a race condition where the client thinks it has corrected the pipeline and wants to commit the data but when it does it will still fail. +Support for transactions and WATCH:es in pipelines. 
If we look on the entire pipeline across all nodes in the cluster there is no possible way to have a complete transaction across all nodes because if we need to issue commands to 3 servers, each server is handled by its own and there is no way to tell other nodes to abort a transaction if only one of the nodes fail but not the others. A possible solution for that could be to implement a 2 step commit process. The 2 steps would consist of building 2 batches of commands for each node where the first batch would consist of validating the state of each slot that the pipeline wants to operate on. If any of the slots is migrating or moved then the client can correct its slots cache and issue a more correct pipeline batch. The second step would be to issue the actual commands and the data would be commited to redis. The big problem with this is that 99% of the time this would work really well if you have a very stable cluster with no migrations/resharding/servers down. But there can be times where a slot has begun migration in between the 2 steps of the pipeline and that would cause a race condition where the client thinks it has corrected the pipeline and wants to commit the data but when it does it will still fail. -Why `MULTI/EXEC` support won't work in a cluster environment. There is some test code in the second `MULTI/EXEC cluster test code` of this document that tests is `MULTI/EXEC` is possible to use in a cluster pipeline. The tests shows a huge problem when errors occus. If we wrap `MULTI/EXEC` in a packed set of commands then if a slot is migrating we will not get a good error we can parse and use. Currently it will only report `True` or `False` so we can narrow down what command failed but not why it failed. This might work really well if used on a non clustered node becuase it do not have to take care of `ASK` or `MOVED` errors. But for a cluster we need to know what cluster error occured so the correct action to fix the problem can be taken. Sinc there is more then 1 error to take care of it is not possible to take action based on just `True` or `False`. +Why `MULTI/EXEC` support won't work in a cluster environment. There is some test code in the second `MULTI/EXEC cluster test code` of this document that tests if `MULTI/EXEC` is possible to use in a cluster pipeline. The test shows a huge problem when errors occur. If we wrap `MULTI/EXEC` in a packed set of commands then if a slot is migrating we will not get a good error we can parse and use. Currently it will only report `True` or `False` so we can narrow down what command failed but not why it failed. This might work really well if used on a non clustered node becuase it does not have to take care of `ASK` or `MOVED` errors. But for a cluster we need to know what cluster error occured so the correct action to fix the problem can be taken. Since there is more then 1 error to take care of it is not possible to take action based on just `True` or `False`. -Because of this problem with error handling `MULTI/EXEC` is blocked hard in the code from beeing used in a pipeline because the current implementation can't handle the errors. +Because of this problem with error handling `MULTI/EXEC` is blocked hard in the code from being used in a pipeline because the current implementation can't handle the errors. 
-In theory it could be possible to design a pipeline implementation that can handle this case by trying to determined by itself what it should do with the error by either asking the cluster after a `False` value was found in the response about the current state of the slot or just default to `MOVED` error handling and hope for the best. The problem is that this is not 100% guaranteed to work and can easily cause problems when wrong action was taken on the response. +In theory it could be possible to design a pipeline implementation that can handle this case by trying to determine by itself what it should do with the error by either asking the cluster after a `False` value was found in the response about the current state of the slot or just default to `MOVED` error handling and hope for the best. The problem is that this is not 100% guaranteed to work and can easily cause problems when wrong action was taken on the response. -Currently `WATCH` requires more studying if it possible to use or not, but sinc it is tied into `MULTI/EXEC` pattern it probably will not be supported for now. +Currently `WATCH` requires more studying is it possible to use or not, but since it is tied into `MULTI/EXEC` pattern it probably will not be supported for now. MULTI/EXEC cluster test code ---------------------------- -This code do NOT wrap `MULTI/EXEC` around the commands when packed +This code does NOT wrap `MULTI/EXEC` around the commands when packed .. code-block:: python From 05d5c0f354242b0357d496d5085c023fd71dba84 Mon Sep 17 00:00:00 2001 From: leegle Date: Fri, 23 Mar 2018 17:18:09 +0800 Subject: [PATCH 012/263] Remove the magic value 'max_connections=32' --- rediscluster/client.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rediscluster/client.py b/rediscluster/client.py index 4d6b4657..a7f05101 100644 --- a/rediscluster/client.py +++ b/rediscluster/client.py @@ -125,7 +125,7 @@ class StrictRedisCluster(StrictRedis): 'READWRITE': bool_ok, } - def __init__(self, host=None, port=None, startup_nodes=None, max_connections=32, max_connections_per_node=False, init_slot_cache=True, + def __init__(self, host=None, port=None, startup_nodes=None, max_connections=None, max_connections_per_node=False, init_slot_cache=True, readonly_mode=False, reinitialize_steps=None, skip_full_coverage_check=False, nodemanager_follow_cluster=False, connection_class=None, **kwargs): """ From 79f993c01d2a49b3c401dc0789ae1ad8f1c215f8 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Sun, 20 May 2018 11:45:22 +0200 Subject: [PATCH 013/263] Bump requirements for redis to latest version 2.10.6 --- requirements.txt | 2 +- setup.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/requirements.txt b/requirements.txt index 0ed85c32..345a0b84 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1 +1 @@ -redis>=2.10.4 +redis>=2.10.6 diff --git a/setup.py b/setup.py index 05595b3a..525b9c57 100644 --- a/setup.py +++ b/setup.py @@ -31,7 +31,7 @@ url='http://github.com/grokzen/redis-py-cluster', license='MIT', install_requires=[ - 'redis>=2.10.2' + 'redis>=2.10.6' ], keywords=[ 'redis', From 5b0b1095b2bad6c102801562580cb6a37f48d7fc Mon Sep 17 00:00:00 2001 From: Artur Stawiarski Date: Sat, 2 Jun 2018 17:35:39 +0100 Subject: [PATCH 014/263] Make `from_url` method respect the `readonly_mode` parameter to use the correct `ConnectionPool` class --- docs/authors.rst | 1 + docs/release-notes.rst | 3 ++- docs/upgrading.rst | 7 ++++++- rediscluster/client.py | 9 +++++++-- tests/test_cluster_obj.py | 4 ++++ 5 
files changed, 20 insertions(+), 4 deletions(-) diff --git a/docs/authors.rst b/docs/authors.rst index 30ed940c..2b82204f 100644 --- a/docs/authors.rst +++ b/docs/authors.rst @@ -25,3 +25,4 @@ Authors who contributed code or testing: - Doug Kent - https://github.com/dkent - VascoVisser - https://github.com/VascoVisser - astrohsy - https://github.com/astrohsy + - Artur Stawiarski - https://github.com/astawiarski \ No newline at end of file diff --git a/docs/release-notes.rst b/docs/release-notes.rst index 719bc25c..ed4e444b 100644 --- a/docs/release-notes.rst +++ b/docs/release-notes.rst @@ -7,6 +7,7 @@ Future Release * Add Redis 4 compatability fix to CLUSTER NODES command (See issue #217) * Fixed bug with command "CLUSTER GETKEYSINSLOT" that was throwing exceptions * Added new methods cluster_get_keys_in_slot() to client + * Fixed bug with `StrictRedisCluster.from_url` that was ignoring the `readonly_mode` parameter 1.3.4 (Mar 5, 2017) ------------------- @@ -86,7 +87,7 @@ Future Release * Implement all "CLUSTER ..." commands as methods in the client class * Client now follows the service side setting 'cluster-require-full-coverage=yes/no' (baranbartu) * Change the pubsub implementation (PUBLISH/SUBSCRIBE commands) from using one single node to now determine the hashslot for the channel name and use that to connect to - a node in the cluster. Other clients that do not use this pattern will not be fully compatible with this client. Known limitations is pattern + a node in the cluster. Other clients that do not use this pattern will not be fully compatible with this client. Known limitations is pattern subscription that do not work properly because a pattern can't know all the possible channel names in advance. * Convert all docs to ReadTheDocs * Rework connection pool logic to be more similar to redis-py. This also fixes an issue with pubsub and that connections diff --git a/docs/upgrading.rst b/docs/upgrading.rst index 70096e29..66d20c7d 100644 --- a/docs/upgrading.rst +++ b/docs/upgrading.rst @@ -3,11 +3,16 @@ Upgrading redis-py-cluster This document describes what must be done when upgrading between different versions to ensure that code still works. +1.3.2 --> Next Release +---------------------- + +If you created the `StrictRedisCluster` (or `RedisCluster`) instance via the `from_url` method and were passing `readonly_mode` to it, the connection pool created will now properly allow selecting read-only slaves from the pool. Previously it always used master nodes only, even in the case of `readonly_mode=True`. Make sure your code don't attempt any write commands over connections with `readonly_mode=True`. + 1.3.1 --> 1.3.2 --------------- -If your redis instance is configured to not have the `CONFIG ...` comannds enabled due to security reasons you need to pass this into the client object `skip_full_coverage_check=True`. Benefits is that the client class no longer requires the `CONFIG ...` commands to be enabled on the server. Downsides is that you can't use the option in your redis server and still use the same feature in this client. +If your redis instance is configured to not have the `CONFIG ...` commands enabled due to security reasons you need to pass this into the client object `skip_full_coverage_check=True`. Benefits is that the client class no longer requires the `CONFIG ...` commands to be enabled on the server. Downsides is that you can't use the option in your redis server and still use the same feature in this client. 
diff --git a/rediscluster/client.py b/rediscluster/client.py index a7f05101..2331a8f3 100644 --- a/rediscluster/client.py +++ b/rediscluster/client.py @@ -192,7 +192,7 @@ def __init__(self, host=None, port=None, startup_nodes=None, max_connections=Non self.response_callbacks = dict_merge(self.response_callbacks, self.CLUSTER_COMMANDS_RESPONSE_CALLBACKS) @classmethod - def from_url(cls, url, db=None, skip_full_coverage_check=False, **kwargs): + def from_url(cls, url, db=None, skip_full_coverage_check=False, readonly_mode=False, **kwargs): """ Return a Redis client object configured from the given URL, which must use either `the ``redis://`` scheme @@ -212,7 +212,12 @@ def from_url(cls, url, db=None, skip_full_coverage_check=False, **kwargs): passed along to the ConnectionPool class's initializer. In the case of conflicting arguments, querystring arguments always win. """ - connection_pool = ClusterConnectionPool.from_url(url, db=db, skip_full_coverage_check=skip_full_coverage_check,**kwargs) + if readonly_mode: + connection_pool_cls = ClusterReadOnlyConnectionPool + else: + connection_pool_cls = ClusterConnectionPool + + connection_pool = connection_pool_cls.from_url(url, db=db, skip_full_coverage_check=skip_full_coverage_check, **kwargs) return cls(connection_pool=connection_pool, skip_full_coverage_check=skip_full_coverage_check) def __repr__(self): diff --git a/tests/test_cluster_obj.py b/tests/test_cluster_obj.py index c9b08084..8675df9b 100644 --- a/tests/test_cluster_obj.py +++ b/tests/test_cluster_obj.py @@ -446,3 +446,7 @@ def test_access_correct_slave_with_readonly_mode_client(sr): readonly_client = StrictRedisCluster(host="127.0.0.1", port=7000, readonly_mode=True) assert b('foo') == readonly_client.get('foo16706') assert return_master_mock.call_count == 0 + + readonly_client = StrictRedisCluster.from_url(url="redis://127.0.0.1:7000/0", readonly_mode=True) + assert b('foo') == readonly_client.get('foo16706') + assert return_master_mock.call_count == 0 From b12c3538b3b1c46e7ba7ce9f78c245a80ced5f7f Mon Sep 17 00:00:00 2001 From: Grokzen Date: Fri, 15 Jun 2018 00:40:05 +0200 Subject: [PATCH 015/263] Fixed a bug when using ssl=True would not set the correct SSLConnectionClass --- rediscluster/client.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/rediscluster/client.py b/rediscluster/client.py index 2331a8f3..80b7083d 100644 --- a/rediscluster/client.py +++ b/rediscluster/client.py @@ -7,7 +7,10 @@ import time # rediscluster imports -from .connection import ClusterConnectionPool, ClusterReadOnlyConnectionPool +from .connection import ( + ClusterConnectionPool, ClusterReadOnlyConnectionPool, + SSLClusterConnection, +) from .exceptions import ( RedisClusterException, AskError, MovedError, ClusterDownError, ClusterError, TryAgainError, @@ -157,6 +160,9 @@ def __init__(self, host=None, port=None, startup_nodes=None, max_connections=Non if "db" in kwargs: raise RedisClusterException("Argument 'db' is not possible to use in cluster mode") + if kwargs.get('ssl', False): + connection_class = SSLClusterConnection + if "connection_pool" in kwargs: pool = kwargs.pop('connection_pool') else: From 8f3a844e64299646802ffd61888aa6a94cb658c4 Mon Sep 17 00:00:00 2001 From: Artem Krylysov Date: Thu, 12 Jul 2018 14:29:36 -0400 Subject: [PATCH 016/263] Try another node on TimeoutError when sending CLUSTER SLOTS --- rediscluster/nodemanager.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/rediscluster/nodemanager.py b/rediscluster/nodemanager.py index 
0db8cc60..bd79bb4f 100644 --- a/rediscluster/nodemanager.py +++ b/rediscluster/nodemanager.py @@ -10,7 +10,7 @@ # 3rd party imports from redis import StrictRedis from redis._compat import b, unicode, bytes, long, basestring -from redis import ConnectionError +from redis import ConnectionError, TimeoutError class NodeManager(object): @@ -177,7 +177,7 @@ def initialize(self): r = self.get_redis_link(host=node["host"], port=node["port"], decode_responses=True) cluster_slots = r.execute_command("cluster", "slots") startup_nodes_reachable = True - except ConnectionError: + except (ConnectionError, TimeoutError): continue except Exception: raise RedisClusterException("ERROR sending 'cluster slots' command to redis server: {0}".format(node)) From f5af94746a4a8be563b07a320c0cba29602833c3 Mon Sep 17 00:00:00 2001 From: Ioseph Kim Date: Tue, 26 Jun 2018 00:27:11 +0900 Subject: [PATCH 017/263] set CLUSTER GETKEYSINSLOT command as slot-id to node_flags --- rediscluster/client.py | 1 + 1 file changed, 1 insertion(+) diff --git a/rediscluster/client.py b/rediscluster/client.py index 80b7083d..ec6cca65 100644 --- a/rediscluster/client.py +++ b/rediscluster/client.py @@ -67,6 +67,7 @@ class StrictRedisCluster(StrictRedis): ], 'random'), string_keys_to_dict([ "CLUSTER COUNTKEYSINSLOT", + "CLUSTER GETKEYSINSLOT", ], 'slot-id'), ) From 8bfe9c8cdeb89222a14b8f8deda32ae0b0b2b438 Mon Sep 17 00:00:00 2001 From: Matthew Anderson Date: Sat, 9 Jun 2018 14:08:16 -0700 Subject: [PATCH 018/263] Fix failing tests --- rediscluster/client.py | 2 +- tests/test_cluster_connection_pool.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/rediscluster/client.py b/rediscluster/client.py index ec6cca65..7a1708d2 100644 --- a/rediscluster/client.py +++ b/rediscluster/client.py @@ -349,7 +349,7 @@ def execute_command(self, *args, **kwargs): if asking: node = self.connection_pool.nodes.nodes[redirect_addr] - r = self.connection_pool.get_connection_by_node(node, command) + r = self.connection_pool.get_connection_by_node(node) elif try_random_node: r = self.connection_pool.get_random_connection() try_random_node = False diff --git a/tests/test_cluster_connection_pool.py b/tests/test_cluster_connection_pool.py index e5e752ec..45efe007 100644 --- a/tests/test_cluster_connection_pool.py +++ b/tests/test_cluster_connection_pool.py @@ -243,7 +243,7 @@ def test_get_node_by_slot_random(self): """ pool = self.get_pool(connection_kwargs={}) - expected_ports = {7000, 7005} + expected_ports = {7000, 7003} actual_ports = set() for _ in range(0, 100): node = pool.get_node_by_slot_random(0) From 8e877ee3647892d94896e80ecad7cbf2cd75d03e Mon Sep 17 00:00:00 2001 From: Matthew Anderson Date: Sat, 9 Jun 2018 15:41:11 -0700 Subject: [PATCH 019/263] Refresh node table when using specific nodes --- docs/authors.rst | 3 ++- docs/release-notes.rst | 2 ++ rediscluster/client.py | 14 +++++++---- rediscluster/connection.py | 1 + rediscluster/exceptions.py | 5 ++++ rediscluster/nodemanager.py | 12 ++++++---- tests/test_cluster_obj.py | 46 +++++++++++++++++++++++++++++++++++++ 7 files changed, 73 insertions(+), 10 deletions(-) diff --git a/docs/authors.rst b/docs/authors.rst index 2b82204f..e2464a2a 100644 --- a/docs/authors.rst +++ b/docs/authors.rst @@ -25,4 +25,5 @@ Authors who contributed code or testing: - Doug Kent - https://github.com/dkent - VascoVisser - https://github.com/VascoVisser - astrohsy - https://github.com/astrohsy - - Artur Stawiarski - https://github.com/astawiarski \ No newline at end of file + - Artur Stawiarski - 
https://github.com/astawiarski + - Matthew Anderson - https://github.com/mc3ander diff --git a/docs/release-notes.rst b/docs/release-notes.rst index ed4e444b..40992dd4 100644 --- a/docs/release-notes.rst +++ b/docs/release-notes.rst @@ -8,6 +8,8 @@ Future Release * Fixed bug with command "CLUSTER GETKEYSINSLOT" that was throwing exceptions * Added new methods cluster_get_keys_in_slot() to client * Fixed bug with `StrictRedisCluster.from_url` that was ignoring the `readonly_mode` parameter + * NodeManager will now ignore nodes showing cluster errors when initializing the cluster + * Fix bug where RedisCluster wouldn't refresh the cluster table when executing commands on specific nodes 1.3.4 (Mar 5, 2017) ------------------- diff --git a/rediscluster/client.py b/rediscluster/client.py index 7a1708d2..773c1cfd 100644 --- a/rediscluster/client.py +++ b/rediscluster/client.py @@ -328,15 +328,15 @@ def execute_command(self, *args, **kwargs): command = args[0] - node = self.determine_node(*args, **kwargs) - if node: - return self._execute_command_on_nodes(node, *args, **kwargs) - # If set externally we must update it before calling any commands if self.refresh_table_asap: self.connection_pool.nodes.initialize() self.refresh_table_asap = False + node = self.determine_node(*args, **kwargs) + if node: + return self._execute_command_on_nodes(node, *args, **kwargs) + redirect_addr = None asking = False @@ -423,6 +423,12 @@ def _execute_command_on_nodes(self, nodes, *args, **kwargs): connection.send_command(*args) res[node["name"]] = self.parse_response(connection, command, **kwargs) + except ClusterDownError as e: + self.connection_pool.disconnect() + self.connection_pool.reset() + self.refresh_table_asap = True + + raise finally: self.connection_pool.release(connection) diff --git a/rediscluster/connection.py b/rediscluster/connection.py index a5e19bb1..2624f580 100644 --- a/rediscluster/connection.py +++ b/rediscluster/connection.py @@ -12,6 +12,7 @@ from .exceptions import ( RedisClusterException, AskError, MovedError, TryAgainError, ClusterDownError, ClusterCrossSlotError, + MasterDownError, ) # 3rd party imports diff --git a/rediscluster/exceptions.py b/rediscluster/exceptions.py index 5019c98e..ea4a60a0 100644 --- a/rediscluster/exceptions.py +++ b/rediscluster/exceptions.py @@ -74,3 +74,8 @@ class MovedError(AskError): """ """ pass + +class MasterDownError(ClusterDownError): + """ + """ + pass diff --git a/rediscluster/nodemanager.py b/rediscluster/nodemanager.py index bd79bb4f..b16877d1 100644 --- a/rediscluster/nodemanager.py +++ b/rediscluster/nodemanager.py @@ -10,7 +10,7 @@ # 3rd party imports from redis import StrictRedis from redis._compat import b, unicode, bytes, long, basestring -from redis import ConnectionError, TimeoutError +from redis import ConnectionError, TimeoutError, ResponseError class NodeManager(object): @@ -154,10 +154,6 @@ def get_redis_link(self, host, port, decode_responses=False): def initialize(self): """ Init the slots cache by asking all startup nodes what the current cluster configuration is - - TODO: Currently the last node will have the last say about how the configuration is setup. - Maybe it should stop to try after it have correctly covered all slots or when one node is reached - and it could execute CLUSTER SLOTS command. 
""" nodes_cache = {} tmp_slots = {} @@ -179,6 +175,12 @@ def initialize(self): startup_nodes_reachable = True except (ConnectionError, TimeoutError): continue + except ResponseError as e: + # Isn't a cluster connection, so it won't parse these exceptions automatically + if 'CLUSTERDOWN' in e.message or 'MASTERDOWN' in e.message: + continue + else: + raise RedisClusterException("ERROR sending 'cluster slots' command to redis server: {0}".format(node)) except Exception: raise RedisClusterException("ERROR sending 'cluster slots' command to redis server: {0}".format(node)) diff --git a/tests/test_cluster_obj.py b/tests/test_cluster_obj.py index 8675df9b..7b6f90a4 100644 --- a/tests/test_cluster_obj.py +++ b/tests/test_cluster_obj.py @@ -450,3 +450,49 @@ def test_access_correct_slave_with_readonly_mode_client(sr): readonly_client = StrictRedisCluster.from_url(url="redis://127.0.0.1:7000/0", readonly_mode=True) assert b('foo') == readonly_client.get('foo16706') assert return_master_mock.call_count == 0 + + +def test_refresh_using_specific_nodes(r): + """ + Test making calls on specific nodes when the cluster has failed over to + another node + """ + with patch.object(StrictRedisCluster, 'parse_response') as parse_response_mock: + with patch.object(NodeManager, 'initialize', autospec=True) as init_mock: + # simulate 7006 as a failed node + def side_effect(self, *args, **kwargs): + if self.port == 7006: + raise ClusterDownError('CLUSTERDOWN The cluster is down. Use CLUSTER INFO for more information') + + def side_effect_rebuild_slots_cache(self): + # start with all slots mapped to 7006 + self.nodes = {'127.0.0.1:7006': {'host': '127.0.0.1', 'server_type': 'master', 'port': 7006, 'name': '127.0.0.1:7006'}} + self.slots = {} + + for i in range(0, 16383): + self.slots[i] = [{ + 'host': '127.0.0.1', + 'server_type': 'master', + 'port': 7006, + 'name': '127.0.0.1:7006', + }] + + # After the first connection fails, a reinitialize should follow the cluster to 7007 + def map_7007(self): + self.nodes = {'127.0.0.1:7007': {'host': '127.0.0.1', 'server_type': 'master', 'port': 7007, 'name': '127.0.0.1:7007'}} + self.slots = {} + + for i in range(0, 16383): + self.slots[i] = [{ + 'host': '127.0.0.1', + 'server_type': 'master', + 'port': 7007, + 'name': '127.0.0.1:7007', + }] + init_mock.side_effect = map_7007 + + parse_response_mock.side_effect = side_effect + init_mock.side_effect = side_effect_rebuild_slots_cache + + rc = StrictRedisCluster(host='127.0.0.1', port=7006) + rc.ping() From bef416bd4c716a9362d226261d692754bb3d5cb3 Mon Sep 17 00:00:00 2001 From: Matthew Anderson Date: Sun, 10 Jun 2018 14:47:02 -0700 Subject: [PATCH 020/263] Add detail to test_refresh_using_specific_nodes Also add MasterDownError to exception parser --- rediscluster/connection.py | 1 + tests/test_cluster_obj.py | 15 +++++++++++++++ 2 files changed, 16 insertions(+) diff --git a/rediscluster/connection.py b/rediscluster/connection.py index 2624f580..4a773486 100644 --- a/rediscluster/connection.py +++ b/rediscluster/connection.py @@ -32,6 +32,7 @@ class ClusterParser(DefaultParser): 'MOVED': MovedError, 'CLUSTERDOWN': ClusterDownError, 'CROSSSLOT': ClusterCrossSlotError, + 'MASTERDOWN': MasterDownError, }) diff --git a/tests/test_cluster_obj.py b/tests/test_cluster_obj.py index 7b6f90a4..2e715f25 100644 --- a/tests/test_cluster_obj.py +++ b/tests/test_cluster_obj.py @@ -462,7 +462,10 @@ def test_refresh_using_specific_nodes(r): # simulate 7006 as a failed node def side_effect(self, *args, **kwargs): if self.port == 7006: + 
parse_response_mock.failed_calls += 1 raise ClusterDownError('CLUSTERDOWN The cluster is down. Use CLUSTER INFO for more information') + elif self.port == 7007: + parse_response_mock.successful_calls += 1 def side_effect_rebuild_slots_cache(self): # start with all slots mapped to 7006 @@ -492,7 +495,19 @@ def map_7007(self): init_mock.side_effect = map_7007 parse_response_mock.side_effect = side_effect + parse_response_mock.successful_calls = 0 + parse_response_mock.failed_calls = 0 + init_mock.side_effect = side_effect_rebuild_slots_cache rc = StrictRedisCluster(host='127.0.0.1', port=7006) + assert len(rc.connection_pool.nodes.nodes) == 1 + assert '127.0.0.1:7006' in rc.connection_pool.nodes.nodes + rc.ping() + + # Cluster should now point to 7006, and there should be one failed and one succesful call + assert len(rc.connection_pool.nodes.nodes) == 1 + assert '127.0.0.1:7007' in rc.connection_pool.nodes.nodes + assert parse_response_mock.failed_calls == 1 + assert parse_response_mock.successful_calls == 1 From 5028dc571d2c8c84121956c119d536f9ac782033 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Sun, 22 Jul 2018 20:23:55 +0200 Subject: [PATCH 021/263] Add redis 5.0 branch to travis tests --- .travis.yml | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/.travis.yml b/.travis.yml index 14c52791..3a397203 100644 --- a/.travis.yml +++ b/.travis.yml @@ -13,22 +13,23 @@ install: - "if [[ $REDIS_VERSION == '3.0' ]]; then REDIS_VERSION=3.0 make redis-install; fi" - "if [[ $REDIS_VERSION == '3.2' ]]; then REDIS_VERSION=3.2 make redis-install; fi" - "if [[ $REDIS_VERSION == '4.0' ]]; then REDIS_VERSION=4.0 make redis-install; fi" + - "if [[ $REDIS_VERSION == '5.0' ]]; then REDIS_VERSION=5.0 make redis-install; fi" - pip install -r dev-requirements.txt - pip install -e . - "if [[ $HIREDIS == '1' ]]; then pip install hiredis; fi" env: - # Redis 3.0 + # Redis 3.0 & HIREDIS - HIREDIS=0 REDIS_VERSION=3.0 - # Redis 3.0 and HIREDIS - HIREDIS=1 REDIS_VERSION=3.0 - # Redis 3.2 + # Redis 3.2 & HIREDIS - HIREDIS=0 REDIS_VERSION=3.2 - # Redis 3.2 and HIREDIS - HIREDIS=1 REDIS_VERSION=3.2 - # Redis 4.0 + # Redis 4.0 & HIREDIS - HIREDIS=0 REDIS_VERSION=4.0 - # Redis 4.0 and HIREDIS - HIREDIS=1 REDIS_VERSION=4.0 + # Redis 5.0 & HIREDIS + - HIREDIS=0 REDIS_VERSION=5.0 + - HIREDIS=1 REDIS_VERSION=5.0 script: - make start - coverage erase From bcdb6e6fd7a6f31b7d45f1f0cbb18053fb81ae85 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Sun, 22 Jul 2018 20:28:12 +0200 Subject: [PATCH 022/263] Move some docs from threads to pipelines documentation --- docs/pipelines.rst | 7 +++++++ docs/threads.rst | 9 --------- 2 files changed, 7 insertions(+), 9 deletions(-) diff --git a/docs/pipelines.rst b/docs/pipelines.rst index fd035c28..94fad872 100644 --- a/docs/pipelines.rst +++ b/docs/pipelines.rst @@ -65,6 +65,13 @@ If we look back at the order we executed the commands we get `[A, F]` for the fi What is good with this pipeline solution? First we can actually have a pipeline solution that will work in most cases with few commands blocked (only multi key commands). Secondly we can run it in parallel to increase the performance of the pipeline even further, making the benefits even greater. +Packing Commands +---------------- + +When issuing only a single command, there is only one network round trip to be made. But what if you issue 100 pipelined commands? In a single-instance redis configuration, you still only need to make one network hop. 
The commands are packed into a single request and the server responds with all the data for those requests in a single response. But with redis cluster, those keys could be spread out over many different nodes. + +The client is responsible for figuring out which commands map to which nodes. Let's say for example that your 100 pipelined commands need to route to 3 different nodes? The first thing the client does is break out the commands that go to each node, so it only has 3 network requests to make instead of 100. + Transactions and WATCH ---------------------- diff --git a/docs/threads.rst b/docs/threads.rst index c5af63dc..d6f2d869 100644 --- a/docs/threads.rst +++ b/docs/threads.rst @@ -13,15 +13,6 @@ The advantage to this design is that a smart client can communicate with the clu -Packing Commands ----------------- - -When issuing only a single command, there is only one network round trip to be made. But what if you issue 100 pipelined commands? In a single-instance redis configuration, you still only need to make one network hop. The commands are packed into a single request and the server responds with all the data for those requests in a single response. But with redis cluster, those keys could be spread out over many different nodes. - -The client is responsible for figuring out which commands map to which nodes. Let's say for example that your 100 pipelined commands need to route to 3 different nodes? The first thing the client does is break out the commands that go to each node, so it only has 3 network requests to make instead of 100. - - - Parallel network i/o using threads ---------------------------------- From a1632cd72bd90db6fde741c9d6b95dfa162299dc Mon Sep 17 00:00:00 2001 From: Grokzen Date: Sun, 22 Jul 2018 20:30:06 +0200 Subject: [PATCH 023/263] Update redis version to 4.0.10 in Makefile --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index e44f0787..e0fbac7a 100644 --- a/Makefile +++ b/Makefile @@ -216,7 +216,7 @@ ifndef REDIS_TRIB_RB endif ifndef REDIS_VERSION - REDIS_VERSION=4.0.8 + REDIS_VERSION=4.0.10 endif export REDIS_CLUSTER_NODE1_CONF From 1bb580172b91cec4d635feeb4691de34f5e8b585 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Sun, 22 Jul 2018 20:36:14 +0200 Subject: [PATCH 024/263] Prepare release notes for next release 1.3.5 --- docs/release-notes.rst | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/docs/release-notes.rst b/docs/release-notes.rst index 40992dd4..6d40994b 100644 --- a/docs/release-notes.rst +++ b/docs/release-notes.rst @@ -1,7 +1,7 @@ Release Notes ============= -Future Release +1.3.5 (July 22, 2018) -------------- * Add Redis 4 compatability fix to CLUSTER NODES command (See issue #217) @@ -10,6 +10,16 @@ Future Release * Fixed bug with `StrictRedisCluster.from_url` that was ignoring the `readonly_mode` parameter * NodeManager will now ignore nodes showing cluster errors when initializing the cluster * Fix bug where RedisCluster wouldn't refresh the cluster table when executing commands on specific nodes + * Add redis 5.0 to travis-ci tests + * Change default redis version from 3.0.7 to 4.0.10 + * Increase accepted ranges of dependencies specefied in dev-requirements.txt + * Several major and minor documentation updates and tweaks + * Add example script "from_url_password_protected.py" + * command "CLUSTER GETKEYSINSLOT" is now returned as a list and not int + * Improve support for ssl connections + * Retry on Timeout errors when doing cluster discovery + * Added 
new error class "MasterDownError" + * Updated requirements for dependency of redis-py to latest version 1.3.4 (Mar 5, 2017) ------------------- From 8fe8b718a3f4b50ae0f4144205dc6d9f504e7ffc Mon Sep 17 00:00:00 2001 From: Grokzen Date: Sun, 22 Jul 2018 20:36:55 +0200 Subject: [PATCH 025/263] Bump version to 1.3.5 --- rediscluster/__init__.py | 2 +- setup.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/rediscluster/__init__.py b/rediscluster/__init__.py index c7d0a6d3..7bf3c76e 100644 --- a/rediscluster/__init__.py +++ b/rediscluster/__init__.py @@ -16,7 +16,7 @@ setattr(redis, "StrictClusterPipeline", StrictClusterPipeline) # Major, Minor, Fix version -__version__ = (1, 3, 4) +__version__ = (1, 3, 5) if sys.version_info[0:3] == (3, 4, 0): raise RuntimeError("CRITICAL: rediscluster do not work with python 3.4.0. Please use 3.4.1 or higher.") diff --git a/setup.py b/setup.py index 525b9c57..22ee03a9 100644 --- a/setup.py +++ b/setup.py @@ -20,7 +20,7 @@ setup( name="redis-py-cluster", - version="1.3.4", + version="1.3.5", description="Cluster library for redis 3.0.0 built on top of redis-py lib", long_description=readme + '\n\n' + history, author="Johan Andersson", From 973998927cfdfe9cbd6938c90611d152ac6b6cc3 Mon Sep 17 00:00:00 2001 From: Jeff Widman Date: Thu, 11 Oct 2018 20:18:51 -0700 Subject: [PATCH 026/263] Do not hardcode redis version in description Now that redis 4 and 5 are supported, the 3.0.0 is no longer true. --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 22ee03a9..67ec1e93 100644 --- a/setup.py +++ b/setup.py @@ -21,7 +21,7 @@ setup( name="redis-py-cluster", version="1.3.5", - description="Cluster library for redis 3.0.0 built on top of redis-py lib", + description="Library for communicating with Redis Clusters. Built on top of redis-py lib", long_description=readme + '\n\n' + history, author="Johan Andersson", author_email="Grokzen@gmail.com", From a49d21a0d1d4f71cdf17142d1334f3206618162c Mon Sep 17 00:00:00 2001 From: Jeff Widman Date: Thu, 11 Oct 2018 20:12:42 -0700 Subject: [PATCH 027/263] Fix spelling, typos, grammar, etc --- docs/benchmarks.rst | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/docs/benchmarks.rst b/docs/benchmarks.rst index 7c25d18f..ec64b290 100644 --- a/docs/benchmarks.rst +++ b/docs/benchmarks.rst @@ -1,7 +1,7 @@ Benchmarks ========== -There is a few benchmarks that is designed to test specific parts of the code that will show how big of a performance difference there is between using this lib and the normal Redis client. +These are a few benchmarks that are designed to test specific parts of the code to demonstrate the performance difference between using this lib and the normal Redis client. @@ -16,21 +16,21 @@ Install with pip install -e . -You also need a few redis servers to test against. It is required to have 1 cluster with atleast one node on port `7001` and it also required to have a non-clustered server on port `7007`. +You also need a few redis servers to test against. You must have one cluster with at least one node on port `7001` and you must also have a non-clustered server on port `7007`. -Implemented Bencmarks +Implemented benchmarks --------------------- -- `simple.py`, This benchmark can be used to messure a simple `set` and `get` operation chain. It also support running pipelines bu adding the flag `--pipeline` +- `simple.py`, This benchmark can be used to measure a simple `set` and `get` operation chain. 
It also supports running pipelines by adding the flag `--pipeline`. Run predefined benchmarks ------------------------- -There is a set of predefined benchmarks that can be runned to messure performance drop from using this library. +These are a set of predefined benchmarks that can be run to measure the performance drop from using this library. To run the benchmarks run From 813b9346ed2923eae83e14f18c9a0ab3933638a2 Mon Sep 17 00:00:00 2001 From: Jeff Widman Date: Tue, 16 Oct 2018 00:47:16 -0700 Subject: [PATCH 028/263] Use https + .io domain for RTD --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 1d3b6f23..87b53d17 100644 --- a/README.md +++ b/README.md @@ -12,7 +12,7 @@ Gitter chat room: [![Gitter](https://badges.gitter.im/Grokzen/redis-py-cluster.s # Documentation -All documentation can be found at http://redis-py-cluster.readthedocs.org/en/master +All documentation can be found at https://redis-py-cluster.readthedocs.io/en/master This Readme contains a reduced version of the full documentation. From 01e2c4632f469b4567e47712add0bfaaceaa2830 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Fri, 16 Nov 2018 15:51:20 +0100 Subject: [PATCH 029/263] Pin redis-py version to 2.10.6. Update version to 1.3.6 and add note to release notes --- docs/release-notes.rst | 8 +++++++- rediscluster/__init__.py | 2 +- requirements.txt | 2 +- setup.py | 4 ++-- 4 files changed, 11 insertions(+), 5 deletions(-) diff --git a/docs/release-notes.rst b/docs/release-notes.rst index 6d40994b..e0c89498 100644 --- a/docs/release-notes.rst +++ b/docs/release-notes.rst @@ -1,8 +1,14 @@ Release Notes ============= +1.3.6 (Nov 16, 2018) +-------------------- + + * Pin upstream redis-py package to release 2.10.6 to avoid issues with incompatible version 3.0.0 + + 1.3.5 (July 22, 2018) --------------- +--------------------- * Add Redis 4 compatability fix to CLUSTER NODES command (See issue #217) * Fixed bug with command "CLUSTER GETKEYSINSLOT" that was throwing exceptions diff --git a/rediscluster/__init__.py b/rediscluster/__init__.py index 7bf3c76e..cb3d40f0 100644 --- a/rediscluster/__init__.py +++ b/rediscluster/__init__.py @@ -16,7 +16,7 @@ setattr(redis, "StrictClusterPipeline", StrictClusterPipeline) # Major, Minor, Fix version -__version__ = (1, 3, 5) +__version__ = (1, 3, 6) if sys.version_info[0:3] == (3, 4, 0): raise RuntimeError("CRITICAL: rediscluster do not work with python 3.4.0. Please use 3.4.1 or higher.") diff --git a/requirements.txt b/requirements.txt index 345a0b84..91015469 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1 +1 @@ -redis>=2.10.6 +redis==2.10.6 diff --git a/setup.py b/setup.py index 67ec1e93..641674c3 100644 --- a/setup.py +++ b/setup.py @@ -20,7 +20,7 @@ setup( name="redis-py-cluster", - version="1.3.5", + version="1.3.6", description="Library for communicating with Redis Clusters. Built on top of redis-py lib", long_description=readme + '\n\n' + history, author="Johan Andersson", @@ -31,7 +31,7 @@ url='http://github.com/grokzen/redis-py-cluster', license='MIT', install_requires=[ - 'redis>=2.10.6' + 'redis==2.10.6' ], keywords=[ 'redis', From 4e11b2dac5573b2bb655e23e9e648a9aff3edb97 Mon Sep 17 00:00:00 2001 From: kai Date: Fri, 14 Sep 2018 22:11:13 +0800 Subject: [PATCH 030/263] Improve benchmark script. 
Add [-n ] and [-c ] options, multi clients test use multi processes modle --- benchmarks/simple.py | 106 +++++++++++++++++++++---------------------- 1 file changed, 53 insertions(+), 53 deletions(-) diff --git a/benchmarks/simple.py b/benchmarks/simple.py index 9c4dca89..44bce0c0 100644 --- a/benchmarks/simple.py +++ b/benchmarks/simple.py @@ -1,11 +1,26 @@ -# -*- coding: utf-8 -*- +#!/bin/env python3 +""" +Usage: + redis-cluster-benchmark.py [--host IP] [--port PORT] [--nocluster] [-n ] [--timeit] [--pipeline] [--resetlastkey] [-c ] [-h] [--version] -# python std lib +Options: + -c concurrent client number [default: 1] + -n request number [default: 100000] + --nocluster If flag is set then StrictRedis will be used instead of cluster lib + --host IP Redis server to test against [default: 127.0.0.1] + --port PORT Port on redis server [default: 6379] + --timeit run a mini benchmark to test performance + --pipeline Only usable with --timeit flag. Runs SET/GET inside pipelines. + --resetlastkey reset __last__ key + -h --help show this help and exit + -v --version show version and exit +""" import time +from multiprocessing import Process # 3rd party imports from docopt import docopt -from redis._compat import xrange +# from redis._compat import xrange def loop(rc, reset_last_key=None): @@ -25,7 +40,7 @@ def loop(rc, reset_last_key=None): print("error {0}".format(e)) time.sleep(1) - for i in xrange(last, 1000000000): # noqa + for i in range(last, 1000000000): # noqa try: print("SET foo{0} {1}".format(i, i)) rc.set("foo{0}".format(i), i) @@ -38,78 +53,63 @@ def loop(rc, reset_last_key=None): time.sleep(0.05) -def timeit(rc, itterations=50000): +def timeit(rc, num): """ Time how long it take to run a number of set/get:s """ - t0 = time.time() - for i in xrange(0, itterations): # noqa + # t0 = time.time() + for i in range(0, num): # noqa s = "foo{0}".format(i) rc.set(s, i) rc.get(s) - - t1 = time.time() - t0 - print("{0}k SET/GET operations took: {1} seconds... {2} operations per second".format((itterations / 1000) * 2, t1, (itterations / t1) * 2)) + # t1 = time.time() - t0 + # print("{0}k SET/GET operations took: {1} seconds... {2} operations per second".format((num / 1000) * 2, t1, (num / t1) * 2)) -def timeit_pipeline(rc, itterations=50000): +def timeit_pipeline(rc, num): """ Time how long it takes to run a number of set/get:s inside a cluster pipeline """ - t0 = time.time() - for i in xrange(0, itterations): # noqa + # t0 = time.time() + for i in range(0, num): # noqa s = "foo{0}".format(i) - p = rc.pipeline() p.set(s, i) p.get(s) p.execute() - - t1 = time.time() - t0 - print("{0}k SET/GET operations inside pipelines took: {1} seconds... {2} operations per second".format( - (itterations / 1000) * 2, t1, (itterations / t1) * 2) - ) + # t1 = time.time() - t0 + # print("{0}k SET/GET operations inside pipelines took: {1} seconds... {2} operations per second".format((num / 1000) * 2, t1, (num / t1) * 2)) if __name__ == "__main__": - __docopt__ = """ -Usage: - simple [--host IP] [--port PORT] [--nocluster] [--timeit] [--pipeline] [--resetlastkey] [-h] [--version] - -Options: - --nocluster If flag is set then StrictRedis will be used instead of cluster lib - --host IP Redis server to test against [default: 127.0.0.1] - --port PORT Port on redis server [default: 7000] - --timeit run a mini benchmark to test performance - --pipeline Only usable with --timeit flag. Runs SET/GET inside pipelines. 
- --resetlastkey reset __last__ key - -h --help show this help and exit - -v --version show version and exit - """ - - args = docopt(__docopt__, version="0.3.0") - - startup_nodes = [{"host": args["--host"], "port": args["--port"]}] - + args = docopt(__doc__, version="0.3.1") + # print(args) + #startup_nodes = [{"host": '172.16.166.31', "port": 6379}, {"host": '172.16.166.32', "port": 6379}, {"host": '172.16.166.33', "port": 6379}] + startup_nodes = [{"host": args['--host'], "port": args['--port']}] if not args["--nocluster"]: from rediscluster import StrictRedisCluster rc = StrictRedisCluster(startup_nodes=startup_nodes, max_connections=32, socket_timeout=0.1, decode_responses=True) + # print(rc) else: from redis import StrictRedis rc = StrictRedis(host=args["--host"], port=args["--port"], socket_timeout=0.1, decode_responses=True) - - if args["--timeit"]: - test_itterstions = [ - 5000, - 10000, - 20000, - ] - - if args["--pipeline"]: - for itterations in test_itterstions: - timeit_pipeline(rc, itterations=itterations) + # print(rc) + # create specified number processes + processes = [] + single_request = int(args["-n"]) // int(args["-c"]) + for j in range(int(args["-c"])): + if args["--timeit"]: + if args["--pipeline"]: + p = Process(target=timeit_pipeline, args=(rc, single_request)) + else: + p = Process(target=timeit, args=(rc, single_request)) else: - for itterations in test_itterstions: - timeit(rc, itterations=itterations) - else: - loop(rc, reset_last_key=args["--resetlastkey"]) + p = Process(target=loop, args=(rc, args["--resetlastkey"])) + processes.append(p) + t1 = time.time() + for p in processes: + p.start() + for p in processes: + p.join() + t2 = time.time() - t1 + print("{0}k SET/GET operations took: {1} seconds... {2} operations per second".format(int(args["-n"]) / 1000 * 2, t2, int(args["-n"]) / t2 * 2)) \ No newline at end of file From fb6bda314d14fc69d1f3c1f14f02cf271131782f Mon Sep 17 00:00:00 2001 From: kai Date: Sun, 7 Oct 2018 19:37:34 +0800 Subject: [PATCH 031/263] clean up benchmarks/simple.py --- benchmarks/simple.py | 14 +------------- 1 file changed, 1 insertion(+), 13 deletions(-) diff --git a/benchmarks/simple.py b/benchmarks/simple.py index 44bce0c0..a3b918d5 100644 --- a/benchmarks/simple.py +++ b/benchmarks/simple.py @@ -5,7 +5,7 @@ Options: -c concurrent client number [default: 1] - -n request number [default: 100000] + -n request number [default: 100000] --nocluster If flag is set then StrictRedis will be used instead of cluster lib --host IP Redis server to test against [default: 127.0.0.1] --port PORT Port on redis server [default: 6379] @@ -17,10 +17,8 @@ """ import time from multiprocessing import Process - # 3rd party imports from docopt import docopt -# from redis._compat import xrange def loop(rc, reset_last_key=None): @@ -57,43 +55,33 @@ def timeit(rc, num): """ Time how long it take to run a number of set/get:s """ - # t0 = time.time() for i in range(0, num): # noqa s = "foo{0}".format(i) rc.set(s, i) rc.get(s) - # t1 = time.time() - t0 - # print("{0}k SET/GET operations took: {1} seconds... {2} operations per second".format((num / 1000) * 2, t1, (num / t1) * 2)) def timeit_pipeline(rc, num): """ Time how long it takes to run a number of set/get:s inside a cluster pipeline """ - # t0 = time.time() for i in range(0, num): # noqa s = "foo{0}".format(i) p = rc.pipeline() p.set(s, i) p.get(s) p.execute() - # t1 = time.time() - t0 - # print("{0}k SET/GET operations inside pipelines took: {1} seconds... 
{2} operations per second".format((num / 1000) * 2, t1, (num / t1) * 2)) if __name__ == "__main__": args = docopt(__doc__, version="0.3.1") - # print(args) - #startup_nodes = [{"host": '172.16.166.31', "port": 6379}, {"host": '172.16.166.32', "port": 6379}, {"host": '172.16.166.33', "port": 6379}] startup_nodes = [{"host": args['--host'], "port": args['--port']}] if not args["--nocluster"]: from rediscluster import StrictRedisCluster rc = StrictRedisCluster(startup_nodes=startup_nodes, max_connections=32, socket_timeout=0.1, decode_responses=True) - # print(rc) else: from redis import StrictRedis rc = StrictRedis(host=args["--host"], port=args["--port"], socket_timeout=0.1, decode_responses=True) - # print(rc) # create specified number processes processes = [] single_request = int(args["-n"]) // int(args["-c"]) From 441b7b8aa71327bef86259362bbdf403a43d6282 Mon Sep 17 00:00:00 2001 From: kai Date: Tue, 9 Oct 2018 22:54:17 +0800 Subject: [PATCH 032/263] update benchmarks.rst --- docs/benchmarks.rst | 30 +++++++++++------------------- 1 file changed, 11 insertions(+), 19 deletions(-) diff --git a/docs/benchmarks.rst b/docs/benchmarks.rst index ec64b290..f0415967 100644 --- a/docs/benchmarks.rst +++ b/docs/benchmarks.rst @@ -42,26 +42,18 @@ Example output and comparison of different runmodes .. code-block:: - -- Running Simple benchmark with StrictRedis lib and non cluster server -- - python benchmarks/simple.py --port 7007 --timeit --nocluster - 10.0k SET/GET operations took: 0.9711470603942871 seconds... 10297.10165208139 operations per second - 20.0k SET/GET operations took: 1.9136295318603516 seconds... 10451.343725113202 operations per second - 40.0k SET/GET operations took: 3.8409764766693115 seconds... 10414.018477584079 operations per second - - -- Running Simple benchmark with StrictRedisCluster lib and cluster server -- - python benchmarks/simple.py --port 7001 --timeit - 10.0k SET/GET operations took: 0.760077714920044 seconds... 13156.549394494412 operations per second - 20.0k SET/GET operations took: 1.5251967906951904 seconds... 13113.061948474155 operations per second - 40.0k SET/GET operations took: 3.05112361907959 seconds... 13109.924406165655 operations per second + -- Running Simple benchmark with StrictRedis lib and non cluster server, 50 concurrent processes and total 50000*2 requests -- + python benchmarks/simple.py --host 172.16.166.31 --timeit --nocluster -c 50 -n 50000 + 100.0k SET/GET operations took: 2.45 seconds... 40799.93 operations per second + + -- Running Simple benchmark with StrictRedisCluster lib and cluster server, 50 concurrent processes and total 50000*2 requests -- + python benchmarks/simple.py --host 172.16.166.31 --timeit -c 50 -n 50000 + 100.0k SET/GET operations took: 9.51 seconds... 31513.71 operations per second -- Running Simple benchmark with pipelines & StrictRedis lib and non cluster server -- - python benchmarks/simple.py --port 7007 --timeit --pipeline --nocluster - 10.0k SET/GET operations inside pipelines took: 0.8831894397735596 seconds... 11322.599149921782 operations per second - 20.0k SET/GET operations inside pipelines took: 1.6283915042877197 seconds... 12282.058674058404 operations per second - 40.0k SET/GET operations inside pipelines took: 3.2882907390594482 seconds... 12164.374495498905 operations per second + python benchmarks/simple.py --host 172.16.166.31 --timeit --nocluster -c 50 -n 50000 --pipeline + 100.0k SET/GET operations took: 2.1728243827819824 seconds... 
46023.047602201834 operations per second -- Running Simple benchmark with StrictRedisCluster lib and cluster server - python benchmarks/simple.py --port 7001 --timeit --pipeline - 10.0k SET/GET operations inside pipelines took: 0.709221601486206 seconds... 14099.965340937933 operations per second - 20.0k SET/GET operations inside pipelines took: 1.3776116371154785 seconds... 14517.879684783395 operations per second - 40.0k SET/GET operations inside pipelines took: 2.794893980026245 seconds... 14311.813001087214 operations per second + python benchmarks/simple.py --host 172.16.166.31 --timeit -c 50 -n 50000 --pipeline + 100.0k SET/GET operations took: 1.7181339263916016 seconds... 58202.68051514381 operations per second \ No newline at end of file From 793734cb58e664393463760a06834ba429cce555 Mon Sep 17 00:00:00 2001 From: kai Date: Mon, 15 Oct 2018 23:24:43 +0800 Subject: [PATCH 033/263] typo error fix & reformat help information refer to the redis-cli & eliminate ambiguity of the output --- benchmarks/simple.py | 32 +++++++++++++++++--------------- docs/benchmarks.rst | 16 ++++++++-------- 2 files changed, 25 insertions(+), 23 deletions(-) diff --git a/benchmarks/simple.py b/benchmarks/simple.py index a3b918d5..1aac44b9 100644 --- a/benchmarks/simple.py +++ b/benchmarks/simple.py @@ -1,20 +1,22 @@ -#!/bin/env python3 +#!/usr/bin/env python3 +# -*- coding:utf-8 -*- """ Usage: - redis-cluster-benchmark.py [--host IP] [--port PORT] [--nocluster] [-n ] [--timeit] [--pipeline] [--resetlastkey] [-c ] [-h] [--version] + redis-cluster-benchmark.py [--host ] [-p ] [-n ] [-c ] [--nocluster] [--timeit] [--pipeline] [--resetlastkey] [-h] [--version] Options: - -c concurrent client number [default: 1] - -n request number [default: 100000] + --host Redis server to test against [default: 127.0.0.1] + -p Port on redis server [default: 7000] + -n Request number [default: 100000] + -c Concurrent client number [default: 1] --nocluster If flag is set then StrictRedis will be used instead of cluster lib - --host IP Redis server to test against [default: 127.0.0.1] - --port PORT Port on redis server [default: 6379] - --timeit run a mini benchmark to test performance + --timeit Run a mini benchmark to test performance --pipeline Only usable with --timeit flag. Runs SET/GET inside pipelines. 
- --resetlastkey reset __last__ key - -h --help show this help and exit - -v --version show version and exit + --resetlastkey Reset __last__ key + -h --help Output this help and exit + --version Output version and exit """ + import time from multiprocessing import Process # 3rd party imports @@ -55,7 +57,7 @@ def timeit(rc, num): """ Time how long it take to run a number of set/get:s """ - for i in range(0, num): # noqa + for i in range(0, num//2): # noqa s = "foo{0}".format(i) rc.set(s, i) rc.get(s) @@ -65,7 +67,7 @@ def timeit_pipeline(rc, num): """ Time how long it takes to run a number of set/get:s inside a cluster pipeline """ - for i in range(0, num): # noqa + for i in range(0, num//2): # noqa s = "foo{0}".format(i) p = rc.pipeline() p.set(s, i) @@ -75,13 +77,13 @@ def timeit_pipeline(rc, num): if __name__ == "__main__": args = docopt(__doc__, version="0.3.1") - startup_nodes = [{"host": args['--host'], "port": args['--port']}] + startup_nodes = [{"host": args['--host'], "port": args['-p']}] if not args["--nocluster"]: from rediscluster import StrictRedisCluster rc = StrictRedisCluster(startup_nodes=startup_nodes, max_connections=32, socket_timeout=0.1, decode_responses=True) else: from redis import StrictRedis - rc = StrictRedis(host=args["--host"], port=args["--port"], socket_timeout=0.1, decode_responses=True) + rc = StrictRedis(host=args["--host"], port=args["-p"], socket_timeout=0.1, decode_responses=True) # create specified number processes processes = [] single_request = int(args["-n"]) // int(args["-c"]) @@ -100,4 +102,4 @@ def timeit_pipeline(rc, num): for p in processes: p.join() t2 = time.time() - t1 - print("{0}k SET/GET operations took: {1} seconds... {2} operations per second".format(int(args["-n"]) / 1000 * 2, t2, int(args["-n"]) / t2 * 2)) \ No newline at end of file + print("Tested {0}k SET & GET (each 50%) operations took: {1} seconds... {2} operations per second".format(int(args["-n"]) / 1000, t2, int(args["-n"]) / t2 * 2)) \ No newline at end of file diff --git a/docs/benchmarks.rst b/docs/benchmarks.rst index f0415967..dc2dc770 100644 --- a/docs/benchmarks.rst +++ b/docs/benchmarks.rst @@ -43,17 +43,17 @@ Example output and comparison of different runmodes .. code-block:: -- Running Simple benchmark with StrictRedis lib and non cluster server, 50 concurrent processes and total 50000*2 requests -- - python benchmarks/simple.py --host 172.16.166.31 --timeit --nocluster -c 50 -n 50000 - 100.0k SET/GET operations took: 2.45 seconds... 40799.93 operations per second + python benchmarks/simple.py --host 127.0.0.1 --timeit --nocluster -c 50 -n 50000 + 50.0k SET/GET operations took: 2.45 seconds... 40799.93 operations per second -- Running Simple benchmark with StrictRedisCluster lib and cluster server, 50 concurrent processes and total 50000*2 requests -- - python benchmarks/simple.py --host 172.16.166.31 --timeit -c 50 -n 50000 - 100.0k SET/GET operations took: 9.51 seconds... 31513.71 operations per second + python benchmarks/simple.py --host 127.0.0.1 --timeit -c 50 -n 50000 + 50.0k SET & GET (each 50%) operations took: 9.51 seconds... 31513.71 operations per second -- Running Simple benchmark with pipelines & StrictRedis lib and non cluster server -- - python benchmarks/simple.py --host 172.16.166.31 --timeit --nocluster -c 50 -n 50000 --pipeline - 100.0k SET/GET operations took: 2.1728243827819824 seconds... 
46023.047602201834 operations per second + python benchmarks/simple.py --host 127.0.0.1 --timeit --nocluster -c 50 -n 50000 --pipeline + 50.0k SET & GET (each 50%) operations took: 2.1728243827819824 seconds... 46023.047602201834 operations per second -- Running Simple benchmark with StrictRedisCluster lib and cluster server - python benchmarks/simple.py --host 172.16.166.31 --timeit -c 50 -n 50000 --pipeline - 100.0k SET/GET operations took: 1.7181339263916016 seconds... 58202.68051514381 operations per second \ No newline at end of file + python benchmarks/simple.py --host 127.0.0.1 --timeit -c 50 -n 50000 --pipeline + 50.0k SET & GET (each 50%) operations took: 1.7181339263916016 seconds... 58202.68051514381 operations per second \ No newline at end of file From 4a499a9d13b856675071ea2c101426d7aacba310 Mon Sep 17 00:00:00 2001 From: Kien Date: Tue, 25 Dec 2018 18:40:42 +0700 Subject: [PATCH 034/263] fix issue when having ssl=False --- rediscluster/client.py | 2 +- rediscluster/connection.py | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/rediscluster/client.py b/rediscluster/client.py index 773c1cfd..30b66ade 100644 --- a/rediscluster/client.py +++ b/rediscluster/client.py @@ -161,7 +161,7 @@ def __init__(self, host=None, port=None, startup_nodes=None, max_connections=Non if "db" in kwargs: raise RedisClusterException("Argument 'db' is not possible to use in cluster mode") - if kwargs.get('ssl', False): + if kwargs.pop('ssl', False): # Needs to be removed to avoid exception in redis Connection init connection_class = SSLClusterConnection if "connection_pool" in kwargs: diff --git a/rediscluster/connection.py b/rediscluster/connection.py index 4a773486..e6ea74f3 100644 --- a/rediscluster/connection.py +++ b/rediscluster/connection.py @@ -71,7 +71,6 @@ class SSLClusterConnection(SSLConnection): def __init__(self, **kwargs): self.readonly = kwargs.pop('readonly', False) kwargs['parser_class'] = ClusterParser - kwargs.pop('ssl', None) # Needs to be removed to avoid exception in redis Connection init super(SSLClusterConnection, self).__init__(**kwargs) def on_connect(self): From 754c25d038f7492a1d33bc8ad4e073733dd70f83 Mon Sep 17 00:00:00 2001 From: wouter bolsterlee Date: Tue, 8 Jan 2019 16:47:05 +0100 Subject: [PATCH 035/263] fix typo --- docs/index.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/index.rst b/docs/index.rst index 04c460d9..5ce2fa15 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -6,7 +6,7 @@ Welcome to redis-py-cluster's documentation! ============================================ -This project is a port of `redis-rb-cluster` by antirez, with alot of added functionality. The original source can be found at https://github.com/antirez/redis-rb-cluster. +This project is a port of `redis-rb-cluster` by antirez, with a lot of added functionality. The original source can be found at https://github.com/antirez/redis-rb-cluster. The source code is `available on github`_. 
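
The ssl handling patch above ("fix issue when having ssl=False") is easiest to see from the calling side. A minimal usage sketch, assuming a hypothetical TLS-enabled cluster node on 127.0.0.1:7000 (host, port and keys here are illustrative and not part of the patch itself):

    from rediscluster import StrictRedisCluster

    startup_nodes = [{"host": "127.0.0.1", "port": "7000"}]

    # Passing ssl=True makes the client pop the flag and switch to
    # SSLClusterConnection; with ssl left at its default of False the flag is
    # now also consumed, so no stray 'ssl' kwarg reaches the plain connection.
    rc = StrictRedisCluster(startup_nodes=startup_nodes, ssl=True, decode_responses=True)
    rc.set("foo", "bar")
    print(rc.get("foo"))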
From b9aefcac27424dc4e59d3d5cac9204106fd43a2a Mon Sep 17 00:00:00 2001 From: Grokzen Date: Fri, 8 Feb 2019 10:03:43 +0100 Subject: [PATCH 036/263] Disable Coveralls as it is broken When it pushes like 10+ callbacks back to the PR it just tested --- .travis.yml | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/.travis.yml b/.travis.yml index 3a397203..b9b5dd8c 100644 --- a/.travis.yml +++ b/.travis.yml @@ -32,12 +32,13 @@ env: - HIREDIS=1 REDIS_VERSION=5.0 script: - make start - - coverage erase - - coverage run --source rediscluster -p -m py.test +# - coverage erase +# - coverage run --source rediscluster -p -m py.test + - py.test - make stop -after_success: - - coverage combine - - coveralls +# after_success: +# - coverage combine +# - coveralls matrix: allow_failures: - python: "nightly" From d4243615b5cff57eeef93b31063eea79e1b87ae6 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Sun, 24 Feb 2019 16:18:45 +0100 Subject: [PATCH 037/263] Remove hard verion mapping for all development requirements --- dev-requirements.txt | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/dev-requirements.txt b/dev-requirements.txt index 7f686527..57454c80 100644 --- a/dev-requirements.txt +++ b/dev-requirements.txt @@ -1,11 +1,11 @@ -r requirements.txt -coverage>=4.0,<5.0 -pytest>=2.8.3,<4.0.0 -testfixtures>=4.5.0,<5.5.0 -mock>=1.3.0,<2.1.0 -docopt>=0.6.2,<1.0.0 -tox>=2.2.0,<3.0.0 -python-coveralls>=2.5.0,<3.0.0 -ptpdb>=0.16,<1.0 -ptpython>=0.31,<1.0 +coverage +pytest +testfixtures +mock +docopt +tox +python-coveralls +ptpdb +ptpython From 5e2868f03e56c15e0b9dbfe2205f35b0c1bfecf8 Mon Sep 17 00:00:00 2001 From: Nicolas Flacco Date: Fri, 17 Aug 2018 11:08:06 -0700 Subject: [PATCH 038/263] Add read replica support for single commands --- rediscluster/client.py | 25 +++++++++++++++++++++++-- rediscluster/connection.py | 25 ++++++++++++++++++++----- rediscluster/pipeline.py | 2 +- 3 files changed, 44 insertions(+), 8 deletions(-) diff --git a/rediscluster/client.py b/rediscluster/client.py index 773c1cfd..fccddcc7 100644 --- a/rediscluster/client.py +++ b/rediscluster/client.py @@ -5,6 +5,7 @@ import random import string import time +from sets import Set # rediscluster imports from .connection import ( @@ -71,6 +72,20 @@ class StrictRedisCluster(StrictRedis): ], 'slot-id'), ) + # Not complete, but covers the major ones + # https://redis.io/commands + READ_COMMANDS = Set(["BITPOS", "BITCOUNT", "EXISTS", + "GEOHASH", "GEOPOS", "GEODIST", "GEORADIUS", "GEORADIUSBYMEMBER", + "GET", "GETBIT", "GETRANGE", + "HEXISTS", "HGET", "HGETALL", "HKEYS", "HLEN", "HMGET", "HSTRLEN", "HVALS", + "KEYS", + "LINDEX", "LLEN", "LRANGE", + "MGET", "PTTL", "RANDOMKEY", + "SCARD", "SDIFF", "SINTER", "SISMEMBER", "SMEMBERS", "SRANDMEMBER", + "STRLEN", "SUNION", "TTL", + "ZCARD", "ZCOUNT", "ZRANGE", "ZSCORE" + ]) + RESULT_CALLBACKS = dict_merge( string_keys_to_dict([ "ECHO", "CONFIG GET", "CONFIG SET", "SLOWLOG GET", "CLIENT KILL", "INFO", @@ -131,7 +146,7 @@ class StrictRedisCluster(StrictRedis): def __init__(self, host=None, port=None, startup_nodes=None, max_connections=None, max_connections_per_node=False, init_slot_cache=True, readonly_mode=False, reinitialize_steps=None, skip_full_coverage_check=False, nodemanager_follow_cluster=False, - connection_class=None, **kwargs): + connection_class=None, enable_read_from_replicas=False, **kwargs): """ :startup_nodes: List of nodes that initial bootstrapping can be done from @@ -197,6 +212,7 @@ def __init__(self, host=None, 
port=None, startup_nodes=None, max_connections=Non self.result_callbacks = self.__class__.RESULT_CALLBACKS.copy() self.response_callbacks = self.__class__.RESPONSE_CALLBACKS.copy() self.response_callbacks = dict_merge(self.response_callbacks, self.CLUSTER_COMMANDS_RESPONSE_CALLBACKS) + self.enable_read_from_replicas = enable_read_from_replicas @classmethod def from_url(cls, url, db=None, skip_full_coverage_check=False, readonly_mode=False, **kwargs): @@ -339,6 +355,7 @@ def execute_command(self, *args, **kwargs): redirect_addr = None asking = False + is_read_replica = False try_random_node = False slot = self._determine_slot(*args) @@ -358,7 +375,7 @@ def execute_command(self, *args, **kwargs): # MOVED node = self.connection_pool.get_master_node_by_slot(slot) else: - node = self.connection_pool.get_node_by_slot(slot) + node, is_read_replica = self.connection_pool.get_node_by_slot(slot, self.enable_read_from_replicas and (command in self.READ_COMMANDS)) r = self.connection_pool.get_connection_by_node(node) try: @@ -366,6 +383,10 @@ def execute_command(self, *args, **kwargs): r.send_command('ASKING') self.parse_response(r, "ASKING", **kwargs) asking = False + if is_read_replica: + # Ask read replica to accept reads (see https://redis.io/commands/readonly) + r.send_command('READONLY') + is_read_replica = False r.send_command(*args) return self.parse_response(r, command, **kwargs) diff --git a/rediscluster/connection.py b/rediscluster/connection.py index 4a773486..d8eea881 100644 --- a/rediscluster/connection.py +++ b/rediscluster/connection.py @@ -6,6 +6,7 @@ import threading from contextlib import contextmanager from itertools import chain +import random # rediscluster imports from .nodemanager import NodeManager @@ -306,7 +307,8 @@ def get_connection_by_slot(self, slot): self._checkpid() try: - return self.get_connection_by_node(self.get_node_by_slot(slot)) + node, is_read_replica = self.get_node_by_slot(slot) + return self.get_connection_by_node() except KeyError: return self.get_random_connection() @@ -332,10 +334,22 @@ def get_master_node_by_slot(self, slot): """ return self.nodes.slots[slot][0] - def get_node_by_slot(self, slot): + def get_random_node_by_slot(self, slot): + """ + Get a random node from the slot, including master """ + nodes_in_slot = self.nodes.slots[slot] + random_index = random.randrange(0,len(nodes_in_slot)) + is_read_replica = random_index > 0 + return nodes_in_slot[random_index], is_read_replica + + def get_node_by_slot(self, slot, enable_read_from_replicas): """ - return self.get_master_node_by_slot(slot) + """ + if enable_read_from_replicas: + return self.get_random_node_by_slot(slot) + else: + return self.get_master_node_by_slot(slot), False class ClusterReadOnlyConnectionPool(ClusterConnectionPool): @@ -378,7 +392,7 @@ def get_master_connection_by_slot(self, slot): Do not return a random node if master node is not available for any reason. 
""" self._checkpid() - return self.get_connection_by_node(self.get_node_by_slot(slot)) + return self.get_connection_by_node(self.get_master_node_by_slot(slot)) def get_random_master_slave_connection_by_slot(self, slot): """ @@ -388,7 +402,8 @@ def get_random_master_slave_connection_by_slot(self, slot): self._checkpid() try: - return self.get_node_by_slot_random(self.get_node_by_slot(slot)) + node, is_read_replica = self.get_random_node_by_slot(slot) + return node except KeyError: return self.get_random_connection() diff --git a/rediscluster/pipeline.py b/rediscluster/pipeline.py index 9b1d0283..1e588ff9 100644 --- a/rediscluster/pipeline.py +++ b/rediscluster/pipeline.py @@ -155,7 +155,7 @@ def send_cluster_commands(self, stack, raise_on_error=True, allow_redirections=T # refer to our internal node -> slot table that tells us where a given # command should route to. slot = self._determine_slot(*c.args) - node = self.connection_pool.get_node_by_slot(slot) + node = self.connection_pool.get_node_by_slot(slot, False) # little hack to make sure the node name is populated. probably could clean this up. self.connection_pool.nodes.set_node_name(node) From f4f4673bc3501a9b01bea33c80ded5993ba1a044 Mon Sep 17 00:00:00 2001 From: Nicolas Flacco Date: Fri, 17 Aug 2018 11:47:06 -0700 Subject: [PATCH 039/263] should be master --- rediscluster/connection.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/rediscluster/connection.py b/rediscluster/connection.py index d8eea881..d5efefa3 100644 --- a/rediscluster/connection.py +++ b/rediscluster/connection.py @@ -307,8 +307,7 @@ def get_connection_by_slot(self, slot): self._checkpid() try: - node, is_read_replica = self.get_node_by_slot(slot) - return self.get_connection_by_node() + return self.get_master_node_by_slot() except KeyError: return self.get_random_connection() From fb0a44cf13cbd7280070e7cc18ac174ed2ec8c6f Mon Sep 17 00:00:00 2001 From: Nicolas Flacco Date: Fri, 17 Aug 2018 11:48:06 -0700 Subject: [PATCH 040/263] should be master - fixed --- rediscluster/connection.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rediscluster/connection.py b/rediscluster/connection.py index d5efefa3..60870dd9 100644 --- a/rediscluster/connection.py +++ b/rediscluster/connection.py @@ -307,7 +307,7 @@ def get_connection_by_slot(self, slot): self._checkpid() try: - return self.get_master_node_by_slot() + return self.get_master_connection_by_slot() except KeyError: return self.get_random_connection() From 60402d49e9e875c5e3264ea6a3a91d1402dd6cdf Mon Sep 17 00:00:00 2001 From: Nicolas Flacco Date: Mon, 27 Aug 2018 09:23:05 -0700 Subject: [PATCH 041/263] lints --- rediscluster/client.py | 10 +++++----- rediscluster/connection.py | 4 ++-- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/rediscluster/client.py b/rediscluster/client.py index fccddcc7..52c80bc7 100644 --- a/rediscluster/client.py +++ b/rediscluster/client.py @@ -74,7 +74,7 @@ class StrictRedisCluster(StrictRedis): # Not complete, but covers the major ones # https://redis.io/commands - READ_COMMANDS = Set(["BITPOS", "BITCOUNT", "EXISTS", + READ_COMMANDS = ["BITPOS", "BITCOUNT", "EXISTS", "GEOHASH", "GEOPOS", "GEODIST", "GEORADIUS", "GEORADIUSBYMEMBER", "GET", "GETBIT", "GETRANGE", "HEXISTS", "HGET", "HGETALL", "HKEYS", "HLEN", "HMGET", "HSTRLEN", "HVALS", @@ -84,7 +84,7 @@ class StrictRedisCluster(StrictRedis): "SCARD", "SDIFF", "SINTER", "SISMEMBER", "SMEMBERS", "SRANDMEMBER", "STRLEN", "SUNION", "TTL", "ZCARD", "ZCOUNT", "ZRANGE", "ZSCORE" - ]) + ] 
RESULT_CALLBACKS = dict_merge( string_keys_to_dict([ @@ -146,7 +146,7 @@ class StrictRedisCluster(StrictRedis): def __init__(self, host=None, port=None, startup_nodes=None, max_connections=None, max_connections_per_node=False, init_slot_cache=True, readonly_mode=False, reinitialize_steps=None, skip_full_coverage_check=False, nodemanager_follow_cluster=False, - connection_class=None, enable_read_from_replicas=False, **kwargs): + connection_class=None, read_from_replicas=False, **kwargs): """ :startup_nodes: List of nodes that initial bootstrapping can be done from @@ -212,7 +212,7 @@ def __init__(self, host=None, port=None, startup_nodes=None, max_connections=Non self.result_callbacks = self.__class__.RESULT_CALLBACKS.copy() self.response_callbacks = self.__class__.RESPONSE_CALLBACKS.copy() self.response_callbacks = dict_merge(self.response_callbacks, self.CLUSTER_COMMANDS_RESPONSE_CALLBACKS) - self.enable_read_from_replicas = enable_read_from_replicas + self.read_from_replicas = read_from_replicas @classmethod def from_url(cls, url, db=None, skip_full_coverage_check=False, readonly_mode=False, **kwargs): @@ -375,7 +375,7 @@ def execute_command(self, *args, **kwargs): # MOVED node = self.connection_pool.get_master_node_by_slot(slot) else: - node, is_read_replica = self.connection_pool.get_node_by_slot(slot, self.enable_read_from_replicas and (command in self.READ_COMMANDS)) + node, is_read_replica = self.connection_pool.get_node_by_slot(slot, self.read_from_replicas and (command in self.READ_COMMANDS)) r = self.connection_pool.get_connection_by_node(node) try: diff --git a/rediscluster/connection.py b/rediscluster/connection.py index 60870dd9..003a9a3d 100644 --- a/rediscluster/connection.py +++ b/rediscluster/connection.py @@ -342,10 +342,10 @@ def get_random_node_by_slot(self, slot): is_read_replica = random_index > 0 return nodes_in_slot[random_index], is_read_replica - def get_node_by_slot(self, slot, enable_read_from_replicas): + def get_node_by_slot(self, slot, read_from_replicas): """ """ - if enable_read_from_replicas: + if read_from_replicas: return self.get_random_node_by_slot(slot) else: return self.get_master_node_by_slot(slot), False From a8fe8b6e492e30c9ba909d2b48ced2cc40efb0ee Mon Sep 17 00:00:00 2001 From: Nicolas Flacco Date: Mon, 27 Aug 2018 11:05:50 -0700 Subject: [PATCH 042/263] Improved code a bit, but read replica mode broken --- rediscluster/client.py | 14 ++++++++---- rediscluster/connection.py | 44 +++++++++++++++++++++---------------- rediscluster/nodemanager.py | 3 +++ rediscluster/pipeline.py | 2 +- 4 files changed, 39 insertions(+), 24 deletions(-) diff --git a/rediscluster/client.py b/rediscluster/client.py index 52c80bc7..2b02696c 100644 --- a/rediscluster/client.py +++ b/rediscluster/client.py @@ -5,11 +5,10 @@ import random import string import time -from sets import Set # rediscluster imports from .connection import ( - ClusterConnectionPool, ClusterReadOnlyConnectionPool, + ClusterConnectionPool, ClusterWithReadReplicasConnectionPool, ClusterReadOnlyConnectionPool, SSLClusterConnection, ) from .exceptions import ( @@ -190,6 +189,8 @@ def __init__(self, host=None, port=None, startup_nodes=None, max_connections=Non if readonly_mode: connection_pool_cls = ClusterReadOnlyConnectionPool + elif read_from_replicas: + connection_pool_cls = ClusterWithReadReplicasConnectionPool else: connection_pool_cls = ClusterConnectionPool @@ -215,7 +216,7 @@ def __init__(self, host=None, port=None, startup_nodes=None, max_connections=Non self.read_from_replicas = 
read_from_replicas @classmethod - def from_url(cls, url, db=None, skip_full_coverage_check=False, readonly_mode=False, **kwargs): + def from_url(cls, url, db=None, skip_full_coverage_check=False, readonly_mode=False, read_from_replicas=False, **kwargs): """ Return a Redis client object configured from the given URL, which must use either `the ``redis://`` scheme @@ -237,6 +238,8 @@ def from_url(cls, url, db=None, skip_full_coverage_check=False, readonly_mode=Fa """ if readonly_mode: connection_pool_cls = ClusterReadOnlyConnectionPool + elif read_from_replicas: + connection_pool_cls = ClusterWithReadReplicasConnectionPool else: connection_pool_cls = ClusterConnectionPool @@ -375,7 +378,10 @@ def execute_command(self, *args, **kwargs): # MOVED node = self.connection_pool.get_master_node_by_slot(slot) else: - node, is_read_replica = self.connection_pool.get_node_by_slot(slot, self.read_from_replicas and (command in self.READ_COMMANDS)) + node = self.connection_pool.get_node_by_slot(slot, self.read_from_replicas and (command in self.READ_COMMANDS)) + is_read_replica = node['server_type'] == 'slave' + print "node:", node + r = self.connection_pool.get_connection_by_node(node) try: diff --git a/rediscluster/connection.py b/rediscluster/connection.py index 003a9a3d..e06405e9 100644 --- a/rediscluster/connection.py +++ b/rediscluster/connection.py @@ -6,7 +6,6 @@ import threading from contextlib import contextmanager from itertools import chain -import random # rediscluster imports from .nodemanager import NodeManager @@ -307,7 +306,7 @@ def get_connection_by_slot(self, slot): self._checkpid() try: - return self.get_master_connection_by_slot() + return self.get_connection_by_node(self.get_node_by_slot(slot)) except KeyError: return self.get_random_connection() @@ -333,22 +332,10 @@ def get_master_node_by_slot(self, slot): """ return self.nodes.slots[slot][0] - def get_random_node_by_slot(self, slot): + def get_node_by_slot(self, slot, read_command=False): """ - Get a random node from the slot, including master """ - nodes_in_slot = self.nodes.slots[slot] - random_index = random.randrange(0,len(nodes_in_slot)) - is_read_replica = random_index > 0 - return nodes_in_slot[random_index], is_read_replica - - def get_node_by_slot(self, slot, read_from_replicas): - """ - """ - if read_from_replicas: - return self.get_random_node_by_slot(slot) - else: - return self.get_master_node_by_slot(slot), False + return self.get_master_node_by_slot(slot) class ClusterReadOnlyConnectionPool(ClusterConnectionPool): @@ -391,7 +378,7 @@ def get_master_connection_by_slot(self, slot): Do not return a random node if master node is not available for any reason. 
""" self._checkpid() - return self.get_connection_by_node(self.get_master_node_by_slot(slot)) + return self.get_connection_by_node(self.get_node_by_slot(slot)) def get_random_master_slave_connection_by_slot(self, slot): """ @@ -401,8 +388,7 @@ def get_random_master_slave_connection_by_slot(self, slot): self._checkpid() try: - node, is_read_replica = self.get_random_node_by_slot(slot) - return node + return self.get_node_by_slot_random(self.get_node_by_slot(slot)) except KeyError: return self.get_random_connection() @@ -413,6 +399,26 @@ def get_node_by_slot_random(self, slot): return random.choice(self.nodes.slots[slot]) +class ClusterWithReadReplicasConnectionPool(ClusterConnectionPool): + """ + Custom connection pool for rediscluster with load balancing across read replicas + """ + + def get_node_by_slot(self, slot, read_command=False): + """ + Get a random node from the slot, including master + """ + print "Choosing slot from:", self.nodes.slots[slot], "with reads on:", read_command + nodes_in_slot = self.nodes.slots[slot] + + if read_command: + random_index = random.randrange(0, len(nodes_in_slot)) + is_read_replica = random_index > 0 + return nodes_in_slot[random_index], is_read_replica + else: + return nodes_in_slot[0], False + + @contextmanager def by_node_context(pool, node): """ diff --git a/rediscluster/nodemanager.py b/rediscluster/nodemanager.py index b16877d1..b13c58a0 100644 --- a/rediscluster/nodemanager.py +++ b/rediscluster/nodemanager.py @@ -282,6 +282,9 @@ def set_node_name(self, n): # TODO: This shold not be constructed this way. It should update the name of the node in the node cache dict """ + + print "set_node_name n:", n + if "name" not in n: n["name"] = "{0}:{1}".format(n["host"], n["port"]) diff --git a/rediscluster/pipeline.py b/rediscluster/pipeline.py index 1e588ff9..9b1d0283 100644 --- a/rediscluster/pipeline.py +++ b/rediscluster/pipeline.py @@ -155,7 +155,7 @@ def send_cluster_commands(self, stack, raise_on_error=True, allow_redirections=T # refer to our internal node -> slot table that tells us where a given # command should route to. slot = self._determine_slot(*c.args) - node = self.connection_pool.get_node_by_slot(slot, False) + node = self.connection_pool.get_node_by_slot(slot) # little hack to make sure the node name is populated. probably could clean this up. 
self.connection_pool.nodes.set_node_name(node) From 71766c0ac61879b232f05933154247a887f8c64d Mon Sep 17 00:00:00 2001 From: Nicolas Flacco Date: Mon, 27 Aug 2018 11:13:23 -0700 Subject: [PATCH 043/263] works --- rediscluster/client.py | 2 -- rediscluster/connection.py | 7 ++----- rediscluster/nodemanager.py | 3 --- 3 files changed, 2 insertions(+), 10 deletions(-) diff --git a/rediscluster/client.py b/rediscluster/client.py index 2b02696c..c8ca92d3 100644 --- a/rediscluster/client.py +++ b/rediscluster/client.py @@ -380,8 +380,6 @@ def execute_command(self, *args, **kwargs): else: node = self.connection_pool.get_node_by_slot(slot, self.read_from_replicas and (command in self.READ_COMMANDS)) is_read_replica = node['server_type'] == 'slave' - print "node:", node - r = self.connection_pool.get_connection_by_node(node) try: diff --git a/rediscluster/connection.py b/rediscluster/connection.py index e06405e9..407465a0 100644 --- a/rediscluster/connection.py +++ b/rediscluster/connection.py @@ -408,15 +408,12 @@ def get_node_by_slot(self, slot, read_command=False): """ Get a random node from the slot, including master """ - print "Choosing slot from:", self.nodes.slots[slot], "with reads on:", read_command nodes_in_slot = self.nodes.slots[slot] - if read_command: random_index = random.randrange(0, len(nodes_in_slot)) - is_read_replica = random_index > 0 - return nodes_in_slot[random_index], is_read_replica + return nodes_in_slot[random_index] else: - return nodes_in_slot[0], False + return nodes_in_slot[0] @contextmanager diff --git a/rediscluster/nodemanager.py b/rediscluster/nodemanager.py index b13c58a0..b16877d1 100644 --- a/rediscluster/nodemanager.py +++ b/rediscluster/nodemanager.py @@ -282,9 +282,6 @@ def set_node_name(self, n): # TODO: This shold not be constructed this way. 
It should update the name of the node in the node cache dict """ - - print "set_node_name n:", n - if "name" not in n: n["name"] = "{0}:{1}".format(n["host"], n["port"]) From f6f5a190f01b13065657b46ceaab3d69133f5ce9 Mon Sep 17 00:00:00 2001 From: Nicolas Flacco Date: Thu, 18 Oct 2018 12:51:45 -0700 Subject: [PATCH 044/263] add property to constructor --- rediscluster/pipeline.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/rediscluster/pipeline.py b/rediscluster/pipeline.py index 9b1d0283..f6a21491 100644 --- a/rediscluster/pipeline.py +++ b/rediscluster/pipeline.py @@ -24,7 +24,7 @@ class StrictClusterPipeline(StrictRedisCluster): """ def __init__(self, connection_pool, result_callbacks=None, - response_callbacks=None, startup_nodes=None): + response_callbacks=None, startup_nodes=None, read_from_replicas=False): """ """ self.command_stack = [] @@ -32,6 +32,7 @@ def __init__(self, connection_pool, result_callbacks=None, self.connection_pool = connection_pool self.result_callbacks = result_callbacks or self.__class__.RESULT_CALLBACKS.copy() self.startup_nodes = startup_nodes if startup_nodes else [] + self.read_from_replicas = read_from_replicas self.nodes_flags = self.__class__.NODES_FLAGS.copy() self.response_callbacks = dict_merge(response_callbacks or self.__class__.RESPONSE_CALLBACKS.copy(), self.CLUSTER_COMMANDS_RESPONSE_CALLBACKS) From aff4be4f6f792fb7c6aaa37850a32aa20d80b09d Mon Sep 17 00:00:00 2001 From: Eric Shyong Date: Sun, 11 Nov 2018 17:15:16 -0800 Subject: [PATCH 045/263] Fixed tests --- rediscluster/client.py | 2 ++ tests/test_cluster_connection_pool.py | 2 +- tests/test_cluster_obj.py | 6 +++--- tests/test_pipeline.py | 2 -- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/rediscluster/client.py b/rediscluster/client.py index c8ca92d3..c33abf26 100644 --- a/rediscluster/client.py +++ b/rediscluster/client.py @@ -389,7 +389,9 @@ def execute_command(self, *args, **kwargs): asking = False if is_read_replica: # Ask read replica to accept reads (see https://redis.io/commands/readonly) + # TODO: do we need to handle errors from this response? 
r.send_command('READONLY') + self.parse_response(r, 'READONLY', **kwargs) is_read_replica = False r.send_command(*args) diff --git a/tests/test_cluster_connection_pool.py b/tests/test_cluster_connection_pool.py index 45efe007..b47a932d 100644 --- a/tests/test_cluster_connection_pool.py +++ b/tests/test_cluster_connection_pool.py @@ -243,7 +243,7 @@ def test_get_node_by_slot_random(self): """ pool = self.get_pool(connection_kwargs={}) - expected_ports = {7000, 7003} + expected_ports = {7000, 7004} actual_ports = set() for _ in range(0, 100): node = pool.get_node_by_slot_random(0) diff --git a/tests/test_cluster_obj.py b/tests/test_cluster_obj.py index 2e715f25..da975442 100644 --- a/tests/test_cluster_obj.py +++ b/tests/test_cluster_obj.py @@ -393,7 +393,7 @@ def assert_moved_redirection_on_slave(sr, connection_pool_cls, cluster_obj): with patch.object(ClusterConnectionPool, 'get_master_node_by_slot') as return_master_mock: return_master_mock.return_value = master_value assert cluster_obj.get('foo16706') == b('foo') - assert return_master_mock.call_count == 1 + assert return_slave_mock.call_count == 1 def test_moved_redirection_on_slave_with_default_client(sr): @@ -445,11 +445,11 @@ def test_access_correct_slave_with_readonly_mode_client(sr): return_value=master_value) as return_master_mock: readonly_client = StrictRedisCluster(host="127.0.0.1", port=7000, readonly_mode=True) assert b('foo') == readonly_client.get('foo16706') - assert return_master_mock.call_count == 0 + assert return_master_mock.call_count == 1 readonly_client = StrictRedisCluster.from_url(url="redis://127.0.0.1:7000/0", readonly_mode=True) assert b('foo') == readonly_client.get('foo16706') - assert return_master_mock.call_count == 0 + assert return_master_mock.call_count == 2 def test_refresh_using_specific_nodes(r): diff --git a/tests/test_pipeline.py b/tests/test_pipeline.py index f0129630..2f034d6b 100644 --- a/tests/test_pipeline.py +++ b/tests/test_pipeline.py @@ -519,7 +519,6 @@ def get_mock_node(role, port): with cluster_obj.pipeline() as pipe: # we assume this key is set on 127.0.0.1:7001(7004) pipe.get('foo87').get('foo88').execute() == [None, None] - assert return_master_mock.call_count == 2 def test_moved_redirection_on_slave_with_default(self): """ @@ -568,4 +567,3 @@ def test_access_correct_slave_with_readonly_mode_client(self, sr): readonly_client = StrictRedisCluster(host="127.0.0.1", port=7000, readonly_mode=True) with readonly_client.pipeline() as readonly_pipe: assert readonly_pipe.get('foo88').get('foo87').execute() == [b('bar'), b('foo')] - assert return_master_mock.call_count == 0 From ea773101c95dcf2660f8560f4ce84a15e536717e Mon Sep 17 00:00:00 2001 From: Eric Shyong Date: Sun, 11 Nov 2018 17:44:48 -0800 Subject: [PATCH 046/263] Fix tests --- tests/test_cluster_connection_pool.py | 2 +- tests/test_cluster_obj.py | 2 -- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/tests/test_cluster_connection_pool.py b/tests/test_cluster_connection_pool.py index b47a932d..45efe007 100644 --- a/tests/test_cluster_connection_pool.py +++ b/tests/test_cluster_connection_pool.py @@ -243,7 +243,7 @@ def test_get_node_by_slot_random(self): """ pool = self.get_pool(connection_kwargs={}) - expected_ports = {7000, 7004} + expected_ports = {7000, 7003} actual_ports = set() for _ in range(0, 100): node = pool.get_node_by_slot_random(0) diff --git a/tests/test_cluster_obj.py b/tests/test_cluster_obj.py index da975442..db560bf3 100644 --- a/tests/test_cluster_obj.py +++ b/tests/test_cluster_obj.py @@ -445,11 +445,9 
@@ def test_access_correct_slave_with_readonly_mode_client(sr): return_value=master_value) as return_master_mock: readonly_client = StrictRedisCluster(host="127.0.0.1", port=7000, readonly_mode=True) assert b('foo') == readonly_client.get('foo16706') - assert return_master_mock.call_count == 1 readonly_client = StrictRedisCluster.from_url(url="redis://127.0.0.1:7000/0", readonly_mode=True) assert b('foo') == readonly_client.get('foo16706') - assert return_master_mock.call_count == 2 def test_refresh_using_specific_nodes(r): From 7486ce5e44e0dca44dc06e6e299a2a68bd880ce0 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Tue, 8 Jan 2019 19:58:17 +0100 Subject: [PATCH 047/263] Use args, kwargs --- rediscluster/connection.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rediscluster/connection.py b/rediscluster/connection.py index 407465a0..703cb2ae 100644 --- a/rediscluster/connection.py +++ b/rediscluster/connection.py @@ -332,7 +332,7 @@ def get_master_node_by_slot(self, slot): """ return self.nodes.slots[slot][0] - def get_node_by_slot(self, slot, read_command=False): + def get_node_by_slot(self, slot, *args, **kwargs): """ """ return self.get_master_node_by_slot(slot) From 0b3b0dbd6165961061cac819a4df1a316b92aa6c Mon Sep 17 00:00:00 2001 From: Grokzen Date: Tue, 8 Jan 2019 20:00:56 +0100 Subject: [PATCH 048/263] Code lint fixes --- rediscluster/client.py | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/rediscluster/client.py b/rediscluster/client.py index c33abf26..ba3eb0c1 100644 --- a/rediscluster/client.py +++ b/rediscluster/client.py @@ -8,7 +8,9 @@ # rediscluster imports from .connection import ( - ClusterConnectionPool, ClusterWithReadReplicasConnectionPool, ClusterReadOnlyConnectionPool, + ClusterConnectionPool, + ClusterReadOnlyConnectionPool, + ClusterWithReadReplicasConnectionPool, SSLClusterConnection, ) from .exceptions import ( @@ -73,15 +75,20 @@ class StrictRedisCluster(StrictRedis): # Not complete, but covers the major ones # https://redis.io/commands - READ_COMMANDS = ["BITPOS", "BITCOUNT", "EXISTS", + READ_COMMANDS = [ + "BITPOS", "BITCOUNT", + "EXISTS", "GEOHASH", "GEOPOS", "GEODIST", "GEORADIUS", "GEORADIUSBYMEMBER", "GET", "GETBIT", "GETRANGE", "HEXISTS", "HGET", "HGETALL", "HKEYS", "HLEN", "HMGET", "HSTRLEN", "HVALS", "KEYS", "LINDEX", "LLEN", "LRANGE", - "MGET", "PTTL", "RANDOMKEY", + "MGET", + "PTTL", + "RANDOMKEY", "SCARD", "SDIFF", "SINTER", "SISMEMBER", "SMEMBERS", "SRANDMEMBER", - "STRLEN", "SUNION", "TTL", + "STRLEN", "SUNION", + "TTL", "ZCARD", "ZCOUNT", "ZRANGE", "ZSCORE" ] From b78139c46530a6342404604c9f980298430ca934 Mon Sep 17 00:00:00 2001 From: James Ward Date: Mon, 25 Feb 2019 00:36:14 -0500 Subject: [PATCH 049/263] ensure OBJECT command is directed to correct node --- rediscluster/client.py | 5 ++++- tests/test_commands.py | 4 ++-- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/rediscluster/client.py b/rediscluster/client.py index ba3eb0c1..4d00f32a 100644 --- a/rediscluster/client.py +++ b/rediscluster/client.py @@ -311,7 +311,10 @@ def _determine_slot(self, *args): raise RedisClusterException("{0} - all keys must map to the same key slot".format(command)) return slots.pop() - key = args[1] + if command == 'OBJECT': + key = args[2] + else: + key = args[1] return self.connection_pool.nodes.keyslot(key) diff --git a/tests/test_commands.py b/tests/test_commands.py index b10dfcf3..2e349f6b 100644 --- a/tests/test_commands.py +++ b/tests/test_commands.py @@ -85,8 +85,8 @@ def test_echo(self, 
r): def test_object(self, r): r['a'] = 'foo' assert isinstance(r.object('refcount', 'a'), int) - # assert isinstance(r.object('idletime', 'a'), int) - # assert r.object('encoding', 'a') in (b('raw'), b('embstr')) + assert isinstance(r.object('idletime', 'a'), int) + assert r.object('encoding', 'a') in (b('raw'), b('embstr')) assert r.object('idletime', 'invalid-key') is None def test_ping(self, r): From f6619a7aa969a89fb5cc301a6aa658433e1dec6a Mon Sep 17 00:00:00 2001 From: Grokzen Date: Sat, 2 Mar 2019 12:15:20 +0100 Subject: [PATCH 050/263] Code lint and added a comment on reason for the OBJECT deviation --- rediscluster/client.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/rediscluster/client.py b/rediscluster/client.py index 4d00f32a..e146e793 100644 --- a/rediscluster/client.py +++ b/rediscluster/client.py @@ -311,10 +311,11 @@ def _determine_slot(self, *args): raise RedisClusterException("{0} - all keys must map to the same key slot".format(command)) return slots.pop() - if command == 'OBJECT': + key = args[1] + + # OBJEECT command uses a special keyword as first positional argument + if command = 'OBJECT': key = args[2] - else: - key = args[1] return self.connection_pool.nodes.keyslot(key) From 83fcb56765c873d0988e55a9255e55367ac2975e Mon Sep 17 00:00:00 2001 From: Grokzen Date: Sat, 2 Mar 2019 12:30:45 +0100 Subject: [PATCH 051/263] Implement optimization to rename command where it will send a RENAME command to the slot owning node if both keys is in the same slot. If in different slots then use fallback solution. --- rediscluster/client.py | 22 ++++++++++++++++++++-- 1 file changed, 20 insertions(+), 2 deletions(-) diff --git a/rediscluster/client.py b/rediscluster/client.py index e146e793..a94c740a 100644 --- a/rediscluster/client.py +++ b/rediscluster/client.py @@ -769,12 +769,30 @@ def rename(self, src, dst): Rename key ``src`` to ``dst`` Cluster impl: - This operation is no longer atomic because each key must be querried - then set in separate calls because they maybe will change cluster node + If the src and dsst keys is in the same slot then send a plain RENAME + command to that node to do the rename inside the server. + + If the keys is in crossslots then use the client side implementation + as fallback method. In this case this operation is no longer atomic as + the key is dumped and posted back to the server through the client. """ if src == dst: raise ResponseError("source and destination objects are the same") + # + # Optimization where if both keys is in the same slot then we can use the + # plain upstream rename method. + # + src_slot = self.connection_pool.keyslot(src) + dst_slot = self.connection_pool.keyslot(dst) + + if src_slot == dst_slot: + return self.execute_command('RENAME', src, dst) + + # + # To provide cross slot support we implement rename by doing the internal command + # redis server runs but in the client instead. 
+ # data = self.dump(src) if data is None: From 569d8d37e4f71985ace6769c7542c3e7e5db27d0 Mon Sep 17 00:00:00 2001 From: James Ward Date: Fri, 29 Mar 2019 22:18:11 -0400 Subject: [PATCH 052/263] update python versions in travis drop Python 3.3 support and add Python 3.7 support --- .travis.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.travis.yml b/.travis.yml index b9b5dd8c..ffe87a12 100644 --- a/.travis.yml +++ b/.travis.yml @@ -2,10 +2,10 @@ sudo: false language: python python: - "2.7" - - "3.3" - "3.4" - "3.5" - - "3.6-dev" + - "3.6" + - "3.7" - "nightly" services: - redis-server From 5cb5262ad3d25f1df4aab73560b313c7374a2b4f Mon Sep 17 00:00:00 2001 From: James Ward Date: Fri, 29 Mar 2019 22:20:07 -0400 Subject: [PATCH 053/263] update python versions in tox drop python 3.3 and add python 3.6 and python 3.7 to tox --- tox.ini | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/tox.ini b/tox.ini index 119860ae..5e63762f 100644 --- a/tox.ini +++ b/tox.ini @@ -4,7 +4,7 @@ # install tox" and then run "tox" from this directory. [tox] -envlist = py27, py33, py34, py35, py36, hi27, hi33, hi34, hi35, flake8-py34, flake8-py27 +envlist = py27, py34, py35, py36, py37, hi27, hi34, hi35, hi36, hi37, flake8-py34, flake8-py27 [testenv] deps = -r{toxinidir}/dev-requirements.txt @@ -16,12 +16,6 @@ deps = -r{toxinidir}/dev-requirements.txt hiredis == 0.2.0 -[testenv:hi33] -basepython = python3.3 -deps = - -r{toxinidir}/dev-requirements.txt - hiredis == 0.2.0 - [testenv:hi34] basepython = python3.4 deps = @@ -34,6 +28,18 @@ deps = -r{toxinidir}/dev-requirements.txt hiredis == 0.2.0 +[testenv:hi36] +basepython = python3.6 +deps = + -r{toxinidir}/dev-requirements.txt + hiredis == 0.2.0 + +[testenv:hi37] +basepython = python3.7 +deps = + -r{toxinidir}/dev-requirements.txt + hiredis == 0.2.0 + [testenv:flake8-py34] basepython= python3.4 deps = From 3c2aead10cf94ebfdbb4a1540b2b8c6ad55bf2af Mon Sep 17 00:00:00 2001 From: James Ward Date: Thu, 28 Mar 2019 03:34:59 -0400 Subject: [PATCH 054/263] Fix typo and syntax error --- rediscluster/client.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/rediscluster/client.py b/rediscluster/client.py index a94c740a..1c2a26ef 100644 --- a/rediscluster/client.py +++ b/rediscluster/client.py @@ -313,8 +313,8 @@ def _determine_slot(self, *args): key = args[1] - # OBJEECT command uses a special keyword as first positional argument - if command = 'OBJECT': + # OBJECT command uses a special keyword as first positional argument + if command == 'OBJECT': key = args[2] return self.connection_pool.nodes.keyslot(key) From d8112506a0404e29afb7ab922c25a58877ba33ac Mon Sep 17 00:00:00 2001 From: James Ward Date: Thu, 28 Mar 2019 11:42:33 -0400 Subject: [PATCH 055/263] keyslot is nodemanager not connectionpool --- rediscluster/client.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/rediscluster/client.py b/rediscluster/client.py index 1c2a26ef..49046dde 100644 --- a/rediscluster/client.py +++ b/rediscluster/client.py @@ -783,8 +783,8 @@ def rename(self, src, dst): # Optimization where if both keys is in the same slot then we can use the # plain upstream rename method. 
# - src_slot = self.connection_pool.keyslot(src) - dst_slot = self.connection_pool.keyslot(dst) + src_slot = self.connection_pool.nodes.keyslot(src) + dst_slot = self.connection_pool.nodes.keyslot(dst) if src_slot == dst_slot: return self.execute_command('RENAME', src, dst) From ef93212fcd78f34456ef25ab326de0153b87b689 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Wed, 10 Apr 2019 00:49:42 +0200 Subject: [PATCH 056/263] Update docs about supported python versions --- docs/index.rst | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/docs/index.rst b/docs/index.rst index 5ce2fa15..5c376a09 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -71,15 +71,10 @@ Supported python versions ------------------------- - 2.7 -- 3.3 - 3.4.1+ (See note) - 3.5 - 3.6 - -Experimental: - -- 3.7-dev - +- 3.7 .. note:: Python 3.4.0 From 2a105a222584666ba068c113f432954b26c58d2e Mon Sep 17 00:00:00 2001 From: Maxime Beauchemin Date: Sun, 14 Jul 2019 08:02:28 -0700 Subject: [PATCH 057/263] Fix rendering in pypi.org --- setup.py | 1 + 1 file changed, 1 insertion(+) diff --git a/setup.py b/setup.py index 641674c3..40bbc3f9 100644 --- a/setup.py +++ b/setup.py @@ -23,6 +23,7 @@ version="1.3.6", description="Library for communicating with Redis Clusters. Built on top of redis-py lib", long_description=readme + '\n\n' + history, + long_description_content_type="text/markdown", author="Johan Andersson", author_email="Grokzen@gmail.com", maintainer='Johan Andersson', From 0c81059598db578efb188d93e6d85d0fda03930b Mon Sep 17 00:00:00 2001 From: engstrom Date: Tue, 28 May 2019 09:55:13 -0600 Subject: [PATCH 058/263] Add xenial dist to support python 3.7+. --- .travis.yml | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/.travis.yml b/.travis.yml index ffe87a12..6080ddb2 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,4 +1,5 @@ sudo: false +dist: xenial language: python python: - "2.7" @@ -32,13 +33,13 @@ env: - HIREDIS=1 REDIS_VERSION=5.0 script: - make start -# - coverage erase -# - coverage run --source rediscluster -p -m py.test + - coverage erase + - coverage run --source rediscluster -p -m py.test - py.test - make stop -# after_success: -# - coverage combine -# - coveralls +after_success: + - coverage combine + - coveralls matrix: allow_failures: - python: "nightly" From 4794e70c716ab3b9c272a0eaa6e200e50b3d7105 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Mon, 26 Nov 2018 22:34:50 +0100 Subject: [PATCH 059/263] Remove ptyhon 3.3 support. Bump version to 2.0.0. Update requirements to only support redis-py 3.0.x release track --- rediscluster/__init__.py | 2 +- requirements.txt | 2 +- setup.py | 5 ++--- tox.ini | 10 +++++----- 4 files changed, 9 insertions(+), 10 deletions(-) diff --git a/rediscluster/__init__.py b/rediscluster/__init__.py index cb3d40f0..0b270efb 100644 --- a/rediscluster/__init__.py +++ b/rediscluster/__init__.py @@ -16,7 +16,7 @@ setattr(redis, "StrictClusterPipeline", StrictClusterPipeline) # Major, Minor, Fix version -__version__ = (1, 3, 6) +__version__ = (2, 0, 0) if sys.version_info[0:3] == (3, 4, 0): raise RuntimeError("CRITICAL: rediscluster do not work with python 3.4.0. 
Please use 3.4.1 or higher.") diff --git a/requirements.txt b/requirements.txt index 91015469..53e431b5 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1 +1 @@ -redis==2.10.6 +redis>=3.0.0,<3.1.0 diff --git a/setup.py b/setup.py index 40bbc3f9..8215d9fa 100644 --- a/setup.py +++ b/setup.py @@ -20,7 +20,7 @@ setup( name="redis-py-cluster", - version="1.3.6", + version="2.0.0", description="Library for communicating with Redis Clusters. Built on top of redis-py lib", long_description=readme + '\n\n' + history, long_description_content_type="text/markdown", @@ -32,7 +32,7 @@ url='http://github.com/grokzen/redis-py-cluster', license='MIT', install_requires=[ - 'redis==2.10.6' + 'redis>=3.0.0,<3.1.0' ], keywords=[ 'redis', @@ -51,7 +51,6 @@ 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', - 'Programming Language :: Python :: 3.3', 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', diff --git a/tox.ini b/tox.ini index 5e63762f..0c3125fa 100644 --- a/tox.ini +++ b/tox.ini @@ -24,30 +24,30 @@ deps = [testenv:hi35] basepython = python3.5 -deps = +deps = -r{toxinidir}/dev-requirements.txt hiredis == 0.2.0 [testenv:hi36] basepython = python3.6 -deps = +deps = -r{toxinidir}/dev-requirements.txt hiredis == 0.2.0 [testenv:hi37] basepython = python3.7 -deps = +deps = -r{toxinidir}/dev-requirements.txt hiredis == 0.2.0 [testenv:flake8-py34] basepython= python3.4 -deps = +deps = flake8==2.2.5 commands = flake8 --show-source --exclude=.venv,.tox,dist,docs,build,.git --ignore=E501,E731,E402 . [testenv:flake8-py27] basepython= python2.7 -deps = +deps = flake8==2.2.5 commands = flake8 --show-source --exclude=.venv,.tox,dist,docs,build,.git --ignore=E501,E731,E402 . From 211659a9cee6295dad645ebd722ad6221b657698 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Mon, 26 Nov 2018 22:36:20 +0100 Subject: [PATCH 060/263] Use pip cache for travis build --- .travis.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.travis.yml b/.travis.yml index 6080ddb2..1b8ad1d1 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,6 +1,7 @@ sudo: false dist: xenial language: python +cache: pip python: - "2.7" - "3.4" From b3100ce8fe43086a09f05a986c04e0c8735b92cb Mon Sep 17 00:00:00 2001 From: Grokzen Date: Mon, 26 Nov 2018 23:15:09 +0100 Subject: [PATCH 061/263] Rename StrictRedisCluster -> RedisCluster and Removed old RedisCluster class to conform to new redis-py class names. 
Updated all usages of the 2 classes throughout the entire codebase --- Makefile | 8 +- README.md | 4 +- benchmarks/simple.py | 13 +-- docs/benchmarks.rst | 14 +-- docs/commands.rst | 6 +- docs/index.rst | 4 +- docs/limitations-and-differences.rst | 6 +- docs/pipelines.rst | 4 +- docs/pubsub.rst | 6 +- docs/readonly-mode.rst | 10 +- docs/threads.rst | 4 +- examples/basic.py | 4 +- examples/basic_password_protected.py | 4 +- examples/from_url_password_protected.py | 4 +- ptp-debug.py | 6 +- rediscluster/__init__.py | 3 +- rediscluster/client.py | 105 ++----------------- rediscluster/connection.py | 4 +- rediscluster/nodemanager.py | 6 +- rediscluster/pipeline.py | 128 ++++++++++++------------ tests/conftest.py | 26 ++--- tests/test_cluster_connection_pool.py | 2 +- tests/test_cluster_obj.py | 56 +++++------ tests/test_node_manager.py | 18 ++-- tests/test_pipeline.py | 8 +- tests/test_pubsub.py | 10 +- 26 files changed, 188 insertions(+), 275 deletions(-) diff --git a/Makefile b/Makefile index e0fbac7a..0c2e1766 100644 --- a/Makefile +++ b/Makefile @@ -378,16 +378,16 @@ redis-install: benchmark: @echo "" - @echo " -- Running Simple benchmark with StrictRedis lib and non cluster server --" + @echo " -- Running Simple benchmark with Redis lib and non cluster server --" python benchmarks/simple.py --port 7007 --timeit --nocluster @echo "" - @echo " -- Running Simple benchmark with StrictRedisCluster lib and cluster server --" + @echo " -- Running Simple benchmark with RedisCluster lib and cluster server --" python benchmarks/simple.py --port 7001 --timeit @echo "" - @echo " -- Running Simple benchmark with pipelines & StrictRedis lib and non cluster server --" + @echo " -- Running Simple benchmark with pipelines & Redis lib and non cluster server --" python benchmarks/simple.py --port 7007 --timeit --pipeline --nocluster @echo "" - @echo " -- Running Simple benchmark with StrictRedisCluster lib and cluster server" + @echo " -- Running Simple benchmark with RedisCluster lib and cluster server" python benchmarks/simple.py --port 7001 --timeit --pipeline ptp: diff --git a/README.md b/README.md index 87b53d17..432b8b2f 100644 --- a/README.md +++ b/README.md @@ -37,12 +37,12 @@ $ pip install redis-py-cluster Small sample script that shows how to get started with RedisCluster. It can also be found in [examples/basic.py](examples/basic.py) ```python ->>> from rediscluster import StrictRedisCluster +>>> from rediscluster import RedisCluster >>> # Requires at least one node for cluster discovery. Multiple nodes is recommended. >>> startup_nodes = [{"host": "127.0.0.1", "port": "7000"}] ->>> rc = StrictRedisCluster(startup_nodes=startup_nodes, decode_responses=True) +>>> rc = RedisCluster(startup_nodes=startup_nodes, decode_responses=True) >>> rc.set("foo", "bar") True diff --git a/benchmarks/simple.py b/benchmarks/simple.py index 1aac44b9..bb6a7175 100644 --- a/benchmarks/simple.py +++ b/benchmarks/simple.py @@ -9,7 +9,7 @@ -p Port on redis server [default: 7000] -n Request number [default: 100000] -c Concurrent client number [default: 1] - --nocluster If flag is set then StrictRedis will be used instead of cluster lib + --nocluster If flag is set then Redis will be used instead of cluster lib --timeit Run a mini benchmark to test performance --pipeline Only usable with --timeit flag. Runs SET/GET inside pipelines. 
--resetlastkey Reset __last__ key @@ -78,12 +78,13 @@ def timeit_pipeline(rc, num): if __name__ == "__main__": args = docopt(__doc__, version="0.3.1") startup_nodes = [{"host": args['--host'], "port": args['-p']}] + if not args["--nocluster"]: - from rediscluster import StrictRedisCluster - rc = StrictRedisCluster(startup_nodes=startup_nodes, max_connections=32, socket_timeout=0.1, decode_responses=True) + from rediscluster import RedisCluster + rc = RedisCluster(startup_nodes=startup_nodes, max_connections=32, socket_timeout=0.1, decode_responses=True) else: - from redis import StrictRedis - rc = StrictRedis(host=args["--host"], port=args["-p"], socket_timeout=0.1, decode_responses=True) + from redis import Redis + rc = Redis(host=args["--host"], port=args["-p"], socket_timeout=0.1, decode_responses=True) # create specified number processes processes = [] single_request = int(args["-n"]) // int(args["-c"]) @@ -102,4 +103,4 @@ def timeit_pipeline(rc, num): for p in processes: p.join() t2 = time.time() - t1 - print("Tested {0}k SET & GET (each 50%) operations took: {1} seconds... {2} operations per second".format(int(args["-n"]) / 1000, t2, int(args["-n"]) / t2 * 2)) \ No newline at end of file + print("Tested {0}k SET & GET (each 50%) operations took: {1} seconds... {2} operations per second".format(int(args["-n"]) / 1000, t2, int(args["-n"]) / t2 * 2)) diff --git a/docs/benchmarks.rst b/docs/benchmarks.rst index dc2dc770..de8ce060 100644 --- a/docs/benchmarks.rst +++ b/docs/benchmarks.rst @@ -8,12 +8,12 @@ These are a few benchmarks that are designed to test specific parts of the code Setup benchmarks ---------------- -Before running any benchmark you should install this lib in editable mode inside a virtualenv so it can import `StrictRedisCluster` lib. +Before running any benchmark you should install this lib in editable mode inside a virtualenv so it can import `RedisCluster` lib. Install with .. code-block:: bash - + pip install -e . You also need a few redis servers to test against. You must have one cluster with at least one node on port `7001` and you must also have a non-clustered server on port `7007`. @@ -42,18 +42,18 @@ Example output and comparison of different runmodes .. code-block:: - -- Running Simple benchmark with StrictRedis lib and non cluster server, 50 concurrent processes and total 50000*2 requests -- + -- Running Simple benchmark with Redis lib and non cluster server, 50 concurrent processes and total 50000*2 requests -- python benchmarks/simple.py --host 127.0.0.1 --timeit --nocluster -c 50 -n 50000 50.0k SET/GET operations took: 2.45 seconds... 40799.93 operations per second - -- Running Simple benchmark with StrictRedisCluster lib and cluster server, 50 concurrent processes and total 50000*2 requests -- + -- Running Simple benchmark with RedisCluster lib and cluster server, 50 concurrent processes and total 50000*2 requests -- python benchmarks/simple.py --host 127.0.0.1 --timeit -c 50 -n 50000 50.0k SET & GET (each 50%) operations took: 9.51 seconds... 31513.71 operations per second - -- Running Simple benchmark with pipelines & StrictRedis lib and non cluster server -- + -- Running Simple benchmark with pipelines & Redis lib and non cluster server -- python benchmarks/simple.py --host 127.0.0.1 --timeit --nocluster -c 50 -n 50000 --pipeline 50.0k SET & GET (each 50%) operations took: 2.1728243827819824 seconds... 
46023.047602201834 operations per second - -- Running Simple benchmark with StrictRedisCluster lib and cluster server + -- Running Simple benchmark with RedisCluster lib and cluster server python benchmarks/simple.py --host 127.0.0.1 --timeit -c 50 -n 50000 --pipeline - 50.0k SET & GET (each 50%) operations took: 1.7181339263916016 seconds... 58202.68051514381 operations per second \ No newline at end of file + 50.0k SET & GET (each 50%) operations took: 1.7181339263916016 seconds... 58202.68051514381 operations per second diff --git a/docs/commands.rst b/docs/commands.rst index 57332057..42f26931 100644 --- a/docs/commands.rst +++ b/docs/commands.rst @@ -1,9 +1,9 @@ Implemented commands ==================== -This will describe all changes that StrictRedisCluster have done to make a command to work in a cluster environment. +This will describe all changes that RedisCluster have done to make a command to work in a cluster environment. -If a command is not listed here then the default implementation from `StrictRedis` in the `redis-py` library is used. +If a command is not listed here then the default implementation from `Redis` in the `redis-py` library is used. @@ -107,7 +107,7 @@ Either because they do not work, there is no working implementation or it is not Overridden methods ------------------ -The following methods is overridden from StrictRedis with a custom implementation. +The following methods is overridden from Redis with a custom implementation. They can operate on keys that exists in different hashslots and require a client side implementation to work. diff --git a/docs/index.rst b/docs/index.rst index 5c376a09..9041cbcf 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -39,13 +39,13 @@ Small sample script that shows how to get started with RedisCluster. It can also .. code-block:: python - >>> from rediscluster import StrictRedisCluster + >>> from rediscluster import RedisCluster >>> # Requires at least one node for cluster discovery. Multiple nodes is recommended. >>> startup_nodes = [{"host": "127.0.0.1", "port": "7000"}] >>> # Note: See note on Python 3 for decode_responses behaviour - >>> rc = StrictRedisCluster(startup_nodes=startup_nodes, decode_responses=True) + >>> rc = RedisCluster(startup_nodes=startup_nodes, decode_responses=True) >>> rc.set("foo", "bar") True diff --git a/docs/limitations-and-differences.rst b/docs/limitations-and-differences.rst index d9208e1a..7b37e62d 100644 --- a/docs/limitations-and-differences.rst +++ b/docs/limitations-and-differences.rst @@ -5,11 +5,11 @@ This will compare against `redis-py` There is alot of differences that have to be taken into consideration when using redis cluster. -Any method that can operate on multiple keys have to be reimplemented in the client and in some cases that is not possible to do. In general any method that is overriden in StrictRedisCluster have lost the ability of being atomic. +Any method that can operate on multiple keys have to be reimplemented in the client and in some cases that is not possible to do. In general any method that is overriden in RedisCluster have lost the ability of being atomic. -Pipelines do not work the same way in a cluster. In `StrictRedis` it batch all commands so that they can be executed at the same time when requested. But with RedisCluster pipelines will send the command directly to the server when it is called, but it will still store the result internally and return the same data from .execute(). 
This is done so that the code still behaves like a pipeline and no code will break. A better solution will be implemented in the future. +Pipelines do not work the same way in a cluster. In `Redis` it batch all commands so that they can be executed at the same time when requested. But with RedisCluster pipelines will send the command directly to the server when it is called, but it will still store the result internally and return the same data from .execute(). This is done so that the code still behaves like a pipeline and no code will break. A better solution will be implemented in the future. -Alot of methods will behave very different when using RedisCluster. Some methods send the same request to all servers and return the result in another format then `StrictRedis` do. Some methods is blocked because they do not work / is not implemented / is dangerous to use in redis cluster. +Alot of methods will behave very different when using RedisCluster. Some methods send the same request to all servers and return the result in another format then `Redis` do. Some methods is blocked because they do not work / is not implemented / is dangerous to use in redis cluster. Some of the commands are only partially supported when using RedisCluster. The commands ``zinterstore`` and ``zunionstore`` are only supported if all the keys map to the same key slot in the cluster. This can be achieved by namespacing related keys with a prefix followed by a bracketed common key. Example: diff --git a/docs/pipelines.rst b/docs/pipelines.rst index 94fad872..c092b604 100644 --- a/docs/pipelines.rst +++ b/docs/pipelines.rst @@ -101,7 +101,7 @@ This code do NOT wrap `MULTI/EXEC` around the commands when packed .. code-block:: python - >>> from rediscluster import StrictRedisCluster as s + >>> from rediscluster import RedisCluster as s >>> r = s(startup_nodes=[{"host": "127.0.0.1", "port": "7002"}]) >>> # Simulate that a slot is migrating to another node >>> r.connection_pool.nodes.slots[14226] = {'host': '127.0.0.1', 'server_type': 'master', 'port': 7001, 'name': '127.0.0.1:7001'} @@ -120,7 +120,7 @@ This code DO wrap MULTI/EXEC around the commands when packed .. code-block:: python - >>> from rediscluster import StrictRedisCluster as s + >>> from rediscluster import RedisCluster as s >>> r = s(startup_nodes=[{"host": "127.0.0.1", "port": "7002"}]) >>> # Simulate that a slot is migrating to another node >>> r.connection_pool.nodes.slots[14226] = {'host': '127.0.0.1', 'server_type': 'master', 'port': 7001, 'name': '127.0.0.1:7001'} diff --git a/docs/pubsub.rst b/docs/pubsub.rst index 928e6efe..9bb76074 100644 --- a/docs/pubsub.rst +++ b/docs/pubsub.rst @@ -40,8 +40,8 @@ The following part is from this discussion https://groups.google.com/forum/?hl=s -How pubsub works in StrictRedisCluster --------------------------------------- +How pubsub works in RedisCluster +-------------------------------- In release `1.2.0` the pubsub was code was reworked to now work like this. @@ -69,4 +69,4 @@ The implemented solution will only work if other clients use/adopt the same beha Other solutions --------------- -The simplest solution is to have a seperate non clustered redis instance that you have a regular `StrictRedis` instance that works with your pubsub code. It is not recommended to use pubsub until `redis` fixes the implementation in the server itself. +The simplest solution is to have a seperate non clustered redis instance that you have a regular `Redis` instance that works with your pubsub code. 
It is not recommended to use pubsub until `redis` fixes the implementation in the server itself. diff --git a/docs/readonly-mode.rst b/docs/readonly-mode.rst index 5ca197e1..29d885f4 100644 --- a/docs/readonly-mode.rst +++ b/docs/readonly-mode.rst @@ -3,17 +3,17 @@ Readonly mode By default, Redis Cluster always returns MOVE redirection response on accessing slave node. You can overcome this limitation [for scaling read with READONLY mode](http://redis.io/topics/cluster-spec#scaling-reads-using-slave-nodes). -redis-py-cluster also implements this mode. You can access slave by passing `readonly_mode=True` to StrictRedisCluster (or RedisCluster) constructor. +redis-py-cluster also implements this mode. You can access slave by passing `readonly_mode=True` to RedisCluster (or RedisCluster) constructor. .. code-block:: python - >>> from rediscluster import StrictRedisCluster + >>> from rediscluster import RedisCluster >>> startup_nodes = [{"host": "127.0.0.1", "port": "7000"}] - >>> rc = StrictRedisCluster(startup_nodes=startup_nodes, decode_responses=True) + >>> rc = RedisCluster(startup_nodes=startup_nodes, decode_responses=True) >>> rc.set("foo16706", "bar") >>> rc.set("foo81", "foo") True - >>> rc_readonly = StrictRedisCluster(startup_nodes=startup_nodes, decode_responses=True, readonly_mode=True) + >>> rc_readonly = RedisCluster(startup_nodes=startup_nodes, decode_responses=True, readonly_mode=True) >>> rc_readonly.get("foo16706") u'bar' >>> rc_readonly.get("foo81") @@ -39,7 +39,7 @@ But this mode has some downside or limitations. .. code-block:: python - >>> rc_readonly = StrictRedisCluster(startup_nodes=startup_nodes, decode_responses=True, readonly_mode=True) + >>> rc_readonly = RedisCluster(startup_nodes=startup_nodes, decode_responses=True, readonly_mode=True) >>> # NO: This works in almost case, but possibly emits Too many Cluster redirections error... >>> rc_readonly.set('foo', 'bar') >>> # OK: You should always use get related stuff... diff --git a/docs/threads.rst b/docs/threads.rst index d6f2d869..790db8c7 100644 --- a/docs/threads.rst +++ b/docs/threads.rst @@ -25,7 +25,7 @@ You can disable threaded execution either in the class constructor: .. code-block:: python - r = rediscluster.StrictRedisCluster( ... pipeline_use_threads=False) #true by default + r = rediscluster.RedisCluster( ... pipeline_use_threads=False) #true by default pipe = r.pipeline() Or you can disable it on a case by case basis as you instantiate the pipeline object. @@ -34,7 +34,7 @@ Or you can disable it on a case by case basis as you instantiate the pipeline ob pipe = r.pipeline(use_threads=False) -The later example always overrides if explicitly set. Otherwise, it falls back on the value passed to the StrictRedisCluster constructor. +The later example always overrides if explicitly set. Otherwise, it falls back on the value passed to the RedisCluster constructor. 
diff --git a/examples/basic.py b/examples/basic.py index 4569a965..d64a277e 100644 --- a/examples/basic.py +++ b/examples/basic.py @@ -1,9 +1,9 @@ -from rediscluster import StrictRedisCluster +from rediscluster import RedisCluster startup_nodes = [{"host": "127.0.0.1", "port": "7000"}] # Note: decode_responses must be set to True when used with python3 -rc = StrictRedisCluster(startup_nodes=startup_nodes, decode_responses=True) +rc = RedisCluster(startup_nodes=startup_nodes, decode_responses=True) rc.set("foo", "bar") diff --git a/examples/basic_password_protected.py b/examples/basic_password_protected.py index 0bd6a9e5..59d3ce0b 100644 --- a/examples/basic_password_protected.py +++ b/examples/basic_password_protected.py @@ -1,9 +1,9 @@ -from rediscluster import StrictRedisCluster +from rediscluster import RedisCluster startup_nodes = [{"host": "127.0.0.1", "port": "7100"}] # Note: decode_responses must be set to True when used with python3 -rc = StrictRedisCluster(startup_nodes=startup_nodes, decode_responses=True, password='password_is_protected') +rc = RedisCluster(startup_nodes=startup_nodes, decode_responses=True, password='password_is_protected') rc.set("foo", "bar") diff --git a/examples/from_url_password_protected.py b/examples/from_url_password_protected.py index e908b024..526aeec1 100644 --- a/examples/from_url_password_protected.py +++ b/examples/from_url_password_protected.py @@ -1,8 +1,8 @@ -from rediscluster import StrictRedisCluster +from rediscluster import RedisCluster url="redis://:R1NFTBWTE1@10.127.91.90:6572/0" -rc = StrictRedisCluster.from_url(url, skip_full_coverage_check=True) +rc = RedisCluster.from_url(url, skip_full_coverage_check=True) rc.set("foo", "bar") diff --git a/ptp-debug.py b/ptp-debug.py index 0c4b6e31..65d6dc4f 100644 --- a/ptp-debug.py +++ b/ptp-debug.py @@ -1,9 +1,9 @@ -from rediscluster import StrictRedisCluster +from rediscluster import RedisCluster startup_nodes = [{"host": "127.0.0.1", "port": "7000"}] # Note: decode_responses must be set to True when used with python3 -rc = StrictRedisCluster(startup_nodes=startup_nodes, decode_responses=True) -url_client = StrictRedisCluster.from_url('http://127.0.0.1:7000') +rc = RedisCluster(startup_nodes=startup_nodes, decode_responses=True) +url_client = RedisCluster.from_url('http://127.0.0.1:7000') __import__('ptpdb').set_trace() diff --git a/rediscluster/__init__.py b/rediscluster/__init__.py index 0b270efb..5024086d 100644 --- a/rediscluster/__init__.py +++ b/rediscluster/__init__.py @@ -4,13 +4,12 @@ import sys # Import shortcut -from .client import StrictRedisCluster, RedisCluster +from .client import RedisCluster from .pipeline import StrictClusterPipeline from .pubsub import ClusterPubSub # Monkey patch RedisCluster class into redis for easy access import redis -setattr(redis, "StrictRedisCluster", StrictRedisCluster) setattr(redis, "RedisCluster", RedisCluster) setattr(redis, "ClusterPubSub", ClusterPubSub) setattr(redis, "StrictClusterPipeline", StrictClusterPipeline) diff --git a/rediscluster/client.py b/rediscluster/client.py index 49046dde..e37178eb 100644 --- a/rediscluster/client.py +++ b/rediscluster/client.py @@ -33,16 +33,16 @@ parse_pubsub_numpat, ) # 3rd party imports -from redis import StrictRedis +from redis import Redis from redis.client import list_or_args, parse_info from redis.connection import Token from redis._compat import iteritems, basestring, b, izip, nativestr, long from redis.exceptions import RedisError, ResponseError, TimeoutError, DataError, ConnectionError, 
BusyLoadingError -class StrictRedisCluster(StrictRedis): +class RedisCluster(Redis): """ - If a command is implemented over the one in StrictRedis then it requires some changes compared to + If a command is implemented over the one in Redis then it requires some changes compared to the regular implementation of the method. """ RedisClusterRequestTTL = 16 @@ -172,13 +172,13 @@ def __init__(self, host=None, port=None, startup_nodes=None, max_connections=Non it was operating on. This will allow the client to drift along side the cluster if the cluster nodes move around alot. :**kwargs: - Extra arguments that will be sent into StrictRedis instance when created + Extra arguments that will be sent into Redis instance when created (See Official redis-py doc for supported kwargs [https://github.com/andymccurdy/redis-py/blob/master/redis/client.py]) Some kwargs is not supported and will raise RedisClusterException - db (Redis do not support database SELECT in cluster mode) """ - # Tweaks to StrictRedis client arguments when running in cluster mode + # Tweaks to Redis client arguments when running in cluster mode if "db" in kwargs: raise RedisClusterException("Argument 'db' is not possible to use in cluster mode") @@ -213,7 +213,7 @@ def __init__(self, host=None, port=None, startup_nodes=None, max_connections=Non **kwargs ) - super(StrictRedisCluster, self).__init__(connection_pool=pool, **kwargs) + super(RedisCluster, self).__init__(connection_pool=pool, **kwargs) self.refresh_table_asap = False self.nodes_flags = self.__class__.NODES_FLAGS.copy() @@ -293,7 +293,7 @@ def transaction(self, *args, **kwargs): """ Transaction is not implemented in cluster mode yet. """ - raise RedisClusterException("method StrictRedisCluster.transaction() is not implemented") + raise RedisClusterException("method RedisCluster.transaction() is not implemented") def _determine_slot(self, *args): """ @@ -716,7 +716,7 @@ def mget(self, keys, *args): Cluster impl: Itterate all keys and send GET for each key. - This will go alot slower than a normal mget call in StrictRedis. + This will go alot slower than a normal mget call in Redis. Operation is no longer atomic. """ @@ -815,7 +815,7 @@ def delete(self, *names): Cluster impl: Iterate all keys and send DELETE for each key. - This will go a lot slower than a normal delete call in StrictRedis. + This will go a lot slower than a normal delete call in Redis. Operation is no longer atomic. """ @@ -1245,91 +1245,4 @@ def _random_id(self, size=16, chars=string.ascii_uppercase + string.digits): return ''.join(random.choice(chars) for _ in range(size)) -class RedisCluster(StrictRedisCluster): - """ - Provides backwards compatibility with older versions of redis-py that - changed arguments to some commands to be more Pythonic, sane, or by - accident. - """ - # Overridden callbacks - RESPONSE_CALLBACKS = dict_merge( - StrictRedis.RESPONSE_CALLBACKS, - { - 'TTL': lambda r: r >= 0 and r or None, - 'PTTL': lambda r: r >= 0 and r or None, - } - ) - - def pipeline(self, transaction=True, shard_hint=None): - """ - Return a new pipeline object that can queue multiple commands for - later execution. ``transaction`` indicates whether all commands - should be executed atomically. Apart from making a group of operations - atomic, pipelines are useful for reducing the back-and-forth overhead - between the client and server. 
- """ - if shard_hint: - raise RedisClusterException("shard_hint is deprecated in cluster mode") - - if transaction: - raise RedisClusterException("transaction is deprecated in cluster mode") - - return StrictClusterPipeline( - connection_pool=self.connection_pool, - startup_nodes=self.connection_pool.nodes.startup_nodes, - response_callbacks=self.response_callbacks - ) - - def setex(self, name, value, time): - """ - Set the value of key ``name`` to ``value`` that expires in ``time`` - seconds. ``time`` can be represented by an integer or a Python - timedelta object. - """ - if isinstance(time, datetime.timedelta): - time = time.seconds + time.days * 24 * 3600 - - return self.execute_command('SETEX', name, time, value) - - def lrem(self, name, value, num=0): - """ - Remove the first ``num`` occurrences of elements equal to ``value`` - from the list stored at ``name``. - The ``num`` argument influences the operation in the following ways: - num > 0: Remove elements equal to value moving from head to tail. - num < 0: Remove elements equal to value moving from tail to head. - num = 0: Remove all elements equal to value. - """ - return self.execute_command('LREM', name, num, value) - - def zadd(self, name, *args, **kwargs): - """ - NOTE: The order of arguments differs from that of the official ZADD - command. For backwards compatability, this method accepts arguments - in the form of name1, score1, name2, score2, while the official Redis - documents expects score1, name1, score2, name2. - If you're looking to use the standard syntax, consider using the - StrictRedis class. See the API Reference section of the docs for more - information. - Set any number of element-name, score pairs to the key ``name``. Pairs - can be specified in two ways: - As *args, in the form of: name1, score1, name2, score2, ... - or as **kwargs, in the form of: name1=score1, name2=score2, ... - The following example would add four values to the 'my-key' key: - redis.zadd('my-key', 'name1', 1.1, 'name2', 2.2, name3=3.3, name4=4.4) - """ - pieces = [] - - if args: - if len(args) % 2 != 0: - raise RedisError("ZADD requires an equal number of values and scores") - pieces.extend(reversed(args)) - - for pair in iteritems(kwargs): - pieces.append(pair[1]) - pieces.append(pair[0]) - - return self.execute_command('ZADD', name, *pieces) - - from rediscluster.pipeline import StrictClusterPipeline diff --git a/rediscluster/connection.py b/rediscluster/connection.py index 703cb2ae..ac6abc74 100644 --- a/rediscluster/connection.py +++ b/rediscluster/connection.py @@ -64,7 +64,7 @@ class SSLClusterConnection(SSLConnection): Manages TCP communication over TLS/SSL to and from a Redis cluster Usage: pool = ClusterConnectionPool(connection_class=SSLClusterConnection, ...) 
- client = StrictRedisCluster(connection_pool=pool) + client = RedisCluster(connection_pool=pool) """ description_format = "SSLClusterConnection" @@ -130,7 +130,7 @@ def __init__(self, startup_nodes=None, init_slot_cache=True, connection_class=No self.max_connections_per_node = max_connections_per_node if connection_class == SSLClusterConnection: - connection_kwargs['ssl'] = True # needed in StrictRedis init + connection_kwargs['ssl'] = True # needed in Redis init self.nodes = NodeManager( startup_nodes, diff --git a/rediscluster/nodemanager.py b/rediscluster/nodemanager.py index b16877d1..ed936c94 100644 --- a/rediscluster/nodemanager.py +++ b/rediscluster/nodemanager.py @@ -8,7 +8,7 @@ from .exceptions import RedisClusterException # 3rd party imports -from redis import StrictRedis +from redis import Redis from redis._compat import b, unicode, bytes, long, basestring from redis import ConnectionError, TimeoutError, ResponseError @@ -149,7 +149,7 @@ def get_redis_link(self, host, port, decode_responses=False): 'decode_responses', ) connection_kwargs = {k: v for k, v in self.connection_kwargs.items() if k in set(allowed_keys) - set(disabled_keys)} - return StrictRedis(host=host, port=port, decode_responses=decode_responses, **connection_kwargs) + return Redis(host=host, port=port, decode_responses=decode_responses, **connection_kwargs) def initialize(self): """ @@ -191,7 +191,7 @@ def initialize(self): if (len(cluster_slots) == 1 and len(cluster_slots[0][2][0]) == 0 and len(self.startup_nodes) == 1): cluster_slots[0][2][0] = self.startup_nodes[0]['host'] - # No need to decode response because StrictRedis should handle that for us... + # No need to decode response because Redis should handle that for us... for slot in cluster_slots: master_node = slot[2] diff --git a/rediscluster/pipeline.py b/rediscluster/pipeline.py index f6a21491..2e16dac7 100644 --- a/rediscluster/pipeline.py +++ b/rediscluster/pipeline.py @@ -4,14 +4,14 @@ import sys # rediscluster imports -from .client import StrictRedisCluster +from .client import RedisCluster from .exceptions import ( RedisClusterException, AskError, MovedError, TryAgainError, ) from .utils import clusterdown_wrapper, dict_merge # 3rd party imports -from redis import StrictRedis +from redis import Redis from redis.exceptions import ConnectionError, RedisError, TimeoutError from redis._compat import imap, unicode @@ -19,7 +19,7 @@ ERRORS_ALLOW_RETRY = (ConnectionError, TimeoutError, MovedError, AskError, TryAgainError) -class StrictClusterPipeline(StrictRedisCluster): +class ClusterPipeline(RedisCluster): """ """ @@ -296,66 +296,66 @@ def inner(*args, **kwargs): # Blocked pipeline commands -StrictClusterPipeline.bgrewriteaof = block_pipeline_command(StrictRedis.bgrewriteaof) -StrictClusterPipeline.bgsave = block_pipeline_command(StrictRedis.bgsave) -StrictClusterPipeline.bitop = block_pipeline_command(StrictRedis.bitop) -StrictClusterPipeline.brpoplpush = block_pipeline_command(StrictRedis.brpoplpush) -StrictClusterPipeline.client_getname = block_pipeline_command(StrictRedis.client_getname) -StrictClusterPipeline.client_kill = block_pipeline_command(StrictRedis.client_kill) -StrictClusterPipeline.client_list = block_pipeline_command(StrictRedis.client_list) -StrictClusterPipeline.client_setname = block_pipeline_command(StrictRedis.client_setname) -StrictClusterPipeline.config_get = block_pipeline_command(StrictRedis.config_get) -StrictClusterPipeline.config_resetstat = block_pipeline_command(StrictRedis.config_resetstat) 
-StrictClusterPipeline.config_rewrite = block_pipeline_command(StrictRedis.config_rewrite) -StrictClusterPipeline.config_set = block_pipeline_command(StrictRedis.config_set) -StrictClusterPipeline.dbsize = block_pipeline_command(StrictRedis.dbsize) -StrictClusterPipeline.echo = block_pipeline_command(StrictRedis.echo) -StrictClusterPipeline.evalsha = block_pipeline_command(StrictRedis.evalsha) -StrictClusterPipeline.flushall = block_pipeline_command(StrictRedis.flushall) -StrictClusterPipeline.flushdb = block_pipeline_command(StrictRedis.flushdb) -StrictClusterPipeline.info = block_pipeline_command(StrictRedis.info) -StrictClusterPipeline.keys = block_pipeline_command(StrictRedis.keys) -StrictClusterPipeline.lastsave = block_pipeline_command(StrictRedis.lastsave) -StrictClusterPipeline.mget = block_pipeline_command(StrictRedis.mget) -StrictClusterPipeline.move = block_pipeline_command(StrictRedis.move) -StrictClusterPipeline.mset = block_pipeline_command(StrictRedis.mset) -StrictClusterPipeline.msetnx = block_pipeline_command(StrictRedis.msetnx) -StrictClusterPipeline.pfmerge = block_pipeline_command(StrictRedis.pfmerge) -StrictClusterPipeline.pfcount = block_pipeline_command(StrictRedis.pfcount) -StrictClusterPipeline.ping = block_pipeline_command(StrictRedis.ping) -StrictClusterPipeline.publish = block_pipeline_command(StrictRedis.publish) -StrictClusterPipeline.randomkey = block_pipeline_command(StrictRedis.randomkey) -StrictClusterPipeline.rename = block_pipeline_command(StrictRedis.rename) -StrictClusterPipeline.renamenx = block_pipeline_command(StrictRedis.renamenx) -StrictClusterPipeline.rpoplpush = block_pipeline_command(StrictRedis.rpoplpush) -StrictClusterPipeline.save = block_pipeline_command(StrictRedis.save) -StrictClusterPipeline.scan = block_pipeline_command(StrictRedis.scan) -StrictClusterPipeline.script_exists = block_pipeline_command(StrictRedis.script_exists) -StrictClusterPipeline.script_flush = block_pipeline_command(StrictRedis.script_flush) -StrictClusterPipeline.script_kill = block_pipeline_command(StrictRedis.script_kill) -StrictClusterPipeline.script_load = block_pipeline_command(StrictRedis.script_load) -StrictClusterPipeline.sdiff = block_pipeline_command(StrictRedis.sdiff) -StrictClusterPipeline.sdiffstore = block_pipeline_command(StrictRedis.sdiffstore) -StrictClusterPipeline.sentinel_get_master_addr_by_name = block_pipeline_command(StrictRedis.sentinel_get_master_addr_by_name) -StrictClusterPipeline.sentinel_master = block_pipeline_command(StrictRedis.sentinel_master) -StrictClusterPipeline.sentinel_masters = block_pipeline_command(StrictRedis.sentinel_masters) -StrictClusterPipeline.sentinel_monitor = block_pipeline_command(StrictRedis.sentinel_monitor) -StrictClusterPipeline.sentinel_remove = block_pipeline_command(StrictRedis.sentinel_remove) -StrictClusterPipeline.sentinel_sentinels = block_pipeline_command(StrictRedis.sentinel_sentinels) -StrictClusterPipeline.sentinel_set = block_pipeline_command(StrictRedis.sentinel_set) -StrictClusterPipeline.sentinel_slaves = block_pipeline_command(StrictRedis.sentinel_slaves) -StrictClusterPipeline.shutdown = block_pipeline_command(StrictRedis.shutdown) -StrictClusterPipeline.sinter = block_pipeline_command(StrictRedis.sinter) -StrictClusterPipeline.sinterstore = block_pipeline_command(StrictRedis.sinterstore) -StrictClusterPipeline.slaveof = block_pipeline_command(StrictRedis.slaveof) -StrictClusterPipeline.slowlog_get = block_pipeline_command(StrictRedis.slowlog_get) -StrictClusterPipeline.slowlog_len = 
block_pipeline_command(StrictRedis.slowlog_len) -StrictClusterPipeline.slowlog_reset = block_pipeline_command(StrictRedis.slowlog_reset) -StrictClusterPipeline.smove = block_pipeline_command(StrictRedis.smove) -StrictClusterPipeline.sort = block_pipeline_command(StrictRedis.sort) -StrictClusterPipeline.sunion = block_pipeline_command(StrictRedis.sunion) -StrictClusterPipeline.sunionstore = block_pipeline_command(StrictRedis.sunionstore) -StrictClusterPipeline.time = block_pipeline_command(StrictRedis.time) +StrictClusterPipeline.bgrewriteaof = block_pipeline_command(Redis.bgrewriteaof) +StrictClusterPipeline.bgsave = block_pipeline_command(Redis.bgsave) +StrictClusterPipeline.bitop = block_pipeline_command(Redis.bitop) +StrictClusterPipeline.brpoplpush = block_pipeline_command(Redis.brpoplpush) +StrictClusterPipeline.client_getname = block_pipeline_command(Redis.client_getname) +StrictClusterPipeline.client_kill = block_pipeline_command(Redis.client_kill) +StrictClusterPipeline.client_list = block_pipeline_command(Redis.client_list) +StrictClusterPipeline.client_setname = block_pipeline_command(Redis.client_setname) +StrictClusterPipeline.config_get = block_pipeline_command(Redis.config_get) +StrictClusterPipeline.config_resetstat = block_pipeline_command(Redis.config_resetstat) +StrictClusterPipeline.config_rewrite = block_pipeline_command(Redis.config_rewrite) +StrictClusterPipeline.config_set = block_pipeline_command(Redis.config_set) +StrictClusterPipeline.dbsize = block_pipeline_command(Redis.dbsize) +StrictClusterPipeline.echo = block_pipeline_command(Redis.echo) +StrictClusterPipeline.evalsha = block_pipeline_command(Redis.evalsha) +StrictClusterPipeline.flushall = block_pipeline_command(Redis.flushall) +StrictClusterPipeline.flushdb = block_pipeline_command(Redis.flushdb) +StrictClusterPipeline.info = block_pipeline_command(Redis.info) +StrictClusterPipeline.keys = block_pipeline_command(Redis.keys) +StrictClusterPipeline.lastsave = block_pipeline_command(Redis.lastsave) +StrictClusterPipeline.mget = block_pipeline_command(Redis.mget) +StrictClusterPipeline.move = block_pipeline_command(Redis.move) +StrictClusterPipeline.mset = block_pipeline_command(Redis.mset) +StrictClusterPipeline.msetnx = block_pipeline_command(Redis.msetnx) +StrictClusterPipeline.pfmerge = block_pipeline_command(Redis.pfmerge) +StrictClusterPipeline.pfcount = block_pipeline_command(Redis.pfcount) +StrictClusterPipeline.ping = block_pipeline_command(Redis.ping) +StrictClusterPipeline.publish = block_pipeline_command(Redis.publish) +StrictClusterPipeline.randomkey = block_pipeline_command(Redis.randomkey) +StrictClusterPipeline.rename = block_pipeline_command(Redis.rename) +StrictClusterPipeline.renamenx = block_pipeline_command(Redis.renamenx) +StrictClusterPipeline.rpoplpush = block_pipeline_command(Redis.rpoplpush) +StrictClusterPipeline.save = block_pipeline_command(Redis.save) +StrictClusterPipeline.scan = block_pipeline_command(Redis.scan) +StrictClusterPipeline.script_exists = block_pipeline_command(Redis.script_exists) +StrictClusterPipeline.script_flush = block_pipeline_command(Redis.script_flush) +StrictClusterPipeline.script_kill = block_pipeline_command(Redis.script_kill) +StrictClusterPipeline.script_load = block_pipeline_command(Redis.script_load) +StrictClusterPipeline.sdiff = block_pipeline_command(Redis.sdiff) +StrictClusterPipeline.sdiffstore = block_pipeline_command(Redis.sdiffstore) +StrictClusterPipeline.sentinel_get_master_addr_by_name = 
block_pipeline_command(Redis.sentinel_get_master_addr_by_name) +StrictClusterPipeline.sentinel_master = block_pipeline_command(Redis.sentinel_master) +StrictClusterPipeline.sentinel_masters = block_pipeline_command(Redis.sentinel_masters) +StrictClusterPipeline.sentinel_monitor = block_pipeline_command(Redis.sentinel_monitor) +StrictClusterPipeline.sentinel_remove = block_pipeline_command(Redis.sentinel_remove) +StrictClusterPipeline.sentinel_sentinels = block_pipeline_command(Redis.sentinel_sentinels) +StrictClusterPipeline.sentinel_set = block_pipeline_command(Redis.sentinel_set) +StrictClusterPipeline.sentinel_slaves = block_pipeline_command(Redis.sentinel_slaves) +StrictClusterPipeline.shutdown = block_pipeline_command(Redis.shutdown) +StrictClusterPipeline.sinter = block_pipeline_command(Redis.sinter) +StrictClusterPipeline.sinterstore = block_pipeline_command(Redis.sinterstore) +StrictClusterPipeline.slaveof = block_pipeline_command(Redis.slaveof) +StrictClusterPipeline.slowlog_get = block_pipeline_command(Redis.slowlog_get) +StrictClusterPipeline.slowlog_len = block_pipeline_command(Redis.slowlog_len) +StrictClusterPipeline.slowlog_reset = block_pipeline_command(Redis.slowlog_reset) +StrictClusterPipeline.smove = block_pipeline_command(Redis.smove) +StrictClusterPipeline.sort = block_pipeline_command(Redis.sort) +StrictClusterPipeline.sunion = block_pipeline_command(Redis.sunion) +StrictClusterPipeline.sunionstore = block_pipeline_command(Redis.sunionstore) +StrictClusterPipeline.time = block_pipeline_command(Redis.time) class PipelineCommand(object): @@ -391,7 +391,7 @@ def append(self, c): def write(self): """ - Code borrowed from StrictRedis so it can be fixed + Code borrowed from Redis so it can be fixed """ connection = self.connection commands = self.commands diff --git a/tests/conftest.py b/tests/conftest.py index 87cbc01c..f359ccaa 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -6,11 +6,11 @@ import json # rediscluster imports -from rediscluster import StrictRedisCluster, RedisCluster +from rediscluster import RedisCluster # 3rd party imports import pytest -from redis import StrictRedis +from redis import Redis from distutils.version import StrictVersion # put our path in front so we can be sure we are testing locally not against the global package @@ -100,33 +100,33 @@ def skip_if_redis_py_version_lt(min_version): @pytest.fixture() def o(request, *args, **kwargs): """ - Create a StrictRedisCluster instance with decode_responses set to True. + Create a RedisCluster instance with decode_responses set to True. """ - return _init_client(request, cls=StrictRedisCluster, decode_responses=True, **kwargs) + return _init_client(request, cls=RedisCluster, decode_responses=True, **kwargs) @pytest.fixture() def r(request, *args, **kwargs): """ - Create a StrictRedisCluster instance with default settings. + Create a RedisCluster instance with default settings. 
""" - return _init_client(request, cls=StrictRedisCluster, **kwargs) + return _init_client(request, cls=RedisCluster, **kwargs) @pytest.fixture() def ro(request, *args, **kwargs): """ - Create a StrictRedisCluster instance with readonly mode + Create a RedisCluster instance with readonly mode """ params = {'readonly_mode': True} params.update(kwargs) - return _init_client(request, cls=StrictRedisCluster, **params) + return _init_client(request, cls=RedisCluster, **params) @pytest.fixture() def s(*args, **kwargs): """ - Create a StrictRedisCluster instance with 'init_slot_cache' set to false + Create a RedisCluster instance with 'init_slot_cache' set to false """ s = _get_client(init_slot_cache=False, **kwargs) assert s.connection_pool.nodes.slots == {} @@ -137,14 +137,14 @@ def s(*args, **kwargs): @pytest.fixture() def t(*args, **kwargs): """ - Create a regular StrictRedis object instance + Create a regular Redis object instance """ - return StrictRedis(*args, **kwargs) + return Redis(*args, **kwargs) @pytest.fixture() def sr(request, *args, **kwargs): """ - Returns a instance of StrictRedisCluster + Returns a instance of RedisCluster """ - return _init_client(request, reinitialize_steps=1, cls=StrictRedisCluster, **kwargs) + return _init_client(request, reinitialize_steps=1, cls=RedisCluster, **kwargs) diff --git a/tests/test_cluster_connection_pool.py b/tests/test_cluster_connection_pool.py index 45efe007..5a432c3e 100644 --- a/tests/test_cluster_connection_pool.py +++ b/tests/test_cluster_connection_pool.py @@ -446,7 +446,7 @@ def test_calling_from_subclass_returns_correct_instance(self): assert isinstance(pool, redis.BlockingConnectionPool) def test_client_creates_connection_pool(self): - r = redis.StrictRedis.from_url('redis://myhost') + r = redis.Redis.from_url('redis://myhost') assert r.connection_pool.connection_class == redis.Connection assert r.connection_pool.connection_kwargs == { 'host': 'myhost', diff --git a/tests/test_cluster_obj.py b/tests/test_cluster_obj.py index db560bf3..34bf0c5b 100644 --- a/tests/test_cluster_obj.py +++ b/tests/test_cluster_obj.py @@ -6,7 +6,7 @@ import time # rediscluster imports -from rediscluster import StrictRedisCluster +from rediscluster import RedisCluster from rediscluster.connection import ClusterConnectionPool, ClusterReadOnlyConnectionPool from rediscluster.exceptions import ( RedisClusterException, MovedError, AskError, ClusterDownError, @@ -17,7 +17,7 @@ # 3rd party imports from mock import patch, Mock, MagicMock from redis._compat import b, unicode -from redis import StrictRedis +from redis import Redis import pytest pytestmark = skip_if_server_version_lt('2.9.0') @@ -32,7 +32,7 @@ class DummyConnection(object): def test_representation(r): - assert re.search('^StrictRedisCluster<[0-9\.\:\,].+>$', str(r)) + assert re.search('^RedisCluster<[0-9\.\:\,].+>$', str(r)) def test_blocked_strict_redis_args(): @@ -40,7 +40,7 @@ def test_blocked_strict_redis_args(): Some arguments should explicitly be blocked because they will not work in a cluster setup """ params = {'startup_nodes': [{'host': '127.0.0.1', 'port': 7000}]} - c = StrictRedisCluster(**params) + c = RedisCluster(**params) assert c.connection_pool.connection_kwargs["socket_timeout"] == ClusterConnectionPool.RedisClusterDefaultTimeout with pytest.raises(RedisClusterException) as ex: @@ -72,7 +72,7 @@ def test_host_port_startup_node(): """ h = "192.168.0.1" p = 7000 - c = StrictRedisCluster(host=h, port=p, init_slot_cache=False) + c = RedisCluster(host=h, port=p, init_slot_cache=False) 
assert {"host": h, "port": p} in c.connection_pool.nodes.startup_nodes @@ -95,25 +95,25 @@ def test_readonly_instance(ro): def test_custom_connectionpool(): """ - Test that a custom connection pool will be used by StrictRedisCluster + Test that a custom connection pool will be used by RedisCluster """ h = "192.168.0.1" p = 7001 pool = DummyConnectionPool(host=h, port=p, connection_class=DummyConnection, startup_nodes=[{'host': h, 'port': p}], init_slot_cache=False) - c = StrictRedisCluster(connection_pool=pool, init_slot_cache=False) + c = RedisCluster(connection_pool=pool, init_slot_cache=False) assert c.connection_pool is pool assert c.connection_pool.connection_class == DummyConnection assert {"host": h, "port": p} in c.connection_pool.nodes.startup_nodes -@patch('rediscluster.nodemanager.StrictRedis', new=MagicMock()) +@patch('rediscluster.nodemanager.Redis', new=MagicMock()) def test_skip_full_coverage_check(): """ Test if the cluster_require_full_coverage NodeManager method was not called with the flag activated """ - c = StrictRedisCluster("192.168.0.1", 7001, init_slot_cache=False, skip_full_coverage_check=True) + c = RedisCluster("192.168.0.1", 7001, init_slot_cache=False, skip_full_coverage_check=True) c.connection_pool.nodes.cluster_require_full_coverage = MagicMock() c.connection_pool.nodes.initialize() assert not c.connection_pool.nodes.cluster_require_full_coverage.called @@ -144,7 +144,7 @@ def test_blocked_transaction(r): """ with pytest.raises(RedisClusterException) as ex: r.transaction(None) - assert unicode(ex.value).startswith("method StrictRedisCluster.transaction() is not implemented"), unicode(ex.value) + assert unicode(ex.value).startswith("method RedisCluster.transaction() is not implemented"), unicode(ex.value) def test_cluster_of_one_instance(): @@ -153,10 +153,10 @@ def test_cluster_of_one_instance(): one server. There is another redis server joining the cluster, hold slot 0, and - eventually quit the cluster. The StrictRedisCluster instance may get confused + eventually quit the cluster. The RedisCluster instance may get confused when slots mapping and nodes change during the test. """ - with patch.object(StrictRedisCluster, 'parse_response') as parse_response_mock: + with patch.object(RedisCluster, 'parse_response') as parse_response_mock: with patch.object(NodeManager, 'initialize', autospec=True) as init_mock: def side_effect(self, *args, **kwargs): def ok_call(self, *args, **kwargs): @@ -198,7 +198,7 @@ def map_7007(self): parse_response_mock.side_effect = side_effect init_mock.side_effect = side_effect_rebuild_slots_cache - rc = StrictRedisCluster(host='127.0.0.1', port=7006) + rc = RedisCluster(host='127.0.0.1', port=7006) rc.set("foo", "bar") @@ -226,12 +226,12 @@ def test_refresh_table_asap(): mock_initialize.return_value = None # Patch parse_response to avoid issues when the cluster sometimes return MOVED - with patch.object(StrictRedisCluster, 'parse_response') as mock_parse_response: + with patch.object(RedisCluster, 'parse_response') as mock_parse_response: def side_effect(self, *args, **kwargs): return None mock_parse_response.side_effect = side_effect - r = StrictRedisCluster(host="127.0.0.1", port=7000) + r = RedisCluster(host="127.0.0.1", port=7000) r.connection_pool.nodes.slots[12182] = [{ "host": "127.0.0.1", "port": 7002, @@ -261,14 +261,14 @@ def test_ask_redirection(): Important thing to verify is that it tries to talk to the second node. 
""" - r = StrictRedisCluster(host="127.0.0.1", port=7000) + r = RedisCluster(host="127.0.0.1", port=7000) r.connection_pool.nodes.nodes['127.0.0.1:7001'] = { 'host': u'127.0.0.1', 'server_type': 'master', 'port': 7001, 'name': '127.0.0.1:7001' } - with patch.object(StrictRedisCluster, + with patch.object(RedisCluster, 'parse_response') as parse_response: host_ip = find_node_ip_based_on_port(r, '7001') @@ -296,8 +296,8 @@ def test_pipeline_ask_redirection(): Important thing to verify is that it tries to talk to the second node. """ - r = StrictRedisCluster(host="127.0.0.1", port=7000) - with patch.object(StrictRedisCluster, + r = RedisCluster(host="127.0.0.1", port=7000) + with patch.object(RedisCluster, 'parse_response') as parse_response: def response(connection, *args, **options): @@ -329,7 +329,7 @@ def test_moved_redirection(): Important thing to verify is that it tries to talk to the second node. """ - r = StrictRedisCluster(host="127.0.0.1", port=7000) + r = RedisCluster(host="127.0.0.1", port=7000) m = Mock(autospec=True) def ask_redirect_effect(connection, *args, **options): @@ -356,7 +356,7 @@ def test_moved_redirection_pipeline(): Important thing to verify is that it tries to talk to the second node. """ - with patch.object(StrictRedisCluster, 'parse_response') as parse_response: + with patch.object(RedisCluster, 'parse_response') as parse_response: def moved_redirect_effect(connection, *args, **options): def ok_response(connection, *args, **options): assert connection.host == "127.0.0.1" @@ -368,7 +368,7 @@ def ok_response(connection, *args, **options): parse_response.side_effect = moved_redirect_effect - r = StrictRedisCluster(host="127.0.0.1", port=7000) + r = RedisCluster(host="127.0.0.1", port=7000) p = r.pipeline() p.set("foo", "bar") assert p.execute() == ["MOCK_OK"] @@ -404,7 +404,7 @@ def test_moved_redirection_on_slave_with_default_client(sr): assert_moved_redirection_on_slave( sr, ClusterConnectionPool, - StrictRedisCluster(host="127.0.0.1", port=7000, reinitialize_steps=1) + RedisCluster(host="127.0.0.1", port=7000, reinitialize_steps=1) ) @@ -415,7 +415,7 @@ def test_moved_redirection_on_slave_with_readonly_mode_client(sr): assert_moved_redirection_on_slave( sr, ClusterReadOnlyConnectionPool, - StrictRedisCluster(host="127.0.0.1", port=7000, readonly_mode=True, reinitialize_steps=1) + RedisCluster(host="127.0.0.1", port=7000, readonly_mode=True, reinitialize_steps=1) ) @@ -443,10 +443,10 @@ def test_access_correct_slave_with_readonly_mode_client(sr): ClusterConnectionPool, 'get_master_node_by_slot', return_value=master_value) as return_master_mock: - readonly_client = StrictRedisCluster(host="127.0.0.1", port=7000, readonly_mode=True) + readonly_client = RedisCluster(host="127.0.0.1", port=7000, readonly_mode=True) assert b('foo') == readonly_client.get('foo16706') - readonly_client = StrictRedisCluster.from_url(url="redis://127.0.0.1:7000/0", readonly_mode=True) + readonly_client = RedisCluster.from_url(url="redis://127.0.0.1:7000/0", readonly_mode=True) assert b('foo') == readonly_client.get('foo16706') @@ -455,7 +455,7 @@ def test_refresh_using_specific_nodes(r): Test making calls on specific nodes when the cluster has failed over to another node """ - with patch.object(StrictRedisCluster, 'parse_response') as parse_response_mock: + with patch.object(RedisCluster, 'parse_response') as parse_response_mock: with patch.object(NodeManager, 'initialize', autospec=True) as init_mock: # simulate 7006 as a failed node def side_effect(self, *args, **kwargs): @@ -498,7 
+498,7 @@ def map_7007(self): init_mock.side_effect = side_effect_rebuild_slots_cache - rc = StrictRedisCluster(host='127.0.0.1', port=7006) + rc = RedisCluster(host='127.0.0.1', port=7006) assert len(rc.connection_pool.nodes.nodes) == 1 assert '127.0.0.1:7006' in rc.connection_pool.nodes.nodes diff --git a/tests/test_node_manager.py b/tests/test_node_manager.py index 52bfb367..5972d59d 100644 --- a/tests/test_node_manager.py +++ b/tests/test_node_manager.py @@ -5,14 +5,14 @@ # rediscluster imports from tests.conftest import skip_if_server_version_lt -from rediscluster import StrictRedisCluster +from rediscluster import RedisCluster from rediscluster.exceptions import RedisClusterException from rediscluster.nodemanager import NodeManager # 3rd party imports import pytest from mock import patch, Mock -from redis import StrictRedis +from redis import Redis from redis._compat import unicode from redis import ConnectionError @@ -57,7 +57,7 @@ def test_init_slots_cache_not_all_slots(s): """ # Create wrapper function so we can inject custom 'CLUSTER SLOTS' command result def get_redis_link_wrapper(*args, **kwargs): - link = StrictRedis(host="127.0.0.1", port=7000, decode_responses=True) + link = Redis(host="127.0.0.1", port=7000, decode_responses=True) orig_exec_method = link.execute_command @@ -91,7 +91,7 @@ def test_init_slots_cache_not_all_slots_not_require_full_coverage(s): """ # Create wrapper function so we can inject custom 'CLUSTER SLOTS' command result def get_redis_link_wrapper(*args, **kwargs): - link = StrictRedis(host="127.0.0.1", port=7000, decode_responses=True) + link = Redis(host="127.0.0.1", port=7000, decode_responses=True) orig_exec_method = link.execute_command @@ -130,7 +130,7 @@ def test_init_slots_cache(s): [10923, 16383, [b'127.0.0.1', 7002], [b'127.0.0.2', 7005]], ] - with patch.object(StrictRedis, 'execute_command') as execute_command_mock: + with patch.object(Redis, 'execute_command') as execute_command_mock: def patch_execute_command(*args, **kwargs): if args == ('CONFIG GET', 'cluster-require-full-coverage'): return {'cluster-require-full-coverage': 'yes'} @@ -202,7 +202,7 @@ def monkey_link(host=None, port=None, *args, **kwargs): else: result = [] - r = StrictRedisCluster(host=host, port=port, decode_responses=True) + r = RedisCluster(host=host, port=port, decode_responses=True) orig_execute_command = r.execute_command def execute_command(*args, **kwargs): @@ -279,7 +279,7 @@ def test_cluster_slots_error(): Check that exception is raised if initialize can't execute 'CLUSTER SLOTS' command. """ - with patch.object(StrictRedisCluster, 'execute_command') as execute_command_mock: + with patch.object(RedisCluster, 'execute_command') as execute_command_mock: execute_command_mock.side_effect = Exception("foobar") n = NodeManager(startup_nodes=[{}]) @@ -321,7 +321,7 @@ def test_cluster_one_instance(): If the cluster exists of only 1 node then there is some hacks that must be validated they work. 
""" - with patch.object(StrictRedis, 'execute_command') as mock_execute_command: + with patch.object(Redis, 'execute_command') as mock_execute_command: return_data = [[0, 16383, ['', 7006]]] def patch_execute_command(*args, **kwargs): @@ -367,7 +367,7 @@ def test_init_with_down_node(): def get_redis_link(host, port, decode_responses=False): if port == 7000: raise ConnectionError('mock connection error for 7000') - return StrictRedis(host=host, port=port, decode_responses=decode_responses) + return Redis(host=host, port=port, decode_responses=decode_responses) with patch.object(NodeManager, 'get_redis_link', side_effect=get_redis_link): n = NodeManager(startup_nodes=[{"host": "127.0.0.1", "port": 7000}]) diff --git a/tests/test_pipeline.py b/tests/test_pipeline.py index 2f034d6b..acbc6172 100644 --- a/tests/test_pipeline.py +++ b/tests/test_pipeline.py @@ -5,7 +5,7 @@ import re # rediscluster imports -from rediscluster.client import StrictRedisCluster +from rediscluster.client import RedisCluster from rediscluster.connection import ClusterConnectionPool, ClusterReadOnlyConnectionPool from rediscluster.exceptions import RedisClusterException from tests.conftest import _get_client @@ -527,7 +527,7 @@ def test_moved_redirection_on_slave_with_default(self): """ self.assert_moved_redirection_on_slave( ClusterConnectionPool, - StrictRedisCluster(host="127.0.0.1", port=7000, reinitialize_steps=1) + RedisCluster(host="127.0.0.1", port=7000, reinitialize_steps=1) ) def test_moved_redirection_on_slave_with_readonly_mode_client(self): @@ -536,7 +536,7 @@ def test_moved_redirection_on_slave_with_readonly_mode_client(self): """ self.assert_moved_redirection_on_slave( ClusterReadOnlyConnectionPool, - StrictRedisCluster(host="127.0.0.1", port=7000, readonly_mode=True, reinitialize_steps=1) + RedisCluster(host="127.0.0.1", port=7000, readonly_mode=True, reinitialize_steps=1) ) def test_access_correct_slave_with_readonly_mode_client(self, sr): @@ -564,6 +564,6 @@ def test_access_correct_slave_with_readonly_mode_client(self, sr): ClusterConnectionPool, 'get_master_node_by_slot', return_value=master_value) as return_master_mock: - readonly_client = StrictRedisCluster(host="127.0.0.1", port=7000, readonly_mode=True) + readonly_client = RedisCluster(host="127.0.0.1", port=7000, readonly_mode=True) with readonly_client.pipeline() as readonly_pipe: assert readonly_pipe.get('foo88').get('foo87').execute() == [b('bar'), b('foo')] diff --git a/tests/test_pubsub.py b/tests/test_pubsub.py index 686b55a3..4bbecfdc 100644 --- a/tests/test_pubsub.py +++ b/tests/test_pubsub.py @@ -6,13 +6,13 @@ import time # rediscluster imports -from rediscluster.client import StrictRedisCluster +from rediscluster.client import RedisCluster # 3rd party imports import pytest # import redis -from redis import StrictRedis, Redis +from redis import Redis from redis.exceptions import ConnectionError from redis._compat import basestring, u, unichr, b @@ -221,12 +221,12 @@ class TestPubSubMessages(object): Bug: Currently in cluster mode publish command will behave different then in standard/non cluster mode. See (docs/Pubsub.md) for details. - Currently StrictRedis instances will be used to test pubsub because they + Currently Redis instances will be used to test pubsub because they are easier to work with. 
""" def get_strict_redis_node(self, port, host="127.0.0.1"): - return StrictRedis(port=port, host=host) + return Redis(port=port, host=host) def setup_method(self, *args): self.message = None @@ -444,7 +444,7 @@ def test_pubsub_thread_publish(): """ startup_nodes = [{"host": "127.0.0.1", "port": "7000"}] - r = StrictRedisCluster( + r = RedisCluster( startup_nodes=startup_nodes, decode_responses=True, max_connections=16, From f73676d010601e02bf25ef7cd4e95ec80b831fe5 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Mon, 26 Nov 2018 23:23:29 +0100 Subject: [PATCH 062/263] Update __init__ to use new helper method to build VERSION attribute --- rediscluster/__init__.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/rediscluster/__init__.py b/rediscluster/__init__.py index 5024086d..bd9c5437 100644 --- a/rediscluster/__init__.py +++ b/rediscluster/__init__.py @@ -17,5 +17,15 @@ # Major, Minor, Fix version __version__ = (2, 0, 0) +def int_or_str(value): + try: + return int(value) + except ValueError: + return value + + +__version__ = '2.0.0' +VERSION = tuple(map(int_or_str, __version__.split('.'))) + if sys.version_info[0:3] == (3, 4, 0): raise RuntimeError("CRITICAL: rediscluster do not work with python 3.4.0. Please use 3.4.1 or higher.") From 82c650151d31a0e8c6a9a2f1c3ce6059c6b8578a Mon Sep 17 00:00:00 2001 From: Grokzen Date: Mon, 26 Nov 2018 23:23:55 +0100 Subject: [PATCH 063/263] Update test_scripting.py to match redis-py code --- tests/test_scripting.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/tests/test_scripting.py b/tests/test_scripting.py index 1dd14d50..968fdc61 100644 --- a/tests/test_scripting.py +++ b/tests/test_scripting.py @@ -1,14 +1,13 @@ # -*- coding: utf-8 -*- # python std lib -from __future__ import with_statement +from __future__ import unicode_literals # rediscluster imports from rediscluster.exceptions import RedisClusterException # 3rd party imports from redis import exceptions -from redis._compat import b import pytest @@ -114,7 +113,7 @@ def test_script_object_in_pipeline(self, r): assert multiply.sha assert r.script_exists(multiply.sha) == [True] # [SET worked, GET 'a', result of multiple script] - assert pipe.execute() == [True, b('2'), 6] + assert pipe.execute() == [True, b'2', 6] # purge the script from redis's cache and re-run the pipeline # the multiply script object knows it's sha, so it shouldn't get @@ -127,7 +126,7 @@ def test_script_object_in_pipeline(self, r): multiply(keys=['a'], args=[3], client=pipe) assert r.script_exists(multiply.sha) == [False] # [SET worked, GET 'a', result of multiple script] - assert pipe.execute() == [True, b('2'), 6] + assert pipe.execute() == [True, b'2', 6] @pytest.mark.xfail(reason="Not Yet Implemented") def test_eval_msgpack_pipeline_error_in_lua(self, r): From 081b3ac37ba0d02bfac11ed0e588924e14658c2d Mon Sep 17 00:00:00 2001 From: Grokzen Date: Mon, 26 Nov 2018 23:32:12 +0100 Subject: [PATCH 064/263] Update test_pubsub.py to match redis-py code --- tests/test_pubsub.py | 31 ++++++++++++++++--------------- 1 file changed, 16 insertions(+), 15 deletions(-) diff --git a/tests/test_pubsub.py b/tests/test_pubsub.py index 4bbecfdc..e0ea2837 100644 --- a/tests/test_pubsub.py +++ b/tests/test_pubsub.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- # python std lib -from __future__ import with_statement +from __future__ import unicode_literals import threading import time @@ -14,8 +14,9 @@ # import redis from redis import Redis from redis.exceptions import ConnectionError -from redis._compat 
import basestring, u, unichr, b +from redis._compat import basestring, unichr +from .conftest import _get_client from .conftest import skip_if_server_version_lt, skip_if_redis_py_version_lt def wait_for_message(pubsub, timeout=0.5, ignore_subscribe_messages=False): @@ -35,7 +36,7 @@ def make_message(type, channel, data, pattern=None): return { 'type': type, 'pattern': pattern and pattern.encode('utf-8') or None, - 'channel': channel.encode('utf-8'), + 'channel': channel and channel.encode('utf-8') or None, 'data': data.encode('utf-8') if isinstance(data, basestring) else data } @@ -48,7 +49,7 @@ def make_subscribe_test_data(pubsub, type): 'unsub_type': 'unsubscribe', 'sub_func': pubsub.subscribe, 'unsub_func': pubsub.unsubscribe, - 'keys': ['foo', 'bar', u('uni') + unichr(4456) + u('code')] + 'keys': ['foo', 'bar', 'uni' + unichr(4456) + 'code'] } elif type == 'pattern': return { @@ -57,7 +58,7 @@ def make_subscribe_test_data(pubsub, type): 'unsub_type': 'punsubscribe', 'sub_func': pubsub.psubscribe, 'unsub_func': pubsub.punsubscribe, - 'keys': ['f*', 'b*', u('uni') + unichr(4456) + u('*')] + 'keys': ['f*', 'b*', 'uni' + unichr(4456) + '*'] } assert False, 'invalid subscribe type: {0}'.format(type) @@ -309,7 +310,7 @@ def test_pattern_message_handler(self, r): @pytest.mark.xfail(reason="Pattern pubsub do not work currently") def test_unicode_channel_message_handler(self, r): p = r.pubsub(ignore_subscribe_messages=True) - channel = u('uni') + unichr(4456) + u('code') + channel = 'uni' + unichr(4456) + 'code' channels = {channel: self.message_handler} print(channels) p.subscribe(**channels) @@ -320,8 +321,8 @@ def test_unicode_channel_message_handler(self, r): @pytest.mark.xfail(reason="Pattern pubsub do not work currently") def test_unicode_pattern_message_handler(self, r): p = r.pubsub(ignore_subscribe_messages=True) - pattern = u('uni') + unichr(4456) + u('*') - channel = u('uni') + unichr(4456) + u('code') + pattern = 'uni' + unichr(4456) + '*' + channel = 'uni' + unichr(4456) + 'code' p.psubscribe(**{pattern: self.message_handler}) assert r.publish(channel, 'test message') == 1 assert wait_for_message(p) is None @@ -332,9 +333,9 @@ def test_unicode_pattern_message_handler(self, r): class TestPubSubAutoDecoding(object): "These tests only validate that we get unicode values back" - channel = u('uni') + unichr(4456) + u('code') - pattern = u('uni') + unichr(4456) + u('*') - data = u('abc') + unichr(4458) + u('123') + channel = 'uni' + unichr(4456) + 'code' + pattern = 'uni' + unichr(4456) + '*' + data = 'abc' + unichr(4458) + '123' def make_message(self, type, channel, data, pattern=None): return { @@ -400,7 +401,7 @@ def test_channel_message_handler(self, o): # test that we reconnected to the correct channel p.connection.disconnect() assert wait_for_message(p) is None # should reconnect - new_data = self.data + u('new data') + new_data = self.data + 'new data' o.publish(self.channel, new_data) assert wait_for_message(p) is None assert self.message == self.make_message('message', self.channel, @@ -419,7 +420,7 @@ def test_pattern_message_handler(self, o): # test that we reconnected to the correct pattern p.connection.disconnect() assert wait_for_message(p) is None # should reconnect - new_data = self.data + u('new data') + new_data = self.data + 'new data' o.publish(self.channel, new_data) assert wait_for_message(p) is None assert self.message == self.make_message('pmessage', self.channel, @@ -487,7 +488,7 @@ class TestPubSubPubSubSubcommands(object): def test_pubsub_channels(self, r): 
r.pubsub(ignore_subscribe_messages=True).subscribe('foo', 'bar', 'baz', 'quux') channels = sorted(r.pubsub_channels()) - assert channels == [b('bar'), b('baz'), b('foo'), b('quux')] + assert channels == [b'bar', b'baz', b'foo', b'quux'] @skip_if_redis_py_version_lt('2.10.6') def test_pubsub_numsub(self, r): @@ -495,7 +496,7 @@ def test_pubsub_numsub(self, r): r.pubsub(ignore_subscribe_messages=True).subscribe('bar', 'baz') r.pubsub(ignore_subscribe_messages=True).subscribe('baz') - channels = [(b('bar'), 2), (b('baz'), 3), (b('foo'), 1)] + channels = [(b'foo', 1), (b'bar', 2), (b'baz', 3)] assert channels == sorted(r.pubsub_numsub('foo', 'bar', 'baz')) @skip_if_redis_py_version_lt('2.10.6') From 6afa8ad1db1c70220677ba1dfc6707c3de0d05b4 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Mon, 26 Nov 2018 23:34:04 +0100 Subject: [PATCH 065/263] Add python_requires to define what redis version to include/exclude. Add extras_require to support installing hiredis as extras dependency --- setup.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/setup.py b/setup.py index 8215d9fa..4d8fcca0 100644 --- a/setup.py +++ b/setup.py @@ -34,6 +34,12 @@ install_requires=[ 'redis>=3.0.0,<3.1.0' ], + python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*", + extras_require={ + 'hiredis': [ + "hiredis>=0.1.3", + ], + }, keywords=[ 'redis', 'redis cluster', From f7b94b9196ee38de216b52d95fa19686c319fc47 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Mon, 26 Nov 2018 23:36:38 +0100 Subject: [PATCH 066/263] Update years on docs/License file --- docs/License.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/License.txt b/docs/License.txt index 9c297fec..bf0afb13 100644 --- a/docs/License.txt +++ b/docs/License.txt @@ -1,4 +1,4 @@ -Copyright (c) 2014-2016 Johan Andersson +Copyright (c) 2014-2018 Johan Andersson Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation From 1709e1f6044766a4723c671887405f619c766595 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Mon, 26 Nov 2018 23:37:14 +0100 Subject: [PATCH 067/263] Add LICENSE file to root of repo --- LICENSE | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) create mode 100644 LICENSE diff --git a/LICENSE b/LICENSE new file mode 100644 index 00000000..66ccb488 --- /dev/null +++ b/LICENSE @@ -0,0 +1,23 @@ +Copyright (c) 2014-2018 Johan Andersson + + Permission is hereby granted, free of charge, to any person + obtaining a copy of this software and associated documentation + files (the "Software"), to deal in the Software without + restriction, including without limitation the rights to use, + copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the + Software is furnished to do so, subject to the following + conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. 
+ \ No newline at end of file From 6eff6b51a55f87d1a2cb986cf9fc0703856f638c Mon Sep 17 00:00:00 2001 From: Grokzen Date: Mon, 26 Nov 2018 23:43:30 +0100 Subject: [PATCH 068/263] Add [metadata] and [pycodestyle] to setup.cfg to configure tools for future use. --- setup.cfg | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/setup.cfg b/setup.cfg index 3c6e79cf..85215dcb 100644 --- a/setup.cfg +++ b/setup.cfg @@ -1,2 +1,9 @@ [bdist_wheel] universal=1 + +[metadata] +license_file = LICENSE + +[pycodestyle] +show-source = 1 +exclude = .venv,.tox,dist,docs,build,*.egg From 7334ac8c6efa597281521e186196519ef506b44c Mon Sep 17 00:00:00 2001 From: Grokzen Date: Mon, 26 Nov 2018 23:54:04 +0100 Subject: [PATCH 069/263] Fix byte and unicode issues in test_pipeline.py --- tests/test_pipeline.py | 70 ++++++++++++++++++++++-------------------- 1 file changed, 37 insertions(+), 33 deletions(-) diff --git a/tests/test_pipeline.py b/tests/test_pipeline.py index acbc6172..e8750439 100644 --- a/tests/test_pipeline.py +++ b/tests/test_pipeline.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- # python std lib -from __future__ import with_statement +from __future__ import unicode_literals import re # rediscluster imports @@ -13,7 +13,7 @@ # 3rd party imports import pytest from mock import patch -from redis._compat import b, u, unichr, unicode +from redis._compat import unichr, unicode from redis.exceptions import WatchError, ResponseError, ConnectionError @@ -23,15 +23,19 @@ class TestPipeline(object): def test_pipeline(self, r): with r.pipeline() as pipe: - pipe.set('a', 'a1').get('a').zadd('z', z1=1).zadd('z', z2=4) - pipe.zincrby('z', 'z1').zrange('z', 0, 5, withscores=True) + (pipe.set('a', 'a1') + .get('a') + .zadd('z', {'z1': 1}) + .zadd('z', {'z2': 4}) + .zincrby('z', 1, 'z1') + .zrange('z', 0, 5, withscores=True)) assert pipe.execute() == [ True, - b('a1'), + b'a1', True, True, 2.0, - [(b('z1'), 2.0), (b('z2'), 4)], + [(b'z1', 2.0), (b'z2', 4)], ] def test_pipeline_length(self, r): @@ -54,18 +58,18 @@ def test_pipeline_no_transaction(self, r): with r.pipeline(transaction=False) as pipe: pipe.set('a', 'a1').set('b', 'b1').set('c', 'c1') assert pipe.execute() == [True, True, True] - assert r['a'] == b('a1') - assert r['b'] == b('b1') - assert r['c'] == b('c1') + assert r['a'] == b'a1' + assert r['b'] == b'b1' + assert r['c'] == b'c1' def test_pipeline_eval(self, r): with r.pipeline(transaction=False) as pipe: pipe.eval("return {KEYS[1],KEYS[2],ARGV[1],ARGV[2]}", 2, "A{foo}", "B{foo}", "first", "second") res = pipe.execute()[0] - assert res[0] == b('A{foo}') - assert res[1] == b('B{foo}') - assert res[2] == b('first') - assert res[3] == b('second') + assert res[0] == b'A{foo}' + assert res[1] == b'B{foo}' + assert res[2] == b'first' + assert res[3] == b'second' @pytest.mark.xfail(reason="unsupported command: watch") def test_pipeline_no_transaction_watch(self, r): @@ -95,7 +99,7 @@ def test_pipeline_no_transaction_watch_failure(self, r): with pytest.raises(WatchError): pipe.execute() - assert r['a'] == b('bad') + assert r['a'] == b'bad' def test_exec_error_in_response(self, r): """ @@ -108,23 +112,23 @@ def test_exec_error_in_response(self, r): result = pipe.execute(raise_on_error=False) assert result[0] - assert r['a'] == b('1') + assert r['a'] == b'1' assert result[1] - assert r['b'] == b('2') + assert r['b'] == b'2' # we can't lpush to a key that's a string value, so this should # be a ResponseError exception assert isinstance(result[2], ResponseError) - assert r['c'] == b('a') + assert r['c'] == 'a' # 
since this isn't a transaction, the other commands after the # error are still executed assert result[3] - assert r['d'] == b('4') + assert r['d'] == b'4' # make sure the pipe was restored to a working state assert pipe.set('z', 'zzz').execute() == [True] - assert r['z'] == b('zzz') + assert r['z'] == b'zzz' def test_exec_error_raised(self, r): r['c'] = 'a' @@ -137,7 +141,7 @@ def test_exec_error_raised(self, r): # make sure the pipe was restored to a working state assert pipe.set('z', 'zzz').execute() == [True] - assert r['z'] == b('zzz') + assert r['z'] == b'zzz' def test_parse_error_raised(self, r): with r.pipeline() as pipe: @@ -151,7 +155,7 @@ def test_parse_error_raised(self, r): # make sure the pipe was restored to a working state assert pipe.set('z', 'zzz').execute() == [True] - assert r['z'] == b('zzz') + assert r['z'] == b'zzz' @pytest.mark.xfail(reason="unsupported command: watch") def test_watch_succeed(self, r): @@ -163,8 +167,8 @@ def test_watch_succeed(self, r): assert pipe.watching a_value = pipe.get('a') b_value = pipe.get('b') - assert a_value == b('1') - assert b_value == b('2') + assert a_value == b'1' + assert b_value == b'2' pipe.multi() pipe.set('c', 3) @@ -197,7 +201,7 @@ def test_unwatch(self, r): pipe.unwatch() assert not pipe.watching pipe.get('a') - assert pipe.execute() == [b('1')] + assert pipe.execute() == [b'1'] @pytest.mark.xfail(reason="unsupported command: watch") def test_transaction_callable(self, r): @@ -207,9 +211,9 @@ def test_transaction_callable(self, r): def my_transaction(pipe): a_value = pipe.get('a') - assert a_value in (b('1'), b('2')) + assert a_value in (b'1', b'2') b_value = pipe.get('b') - assert b_value == b('2') + assert b_value == b'2' # silly run-once code... incr's "a" so WatchError should be raised # forcing this all to run again. 
this should incr "a" once to "2" @@ -222,7 +226,7 @@ def my_transaction(pipe): result = r.transaction(my_transaction, 'a', 'b') assert result == [True] - assert r['c'] == b('4') + assert r['c'] == b'4' def test_exec_error_in_no_transaction_pipeline(self, r): r['a'] = 1 @@ -236,10 +240,10 @@ def test_exec_error_in_no_transaction_pipeline(self, r): assert unicode(ex.value).startswith('Command # 1 (LLEN a) of ' 'pipeline caused error: ') - assert r['a'] == b('1') + assert r['a'] == b'1' def test_exec_error_in_no_transaction_pipeline_unicode_command(self, r): - key = unichr(3456) + u('abcd') + unichr(3421) + key = unichr(3456) + u'abcd' + unichr(3421) r[key] = 1 with r.pipeline(transaction=False) as pipe: pipe.llen(key) @@ -251,7 +255,7 @@ def test_exec_error_in_no_transaction_pipeline_unicode_command(self, r): expected = unicode('Command # 1 (LLEN {0}) of pipeline caused error: ').format(key) assert unicode(ex.value).startswith(expected) - assert r[key] == b('1') + assert r[key] == b'1' def test_blocked_methods(self, r): """ @@ -498,8 +502,8 @@ def test_pipeline_readonly(self, r, ro): with ro.pipeline() as readonly_pipe: readonly_pipe.get('foo71').zrange('foo88', 0, 5, withscores=True) assert readonly_pipe.execute() == [ - b('a1'), - [(b('z1'), 1.0), (b('z2'), 4)], + b'a1', + [(b'z1', 1.0), (b'z2', 4)], ] def assert_moved_redirection_on_slave(self, connection_pool_cls, cluster_obj): @@ -566,4 +570,4 @@ def test_access_correct_slave_with_readonly_mode_client(self, sr): return_value=master_value) as return_master_mock: readonly_client = RedisCluster(host="127.0.0.1", port=7000, readonly_mode=True) with readonly_client.pipeline() as readonly_pipe: - assert readonly_pipe.get('foo88').get('foo87').execute() == [b('bar'), b('foo')] + assert readonly_pipe.get('foo88').get('foo87').execute() == [b'bar', b'foo'] From 647b1ab6d00a1e9716e714c6ac41b405820fd3a7 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Sun, 16 Dec 2018 11:07:29 +0100 Subject: [PATCH 070/263] Fix all unicode and binary litterals in test_commands.py --- tests/test_commands.py | 451 ++++++++++++++++++++--------------------- 1 file changed, 224 insertions(+), 227 deletions(-) diff --git a/tests/test_commands.py b/tests/test_commands.py index 2e349f6b..fe251b14 100644 --- a/tests/test_commands.py +++ b/tests/test_commands.py @@ -31,13 +31,13 @@ class TestRedisCommands(object): @skip_if_server_version_lt('2.9.9') def test_zrevrangebylex(self, r): r.zadd('a', a=0, b=0, c=0, d=0, e=0, f=0, g=0) - assert r.zrevrangebylex('a', '[c', '-') == [b('c'), b('b'), b('a')] - assert r.zrevrangebylex('a', '(c', '-') == [b('b'), b('a')] + assert r.zrevrangebylex('a', '[c', '-') == [b'c', b'b', b'a'] + assert r.zrevrangebylex('a', '(c', '-') == [b'b', b'a'] assert r.zrevrangebylex('a', '(g', '[aaa') == \ - [b('f'), b('e'), b('d'), b('c'), b('b')] - assert r.zrevrangebylex('a', '+', '[f') == [b('g'), b('f')] + [b'f', b'e', b'd', b'c', b'b'] + assert r.zrevrangebylex('a', '+', '[f') == [b'g', b'f'] assert r.zrevrangebylex('a', '+', '-', start=3, num=2) == \ - [b('d'), b('c')] + [b'd', b'c'] def test_command_on_invalid_key_type(self, r): r.lpush('a', '1') @@ -80,13 +80,13 @@ def test_config_set(self, r): def test_echo(self, r): for server, res in r.echo('foo bar').items(): - assert res == b('foo bar') + assert res == b'foo bar' def test_object(self, r): r['a'] = 'foo' assert isinstance(r.object('refcount', 'a'), int) assert isinstance(r.object('idletime', 'a'), int) - assert r.object('encoding', 'a') in (b('raw'), b('embstr')) + assert r.object('encoding', 
'a') in (b'raw', b'embstr') assert r.object('idletime', 'invalid-key') is None def test_ping(self, r): @@ -101,9 +101,9 @@ def test_time(self, r): # BASIC KEY COMMANDS def test_append(self, r): assert r.append('a', 'a1') == 2 - assert r['a'] == b('a1') + assert r['a'] == b'a1' assert r.append('a', 'a2') == 4 - assert r['a'] == b('a1a2') + assert r['a'] == b'a1a2' def test_bitcount(self, r): r.setbit('a', 5, True) @@ -133,16 +133,13 @@ def test_bitop_not_supported(self, r): def test_bitpos(self, r): """ Bitpos was added in redis-py in version 2.10.2 - - # TODO: Added b() around keys but i think they should not have to be - there for this command to work properly. """ key = 'key:bitpos' - r.set(key, b('\xff\xf0\x00')) + r.set(key, b'\xff\xf0\x00') assert r.bitpos(key, 0) == 12 assert r.bitpos(key, 0, 2, -1) == 16 assert r.bitpos(key, 0, -2, -1) == 12 - r.set(key, b('\x00\xff\xf0')) + r.set(key, b'\x00\xff\xf0') assert r.bitpos(key, 1, 0) == 8 assert r.bitpos(key, 1, 1) == 8 r.set(key, '\x00\x00\x00') @@ -155,7 +152,7 @@ def test_bitpos_wrong_arguments(self, r): Bitpos was added in redis-py in version 2.10.2 """ key = 'key:bitpos:wrong:args' - r.set(key, b('\xff\xf0\x00')) + r.set(key, b'\xff\xf0\x00') with pytest.raises(RedisError): r.bitpos(key, 0, end=1) == 12 with pytest.raises(RedisError): @@ -163,11 +160,11 @@ def test_bitpos_wrong_arguments(self, r): def test_decr(self, r): assert r.decr('a') == -1 - assert r['a'] == b('-1') + assert r['a'] == b'-1' assert r.decr('a') == -2 - assert r['a'] == b('-2') + assert r['a'] == b'-2' assert r.decr('a', amount=5) == -7 - assert r['a'] == b('-7') + assert r['a'] == b'-7' def test_delete(self, r): assert r.delete('a') == 0 @@ -191,7 +188,7 @@ def test_dump_and_restore(self, r): dumped = r.dump('a') del r['a'] r.restore('a', 0, dumped) - assert r['a'] == b('foo') + assert r['a'] == b'foo' def test_exists(self, r): assert not r.exists('a') @@ -232,19 +229,19 @@ def test_expireat_unixtime(self, r): def test_get_and_set(self, r): # get and set can't be tested independently of each other assert r.get('a') is None - byte_string = b('value') + byte_string = b'value' integer = 5 - unicode_string = unichr(3456) + u('abcd') + unichr(3421) + unicode_string = unichr(3456) + u'abcd' + unichr(3421) assert r.set('byte_string', byte_string) assert r.set('integer', 5) assert r.set('unicode_string', unicode_string) assert r.get('byte_string') == byte_string - assert r.get('integer') == b(str(integer)) + assert r.get('integer') == bstr(integer) assert r.get('unicode_string').decode('utf-8') == unicode_string def test_getitem_and_setitem(self, r): r['a'] = 'bar' - assert r['a'] == b('bar') + assert r['a'] == b'bar' def test_getitem_raises_keyerror_for_missing_key(self, r): with pytest.raises(KeyError): @@ -268,31 +265,31 @@ def test_get_set_bit(self, r): def test_getrange(self, r): r['a'] = 'foo' - assert r.getrange('a', 0, 0) == b('f') - assert r.getrange('a', 0, 2) == b('foo') - assert r.getrange('a', 3, 4) == b('') + assert r.getrange('a', 0, 0) == b'f' + assert r.getrange('a', 0, 2) == b'foo' + assert r.getrange('a', 3, 4) == b'' def test_getset(self, r): assert r.getset('a', 'foo') is None - assert r.getset('a', 'bar') == b('foo') - assert r.get('a') == b('bar') + assert r.getset('a', 'bar') == b'foo' + assert r.get('a') == b'bar' def test_incr(self, r): assert r.incr('a') == 1 - assert r['a'] == b('1') + assert r['a'] == b'1' assert r.incr('a') == 2 - assert r['a'] == b('2') + assert r['a'] == b'2' assert r.incr('a', amount=5) == 7 - assert r['a'] == b('7') + 
assert r['a'] == b'7' def test_incrby(self, r): assert r.incrby('a') == 1 assert r.incrby('a', 4) == 5 - assert r['a'] == b('5') + assert r['a'] == b'5' def test_incrbyfloat(self, r): assert r.incrbyfloat('a') == 1.0 - assert r['a'] == b('1') + assert r['a'] == b'1' assert r.incrbyfloat('a', 1.1) == 2.1 assert float(r['a']) == float(2.1) @@ -303,41 +300,41 @@ def test_keys(self, r): keys = keys_with_underscores.union(set(['testc'])) for key in keys: r[key] = 1 - assert set(r.keys(pattern='test_*')) == {b(k) for k in keys_with_underscores} - assert set(r.keys(pattern='test*')) == {b(k) for k in keys} + assert set(r.keys(pattern='test_*')) == {b"{0}".format(k) for k in keys_with_underscores} + assert set(r.keys(pattern='test*')) == {b"{0}".format(k) for k in keys} def test_mget(self, r): assert r.mget(['a', 'b']) == [None, None] r['a'] = '1' r['b'] = '2' r['c'] = '3' - assert r.mget('a', 'other', 'b', 'c') == [b('1'), None, b('2'), b('3')] + assert r.mget('a', 'other', 'b', 'c') == [b'1', None, b'2', b'3'] def test_mset(self, r): - d = {'a': b('1'), 'b': b('2'), 'c': b('3')} + d = {'a': b'1', 'b': b'2', 'c': b'3'} assert r.mset(d) for k, v in iteritems(d): assert r[k] == v def test_mset_kwargs(self, r): - d = {'a': b('1'), 'b': b('2'), 'c': b('3')} + d = {'a': b'1', 'b': b'2', 'c': b'3'} assert r.mset(**d) for k, v in iteritems(d): assert r[k] == v def test_msetnx(self, r): - d = {'a': b('1'), 'b': b('2'), 'c': b('3')} + d = {'a': b'1', 'b': b'2', 'c': b'3'} assert r.msetnx(d) - d2 = {'a': b('x'), 'd': b('4')} + d2 = {'a': b'x', 'd': b'4'} assert not r.msetnx(d2) for k, v in iteritems(d): assert r[k] == v assert r.get('d') is None def test_msetnx_kwargs(self, r): - d = {'a': b('1'), 'b': b('2'), 'c': b('3')} + d = {'a': b'1', 'b': b'2', 'c': b'3'} assert r.msetnx(**d) - d2 = {'a': b('x'), 'd': b('4')} + d2 = {'a': b'x', 'd': b'4'} assert not r.msetnx(**d2) for k, v in iteritems(d): assert r[k] == v @@ -372,26 +369,26 @@ def test_pexpireat_unixtime(self, r): def test_psetex(self, r): assert r.psetex('a', 1000, 'value') - assert r['a'] == b('value') + assert r['a'] == b'value' assert 0 < r.pttl('a') <= 1000 def test_psetex_timedelta(self, r): expire_at = datetime.timedelta(milliseconds=1000) assert r.psetex('a', expire_at, 'value') - assert r['a'] == b('value') + assert r['a'] == b'value' assert 0 < r.pttl('a') <= 1000 def test_randomkey(self, r): assert r.randomkey() is None for key in ('a', 'b', 'c'): r[key] = 1 - assert r.randomkey() in (b('a'), b('b'), b('c')) + assert r.randomkey() in (b'a', b'b', b'c') def test_rename(self, r): r['a'] = '1' assert r.rename('a', 'b') assert r.get('a') is None - assert r['b'] == b('1') + assert r['b'] == b'1' with pytest.raises(ResponseError) as ex: r.rename("foo", "foo") @@ -406,27 +403,27 @@ def test_renamenx(self, r): r['a'] = '1' r['b'] = '2' assert not r.renamenx('a', 'b') - assert r['a'] == b('1') - assert r['b'] == b('2') + assert r['a'] == b'1' + assert r['b'] == b'2' assert r.renamenx('a', 'c') - assert r['c'] == b('1') + assert r['c'] == b'1' def test_set_nx(self, r): assert r.set('a', '1', nx=True) assert not r.set('a', '2', nx=True) - assert r['a'] == b('1') + assert r['a'] == b'1' def test_set_xx(self, r): assert not r.set('a', '1', xx=True) assert r.get('a') is None r['a'] = 'bar' assert r.set('a', '2', xx=True) - assert r.get('a') == b('2') + assert r.get('a') == b'2' def test_set_px(self, r): assert r.set('a', '1', px=10000) - assert r['a'] == b('1') + assert r['a'] == b'1' assert 0 < r.pttl('a') <= 10000 assert 0 < r.ttl('a') <= 10 @@ 
-452,21 +449,21 @@ def test_set_multipleoptions(self, r): def test_setex(self, r): assert r.setex('a', 60, '1') - assert r['a'] == b('1') + assert r['a'] == b'1' assert 0 < r.ttl('a') <= 60 def test_setnx(self, r): assert r.setnx('a', '1') - assert r['a'] == b('1') + assert r['a'] == b'1' assert not r.setnx('a', '2') - assert r['a'] == b('1') + assert r['a'] == b'1' def test_setrange(self, r): assert r.setrange('a', 5, 'foo') == 8 - assert r['a'] == b('\0\0\0\0\0foo') + assert r['a'] == b'\0\0\0\0\0foo' r['a'] = 'abcdefghijh' assert r.setrange('a', 6, '12345') == 11 - assert r['a'] == b('abcdef12345') + assert r['a'] == b'abcdef12345' def test_strlen(self, r): r['a'] = 'foo' @@ -474,74 +471,74 @@ def test_strlen(self, r): def test_substr(self, r): r['a'] = '0123456789' - assert r.substr('a', 0) == b('0123456789') - assert r.substr('a', 2) == b('23456789') - assert r.substr('a', 3, 5) == b('345') - assert r.substr('a', 3, -2) == b('345678') + assert r.substr('a', 0) == b'0123456789' + assert r.substr('a', 2) == b'23456789' + assert r.substr('a', 3, 5) == b'345' + assert r.substr('a', 3, -2) == b'345678' def test_type(self, r): - assert r.type('a') == b('none') + assert r.type('a') == b'none' r['a'] = '1' - assert r.type('a') == b('string') + assert r.type('a') == b'string' del r['a'] r.lpush('a', '1') - assert r.type('a') == b('list') + assert r.type('a') == b'list' del r['a'] r.sadd('a', '1') - assert r.type('a') == b('set') + assert r.type('a') == b'set' del r['a'] r.zadd('a', **{'1': 1}) - assert r.type('a') == b('zset') + assert r.type('a') == b'zset' # LIST COMMANDS def test_blpop(self, r): r.rpush('a{foo}', '1', '2') r.rpush('b{foo}', '3', '4') - assert r.blpop(['b{foo}', 'a{foo}'], timeout=1) == (b('b{foo}'), b('3')) - assert r.blpop(['b{foo}', 'a{foo}'], timeout=1) == (b('b{foo}'), b('4')) - assert r.blpop(['b{foo}', 'a{foo}'], timeout=1) == (b('a{foo}'), b('1')) - assert r.blpop(['b{foo}', 'a{foo}'], timeout=1) == (b('a{foo}'), b('2')) + assert r.blpop(['b{foo}', 'a{foo}'], timeout=1) == (b'b{foo}', b'3') + assert r.blpop(['b{foo}', 'a{foo}'], timeout=1) == (b'b{foo}', b'4') + assert r.blpop(['b{foo}', 'a{foo}'], timeout=1) == (b'a{foo}', b'1') + assert r.blpop(['b{foo}', 'a{foo}'], timeout=1) == (b'a{foo}', b'2') assert r.blpop(['b{foo}', 'a{foo}'], timeout=1) is None r.rpush('c{foo}', '1') - assert r.blpop('c{foo}', timeout=1) == (b('c{foo}'), b('1')) + assert r.blpop('c{foo}', timeout=1) == (b'c{foo}', b'1') def test_brpop(self, r): r.rpush('a{foo}', '1', '2') r.rpush('b{foo}', '3', '4') - assert r.brpop(['b{foo}', 'a{foo}'], timeout=1) == (b('b{foo}'), b('4')) - assert r.brpop(['b{foo}', 'a{foo}'], timeout=1) == (b('b{foo}'), b('3')) - assert r.brpop(['b{foo}', 'a{foo}'], timeout=1) == (b('a{foo}'), b('2')) - assert r.brpop(['b{foo}', 'a{foo}'], timeout=1) == (b('a{foo}'), b('1')) + assert r.brpop(['b{foo}', 'a{foo}'], timeout=1) == (b'b{foo}', b'4') + assert r.brpop(['b{foo}', 'a{foo}'], timeout=1) == (b'b{foo}', b'3') + assert r.brpop(['b{foo}', 'a{foo}'], timeout=1) == (b'a{foo}', b'2') + assert r.brpop(['b{foo}', 'a{foo}'], timeout=1) == (b'a{foo}', b'1') assert r.brpop(['b{foo}', 'a{foo}'], timeout=1) is None r.rpush('c{foo}', '1') - assert r.brpop('c{foo}', timeout=1) == (b('c{foo}'), b('1')) + assert r.brpop('c{foo}', timeout=1) == (b'c{foo}', b'1') def test_brpoplpush(self, r): r.rpush('a{foo}', '1', '2') r.rpush('b{foo}', '3', '4') - assert r.brpoplpush('a{foo}', 'b{foo}') == b('2') - assert r.brpoplpush('a{foo}', 'b{foo}') == b('1') + assert r.brpoplpush('a{foo}', 
'b{foo}') == b'2'
+        assert r.brpoplpush('a{foo}', 'b{foo}') == b'1'
         assert r.brpoplpush('a{foo}', 'b{foo}', timeout=1) is None
         assert r.lrange('a{foo}', 0, -1) == []
-        assert r.lrange('b{foo}', 0, -1) == [b('1'), b('2'), b('3'), b('4')]
+        assert r.lrange('b{foo}', 0, -1) == [b'1', b'2', b'3', b'4']
 
     def test_brpoplpush_empty_string(self, r):
         r.rpush('a', '')
-        assert r.brpoplpush('a', 'b') == b('')
+        assert r.brpoplpush('a', 'b') == b''
 
     def test_lindex(self, r):
         r.rpush('a', '1', '2', '3')
-        assert r.lindex('a', '0') == b('1')
-        assert r.lindex('a', '1') == b('2')
-        assert r.lindex('a', '2') == b('3')
+        assert r.lindex('a', '0') == b'1'
+        assert r.lindex('a', '1') == b'2'
+        assert r.lindex('a', '2') == b'3'
 
     def test_linsert(self, r):
         r.rpush('a', '1', '2', '3')
         assert r.linsert('a', 'after', '2', '2.5') == 4
-        assert r.lrange('a', 0, -1) == [b('1'), b('2'), b('2.5'), b('3')]
+        assert r.lrange('a', 0, -1) == [b'1', b'2', b'2.5', b'3']
         assert r.linsert('a', 'before', '2', '1.5') == 5
         assert r.lrange('a', 0, -1) == \
-            [b('1'), b('1.5'), b('2'), b('2.5'), b('3')]
+            [b'1', b'1.5', b'2', b'2.5', b'3']
 
     def test_llen(self, r):
         r.rpush('a', '1', '2', '3')
@@ -549,74 +546,74 @@ def test_llen(self, r):
 
     def test_lpop(self, r):
         r.rpush('a', '1', '2', '3')
-        assert r.lpop('a') == b('1')
-        assert r.lpop('a') == b('2')
-        assert r.lpop('a') == b('3')
+        assert r.lpop('a') == b'1'
+        assert r.lpop('a') == b'2'
+        assert r.lpop('a') == b'3'
         assert r.lpop('a') is None
 
     def test_lpush(self, r):
         assert r.lpush('a', '1') == 1
         assert r.lpush('a', '2') == 2
         assert r.lpush('a', '3', '4') == 4
-        assert r.lrange('a', 0, -1) == [b('4'), b('3'), b('2'), b('1')]
+        assert r.lrange('a', 0, -1) == [b'4', b'3', b'2', b'1']
 
     def test_lpushx(self, r):
         assert r.lpushx('a', '1') == 0
         assert r.lrange('a', 0, -1) == []
         r.rpush('a', '1', '2', '3')
         assert r.lpushx('a', '4') == 4
-        assert r.lrange('a', 0, -1) == [b('4'), b('1'), b('2'), b('3')]
+        assert r.lrange('a', 0, -1) == [b'4', b'1', b'2', b'3']
 
     def test_lrange(self, r):
         r.rpush('a', '1', '2', '3', '4', '5')
-        assert r.lrange('a', 0, 2) == [b('1'), b('2'), b('3')]
-        assert r.lrange('a', 2, 10) == [b('3'), b('4'), b('5')]
-        assert r.lrange('a', 0, -1) == [b('1'), b('2'), b('3'), b('4'), b('5')]
+        assert r.lrange('a', 0, 2) == [b'1', b'2', b'3']
+        assert r.lrange('a', 2, 10) == [b'3', b'4', b'5']
+        assert r.lrange('a', 0, -1) == [b'1', b'2', b'3', b'4', b'5']
 
     def test_lrem(self, r):
         r.rpush('a', '1', '1', '1', '1')
         assert r.lrem('a', '1', 1) == 1
-        assert r.lrange('a', 0, -1) == [b('1'), b('1'), b('1')]
+        assert r.lrange('a', 0, -1) == [b'1', b'1', b'1']
         assert r.lrem('a', 0, '1') == 3
         assert r.lrange('a', 0, -1) == []
 
     def test_lset(self, r):
         r.rpush('a', '1', '2', '3')
-        assert r.lrange('a', 0, -1) == [b('1'), b('2'), b('3')]
+        assert r.lrange('a', 0, -1) == [b'1', b'2', b'3']
         assert r.lset('a', 1, '4')
-        assert r.lrange('a', 0, 2) == [b('1'), b('4'), b('3')]
+        assert r.lrange('a', 0, 2) == [b'1', b'4', b'3']
 
     def test_ltrim(self, r):
         r.rpush('a', '1', '2', '3')
         assert r.ltrim('a', 0, 1)
-        assert r.lrange('a', 0, -1) == [b('1'), b('2')]
+        assert r.lrange('a', 0, -1) == [b'1', b'2']
 
     def test_rpop(self, r):
         r.rpush('a', '1', '2', '3')
-        assert r.rpop('a') == b('3')
-        assert r.rpop('a') == b('2')
-        assert r.rpop('a') == b('1')
+        assert r.rpop('a') == b'3'
+        assert r.rpop('a') == b'2'
+        assert r.rpop('a') == b'1'
         assert r.rpop('a') is None
 
     def test_rpoplpush(self, r):
         r.rpush('a', 'a1', 'a2', 'a3')
         r.rpush('b', 'b1', 'b2', 'b3')
-        assert r.rpoplpush('a', 'b') == b('a3')
-        assert 
r.lrange('a', 0, -1) == [b('a1'), b('a2')] - assert r.lrange('b', 0, -1) == [b('a3'), b('b1'), b('b2'), b('b3')] + assert r.rpoplpush('a', 'b') == b'a3' + assert r.lrange('a', 0, -1) == [b'a1', b'a2'] + assert r.lrange('b', 0, -1) == [b'a3', b'b1', b'b2', b'b3'] def test_rpush(self, r): assert r.rpush('a', '1') == 1 assert r.rpush('a', '2') == 2 assert r.rpush('a', '3', '4') == 4 - assert r.lrange('a', 0, -1) == [b('1'), b('2'), b('3'), b('4')] + assert r.lrange('a', 0, -1) == [b'1', b'2', b'3', b'4'] def test_rpushx(self, r): assert r.rpushx('a', 'b') == 0 assert r.lrange('a', 0, -1) == [] r.rpush('a', '1', '2', '3') assert r.rpushx('a', '4') == 4 - assert r.lrange('a', 0, -1) == [b('1'), b('2'), b('3'), b('4')] + assert r.lrange('a', 0, -1) == [b'1', b'2', b'3', b'4'] # SCAN COMMANDS def test_scan(self, r): @@ -629,81 +626,81 @@ def test_scan(self, r): assert cursor == 0 keys += partial_keys - assert set(keys) == set([b('a'), b('b'), b('c')]) + assert set(keys) == set([b'a', b'b', b'c']) keys = [] for result in r.scan(match='a').values(): cursor, partial_keys = result assert cursor == 0 keys += partial_keys - assert set(keys) == set([b('a')]) + assert set(keys) == set([b'a']) def test_scan_iter(self, r): alphabet = 'abcdefghijklmnopqrstuvwABCDEFGHIJKLMNOPQRSTUVW' for i, c in enumerate(alphabet): r.set(c, i) keys = list(r.scan_iter()) - expected_result = [b(c) for c in alphabet] + expected_result = [b"{0}".format(c) for c in alphabet] assert set(keys) == set(expected_result) keys = list(r.scan_iter(match='a')) - assert set(keys) == set([b('a')]) + assert set(keys) == set([b'a']) r.set('Xa', 1) r.set('Xb', 2) r.set('Xc', 3) keys = list(r.scan_iter('X*', count=1000)) assert len(keys) == 3 - assert set(keys) == set([b('Xa'), b('Xb'), b('Xc')]) + assert set(keys) == set([b'Xa', b'Xb', b'Xc']) def test_sscan(self, r): r.sadd('a', 1, 2, 3) cursor, members = r.sscan('a') assert cursor == 0 - assert set(members) == set([b('1'), b('2'), b('3')]) - _, members = r.sscan('a', match=b('1')) - assert set(members) == set([b('1')]) + assert set(members) == set([b'1', b'2', b'3']) + _, members = r.sscan('a', match=b'1') + assert set(members) == set([b'1']) def test_sscan_iter(self, r): r.sadd('a', 1, 2, 3) members = list(r.sscan_iter('a')) - assert set(members) == set([b('1'), b('2'), b('3')]) - members = list(r.sscan_iter('a', match=b('1'))) - assert set(members) == set([b('1')]) + assert set(members) == set([b'1', b'2', b'3']) + members = list(r.sscan_iter('a', match=b'1')) + assert set(members) == set([b'1']) def test_hscan(self, r): r.hmset('a', {'a': 1, 'b': 2, 'c': 3}) cursor, dic = r.hscan('a') assert cursor == 0 - assert dic == {b('a'): b('1'), b('b'): b('2'), b('c'): b('3')} + assert dic == {b'a': b'1', b'b': b'2', b'c': b'3'} _, dic = r.hscan('a', match='a') - assert dic == {b('a'): b('1')} + assert dic == {b'a': b'1'} def test_hscan_iter(self, r): r.hmset('a', {'a': 1, 'b': 2, 'c': 3}) dic = dict(r.hscan_iter('a')) - assert dic == {b('a'): b('1'), b('b'): b('2'), b('c'): b('3')} + assert dic == {b'a': b'1', b'b': b'2', b'c': b'3'} dic = dict(r.hscan_iter('a', match='a')) - assert dic == {b('a'): b('1')} + assert dic == {b'a': b'1'} def test_zscan(self, r): r.zadd('a', 1, 'a', 2, 'b', 3, 'c') cursor, pairs = r.zscan('a') assert cursor == 0 - assert set(pairs) == set([(b('a'), 1), (b('b'), 2), (b('c'), 3)]) + assert set(pairs) == set([(b'a', 1), (b'b', 2), (b'c', 3)]) _, pairs = r.zscan('a', match='a') - assert set(pairs) == set([(b('a'), 1)]) + assert set(pairs) == set([(b'a', 1)]) def 
test_zscan_iter(self, r): r.zadd('a', 1, 'a', 2, 'b', 3, 'c') pairs = list(r.zscan_iter('a')) - assert set(pairs) == set([(b('a'), 1), (b('b'), 2), (b('c'), 3)]) + assert set(pairs) == set([(b'a', 1), (b'b', 2), (b'c', 3)]) pairs = list(r.zscan_iter('a', match='a')) - assert set(pairs) == set([(b('a'), 1)]) + assert set(pairs) == set([(b'a', 1)]) # SET COMMANDS def test_sadd(self, r): - members = set([b('1'), b('2'), b('3')]) + members = set([b'1', b'2', b'3']) r.sadd('a', *members) assert r.smembers('a') == members @@ -713,17 +710,17 @@ def test_scard(self, r): def test_sdiff(self, r): r.sadd('a{foo}', '1', '2', '3') - assert r.sdiff('a{foo}', 'b{foo}') == set([b('1'), b('2'), b('3')]) + assert r.sdiff('a{foo}', 'b{foo}') == set([b'1', b'2', b'3']) r.sadd('b{foo}', '2', '3') - assert r.sdiff('a{foo}', 'b{foo}') == set([b('1')]) + assert r.sdiff('a{foo}', 'b{foo}') == set([b'1']) def test_sdiffstore(self, r): r.sadd('a{foo}', '1', '2', '3') assert r.sdiffstore('c{foo}', 'a{foo}', 'b{foo}') == 3 - assert r.smembers('c{foo}') == set([b('1'), b('2'), b('3')]) + assert r.smembers('c{foo}') == set([b'1', b'2', b'3']) r.sadd('b{foo}', '2', '3') assert r.sdiffstore('c{foo}', 'a{foo}', 'b{foo}') == 1 - assert r.smembers('c{foo}') == set([b('1')]) + assert r.smembers('c{foo}') == set([b'1']) # Diff:s that return empty set should not fail r.sdiffstore('d{foo}', 'e{foo}') == 0 @@ -732,7 +729,7 @@ def test_sinter(self, r): r.sadd('a{foo}', '1', '2', '3') assert r.sinter('a{foo}', 'b{foo}') == set() r.sadd('b{foo}', '2', '3') - assert r.sinter('a{foo}', 'b{foo}') == set([b('2'), b('3')]) + assert r.sinter('a{foo}', 'b{foo}') == set([b'2', b'3']) def test_sinterstore(self, r): r.sadd('a{foo}', '1', '2', '3') @@ -740,7 +737,7 @@ def test_sinterstore(self, r): assert r.smembers('c{foo}') == set() r.sadd('b{foo}', '2', '3') assert r.sinterstore('c{foo}', 'a{foo}', 'b{foo}') == 2 - assert r.smembers('c{foo}') == set([b('2'), b('3')]) + assert r.smembers('c{foo}') == set([b'2', b'3']) def test_sismember(self, r): r.sadd('a', '1', '2', '3') @@ -751,29 +748,29 @@ def test_sismember(self, r): def test_smembers(self, r): r.sadd('a', '1', '2', '3') - assert r.smembers('a') == set([b('1'), b('2'), b('3')]) + assert r.smembers('a') == set([b'1', b'2', b'3']) def test_smove(self, r): r.sadd('a{foo}', 'a1', 'a2') r.sadd('b{foo}', 'b1', 'b2') assert r.smove('a{foo}', 'b{foo}', 'a1') - assert r.smembers('a{foo}') == set([b('a2')]) - assert r.smembers('b{foo}') == set([b('b1'), b('b2'), b('a1')]) + assert r.smembers('a{foo}') == set([b'a2']) + assert r.smembers('b{foo}') == set([b'b1', b'b2', b'a1']) def test_spop(self, r): - s = [b('1'), b('2'), b('3')] + s = [b'1', b'2', b'3'] r.sadd('a', *s) value = r.spop('a') assert value in s assert r.smembers('a') == set(s) - set([value]) def test_srandmember(self, r): - s = [b('1'), b('2'), b('3')] + s = [b'1', b'2', b'3'] r.sadd('a', *s) assert r.srandmember('a') in s def test_srandmember_multi_value(self, r): - s = [b('1'), b('2'), b('3')] + s = [b'1', b'2', b'3'] r.sadd('a', *s) randoms = r.srandmember('a', number=2) assert len(randoms) == 2 @@ -783,23 +780,23 @@ def test_srem(self, r): r.sadd('a', '1', '2', '3', '4') assert r.srem('a', '5') == 0 assert r.srem('a', '2', '4') == 2 - assert r.smembers('a') == set([b('1'), b('3')]) + assert r.smembers('a') == set([b'1', b'3']) def test_sunion(self, r): r.sadd('a{foo}', '1', '2') r.sadd('b{foo}', '2', '3') - assert r.sunion('a{foo}', 'b{foo}') == set([b('1'), b('2'), b('3')]) + assert r.sunion('a{foo}', 'b{foo}') == set([b'1', 
b'2', b'3'])
 
     def test_sunionstore(self, r):
         r.sadd('a{foo}', '1', '2')
         r.sadd('b{foo}', '2', '3')
         assert r.sunionstore('c{foo}', 'a{foo}', 'b{foo}') == 3
-        assert r.smembers('c{foo}') == set([b('1'), b('2'), b('3')])
+        assert r.smembers('c{foo}') == set([b'1', b'2', b'3'])
 
     # SORTED SET COMMANDS
     def test_zadd(self, r):
         r.zadd('a', a1=1, a2=2, a3=3)
-        assert r.zrange('a', 0, -1) == [b('a1'), b('a2'), b('a3')]
+        assert r.zrange('a', 0, -1) == [b'a1', b'a2', b'a3']
 
     def test_zcard(self, r):
         r.zadd('a', a1=1, a2=2, a3=3)
@@ -837,7 +834,7 @@ def test_zinterstore_sum(self, r):
         r.zadd('c{foo}', a1=6, a3=5, a4=4)
         assert r.zinterstore('d{foo}', ['a{foo}', 'b{foo}', 'c{foo}']) == 2
         assert r.zrange('d{foo}', 0, -1, withscores=True) == \
-            [(b('a3'), 8), (b('a1'), 9)]
+            [(b'a3', 8), (b'a1', 9)]
 
     def test_zinterstore_max(self, r):
         r.zadd('a{foo}', a1=1, a2=1, a3=1)
@@ -845,7 +842,7 @@ def test_zinterstore_max(self, r):
         r.zadd('b{foo}', a1=2, a2=2, a3=2)
         r.zadd('c{foo}', a1=6, a3=5, a4=4)
         assert r.zinterstore('d{foo}', ['a{foo}', 'b{foo}', 'c{foo}'], aggregate='MAX') == 2
         assert r.zrange('d{foo}', 0, -1, withscores=True) == \
-            [(b('a3'), 5), (b('a1'), 6)]
+            [(b'a3', 5), (b'a1', 6)]
 
     def test_zinterstore_min(self, r):
         r.zadd('a{foo}', a1=1, a2=2, a3=3)
@@ -853,7 +850,7 @@ def test_zinterstore_min(self, r):
         r.zadd('b{foo}', a1=2, a2=2, a3=2)
         r.zadd('c{foo}', a1=6, a3=5, a4=4)
         assert r.zinterstore('d{foo}', ['a{foo}', 'b{foo}', 'c{foo}'], aggregate='MIN') == 2
         assert r.zrange('d{foo}', 0, -1, withscores=True) == \
-            [(b('a1'), 1), (b('a3'), 3)]
+            [(b'a1', 1), (b'a3', 3)]
 
     def test_zinterstore_with_weight(self, r):
         r.zadd('a{foo}', a1=1, a2=1, a3=1)
@@ -861,48 +858,48 @@ def test_zinterstore_with_weight(self, r):
         r.zadd('b{foo}', a1=2, a2=2, a3=2)
         r.zadd('c{foo}', a1=6, a3=5, a4=4)
         assert r.zinterstore('d{foo}', {'a{foo}': 1, 'b{foo}': 2, 'c{foo}': 3}) == 2
         assert r.zrange('d{foo}', 0, -1, withscores=True) == \
-            [(b('a3'), 20), (b('a1'), 23)]
+            [(b'a3', 20), (b'a1', 23)]
 
     def test_zrange(self, r):
         r.zadd('a', a1=1, a2=2, a3=3)
-        assert r.zrange('a', 0, 1) == [b('a1'), b('a2')]
-        assert r.zrange('a', 1, 2) == [b('a2'), b('a3')]
+        assert r.zrange('a', 0, 1) == [b'a1', b'a2']
+        assert r.zrange('a', 1, 2) == [b'a2', b'a3']
 
         # withscores
         assert r.zrange('a', 0, 1, withscores=True) == \
-            [(b('a1'), 1.0), (b('a2'), 2.0)]
+            [(b'a1', 1.0), (b'a2', 2.0)]
         assert r.zrange('a', 1, 2, withscores=True) == \
-            [(b('a2'), 2.0), (b('a3'), 3.0)]
+            [(b'a2', 2.0), (b'a3', 3.0)]
 
         # custom score function
         assert r.zrange('a', 0, 1, withscores=True,
                         score_cast_func=int) == \
-            [(b('a1'), 1), (b('a2'), 2)]
+            [(b'a1', 1), (b'a2', 2)]
 
     def test_zrangebylex(self, r):
         r.zadd('a', a=0, b=0, c=0, d=0, e=0, f=0, g=0)
-        assert r.zrangebylex('a', '-', '[c') == [b('a'), b('b'), b('c')]
-        assert r.zrangebylex('a', '-', '(c') == [b('a'), b('b')]
+        assert r.zrangebylex('a', '-', '[c') == [b'a', b'b', b'c']
+        assert r.zrangebylex('a', '-', '(c') == [b'a', b'b']
         assert r.zrangebylex('a', '[aaa', '(g') == \
-            [b('b'), b('c'), b('d'), b('e'), b('f')]
-        assert r.zrangebylex('a', '[f', '+') == [b('f'), b('g')]
-        assert r.zrangebylex('a', '-', '+', start=3, num=2) == [b('d'), b('e')]
+            [b'b', b'c', b'd', b'e', b'f']
+        assert r.zrangebylex('a', '[f', '+') == [b'f', b'g']
+        assert r.zrangebylex('a', '-', '+', start=3, num=2) == [b'd', b'e']
 
     def test_zrangebyscore(self, r):
         r.zadd('a', a1=1, a2=2, a3=3, a4=4, a5=5)
-        assert r.zrangebyscore('a', 2, 4) == [b('a2'), b('a3'), b('a4')]
+        assert r.zrangebyscore('a', 2, 4) == [b'a2', b'a3', b'a4']
 
         # slicing with start/num
         assert r.zrangebyscore('a', 2, 4, start=1, num=2) == \
-            [b('a3'), b('a4')]
+            [b'a3', b'a4']
 
         # 
withscores assert r.zrangebyscore('a', 2, 4, withscores=True) == \ - [(b('a2'), 2.0), (b('a3'), 3.0), (b('a4'), 4.0)] + [(b'a2', 2.0), (b'a3', 3.0), (b'a4', 4.0)] # custom score function assert r.zrangebyscore('a', 2, 4, withscores=True, score_cast_func=int) == \ - [(b('a2'), 2), (b('a3'), 3), (b('a4'), 4)] + [(b'a2', 2), (b'a3', 3), (b'a4', 4)] def test_zrank(self, r): r.zadd('a', a1=1, a2=2, a3=3, a4=4, a5=5) @@ -913,68 +910,68 @@ def test_zrank(self, r): def test_zrem(self, r): r.zadd('a', a1=1, a2=2, a3=3) assert r.zrem('a', 'a2') == 1 - assert r.zrange('a', 0, -1) == [b('a1'), b('a3')] + assert r.zrange('a', 0, -1) == [b'a1', b'a3'] assert r.zrem('a', 'b') == 0 - assert r.zrange('a', 0, -1) == [b('a1'), b('a3')] + assert r.zrange('a', 0, -1) == [b'a1', b'a3'] def test_zrem_multiple_keys(self, r): r.zadd('a', a1=1, a2=2, a3=3) assert r.zrem('a', 'a1', 'a2') == 2 - assert r.zrange('a', 0, 5) == [b('a3')] + assert r.zrange('a', 0, 5) == [b'a3'] def test_zremrangebylex(self, r): r.zadd('a', a=0, b=0, c=0, d=0, e=0, f=0, g=0) assert r.zremrangebylex('a', '-', '[c') == 3 - assert r.zrange('a', 0, -1) == [b('d'), b('e'), b('f'), b('g')] + assert r.zrange('a', 0, -1) == [b'd', b'e', b'f', b'g'] assert r.zremrangebylex('a', '[f', '+') == 2 - assert r.zrange('a', 0, -1) == [b('d'), b('e')] + assert r.zrange('a', 0, -1) == [b'd', b'e'] assert r.zremrangebylex('a', '[h', '+') == 0 - assert r.zrange('a', 0, -1) == [b('d'), b('e')] + assert r.zrange('a', 0, -1) == [b'd', b'e'] def test_zremrangebyrank(self, r): r.zadd('a', a1=1, a2=2, a3=3, a4=4, a5=5) assert r.zremrangebyrank('a', 1, 3) == 3 - assert r.zrange('a', 0, 5) == [b('a1'), b('a5')] + assert r.zrange('a', 0, 5) == [b'a1', b'a5'] def test_zremrangebyscore(self, r): r.zadd('a', a1=1, a2=2, a3=3, a4=4, a5=5) assert r.zremrangebyscore('a', 2, 4) == 3 - assert r.zrange('a', 0, -1) == [b('a1'), b('a5')] + assert r.zrange('a', 0, -1) == [b'a1', b'a5'] assert r.zremrangebyscore('a', 2, 4) == 0 - assert r.zrange('a', 0, -1) == [b('a1'), b('a5')] + assert r.zrange('a', 0, -1) == [b'a1', b'a5'] def test_zrevrange(self, r): r.zadd('a', a1=1, a2=2, a3=3) - assert r.zrevrange('a', 0, 1) == [b('a3'), b('a2')] - assert r.zrevrange('a', 1, 2) == [b('a2'), b('a1')] + assert r.zrevrange('a', 0, 1) == [b'a3', b'a2'] + assert r.zrevrange('a', 1, 2) == [b'a2', b'a1'] # withscores assert r.zrevrange('a', 0, 1, withscores=True) == \ - [(b('a3'), 3.0), (b('a2'), 2.0)] + [(b'a3', 3.0), (b'a2', 2.0)] assert r.zrevrange('a', 1, 2, withscores=True) == \ - [(b('a2'), 2.0), (b('a1'), 1.0)] + [(b'a2', 2.0), (b'a1', 1.0)] # custom score function assert r.zrevrange('a', 0, 1, withscores=True, score_cast_func=int) == \ - [(b('a3'), 3.0), (b('a2'), 2.0)] + [(b'a3', 3.0), (b'a2', 2.0)] def test_zrevrangebyscore(self, r): r.zadd('a', a1=1, a2=2, a3=3, a4=4, a5=5) - assert r.zrevrangebyscore('a', 4, 2) == [b('a4'), b('a3'), b('a2')] + assert r.zrevrangebyscore('a', 4, 2) == [b'a4', b'a3', b'a2'] # slicing with start/num assert r.zrevrangebyscore('a', 4, 2, start=1, num=2) == \ - [b('a3'), b('a2')] + [b'a3', b'a2'] # withscores assert r.zrevrangebyscore('a', 4, 2, withscores=True) == \ - [(b('a4'), 4.0), (b('a3'), 3.0), (b('a2'), 2.0)] + [(b'a4', 4.0), (b'a3', 3.0), (b'a2', 2.0)] # custom score function assert r.zrevrangebyscore('a', 4, 2, withscores=True, score_cast_func=int) == \ - [(b('a4'), 4), (b('a3'), 3), (b('a2'), 2)] + [(b'a4', 4), (b'a3', 3), (b'a2', 2)] def test_zrevrank(self, r): r.zadd('a', a1=1, a2=2, a3=3, a4=4, a5=5) @@ -1002,7 +999,7 @@ def 
test_zunionstore_sum(self, r): r.zadd('c{foo}', a1=6, a3=5, a4=4) assert r.zunionstore('d{foo}', ['a{foo}', 'b{foo}', 'c{foo}']) == 4 assert r.zrange('d{foo}', 0, -1, withscores=True) == \ - [(b('a2'), 3), (b('a4'), 4), (b('a3'), 8), (b('a1'), 9)] + [(b'a2', 3), (b'a4', 4), (b'a3', 8), (b'a1', 9)] def test_zunionstore_max(self, r): r.zadd('a{foo}', a1=1, a2=1, a3=1) @@ -1010,7 +1007,7 @@ def test_zunionstore_max(self, r): r.zadd('c{foo}', a1=6, a3=5, a4=4) assert r.zunionstore('d{foo}', ['a{foo}', 'b{foo}', 'c{foo}'], aggregate='MAX') == 4 assert r.zrange('d{foo}', 0, -1, withscores=True) == \ - [(b('a2'), 2), (b('a4'), 4), (b('a3'), 5), (b('a1'), 6)] + [(b'a2', 2), (b'a4', 4), (b'a3', 5), (b'a1', 6)] def test_zunionstore_min(self, r): r.zadd('a{foo}', a1=1, a2=2, a3=3) @@ -1018,7 +1015,7 @@ def test_zunionstore_min(self, r): r.zadd('c{foo}', a1=6, a3=5, a4=4) assert r.zunionstore('d{foo}', ['a{foo}', 'b{foo}', 'c{foo}'], aggregate='MIN') == 4 assert r.zrange('d{foo}', 0, -1, withscores=True) == \ - [(b('a1'), 1), (b('a2'), 2), (b('a3'), 3), (b('a4'), 4)] + [(b'a1', 1), (b'a2', 2), (b'a3', 3), (b'a4', 4)] def test_zunionstore_with_weight(self, r): r.zadd('a{foo}', a1=1, a2=1, a3=1) @@ -1026,11 +1023,11 @@ def test_zunionstore_with_weight(self, r): r.zadd('c{foo}', a1=6, a3=5, a4=4) assert r.zunionstore('d{foo}', {'a{foo}': 1, 'b{foo}': 2, 'c{foo}': 3}) == 4 assert r.zrange('d{foo}', 0, -1, withscores=True) == \ - [(b('a2'), 5), (b('a4'), 12), (b('a3'), 20), (b('a1'), 23)] + [(b'a2', 5), (b'a4', 12), (b'a3', 20), (b'a1', 23)] # # HYPERLOGLOG TESTS def test_pfadd(self, r): - members = set([b('1'), b('2'), b('3')]) + members = set([b'1', b'2', b'3']) assert r.pfadd('a', *members) == 1 assert r.pfadd('a', *members) == 0 assert r.pfcount('a') == len(members) @@ -1038,18 +1035,18 @@ def test_pfadd(self, r): @pytest.mark.xfail(reason="New pfcount in 2.10.5 currently breaks in cluster") @skip_if_server_version_lt('2.8.9') def test_pfcount(self, r): - members = set([b('1'), b('2'), b('3')]) + members = set([b'1', b'2', b'3']) r.pfadd('a', *members) assert r.pfcount('a') == len(members) - members_b = set([b('2'), b('3'), b('4')]) + members_b = set([b'2', b'3', b'4']) r.pfadd('b', *members_b) assert r.pfcount('b') == len(members_b) assert r.pfcount('a', 'b') == len(members_b.union(members)) def test_pfmerge(self, r): - mema = set([b('1'), b('2'), b('3')]) - memb = set([b('2'), b('3'), b('4')]) - memc = set([b('5'), b('6'), b('7')]) + mema = set([b'1', b'2', b'3']) + memb = set([b'2', b'3', b'4']) + memc = set([b'5', b'6', b'7']) r.pfadd('a', *mema) r.pfadd('b', *memb) r.pfadd('c', *memc) @@ -1061,17 +1058,17 @@ def test_pfmerge(self, r): # HASH COMMANDS def test_hget_and_hset(self, r): r.hmset('a', {'1': 1, '2': 2, '3': 3}) - assert r.hget('a', '1') == b('1') - assert r.hget('a', '2') == b('2') - assert r.hget('a', '3') == b('3') + assert r.hget('a', '1') == b'1' + assert r.hget('a', '2') == b'2' + assert r.hget('a', '3') == b'3' # field was updated, redis returns 0 assert r.hset('a', '2', 5) == 0 - assert r.hget('a', '2') == b('5') + assert r.hget('a', '2') == b'5' # field is new, redis returns 1 assert r.hset('a', '4', 4) == 1 - assert r.hget('a', '4') == b('4') + assert r.hget('a', '4') == b'4' # key inside of hash that doesn't exist returns null value assert r.hget('a', 'b') is None @@ -1089,7 +1086,7 @@ def test_hexists(self, r): assert not r.hexists('a', '4') def test_hgetall(self, r): - h = {b('a1'): b('1'), b('a2'): b('2'), b('a3'): b('3')} + h = {b'a1': b'1', b'a2': b'2', b'a3': b'3'} 
r.hmset('a', h) assert r.hgetall('a') == h @@ -1104,7 +1101,7 @@ def test_hincrbyfloat(self, r): assert r.hincrbyfloat('a', '1', 1.2) == 3.2 def test_hkeys(self, r): - h = {b('a1'): b('1'), b('a2'): b('2'), b('a3'): b('3')} + h = {b'a1': b'1', b'a2': b'2', b'a3': b'3'} r.hmset('a', h) local_keys = list(iterkeys(h)) remote_keys = r.hkeys('a') @@ -1116,22 +1113,22 @@ def test_hlen(self, r): def test_hmget(self, r): assert r.hmset('a', {'a': 1, 'b': 2, 'c': 3}) - assert r.hmget('a', 'a', 'b', 'c') == [b('1'), b('2'), b('3')] + assert r.hmget('a', 'a', 'b', 'c') == [b'1', b'2', b'3'] def test_hmset(self, r): - h = {b('a'): b('1'), b('b'): b('2'), b('c'): b('3')} + h = {b'a': b'1', b'b': b'2', b'c': b'3'} assert r.hmset('a', h) assert r.hgetall('a') == h def test_hsetnx(self, r): # Initially set the hash field assert r.hsetnx('a', '1', 1) - assert r.hget('a', '1') == b('1') + assert r.hget('a', '1') == b'1' assert not r.hsetnx('a', '1', 2) - assert r.hget('a', '1') == b('1') + assert r.hget('a', '1') == b'1' def test_hvals(self, r): - h = {b('a1'): b('1'), b('a2'): b('2'), b('a3'): b('3')} + h = {b'a1': b'1', b'a2': b'2', b'a3': b'3'} r.hmset('a', h) local_vals = list(itervalues(h)) remote_vals = r.hvals('a') @@ -1140,25 +1137,25 @@ def test_hvals(self, r): # SORT def test_sort_basic(self, r): r.rpush('a', '3', '2', '1', '4') - assert r.sort('a') == [b('1'), b('2'), b('3'), b('4')] + assert r.sort('a') == [b'1', b'2', b'3', b'4'] def test_sort_limited(self, r): r.rpush('a', '3', '2', '1', '4') - assert r.sort('a', start=1, num=2) == [b('2'), b('3')] + assert r.sort('a', start=1, num=2) == [b'2', b'3'] def test_sort_by(self, r): r['score:1'] = 8 r['score:2'] = 3 r['score:3'] = 5 r.rpush('a', '3', '2', '1') - assert r.sort('a', by='score:*') == [b('2'), b('3'), b('1')] + assert r.sort('a', by='score:*') == [b'2', b'3', b'1'] def test_sort_get(self, r): r['user:1'] = 'u1' r['user:2'] = 'u2' r['user:3'] = 'u3' r.rpush('a', '2', '3', '1') - assert r.sort('a', get='user:*') == [b('u1'), b('u2'), b('u3')] + assert r.sort('a', get='user:*') == [b'u1', b'u2', b'u3'] def test_sort_get_multi(self, r): r['user:1'] = 'u1' @@ -1166,7 +1163,7 @@ def test_sort_get_multi(self, r): r['user:3'] = 'u3' r.rpush('a', '2', '3', '1') assert r.sort('a', get=('user:*', '#')) == \ - [b('u1'), b('1'), b('u2'), b('2'), b('u3'), b('3')] + [b'u1', b'1', b'u2', b'2', b'u3', b'3'] def test_sort_get_groups_two(self, r): r['user:1'] = 'u1' @@ -1174,7 +1171,7 @@ def test_sort_get_groups_two(self, r): r['user:3'] = 'u3' r.rpush('a', '2', '3', '1') assert r.sort('a', get=('user:*', '#'), groups=True) == \ - [(b('u1'), b('1')), (b('u2'), b('2')), (b('u3'), b('3'))] + [(b'u1', b'1'), (b'u2', b'2'), (b'u3', b'3')] def test_sort_groups_string_get(self, r): r['user:1'] = 'u1' @@ -1209,24 +1206,24 @@ def test_sort_groups_three_gets(self, r): r['door:3'] = 'd3' r.rpush('a', '2', '3', '1') assert r.sort('a', get=('user:*', 'door:*', '#'), groups=True) == [ - (b('u1'), b('d1'), b('1')), - (b('u2'), b('d2'), b('2')), - (b('u3'), b('d3'), b('3')) + (b'u1', b'd1', b'1'), + (b'u2', b'd2', b'2'), + (b'u3', b'd3', b'3') ] def test_sort_desc(self, r): r.rpush('a', '2', '3', '1') - assert r.sort('a', desc=True) == [b('3'), b('2'), b('1')] + assert r.sort('a', desc=True) == [b'3', b'2', b'1'] def test_sort_alpha(self, r): r.rpush('a', 'e', 'c', 'b', 'd', 'a') assert r.sort('a', alpha=True) == \ - [b('a'), b('b'), b('c'), b('d'), b('e')] + [b'a', b'b', b'c', b'd', b'e'] def test_sort_store(self, r): r.rpush('a', '2', '3', '1') assert r.sort('a', 
store='sorted_values') == 3 - assert r.lrange('sorted_values', 0, -1) == [b('1'), b('2'), b('3')] + assert r.lrange('sorted_values', 0, -1) == [b'1', b'2', b'3'] def test_sort_all_options(self, r): r['user:1:username'] = 'zeus' @@ -1253,7 +1250,7 @@ def test_sort_all_options(self, r): store='sorted') assert num == 4 assert r.lrange('sorted', 0, 10) == \ - [b('vodka'), b('milk'), b('gin'), b('apple juice')] + [b'vodka', b'milk', b'gin', b'apple juice'] class TestStrictCommands(object): @@ -1261,16 +1258,16 @@ class TestStrictCommands(object): def test_strict_zadd(self, sr): sr.zadd('a', 1.0, 'a1', 2.0, 'a2', a3=3.0) assert sr.zrange('a', 0, -1, withscores=True) == \ - [(b('a1'), 1.0), (b('a2'), 2.0), (b('a3'), 3.0)] + [(b'a1', 1.0), (b'a2', 2.0), (b'a3', 3.0)] def test_strict_lrem(self, sr): sr.rpush('a', 'a1', 'a2', 'a3', 'a1') sr.lrem('a', 0, 'a1') - assert sr.lrange('a', 0, -1) == [b('a2'), b('a3')] + assert sr.lrange('a', 0, -1) == [b'a2', b'a3'] def test_strict_setex(self, sr): assert sr.setex('a', 60, '1') - assert sr['a'] == b('1') + assert sr['a'] == b'1' assert 0 < sr.ttl('a') <= 60 def test_strict_ttl(self, sr): @@ -1291,25 +1288,25 @@ def test_strict_pttl(self, sr): def test_eval(self, sr): res = sr.eval("return {KEYS[1],KEYS[2],ARGV[1],ARGV[2]}", 2, "A{foo}", "B{foo}", "first", "second") - assert res[0] == b('A{foo}') - assert res[1] == b('B{foo}') - assert res[2] == b('first') - assert res[3] == b('second') + assert res[0] == b'A{foo}' + assert res[1] == b'B{foo}' + assert res[2] == b'first' + assert res[3] == b'second' class TestBinarySave(object): def test_binary_get_set(self, r): assert r.set(' foo bar ', '123') - assert r.get(' foo bar ') == b('123') + assert r.get(' foo bar ') == b'123' assert r.set(' foo\r\nbar\r\n ', '456') - assert r.get(' foo\r\nbar\r\n ') == b('456') + assert r.get(' foo\r\nbar\r\n ') == b'456' assert r.set(' \r\n\t\x07\x13 ', '789') - assert r.get(' \r\n\t\x07\x13 ') == b('789') + assert r.get(' \r\n\t\x07\x13 ') == b'789' assert sorted(r.keys('*')) == \ - [b(' \r\n\t\x07\x13 '), b(' foo\r\nbar\r\n '), b(' foo bar ')] + [b' \r\n\t\x07\x13 ', b' foo\r\nbar\r\n ', b' foo bar '] assert r.delete(' foo bar ') assert r.delete(' foo\r\nbar\r\n ') @@ -1317,9 +1314,9 @@ def test_binary_get_set(self, r): def test_binary_lists(self, r): mapping = { - b('foo bar'): [b('1'), b('2'), b('3')], - b('foo\r\nbar\r\n'): [b('4'), b('5'), b('6')], - b('foo\tbar\x07'): [b('7'), b('8'), b('9')], + b'foo bar': [b'1', b'2', b'3'], + b'foo\r\nbar\r\n': [b'4', b'5', b'6'], + b'foo\tbar\x07': [b'7', b'8', b'9'], } # fill in lists for key, value in iteritems(mapping): @@ -1371,7 +1368,7 @@ def test_large_responses(self, r): # load up 100K of data into a key data = ''.join([ascii_letters] * (100000 // len(ascii_letters))) r['a'] = data - assert r['a'] == b(data) + assert r['a'] == bdata def test_floating_point_encoding(self, r): """ From c87f792f3f697d8c864492e6a5fcd8b0ccb4c6fa Mon Sep 17 00:00:00 2001 From: Grokzen Date: Sun, 16 Dec 2018 15:56:19 +0100 Subject: [PATCH 071/263] Ported in a few new testcases that was not present in the test code --- rediscluster/connection.py | 1 + tests/test_cluster_connection_pool.py | 31 +++++++++++++++++- tests/test_pipeline.py | 45 +++++++++++++++++++++++++++ tests/test_pubsub.py | 21 +++++++++++++ 4 files changed, 97 insertions(+), 1 deletion(-) diff --git a/rediscluster/connection.py b/rediscluster/connection.py index ac6abc74..62a89c91 100644 --- a/rediscluster/connection.py +++ b/rediscluster/connection.py @@ -1,6 +1,7 @@ # -*- coding: 
utf-8 -*- # python std lib +from __future__ import unicode_literals import os import random import threading diff --git a/tests/test_cluster_connection_pool.py b/tests/test_cluster_connection_pool.py index 5a432c3e..f403e684 100644 --- a/tests/test_cluster_connection_pool.py +++ b/tests/test_cluster_connection_pool.py @@ -1,7 +1,6 @@ # -*- coding: utf-8 -*- # python std lib -from __future__ import with_statement import os import re import time @@ -429,6 +428,36 @@ def test_db_in_querystring(self): 'password': None, } + def test_extra_typed_querystring_options(self): + pool = redis.ConnectionPool.from_url( + 'redis://localhost/2?socket_timeout=20&socket_connect_timeout=10' + '&socket_keepalive=&retry_on_timeout=Yes&max_connections=10' + ) + + assert pool.connection_class == redis.Connection + assert pool.connection_kwargs == { + 'host': 'localhost', + 'port': 6379, + 'db': 2, + 'socket_timeout': 20.0, + 'socket_connect_timeout': 10.0, + 'retry_on_timeout': True, + 'password': None, + } + assert pool.max_connections == 10 + + def test_boolean_parsing(self): + for expected, value in ( + (None, None), + (None, ''), + (False, 0), (False, '0'), + (False, 'f'), (False, 'F'), (False, 'False'), + (False, 'n'), (False, 'N'), (False, 'No'), + (True, 1), (True, '1'), + (True, 'y'), (True, 'Y'), (True, 'Yes'), + ): + assert expected is to_bool(value) + def test_extra_querystring_options(self): pool = redis.ConnectionPool.from_url('redis://localhost?a=1&b=2') assert pool.connection_class == redis.Connection diff --git a/tests/test_pipeline.py b/tests/test_pipeline.py index e8750439..b49fadeb 100644 --- a/tests/test_pipeline.py +++ b/tests/test_pipeline.py @@ -143,6 +143,34 @@ def test_exec_error_raised(self, r): assert pipe.set('z', 'zzz').execute() == [True] assert r['z'] == b'zzz' + def test_transaction_with_empty_error_command(self, r): + """ + Commands with custom EMPTY_ERROR functionality return their default + values in the pipeline no matter the raise_on_error preference + """ + for error_switch in (True, False): + with r.pipeline() as pipe: + pipe.set('a', 1).mget([]).set('c', 3) + result = pipe.execute(raise_on_error=error_switch) + + assert result[0] + assert result[1] == [] + assert result[2] + + def test_pipeline_with_empty_error_command(self, r): + """ + Commands with custom EMPTY_ERROR functionality return their default + values in the pipeline no matter the raise_on_error preference + """ + for error_switch in (True, False): + with r.pipeline(transaction=False) as pipe: + pipe.set('a', 1).mget([]).set('c', 3) + result = pipe.execute(raise_on_error=error_switch) + + assert result[0] + assert result[1] == [] + assert result[2] + def test_parse_error_raised(self, r): with r.pipeline() as pipe: # the zrem is invalid because we don't pass any keys to it @@ -257,6 +285,23 @@ def test_exec_error_in_no_transaction_pipeline_unicode_command(self, r): assert r[key] == b'1' + def test_pipeline_with_bitfield(self, r): + with r.pipeline() as pipe: + pipe.set('a', '1') + bf = pipe.bitfield('b') + pipe2 = (bf + .set('u8', 8, 255) + .get('u8', 0) + .get('u4', 8) # 1111 + .get('u4', 12) # 1111 + .get('u4', 13) # 1110 + .execute()) + pipe.get('a') + response = pipe.execute() + + assert pipe == pipe2 + assert response == [True, [0, 0, 15, 15, 14], b'1'] + def test_blocked_methods(self, r): """ Currently some method calls on a Cluster pipeline diff --git a/tests/test_pubsub.py b/tests/test_pubsub.py index e0ea2837..556f584f 100644 --- a/tests/test_pubsub.py +++ b/tests/test_pubsub.py @@ -504,3 +504,24 @@ def 
test_pubsub_numpat(self, r): r.pubsub(ignore_subscribe_messages=True).psubscribe('*oo', '*ar', 'b*z') assert r.pubsub_numpat() == 3 + +class TestPubSubPings(object): + + @skip_if_server_version_lt('3.0.0') + def test_send_pubsub_ping(self, r): + p = r.pubsub(ignore_subscribe_messages=True) + p.subscribe('foo') + p.ping() + assert wait_for_message(p) == make_message(type='pong', channel=None, + data='', + pattern=None) + + @skip_if_server_version_lt('3.0.0') + @pytest.mark.xfail(reason="Pattern pubsub do not work currently") + def test_send_pubsub_ping_message(self, r): + p = r.pubsub(ignore_subscribe_messages=True) + p.subscribe('foo') + p.ping(message='hello world') + assert wait_for_message(p) == make_message(type='pong', channel=None, + data='hello world', + pattern=None) From 11f315713a30f26a1fda32278cc114851c0acb4e Mon Sep 17 00:00:00 2001 From: Grokzen Date: Wed, 26 Dec 2018 14:09:59 +0100 Subject: [PATCH 072/263] More unicode, bytestring fixes. Fixed so testing actually works by fixing StrictClusterPipeline imports everywhere. Lots of old set syntax cleanup --- rediscluster/__init__.py | 4 +- rediscluster/client.py | 6 +- rediscluster/nodemanager.py | 2 +- rediscluster/pipeline.py | 122 +++++++++++++------------- tests/test_cluster_obj.py | 2 +- tests/test_commands.py | 167 ++++++++++++++++++------------------ tests/test_utils.py | 2 +- 7 files changed, 152 insertions(+), 153 deletions(-) diff --git a/rediscluster/__init__.py b/rediscluster/__init__.py index bd9c5437..7bcc442a 100644 --- a/rediscluster/__init__.py +++ b/rediscluster/__init__.py @@ -5,14 +5,14 @@ # Import shortcut from .client import RedisCluster -from .pipeline import StrictClusterPipeline +from .pipeline import ClusterPipeline from .pubsub import ClusterPubSub # Monkey patch RedisCluster class into redis for easy access import redis setattr(redis, "RedisCluster", RedisCluster) setattr(redis, "ClusterPubSub", ClusterPubSub) -setattr(redis, "StrictClusterPipeline", StrictClusterPipeline) +setattr(redis, "ClusterPipeline", ClusterPipeline) # Major, Minor, Fix version __version__ = (2, 0, 0) diff --git a/rediscluster/client.py b/rediscluster/client.py index e37178eb..27ced430 100644 --- a/rediscluster/client.py +++ b/rediscluster/client.py @@ -36,7 +36,7 @@ from redis import Redis from redis.client import list_or_args, parse_info from redis.connection import Token -from redis._compat import iteritems, basestring, b, izip, nativestr, long +from redis._compat import iteritems, basestring, izip, nativestr, long from redis.exceptions import RedisError, ResponseError, TimeoutError, DataError, ConnectionError, BusyLoadingError @@ -282,7 +282,7 @@ def pipeline(self, transaction=None, shard_hint=None): if transaction: raise RedisClusterException("transaction is deprecated in cluster mode") - return StrictClusterPipeline( + return ClusterPipeline( connection_pool=self.connection_pool, startup_nodes=self.connection_pool.nodes.startup_nodes, result_callbacks=self.result_callbacks, @@ -1245,4 +1245,4 @@ def _random_id(self, size=16, chars=string.ascii_uppercase + string.digits): return ''.join(random.choice(chars) for _ in range(size)) -from rediscluster.pipeline import StrictClusterPipeline +from rediscluster.pipeline import ClusterPipeline diff --git a/rediscluster/nodemanager.py b/rediscluster/nodemanager.py index ed936c94..6644e9ea 100644 --- a/rediscluster/nodemanager.py +++ b/rediscluster/nodemanager.py @@ -9,7 +9,7 @@ # 3rd party imports from redis import Redis -from redis._compat import b, unicode, bytes, long, basestring 
+from redis._compat import unicode, bytes, long, basestring from redis import ConnectionError, TimeoutError, ResponseError diff --git a/rediscluster/pipeline.py b/rediscluster/pipeline.py index 2e16dac7..29ec8793 100644 --- a/rediscluster/pipeline.py +++ b/rediscluster/pipeline.py @@ -221,7 +221,7 @@ def send_cluster_commands(self, stack, raise_on_error=True, allow_redirections=T for c in attempt: try: # send each command individually like we do in the main client. - c.result = super(StrictClusterPipeline, self).execute_command(*c.args, **c.options) + c.result = super(ClusterPipeline, self).execute_command(*c.args, **c.options) except RedisError as e: c.result = e @@ -296,66 +296,66 @@ def inner(*args, **kwargs): # Blocked pipeline commands -StrictClusterPipeline.bgrewriteaof = block_pipeline_command(Redis.bgrewriteaof) -StrictClusterPipeline.bgsave = block_pipeline_command(Redis.bgsave) -StrictClusterPipeline.bitop = block_pipeline_command(Redis.bitop) -StrictClusterPipeline.brpoplpush = block_pipeline_command(Redis.brpoplpush) -StrictClusterPipeline.client_getname = block_pipeline_command(Redis.client_getname) -StrictClusterPipeline.client_kill = block_pipeline_command(Redis.client_kill) -StrictClusterPipeline.client_list = block_pipeline_command(Redis.client_list) -StrictClusterPipeline.client_setname = block_pipeline_command(Redis.client_setname) -StrictClusterPipeline.config_get = block_pipeline_command(Redis.config_get) -StrictClusterPipeline.config_resetstat = block_pipeline_command(Redis.config_resetstat) -StrictClusterPipeline.config_rewrite = block_pipeline_command(Redis.config_rewrite) -StrictClusterPipeline.config_set = block_pipeline_command(Redis.config_set) -StrictClusterPipeline.dbsize = block_pipeline_command(Redis.dbsize) -StrictClusterPipeline.echo = block_pipeline_command(Redis.echo) -StrictClusterPipeline.evalsha = block_pipeline_command(Redis.evalsha) -StrictClusterPipeline.flushall = block_pipeline_command(Redis.flushall) -StrictClusterPipeline.flushdb = block_pipeline_command(Redis.flushdb) -StrictClusterPipeline.info = block_pipeline_command(Redis.info) -StrictClusterPipeline.keys = block_pipeline_command(Redis.keys) -StrictClusterPipeline.lastsave = block_pipeline_command(Redis.lastsave) -StrictClusterPipeline.mget = block_pipeline_command(Redis.mget) -StrictClusterPipeline.move = block_pipeline_command(Redis.move) -StrictClusterPipeline.mset = block_pipeline_command(Redis.mset) -StrictClusterPipeline.msetnx = block_pipeline_command(Redis.msetnx) -StrictClusterPipeline.pfmerge = block_pipeline_command(Redis.pfmerge) -StrictClusterPipeline.pfcount = block_pipeline_command(Redis.pfcount) -StrictClusterPipeline.ping = block_pipeline_command(Redis.ping) -StrictClusterPipeline.publish = block_pipeline_command(Redis.publish) -StrictClusterPipeline.randomkey = block_pipeline_command(Redis.randomkey) -StrictClusterPipeline.rename = block_pipeline_command(Redis.rename) -StrictClusterPipeline.renamenx = block_pipeline_command(Redis.renamenx) -StrictClusterPipeline.rpoplpush = block_pipeline_command(Redis.rpoplpush) -StrictClusterPipeline.save = block_pipeline_command(Redis.save) -StrictClusterPipeline.scan = block_pipeline_command(Redis.scan) -StrictClusterPipeline.script_exists = block_pipeline_command(Redis.script_exists) -StrictClusterPipeline.script_flush = block_pipeline_command(Redis.script_flush) -StrictClusterPipeline.script_kill = block_pipeline_command(Redis.script_kill) -StrictClusterPipeline.script_load = block_pipeline_command(Redis.script_load) 
-StrictClusterPipeline.sdiff = block_pipeline_command(Redis.sdiff) -StrictClusterPipeline.sdiffstore = block_pipeline_command(Redis.sdiffstore) -StrictClusterPipeline.sentinel_get_master_addr_by_name = block_pipeline_command(Redis.sentinel_get_master_addr_by_name) -StrictClusterPipeline.sentinel_master = block_pipeline_command(Redis.sentinel_master) -StrictClusterPipeline.sentinel_masters = block_pipeline_command(Redis.sentinel_masters) -StrictClusterPipeline.sentinel_monitor = block_pipeline_command(Redis.sentinel_monitor) -StrictClusterPipeline.sentinel_remove = block_pipeline_command(Redis.sentinel_remove) -StrictClusterPipeline.sentinel_sentinels = block_pipeline_command(Redis.sentinel_sentinels) -StrictClusterPipeline.sentinel_set = block_pipeline_command(Redis.sentinel_set) -StrictClusterPipeline.sentinel_slaves = block_pipeline_command(Redis.sentinel_slaves) -StrictClusterPipeline.shutdown = block_pipeline_command(Redis.shutdown) -StrictClusterPipeline.sinter = block_pipeline_command(Redis.sinter) -StrictClusterPipeline.sinterstore = block_pipeline_command(Redis.sinterstore) -StrictClusterPipeline.slaveof = block_pipeline_command(Redis.slaveof) -StrictClusterPipeline.slowlog_get = block_pipeline_command(Redis.slowlog_get) -StrictClusterPipeline.slowlog_len = block_pipeline_command(Redis.slowlog_len) -StrictClusterPipeline.slowlog_reset = block_pipeline_command(Redis.slowlog_reset) -StrictClusterPipeline.smove = block_pipeline_command(Redis.smove) -StrictClusterPipeline.sort = block_pipeline_command(Redis.sort) -StrictClusterPipeline.sunion = block_pipeline_command(Redis.sunion) -StrictClusterPipeline.sunionstore = block_pipeline_command(Redis.sunionstore) -StrictClusterPipeline.time = block_pipeline_command(Redis.time) +ClusterPipeline.bgrewriteaof = block_pipeline_command(Redis.bgrewriteaof) +ClusterPipeline.bgsave = block_pipeline_command(Redis.bgsave) +ClusterPipeline.bitop = block_pipeline_command(Redis.bitop) +ClusterPipeline.brpoplpush = block_pipeline_command(Redis.brpoplpush) +ClusterPipeline.client_getname = block_pipeline_command(Redis.client_getname) +ClusterPipeline.client_kill = block_pipeline_command(Redis.client_kill) +ClusterPipeline.client_list = block_pipeline_command(Redis.client_list) +ClusterPipeline.client_setname = block_pipeline_command(Redis.client_setname) +ClusterPipeline.config_get = block_pipeline_command(Redis.config_get) +ClusterPipeline.config_resetstat = block_pipeline_command(Redis.config_resetstat) +ClusterPipeline.config_rewrite = block_pipeline_command(Redis.config_rewrite) +ClusterPipeline.config_set = block_pipeline_command(Redis.config_set) +ClusterPipeline.dbsize = block_pipeline_command(Redis.dbsize) +ClusterPipeline.echo = block_pipeline_command(Redis.echo) +ClusterPipeline.evalsha = block_pipeline_command(Redis.evalsha) +ClusterPipeline.flushall = block_pipeline_command(Redis.flushall) +ClusterPipeline.flushdb = block_pipeline_command(Redis.flushdb) +ClusterPipeline.info = block_pipeline_command(Redis.info) +ClusterPipeline.keys = block_pipeline_command(Redis.keys) +ClusterPipeline.lastsave = block_pipeline_command(Redis.lastsave) +ClusterPipeline.mget = block_pipeline_command(Redis.mget) +ClusterPipeline.move = block_pipeline_command(Redis.move) +ClusterPipeline.mset = block_pipeline_command(Redis.mset) +ClusterPipeline.msetnx = block_pipeline_command(Redis.msetnx) +ClusterPipeline.pfmerge = block_pipeline_command(Redis.pfmerge) +ClusterPipeline.pfcount = block_pipeline_command(Redis.pfcount) +ClusterPipeline.ping = 
block_pipeline_command(Redis.ping) +ClusterPipeline.publish = block_pipeline_command(Redis.publish) +ClusterPipeline.randomkey = block_pipeline_command(Redis.randomkey) +ClusterPipeline.rename = block_pipeline_command(Redis.rename) +ClusterPipeline.renamenx = block_pipeline_command(Redis.renamenx) +ClusterPipeline.rpoplpush = block_pipeline_command(Redis.rpoplpush) +ClusterPipeline.save = block_pipeline_command(Redis.save) +ClusterPipeline.scan = block_pipeline_command(Redis.scan) +ClusterPipeline.script_exists = block_pipeline_command(Redis.script_exists) +ClusterPipeline.script_flush = block_pipeline_command(Redis.script_flush) +ClusterPipeline.script_kill = block_pipeline_command(Redis.script_kill) +ClusterPipeline.script_load = block_pipeline_command(Redis.script_load) +ClusterPipeline.sdiff = block_pipeline_command(Redis.sdiff) +ClusterPipeline.sdiffstore = block_pipeline_command(Redis.sdiffstore) +ClusterPipeline.sentinel_get_master_addr_by_name = block_pipeline_command(Redis.sentinel_get_master_addr_by_name) +ClusterPipeline.sentinel_master = block_pipeline_command(Redis.sentinel_master) +ClusterPipeline.sentinel_masters = block_pipeline_command(Redis.sentinel_masters) +ClusterPipeline.sentinel_monitor = block_pipeline_command(Redis.sentinel_monitor) +ClusterPipeline.sentinel_remove = block_pipeline_command(Redis.sentinel_remove) +ClusterPipeline.sentinel_sentinels = block_pipeline_command(Redis.sentinel_sentinels) +ClusterPipeline.sentinel_set = block_pipeline_command(Redis.sentinel_set) +ClusterPipeline.sentinel_slaves = block_pipeline_command(Redis.sentinel_slaves) +ClusterPipeline.shutdown = block_pipeline_command(Redis.shutdown) +ClusterPipeline.sinter = block_pipeline_command(Redis.sinter) +ClusterPipeline.sinterstore = block_pipeline_command(Redis.sinterstore) +ClusterPipeline.slaveof = block_pipeline_command(Redis.slaveof) +ClusterPipeline.slowlog_get = block_pipeline_command(Redis.slowlog_get) +ClusterPipeline.slowlog_len = block_pipeline_command(Redis.slowlog_len) +ClusterPipeline.slowlog_reset = block_pipeline_command(Redis.slowlog_reset) +ClusterPipeline.smove = block_pipeline_command(Redis.smove) +ClusterPipeline.sort = block_pipeline_command(Redis.sort) +ClusterPipeline.sunion = block_pipeline_command(Redis.sunion) +ClusterPipeline.sunionstore = block_pipeline_command(Redis.sunionstore) +ClusterPipeline.time = block_pipeline_command(Redis.time) class PipelineCommand(object): diff --git a/tests/test_cluster_obj.py b/tests/test_cluster_obj.py index 34bf0c5b..9728b0ec 100644 --- a/tests/test_cluster_obj.py +++ b/tests/test_cluster_obj.py @@ -16,7 +16,7 @@ # 3rd party imports from mock import patch, Mock, MagicMock -from redis._compat import b, unicode +from redis._compat import unicode from redis import Redis import pytest diff --git a/tests/test_commands.py b/tests/test_commands.py index fe251b14..9f195aff 100644 --- a/tests/test_commands.py +++ b/tests/test_commands.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- # python std lib -from __future__ import with_statement +from __future__ import unicode_literals import datetime import re import time @@ -12,7 +12,7 @@ # 3rd party imports import pytest -from redis._compat import unichr, u, b, ascii_letters, iteritems, iterkeys, itervalues, unicode +from redis._compat import unichr, ascii_letters, iteritems, iterkeys, itervalues, unicode from redis.client import parse_info from redis.exceptions import ResponseError, DataError, RedisError @@ -166,6 +166,11 @@ def test_decr(self, r): assert r.decr('a', amount=5) == -7 assert r['a'] 
== b'-7' + def test_decrby(self, r): + assert r.decrby('a', amount=2) == -2 + assert r.decrby('a', amount=3) == -5 + assert r['a'] == b'-5' + def test_delete(self, r): assert r.delete('a') == 0 r['a'] = 'foo' @@ -193,7 +198,10 @@ def test_dump_and_restore(self, r): def test_exists(self, r): assert not r.exists('a') r['a'] = 'foo' - assert r.exists('a') + assert r.exists('a') == 0 + r['b'] = 'bar' + assert r.exists('a') == 1 + assert r.exists('a', 'b') == 2 def test_exists_contains(self, r): assert 'a' not in r @@ -296,14 +304,15 @@ def test_incrbyfloat(self, r): def test_keys(self, r): keys = r.keys() assert keys == [] - keys_with_underscores = set(['test_a', 'test_b']) - keys = keys_with_underscores.union(set(['testc'])) + keys_with_underscores = {b'test_a', b'test_b'} + keys = keys_with_underscores.union({b'testc'}) for key in keys: r[key] = 1 assert set(r.keys(pattern='test_*')) == {b"{0}".format(k) for k in keys_with_underscores} assert set(r.keys(pattern='test*')) == {b"{0}".format(k) for k in keys} def test_mget(self, r): + assert r.mget([]) == [] assert r.mget(['a', 'b']) == [None, None] r['a'] = '1' r['b'] = '2' @@ -316,12 +325,6 @@ def test_mset(self, r): for k, v in iteritems(d): assert r[k] == v - def test_mset_kwargs(self, r): - d = {'a': b'1', 'b': b'2', 'c': b'3'} - assert r.mset(**d) - for k, v in iteritems(d): - assert r[k] == v - def test_msetnx(self, r): d = {'a': b'1', 'b': b'2', 'c': b'3'} assert r.msetnx(d) @@ -331,15 +334,6 @@ def test_msetnx(self, r): assert r[k] == v assert r.get('d') is None - def test_msetnx_kwargs(self, r): - d = {'a': b'1', 'b': b'2', 'c': b'3'} - assert r.msetnx(**d) - d2 = {'a': b'x', 'd': b'4'} - assert not r.msetnx(**d2) - for k, v in iteritems(d): - assert r[k] == v - assert r.get('d') is None - def test_pexpire(self, r): assert not r.pexpire('a', 60000) r['a'] = 'foo' @@ -562,7 +556,7 @@ def test_lpushx(self, r): assert r.lrange('a', 0, -1) == [] r.rpush('a', '1', '2', '3') assert r.lpushx('a', '4') == 4 - assert r.lrange('a', 0, -1) == [4'4', b'1', b'2', b'3'] + assert r.lrange('a', 0, -1) == [b'4', b'1', b'2', b'3'] def test_lrange(self, r): r.rpush('a', '1', '2', '3', '4', '5') @@ -571,11 +565,16 @@ def test_lrange(self, r): assert r.lrange('a', 0, -1) == [b'1', b'2', b'3', b'4', b'5'] def test_lrem(self, r): - r.rpush('a', '1', '1', '1', '1') - assert r.lrem('a', '1', 1) == 1 - assert r.lrange('a', 0, -1) == [b'1', b'1', b'1'] - assert r.lrem('a', 0, '1') == 3 - assert r.lrange('a', 0, -1) == [] + r.rpush('a', 'Z', 'b', 'Z', 'Z', 'c', 'Z', 'Z') + # remove the first 'Z' item + assert r.lrem('a', 1, 'Z') == 1 + assert r.lrange('a', 0, -1) == [b'b', b'Z', b'Z', b'c', b'Z', b'Z'] + # remove the last 2 'Z' items + assert r.lrem('a', -2, 'Z') == 2 + assert r.lrange('a', 0, -1) == [b'b', b'Z', b'Z', b'c'] + # remove all 'Z' items + assert r.lrem('a', 0, 'Z') == 2 + assert r.lrange('a', 0, -1) == [b'b', b'c'] def test_lset(self, r): r.rpush('a', '1', '2', '3') @@ -626,14 +625,14 @@ def test_scan(self, r): assert cursor == 0 keys += partial_keys - assert set(keys) == set([b'a', b'b', b'c']) + assert set(keys) == {b'a', b'b', b'c'} keys = [] for result in r.scan(match='a').values(): cursor, partial_keys = result assert cursor == 0 keys += partial_keys - assert set(keys) == set([b'a']) + assert set(keys) == {b'a'} def test_scan_iter(self, r): alphabet = 'abcdefghijklmnopqrstuvwABCDEFGHIJKLMNOPQRSTUVW' @@ -644,29 +643,29 @@ def test_scan_iter(self, r): assert set(keys) == set(expected_result) keys = list(r.scan_iter(match='a')) - assert set(keys) 
== set([b'a']) + assert set(keys) == {b'a'} r.set('Xa', 1) r.set('Xb', 2) r.set('Xc', 3) keys = list(r.scan_iter('X*', count=1000)) assert len(keys) == 3 - assert set(keys) == set([b'Xa', b'Xb', b'Xc']) + assert set(keys) == {b'Xa', b'Xb', b'Xc'} def test_sscan(self, r): r.sadd('a', 1, 2, 3) cursor, members = r.sscan('a') assert cursor == 0 - assert set(members) == set([b'1', b'2', b'3']) + assert set(members) == {b'a', b'2', b'3'} _, members = r.sscan('a', match=b'1') - assert set(members) == set([b'1']) + assert set(members) == {b'1'} def test_sscan_iter(self, r): r.sadd('a', 1, 2, 3) members = list(r.sscan_iter('a')) - assert set(members) == set([b'1', b'2', b'3']) + assert set(members) == {b'1', b'2', b'3'} members = list(r.sscan_iter('a', match=b'1')) - assert set(members) == set([b'1']) + assert set(members) == {b'1'} def test_hscan(self, r): r.hmset('a', {'a': 1, 'b': 2, 'c': 3}) @@ -687,16 +686,16 @@ def test_zscan(self, r): r.zadd('a', 1, 'a', 2, 'b', 3, 'c') cursor, pairs = r.zscan('a') assert cursor == 0 - assert set(pairs) == set([(b'a', 1), (b'b', 2), (b'c', 3)]) + assert set(pairs) == {(b'a', 1), (b'b, 2'), (b'c', 3)} _, pairs = r.zscan('a', match='a') - assert set(pairs) == set([(b'a', 1)]) + assert set(pairs == {(b'a', 1)}) def test_zscan_iter(self, r): r.zadd('a', 1, 'a', 2, 'b', 3, 'c') pairs = list(r.zscan_iter('a')) - assert set(pairs) == set([(b'a', 1), (b'b', 2), (b'c', 3)]) + assert set(pairs) == {(b'a', 1), (b'b', 2), (b'c', 3)} pairs = list(r.zscan_iter('a', match='a')) - assert set(pairs) == set([(b'a', 1)]) + assert set(pairs) == {(b'a', 1)} # SET COMMANDS def test_sadd(self, r): @@ -748,21 +747,21 @@ def test_sismember(self, r): def test_smembers(self, r): r.sadd('a', '1', '2', '3') - assert r.smembers('a') == set([b'1', b'2', b'3']) + assert r.smembers('a') == {b'1', b'2', b'3'} def test_smove(self, r): r.sadd('a{foo}', 'a1', 'a2') r.sadd('b{foo}', 'b1', 'b2') assert r.smove('a{foo}', 'b{foo}', 'a1') - assert r.smembers('a{foo}') == set([b'a2']) - assert r.smembers('b{foo}') == set([b'b1', b'b2', b'a1']) + assert r.smembers('a{foo}') == {b'a2'} + assert r.smembers('b{foo}') == {b'b1', b'b2', b'a1'} def test_spop(self, r): s = [b'1', b'2', b'3'] r.sadd('a', *s) value = r.spop('a') assert value in s - assert r.smembers('a') == set(s) - set([value]) + assert r.smembers('a') == set(s) - {value} def test_srandmember(self, r): s = [b'1', b'2', b'3'] @@ -796,14 +795,14 @@ def test_sunionstore(self, r): # SORTED SET COMMANDS def test_zadd(self, r): r.zadd('a', a1=1, a2=2, a3=3) - assert r.zrange('a', 0, -1) == [b'a1', b'a2'g, b'a3'] + assert r.zrange('a', 0, -1) == [b'a1', b'a2', b'a3'] def test_zcard(self, r): - r.zadd('a', a1=1, a2=2, a3=3) + r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 3}) assert r.zcard('a') == 3 def test_zcount(self, r): - r.zadd('a', a1=1, a2=2, a3=3) + r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 3}) assert r.zcount('a', '-inf', '+inf') == 3 assert r.zcount('a', 1, 2) == 2 assert r.zcount('a', 10, 20) == 0 @@ -816,7 +815,7 @@ def test_zincrby(self, r): assert r.zscore('a', 'a3') == 8.0 def test_zlexcount(self, r): - r.zadd('a', a=0, b=0, c=0, d=0, e=0, f=0, g=0) + r.zadd('a', {'a': 0, 'b': 0, 'c': 0, 'd': 0, 'e': 0, 'f': 0, 'g': 0}) assert r.zlexcount('a', '-', '+') == 7 assert r.zlexcount('a', '[b', '[f') == 5 @@ -829,33 +828,33 @@ def test_zinterstore_fail_cross_slot(self, r): assert re.search('ClusterCrossSlotError', str(excinfo)) def test_zinterstore_sum(self, r): - r.zadd('a{foo}', a1=1, a2=1, a3=1) - r.zadd('b{foo}', a1=2, a2=2, a3=2) - r.zadd('c{foo}', 
a1=6, a3=5, a4=4) + r.zadd('a', {'a1': 1, 'a2': 1, 'a3': 1}) + r.zadd('b', {'a1': 2, 'a2': 2, 'a3': 2}) + r.zadd('c', {'a1': 6, 'a3': 5, 'a4': 4}) assert r.zinterstore('d{foo}', ['a{foo}', 'b{foo}', 'c{foo}']) == 2 assert r.zrange('d{foo}', 0, -1, withscores=True) == \ [(b'a3', 8), (b'a1'), 9] def test_zinterstore_max(self, r): - r.zadd('a{foo}', a1=1, a2=1, a3=1) - r.zadd('b{foo}', a1=2, a2=2, a3=2) - r.zadd('c{foo}', a1=6, a3=5, a4=4) + r.zadd('a', {'a1': 1, 'a2': 1, 'a3': 1}) + r.zadd('b', {'a1': 2, 'a2': 2, 'a3': 2}) + r.zadd('c', {'a1': 6, 'a3': 5, 'a4': 4}) assert r.zinterstore('d{foo}', ['a{foo}', 'b{foo}', 'c{foo}'], aggregate='MAX') == 2 assert r.zrange('d{foo}', 0, -1, withscores=True) == \ [(b'a3', 5), (b'a1', 6)] def test_zinterstore_min(self, r): - r.zadd('a{foo}', a1=1, a2=2, a3=3) - r.zadd('b{foo}', a1=2, a2=3, a3=5) - r.zadd('c{foo}', a1=6, a3=5, a4=4) + r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 3}) + r.zadd('b', {'a1': 2, 'a2': 3, 'a3': 5}) + r.zadd('c', {'a1': 6, 'a3': 5, 'a4': 4}) assert r.zinterstore('d{foo}', ['a{foo}', 'b{foo}', 'c{foo}'], aggregate='MIN') == 2 assert r.zrange('d{foo}', 0, -1, withscores=True) == \ [(b'a1', 1), (b'a3', 3)] def test_zinterstore_with_weight(self, r): - r.zadd('a{foo}', a1=1, a2=1, a3=1) - r.zadd('b{foo}', a1=2, a2=2, a3=2) - r.zadd('c{foo}', a1=6, a3=5, a4=4) + r.zadd('a', {'a1': 1, 'a2': 1, 'a3': 1}) + r.zadd('b', {'a1': 2, 'a2': 2, 'a3': 2}) + r.zadd('c', {'a1': 6, 'a3': 5, 'a4': 4}) assert r.zinterstore('d{foo}', {'a{foo}': 1, 'b{foo}': 2, 'c{foo}': 3}) == 2 assert r.zrange('d{foo}', 0, -1, withscores=True) == \ [(b'a3', 20), (b'a1', 23)] @@ -902,25 +901,25 @@ def test_zrangebyscore(self, r): [(b'a2', 2), (b'a3', 3), (b'a4', 4)] def test_zrank(self, r): - r.zadd('a', a1=1, a2=2, a3=3, a4=4, a5=5) + r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 3, 'a4': 4, 'a5': 5}) assert r.zrank('a', 'a1') == 0 assert r.zrank('a', 'a2') == 1 assert r.zrank('a', 'a6') is None def test_zrem(self, r): - r.zadd('a', a1=1, a2=2, a3=3) + r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 3}) assert r.zrem('a', 'a2') == 1 assert r.zrange('a', 0, -1) == [b'a1', b'a3'] assert r.zrem('a', 'b') == 0 assert r.zrange('a', 0, -1) == [b'a1', b'a3'] def test_zrem_multiple_keys(self, r): - r.zadd('a', a1=1, a2=2, a3=3) + r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 3}) assert r.zrem('a', 'a1', 'a2') == 2 assert r.zrange('a', 0, 5) == [b'a3'] def test_zremrangebylex(self, r): - r.zadd('a', a=0, b=0, c=0, d=0, e=0, f=0, g=0) + r.zadd('a', {'a': 0, 'b': 0, 'c': 0, 'd': 0, 'e': 0, 'f': 0, 'g': 0}) assert r.zremrangebylex('a', '-', '[c') == 3 assert r.zrange('a', 0, -1) == [b'd', b'e', b'f', b'g'] assert r.zremrangebylex('a', '[f', '+') == 2 @@ -929,7 +928,7 @@ def test_zremrangebylex(self, r): assert r.zrange('a', 0, -1) == [b'd', b'e'] def test_zremrangebyrank(self, r): - r.zadd('a', a1=1, a2=2, a3=3, a4=4, a5=5) + r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 3, 'a4': 4, 'a5': 5}) assert r.zremrangebyrank('a', 1, 3) == 3 assert r.zrange('a', 0, 5) == [b'a1', b'a5'] @@ -974,13 +973,13 @@ def test_zrevrangebyscore(self, r): [(b'a4', 4), (b'a3', 3), (b'a2', 2)] def test_zrevrank(self, r): - r.zadd('a', a1=1, a2=2, a3=3, a4=4, a5=5) + r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 3, 'a4': 4, 'a5': 5}) assert r.zrevrank('a', 'a1') == 4 assert r.zrevrank('a', 'a2') == 3 assert r.zrevrank('a', 'a6') is None def test_zscore(self, r): - r.zadd('a', a1=1, a2=2, a3=3) + r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 3}) assert r.zscore('a', 'a1') == 1.0 assert r.zscore('a', 'a2') == 2.0 assert r.zscore('a', 'a4') is None @@ -994,40 +993,40 
@@ def test_zunionstore_fail_crossslot(self, r): assert re.search('ClusterCrossSlotError', str(excinfo)) def test_zunionstore_sum(self, r): - r.zadd('a{foo}', a1=1, a2=1, a3=1) - r.zadd('b{foo}', a1=2, a2=2, a3=2) - r.zadd('c{foo}', a1=6, a3=5, a4=4) + r.zadd('a', {'a1': 1, 'a2': 1, 'a3': 1}) + r.zadd('b', {'a1': 2, 'a2': 2, 'a3': 2}) + r.zadd('c', {'a1': 6, 'a3': 5, 'a4': 4}) assert r.zunionstore('d{foo}', ['a{foo}', 'b{foo}', 'c{foo}']) == 4 assert r.zrange('d{foo}', 0, -1, withscores=True) == \ [(b'a2', 3), (b'a4', 4), (b'a3', 8), (b'a1', 9)] def test_zunionstore_max(self, r): - r.zadd('a{foo}', a1=1, a2=1, a3=1) - r.zadd('b{foo}', a1=2, a2=2, a3=2) - r.zadd('c{foo}', a1=6, a3=5, a4=4) + r.zadd('a', {'a1': 1, 'a2': 1, 'a3': 1}) + r.zadd('b', {'a1': 2, 'a2': 2, 'a3': 2}) + r.zadd('c', {'a1': 6, 'a3': 5, 'a4': 4}) assert r.zunionstore('d{foo}', ['a{foo}', 'b{foo}', 'c{foo}'], aggregate='MAX') == 4 assert r.zrange('d{foo}', 0, -1, withscores=True) == \ [(b'a2', 2), (b'a4', 4), (b'a3', 5), (b'a1', 6)] def test_zunionstore_min(self, r): - r.zadd('a{foo}', a1=1, a2=2, a3=3) - r.zadd('b{foo}', a1=2, a2=2, a3=4) - r.zadd('c{foo}', a1=6, a3=5, a4=4) + r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 3}) + r.zadd('b', {'a1': 2, 'a2': 2, 'a3': 4}) + r.zadd('c', {'a1': 6, 'a3': 5, 'a4': 4}) assert r.zunionstore('d{foo}', ['a{foo}', 'b{foo}', 'c{foo}'], aggregate='MIN') == 4 assert r.zrange('d{foo}', 0, -1, withscores=True) == \ [(b'a1', 1), (b'a2', 2), (b'a3', 3), (b'a4', 4)] def test_zunionstore_with_weight(self, r): - r.zadd('a{foo}', a1=1, a2=1, a3=1) - r.zadd('b{foo}', a1=2, a2=2, a3=2) - r.zadd('c{foo}', a1=6, a3=5, a4=4) + r.zadd('a', {'a1': 1, 'a2': 1, 'a3': 1}) + r.zadd('b', {'a1': 2, 'a2': 2, 'a3': 2}) + r.zadd('c', {'a1': 6, 'a3': 5, 'a4': 4}) assert r.zunionstore('d{foo}', {'a{foo}': 1, 'b{foo}': 2, 'c{foo}': 3}) == 4 assert r.zrange('d{foo}', 0, -1, withscores=True) == \ [(b'a2', 5), (b'a4', 12), (b'a3', 20), (b'a1', 23)] # # HYPERLOGLOG TESTS def test_pfadd(self, r): - members = set([b'1', b'2', b'3']) + members = {b'1', b'2', b'3'} assert r.pfadd('a', *members) == 1 assert r.pfadd('a', *members) == 0 assert r.pfcount('a') == len(members) @@ -1035,18 +1034,18 @@ def test_pfadd(self, r): @pytest.mark.xfail(reason="New pfcount in 2.10.5 currently breaks in cluster") @skip_if_server_version_lt('2.8.9') def test_pfcount(self, r): - members = set([b'1', b'2', b'3']) + members = {b'1', b'2', b'3'} r.pfadd('a', *members) assert r.pfcount('a') == len(members) - members_b = set([b'2', b'3', b'4']) + members_b = {b'2', b'3', b'4'} r.pfadd('b', *members_b) assert r.pfcount('b') == len(members_b) assert r.pfcount('a', 'b') == len(members_b.union(members)) def test_pfmerge(self, r): - mema = set([b'1', b'2', b'3']) - memb = set([b'2', b'3', b'4']) - memc = set([b'5', b'6', b'7']) + mema = {b'1', b'2', b'3'} + memb = {b'2', b'3', b'4'} + memc = {b'5', b'6', b'7'} r.pfadd('a', *mema) r.pfadd('b', *memb) r.pfadd('c', *memc) @@ -1376,5 +1375,5 @@ def test_floating_point_encoding(self, r): precision. 
""" timestamp = 1349673917.939762 - r.zadd('a', timestamp, 'a1') + r.zadd('a', {'a1': timestamp}) assert r.zscore('a', 'a1') == timestamp diff --git a/tests/test_utils.py b/tests/test_utils.py index 7ee9278e..32c21422 100644 --- a/tests/test_utils.py +++ b/tests/test_utils.py @@ -19,7 +19,7 @@ # 3rd party imports import pytest -from redis._compat import unicode, b +from redis._compat import unicode def test_parse_cluster_slots(): From a0fbc7c1ef1f8450743e819b1f6ca9d3cab6d3cc Mon Sep 17 00:00:00 2001 From: Grokzen Date: Sun, 10 Feb 2019 11:00:53 +0100 Subject: [PATCH 073/263] Binary string fixes and updates of tests --- tests/test_commands.py | 79 +++++++++++++++++++++++++++++++++--------- tests/test_utils.py | 12 +++---- 2 files changed, 68 insertions(+), 23 deletions(-) diff --git a/tests/test_commands.py b/tests/test_commands.py index 9f195aff..136ea112 100644 --- a/tests/test_commands.py +++ b/tests/test_commands.py @@ -7,14 +7,14 @@ import time # rediscluster imports -from rediscluster.exceptions import RedisClusterException +from rediscluster.exceptions import RedisClusterException, ClusterCrossSlotError from tests.conftest import skip_if_server_version_lt, skip_if_redis_py_version_lt # 3rd party imports import pytest from redis._compat import unichr, ascii_letters, iteritems, iterkeys, itervalues, unicode from redis.client import parse_info -from redis.exceptions import ResponseError, DataError, RedisError +from redis.exceptions import ResponseError, DataError, RedisError, DataError pytestmark = skip_if_server_version_lt('2.9.0') @@ -30,7 +30,7 @@ class TestRedisCommands(object): @skip_if_server_version_lt('2.9.9') def test_zrevrangebylex(self, r): - r.zadd('a', a=0, b=0, c=0, d=0, e=0, f=0, g=0) + r.zadd('a', {'a': 0, 'b': 0, 'c': 0, 'd': 0, 'e': 0, 'f': 0, 'g': 0}) assert r.zrevrangebylex('a', '[c', '-') == [b'c', b'b', b'a'] assert r.zrevrangebylex('a', '(c', '-') == [b'b', b'a'] assert r.zrevrangebylex('a', '(g', '[aaa') == \ @@ -196,12 +196,27 @@ def test_dump_and_restore(self, r): assert r['a'] == b'foo' def test_exists(self, r): - assert not r.exists('a') - r['a'] = 'foo' assert r.exists('a') == 0 + r['a'] = 'foo' r['b'] = 'bar' assert r.exists('a') == 1 - assert r.exists('a', 'b') == 2 + assert r.exists('b') == 1 + # This no longer works in cluster. 
See test_exists_fail_not_same_slots() for failing test + # assert r.exists('a', 'b') == 2 + + def test_exists_fail_not_same_slots(self, r): + """ + This test is conditioned on that the 2 keys will be in different slots + """ + key_a = 'a' + key_b = 'b' + assert r.cluster_keyslot(key_a) != r.cluster_keyslot(key_b) + r[key_a] = 'foo' + r[key_b] = 'bar' + assert r.exists('a') == 1 + assert r.exists('b') == 1 + with pytest.raises(ClusterCrossSlotError): + r.exists('a', 'b') def test_exists_contains(self, r): assert 'a' not in r @@ -239,12 +254,12 @@ def test_get_and_set(self, r): assert r.get('a') is None byte_string = b'value' integer = 5 - unicode_string = unichr(3456) + u'abcd' + unichr(3421) + unicode_string = unichr(3456) + 'abcd' + unichr(3421) assert r.set('byte_string', byte_string) assert r.set('integer', 5) assert r.set('unicode_string', unicode_string) assert r.get('byte_string') == byte_string - assert r.get('integer') == bstr(integer) + assert r.get('integer') == str(integer).encode() assert r.get('unicode_string').decode('utf-8') == unicode_string def test_getitem_and_setitem(self, r): @@ -481,7 +496,7 @@ def test_type(self, r): r.sadd('a', '1') assert r.type('a') == b'set' del r['a'] - r.zadd('a', **{'1': 1}) + r.zadd('a', {'1': 1}) assert r.type('a') == b'zset' # LIST COMMANDS @@ -615,6 +630,7 @@ def test_rpushx(self, r): assert r.lrange('a', 0, -1) == [b'1', b'2', b'3', b'4'] # SCAN COMMANDS + @pytest.mark.skip(reason="WIP") def test_scan(self, r): r.set('a', 1) r.set('b', 2) @@ -634,6 +650,7 @@ def test_scan(self, r): keys += partial_keys assert set(keys) == {b'a'} + @pytest.mark.skip(reason="WIP") def test_scan_iter(self, r): alphabet = 'abcdefghijklmnopqrstuvwABCDEFGHIJKLMNOPQRSTUVW' for i, c in enumerate(alphabet): @@ -652,6 +669,7 @@ def test_scan_iter(self, r): assert len(keys) == 3 assert set(keys) == {b'Xa', b'Xb', b'Xc'} + @pytest.mark.skip(reason="WIP") def test_sscan(self, r): r.sadd('a', 1, 2, 3) cursor, members = r.sscan('a') @@ -660,6 +678,7 @@ def test_sscan(self, r): _, members = r.sscan('a', match=b'1') assert set(members) == {b'1'} + @pytest.mark.skip(reason="WIP") def test_sscan_iter(self, r): r.sadd('a', 1, 2, 3) members = list(r.sscan_iter('a')) @@ -667,6 +686,7 @@ def test_sscan_iter(self, r): members = list(r.sscan_iter('a', match=b'1')) assert set(members) == {b'1'} + @pytest.mark.skip(reason="WIP") def test_hscan(self, r): r.hmset('a', {'a': 1, 'b': 2, 'c': 3}) cursor, dic = r.hscan('a') @@ -675,6 +695,7 @@ def test_hscan(self, r): _, dic = r.hscan('a', match='a') assert dic == {b'a': b'1'} + @pytest.mark.skip(reason="WIP") def test_hscan_iter(self, r): r.hmset('a', {'a': 1, 'b': 2, 'c': 3}) dic = dict(r.hscan_iter('a')) @@ -682,6 +703,7 @@ def test_hscan_iter(self, r): dic = dict(r.hscan_iter('a', match='a')) assert dic == {b'a': b'1'} + @pytest.mark.skip(reason="WIP") def test_zscan(self, r): r.zadd('a', 1, 'a', 2, 'b', 3, 'c') cursor, pairs = r.zscan('a') @@ -690,6 +712,7 @@ def test_zscan(self, r): _, pairs = r.zscan('a', match='a') assert set(pairs == {(b'a', 1)}) + @pytest.mark.skip(reason="WIP") def test_zscan_iter(self, r): r.zadd('a', 1, 'a', 2, 'b', 3, 'c') pairs = list(r.zscan_iter('a')) @@ -794,8 +817,22 @@ def test_sunionstore(self, r): # SORTED SET COMMANDS def test_zadd(self, r): - r.zadd('a', a1=1, a2=2, a3=3) - assert r.zrange('a', 0, -1) == [b'a1', b'a2', b'a3'] + mapping = {'a1': 1.0, 'a2': 2.0, 'a3': 3.0} + r.zadd('a', mapping) + assert r.zrange('a', 0, -1, withscores=True) == \ + [(b'a1', 1.0), (b'a2', 2.0), (b'a3', 3.0)] + + # 
error cases + with pytest.raises(DataError): + r.zadd('a', {}) + + # cannot use both nx and xx options + with pytest.raises(DataError): + r.zadd('a', mapping, nx=True, xx=True) + + # cannot use the incr options with more than one value + with pytest.raises(DataError): + r.zadd('a', mapping, incr=True) def test_zcard(self, r): r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 3}) @@ -808,9 +845,9 @@ def test_zcount(self, r): assert r.zcount('a', 10, 20) == 0 def test_zincrby(self, r): - r.zadd('a', a1=1, a2=2, a3=3) - assert r.zincrby('a', 'a2') == 3.0 - assert r.zincrby('a', 'a3', amount=5) == 8.0 + r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 3}) + assert r.zincrby('a', 1, 'a2') == 3.0 + assert r.zincrby('a', 5, 'a3') == 8.0 assert r.zscore('a', 'a2') == 3.0 assert r.zscore('a', 'a3') == 8.0 @@ -820,9 +857,9 @@ def test_zlexcount(self, r): assert r.zlexcount('a', '[b', '[f') == 5 def test_zinterstore_fail_cross_slot(self, r): - r.zadd('a', a1=1, a2=1, a3=1) - r.zadd('b', a1=2, a2=2, a3=2) - r.zadd('c', a1=6, a3=5, a4=4) + r.zadd('a', {'a1': 1, 'a2': 1, 'a3': 1}) + r.zadd('a', {'a1': 2, 'a2': 2, 'a3': 2}) + r.zadd('a', {'a1': 6, 'a2': 5, 'a3': 4}) with pytest.raises(ResponseError) as excinfo: r.zinterstore('d', ['a', 'b', 'c']) assert re.search('ClusterCrossSlotError', str(excinfo)) @@ -835,6 +872,14 @@ def test_zinterstore_sum(self, r): assert r.zrange('d{foo}', 0, -1, withscores=True) == \ [(b'a3', 8), (b'a1'), 9] + def test_zinterstore_sum(self, r): + r.zadd('a', {'a1': 1, 'a2': 1, 'a3': 1}) + r.zadd('b', {'a1': 2, 'a2': 2, 'a3': 2}) + r.zadd('c', {'a1': 6, 'a3': 5, 'a4': 4}) + assert r.zinterstore('d', ['a', 'b', 'c']) == 2 + assert r.zrange('d', 0, -1, withscores=True) == \ + [(b'a3', 8), (b'a1', 9)] + def test_zinterstore_max(self, r): r.zadd('a', {'a1': 1, 'a2': 1, 'a3': 1}) r.zadd('b', {'a1': 2, 'a2': 2, 'a3': 2}) diff --git a/tests/test_utils.py b/tests/test_utils.py index 32c21422..31dccf18 100644 --- a/tests/test_utils.py +++ b/tests/test_utils.py @@ -71,16 +71,16 @@ def test_parse_cluster_slots(): parse_cluster_slots(extended_mock_response) mock_binary_response = [ - [0, 5460, [b('172.17.0.2'), 7000], [b('172.17.0.2'), 7003]], - [5461, 10922, [b('172.17.0.2'), 7001], [b('172.17.0.2'), 7004]], - [10923, 16383, [b('172.17.0.2'), 7002], [b('172.17.0.2'), 7005]] + [0, 5460, [b'172.17.0.2', 7000], [b'172.17.0.2', 7003]], + [5461, 10922, [b'172.17.0.2', 7001], [b'172.17.0.2', 7004]], + [10923, 16383, [b'172.17.0.2', 7002], [b'172.17.0.2', 7005]] ] parse_cluster_slots(mock_binary_response) extended_mock_binary_response = [ - [0, 5460, [b('172.17.0.2'), 7000, b('ffd36d8d7cb10d813f81f9662a835f6beea72677')], [b('172.17.0.2'), 7003, b('5c15b69186017ddc25ebfac81e74694fc0c1a160')]], - [5461, 10922, [b('172.17.0.2'), 7001, b('069cda388c7c41c62abe892d9e0a2d55fbf5ffd5')], [b('172.17.0.2'), 7004, b('dc152a08b4cf1f2a0baf775fb86ad0938cb907dc')]], - [10923, 16383, [b('172.17.0.2'), 7002, b('3588b4cf9fc72d57bb262a024747797ead0cf7ea')], [b('172.17.0.2'), 7005, b('a72c02c7d85f4ec3145ab2c411eefc0812aa96b0')]] + [0, 5460, [b'172.17.0.2', 7000, b'ffd36d8d7cb10d813f81f9662a835f6beea72677'], [b'172.17.0.2', 7003, b'5c15b69186017ddc25ebfac81e74694fc0c1a160']], + [5461, 10922, [b'172.17.0.2', 7001, b'069cda388c7c41c62abe892d9e0a2d55fbf5ffd5'], [b'172.17.0.2', 7004, b'dc152a08b4cf1f2a0baf775fb86ad0938cb907dc']], + [10923, 16383, [b'172.17.0.2', 7002, b'3588b4cf9fc72d57bb262a024747797ead0cf7ea'], [b'172.17.0.2', 7005, b'a72c02c7d85f4ec3145ab2c411eefc0812aa96b0']] ] extended_mock_parsed = { From 
aa195ac29ae657fd57c26ff5ca08f701c4301e08 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Sun, 10 Feb 2019 11:05:27 +0100 Subject: [PATCH 074/263] Update .travis.yml --- .travis.yml | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 1b8ad1d1..376c0df0 100644 --- a/.travis.yml +++ b/.travis.yml @@ -16,6 +16,7 @@ install: - "if [[ $REDIS_VERSION == '3.2' ]]; then REDIS_VERSION=3.2 make redis-install; fi" - "if [[ $REDIS_VERSION == '4.0' ]]; then REDIS_VERSION=4.0 make redis-install; fi" - "if [[ $REDIS_VERSION == '5.0' ]]; then REDIS_VERSION=5.0 make redis-install; fi" + - "if [[ $TEST_PYCODESTYLE == '1' ]]; then pip install pycodestyle; fi" - pip install -r dev-requirements.txt - pip install -e . - "if [[ $HIREDIS == '1' ]]; then pip install hiredis; fi" @@ -32,7 +33,7 @@ env: # Redis 5.0 & HIREDIS - HIREDIS=0 REDIS_VERSION=5.0 - HIREDIS=1 REDIS_VERSION=5.0 -script: +script: - make start - coverage erase - coverage run --source rediscluster -p -m py.test @@ -41,6 +42,21 @@ script: after_success: - coverage combine - coveralls + - "if [[ $TEST_PYCODESTYLE == '1' ]]; then pycodestyle --repeat --show-source --exclude=.venv,.tox,dist,docs,build,*.egg,redis_install .; fi" matrix: allow_failures: - python: "nightly" + - python: 2.7 + env: TEST_PYCODESTYLE=1 + - python: 3.6 + env: TEST_PYCODESTYLE=1 + # python 3.7 has to be specified manually in the matrix + # https://github.com/travis-ci/travis-ci/issues/9815 + - python: 3.7 + dist: xenial + sudo: true + env: TEST_HIREDIS=0 + - python: 3.7 + dist: xenial + sudo: true + env: TEST_HIREDIS=1 From 47bacfd656f407c80e3f8a9794ad43e7df3cef01 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Wed, 13 Feb 2019 09:52:54 +0100 Subject: [PATCH 075/263] Additional updates and compat fixes --- rediscluster/client.py | 21 +++++++++++---------- tests/test_pipeline.py | 8 ++++---- 2 files changed, 15 insertions(+), 14 deletions(-) diff --git a/rediscluster/client.py b/rediscluster/client.py index 27ced430..034e0d10 100644 --- a/rediscluster/client.py +++ b/rediscluster/client.py @@ -1,4 +1,5 @@ # -*- coding: utf-8 -*- +from __future__ import unicode_literals # python std lib import datetime @@ -15,7 +16,7 @@ ) from .exceptions import ( RedisClusterException, AskError, MovedError, ClusterDownError, - ClusterError, TryAgainError, + ClusterError, TryAgainError ) from .pubsub import ClusterPubSub from .utils import ( @@ -942,13 +943,13 @@ def sort(self, name, start=None, num=None, by=None, get=None, desc=False, alpha= (start is not None and num is None): raise RedisError("RedisError: ``start`` and ``num`` must both be specified") try: - data_type = b(self.type(name)) + data_type = b"{0}".format(self.type(name)) - if data_type == b("none"): + if data_type == b"none": return [] - elif data_type == b("set"): + elif data_type == b"set": data = list(self.smembers(name))[:] - elif data_type == b("list"): + elif data_type == b"list": data = self.lrange(name, 0, -1) else: raise RedisClusterException("Unable to sort data type : {0}".format(data_type)) @@ -969,10 +970,10 @@ def sort(self, name, start=None, num=None, by=None, get=None, desc=False, alpha= data = self._retrive_data_from_sort(data, get) if store is not None: - if data_type == b("set"): + if data_type == b"set": self.delete(store) self.rpush(store, *data) - elif data_type == b("list"): + elif data_type == b"list": self.delete(store) self.rpush(store, *data) else: @@ -981,7 +982,7 @@ def sort(self, name, start=None, num=None, by=None, get=None, desc=False, 
alpha= return len(data) if groups: - if not get or isinstance(get, basestring) or len(get) < 2: + if not get or isinstance(get, (bytes, basestring)) or len(get) < 2: raise DataError('when using "groups" the "get" argument ' 'must be specified and contain at least ' 'two keys') @@ -997,7 +998,7 @@ def _retrive_data_from_sort(self, data, get): Used by sort() """ if get is not None: - if isinstance(get, basestring): + if isinstance(get, (bytes, basestring)): get = [get] new_data = [] for k in data: @@ -1025,7 +1026,7 @@ def _get_single_item(self, k, g): single_item = k else: single_item = None - return b(single_item) + return b"{0}".format(single_item) def _strtod_key_func(self, arg): """ diff --git a/tests/test_pipeline.py b/tests/test_pipeline.py index b49fadeb..5f01b919 100644 --- a/tests/test_pipeline.py +++ b/tests/test_pipeline.py @@ -150,11 +150,11 @@ def test_transaction_with_empty_error_command(self, r): """ for error_switch in (True, False): with r.pipeline() as pipe: - pipe.set('a', 1).mget([]).set('c', 3) + pipe.set('a', 1).get("").set('c', 3) result = pipe.execute(raise_on_error=error_switch) assert result[0] - assert result[1] == [] + assert result[1] == None assert result[2] def test_pipeline_with_empty_error_command(self, r): @@ -164,11 +164,11 @@ def test_pipeline_with_empty_error_command(self, r): """ for error_switch in (True, False): with r.pipeline(transaction=False) as pipe: - pipe.set('a', 1).mget([]).set('c', 3) + pipe.set('a', 1).get("").set('c', 3) result = pipe.execute(raise_on_error=error_switch) assert result[0] - assert result[1] == [] + assert result[1] == None assert result[2] def test_parse_error_raised(self, r): From 727f6dd204b33347bf56a01d3a98dd882d5d5b24 Mon Sep 17 00:00:00 2001 From: Vinicius Souza Date: Thu, 10 Jan 2019 18:10:04 -0200 Subject: [PATCH 076/263] enable replace option when performs rename operation --- rediscluster/client.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/rediscluster/client.py b/rediscluster/client.py index 034e0d10..a2d4742a 100644 --- a/rediscluster/client.py +++ b/rediscluster/client.py @@ -765,7 +765,7 @@ def msetnx(self, *args, **kwargs): return self.mset(**kwargs) - def rename(self, src, dst): + def rename(self, src, dst, replace=False): """ Rename key ``src`` to ``dst`` @@ -805,7 +805,7 @@ def rename(self, src, dst): ttl = 0 self.delete(dst) - self.restore(dst, ttl, data) + self.restore(dst, ttl, data, replace) self.delete(src) return True From 68077ff8c95c589872cc2f10998ea29ac8ed4f1e Mon Sep 17 00:00:00 2001 From: Grokzen Date: Tue, 5 Mar 2019 10:33:36 +0100 Subject: [PATCH 077/263] Use the new Encoder class --- rediscluster/nodemanager.py | 26 +++++++------------------- 1 file changed, 7 insertions(+), 19 deletions(-) diff --git a/rediscluster/nodemanager.py b/rediscluster/nodemanager.py index 6644e9ea..2c69e38d 100644 --- a/rediscluster/nodemanager.py +++ b/rediscluster/nodemanager.py @@ -10,6 +10,7 @@ # 3rd party imports from redis import Redis from redis._compat import unicode, bytes, long, basestring +from redis.connection import Encoder from redis import ConnectionError, TimeoutError, ResponseError @@ -37,34 +38,21 @@ def __init__(self, startup_nodes=None, reinitialize_steps=None, skip_full_covera self.reinitialize_steps = reinitialize_steps or 25 self._skip_full_coverage_check = skip_full_coverage_check self.nodemanager_follow_cluster = nodemanager_follow_cluster + self.encoder = Encoder( + connection_kwargs.get('encoding', 'utf-8'), + connection_kwargs.get('encoding_errors', 'strict'), 
+ connection_kwargs.get('decode_responses', False) + ) if not self.startup_nodes: raise RedisClusterException("No startup nodes provided") - def encode(self, value): - """ - Return a bytestring representation of the value. - This method is copied from Redis' connection.py:Connection.encode - """ - if isinstance(value, bytes): - return value - elif isinstance(value, (int, long)): - value = b(str(value)) - elif isinstance(value, float): - value = b(repr(value)) - elif not isinstance(value, basestring): - value = unicode(value) - if isinstance(value, unicode): - # The encoding should be configurable as in connection.py:Connection.encode - value = value.encode('utf-8') - return value - def keyslot(self, key): """ Calculate keyslot for a given key. Tuned for compatibility with python 2.7.x """ - k = self.encode(key) + k = self.encoder.encode(key) start = k.find(b"{") From 35fe0d42ed1cb7e1e90946ba02071c60d0484904 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Sun, 24 Mar 2019 22:15:44 +0100 Subject: [PATCH 078/263] Fix more byte method conversion calls --- tests/test_cluster_obj.py | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/tests/test_cluster_obj.py b/tests/test_cluster_obj.py index 9728b0ec..dd96c252 100644 --- a/tests/test_cluster_obj.py +++ b/tests/test_cluster_obj.py @@ -392,8 +392,8 @@ def assert_moved_redirection_on_slave(sr, connection_pool_cls, cluster_obj): master_value = {'host': '127.0.0.1', 'name': '127.0.0.1:7000', 'port': 7000, 'server_type': 'master'} with patch.object(ClusterConnectionPool, 'get_master_node_by_slot') as return_master_mock: return_master_mock.return_value = master_value - assert cluster_obj.get('foo16706') == b('foo') - assert return_slave_mock.call_count == 1 + assert cluster_obj.get('foo16706') == b'foo' + assert return_master_mock.call_count == 1 def test_moved_redirection_on_slave_with_default_client(sr): @@ -444,10 +444,19 @@ def test_access_correct_slave_with_readonly_mode_client(sr): 'get_master_node_by_slot', return_value=master_value) as return_master_mock: readonly_client = RedisCluster(host="127.0.0.1", port=7000, readonly_mode=True) +<<<<<<< HEAD assert b('foo') == readonly_client.get('foo16706') readonly_client = RedisCluster.from_url(url="redis://127.0.0.1:7000/0", readonly_mode=True) assert b('foo') == readonly_client.get('foo16706') +======= + assert b'foo' == readonly_client.get('foo16706') + assert return_master_mock.call_count == 0 + + readonly_client = RedisCluster.from_url(url="redis://127.0.0.1:7000/0", readonly_mode=True) + assert b'foo' == readonly_client.get('foo16706') + assert return_master_mock.call_count == 0 +>>>>>>> Fix more byte method conversion calls def test_refresh_using_specific_nodes(r): From 3d14ef5229ffc3d98ac7fdb984327dde789badcb Mon Sep 17 00:00:00 2001 From: Grokzen Date: Sun, 24 Mar 2019 22:19:05 +0100 Subject: [PATCH 079/263] Fix test test_large_responses --- tests/test_commands.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/test_commands.py b/tests/test_commands.py index 136ea112..3795fd72 100644 --- a/tests/test_commands.py +++ b/tests/test_commands.py @@ -201,7 +201,7 @@ def test_exists(self, r): r['b'] = 'bar' assert r.exists('a') == 1 assert r.exists('b') == 1 - # This no longer works in cluster. See test_exists_fail_not_same_slots() for failing test + # This no longer works in cluster. 
See test_exists_fail_not_same_slots() for failing test # assert r.exists('a', 'b') == 2 def test_exists_fail_not_same_slots(self, r): @@ -891,7 +891,7 @@ def test_zinterstore_max(self, r): def test_zinterstore_min(self, r): r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 3}) r.zadd('b', {'a1': 2, 'a2': 3, 'a3': 5}) - r.zadd('c', {'a1': 6, 'a3': 5, 'a4': 4}) + r.zadd('c', {'a1': 6, 'a3': 5, 'a4': 4}) assert r.zinterstore('d{foo}', ['a{foo}', 'b{foo}', 'c{foo}'], aggregate='MIN') == 2 assert r.zrange('d{foo}', 0, -1, withscores=True) == \ [(b'a1', 1), (b'a3', 3)] @@ -1412,7 +1412,7 @@ def test_large_responses(self, r): # load up 100K of data into a key data = ''.join([ascii_letters] * (100000 // len(ascii_letters))) r['a'] = data - assert r['a'] == bdata + assert r['a'] == data.encode() def test_floating_point_encoding(self, r): """ From 64fd6d565929c401e9cf5936c31e08cb13dc348b Mon Sep 17 00:00:00 2001 From: Grokzen Date: Sun, 24 Mar 2019 22:30:10 +0100 Subject: [PATCH 080/263] Remove test class that is no longer used. --- tests/test_commands.py | 41 ----------------------------------------- 1 file changed, 41 deletions(-) diff --git a/tests/test_commands.py b/tests/test_commands.py index 3795fd72..3135b47f 100644 --- a/tests/test_commands.py +++ b/tests/test_commands.py @@ -1297,47 +1297,6 @@ def test_sort_all_options(self, r): [b'vodka', b'milk', b'gin', b'apple juice'] -class TestStrictCommands(object): - - def test_strict_zadd(self, sr): - sr.zadd('a', 1.0, 'a1', 2.0, 'a2', a3=3.0) - assert sr.zrange('a', 0, -1, withscores=True) == \ - [(b'a1', 1.0), (b'a2', 2.0), (b'a3', 3.0)] - - def test_strict_lrem(self, sr): - sr.rpush('a', 'a1', 'a2', 'a3', 'a1') - sr.lrem('a', 0, 'a1') - assert sr.lrange('a', 0, -1) == [b'a2', b'a3'] - - def test_strict_setex(self, sr): - assert sr.setex('a', 60, '1') - assert sr['a'] == b'1' - assert 0 < sr.ttl('a') <= 60 - - def test_strict_ttl(self, sr): - assert not sr.expire('a', 10) - sr['a'] = '1' - assert sr.expire('a', 10) - assert 0 < sr.ttl('a') <= 10 - assert sr.persist('a') - assert sr.ttl('a') == -1 - - def test_strict_pttl(self, sr): - assert not sr.pexpire('a', 10000) - sr['a'] = '1' - assert sr.pexpire('a', 10000) - assert 0 < sr.pttl('a') <= 10000 - assert sr.persist('a') - assert sr.pttl('a') == -1 - - def test_eval(self, sr): - res = sr.eval("return {KEYS[1],KEYS[2],ARGV[1],ARGV[2]}", 2, "A{foo}", "B{foo}", "first", "second") - assert res[0] == b'A{foo}' - assert res[1] == b'B{foo}' - assert res[2] == b'first' - assert res[3] == b'second' - - class TestBinarySave(object): def test_binary_get_set(self, r): assert r.set(' foo bar ', '123') From 022e4011d865b75851aaf43147931507cf9f3831 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Sun, 24 Mar 2019 22:34:51 +0100 Subject: [PATCH 081/263] Fix test test_get_node_by_slot_random --- tests/test_cluster_connection_pool.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/tests/test_cluster_connection_pool.py b/tests/test_cluster_connection_pool.py index f403e684..d7af752b 100644 --- a/tests/test_cluster_connection_pool.py +++ b/tests/test_cluster_connection_pool.py @@ -242,6 +242,12 @@ def test_get_node_by_slot_random(self): """ pool = self.get_pool(connection_kwargs={}) + # Set the values that we expect to be set for the NodeManager. 
Represents 2 nodes for 1 specific slot + pool.nodes.slots[0] = [ + {'host': '172.20.0.2', 'port': 7000, 'name': '172.20.0.2:7000', 'server_type': 'master'}, + {'host': '172.20.0.2', 'port': 7003, 'name': '172.20.0.2:7003', 'server_type': 'slave'}, + ] + expected_ports = {7000, 7003} actual_ports = set() for _ in range(0, 100): From b7454aec52d719c43b5bcd8e5bec59a9b8c5454d Mon Sep 17 00:00:00 2001 From: Grokzen Date: Sun, 24 Mar 2019 22:35:58 +0100 Subject: [PATCH 082/263] Fix test test_boolean_parsing --- tests/test_cluster_connection_pool.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_cluster_connection_pool.py b/tests/test_cluster_connection_pool.py index d7af752b..c586d1c1 100644 --- a/tests/test_cluster_connection_pool.py +++ b/tests/test_cluster_connection_pool.py @@ -17,7 +17,7 @@ import pytest import redis from mock import patch, Mock -from redis.connection import ssl_available +from redis.connection import ssl_available, to_bool from redis._compat import unicode From 22b3ad7ee5f6789dd4461290a6a2eebb8fdfd0c5 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Sun, 24 Mar 2019 23:04:13 +0100 Subject: [PATCH 083/263] Make a mocked RedisCluster object return a more stable and determenistic RedisCluster object by forcing it to think the nodes setup is always pointing to 127.0.0.1 to avoid issues with different ip addresses and node configurations returned from redis. Your real redis server that you test against still must be accessable on 127.0.0.1 for tests to work out. --- tests/test_cluster_obj.py | 37 ++++++++++++++++++++++++++++++++++++- 1 file changed, 36 insertions(+), 1 deletion(-) diff --git a/tests/test_cluster_obj.py b/tests/test_cluster_obj.py index dd96c252..1e442be4 100644 --- a/tests/test_cluster_obj.py +++ b/tests/test_cluster_obj.py @@ -31,6 +31,41 @@ class DummyConnection(object): pass +def get_mocked_redis_client(*args, **kwargs): + """ + Return a stable RedisCluster object that have deterministic + nodes and slots setup to remove the problem of different IP addresses + on different installations and machines. + """ + with patch.object(Redis, 'execute_command') as execute_command_mock: + def execute_command(self, *args, **kwargs): + if args[0] == 'slots': + mock_cluster_slots = [ + [ + 0, 5460, + ['127.0.0.1', 7000, 'node_0'], + ['127.0.0.1', 7004, 'node_4'] + ], + [ + 5461, 10922, + ['127.0.0.1', 7001, 'node_1'], + ['127.0.0.1', 7005, 'node_5'] + ], + [ + 10923, 16383, + ['127.0.0.1', 7002, 'node_2'], + ['127.0.0.1', 7003, '2node_3'] + ] + ] + return mock_cluster_slots + elif args[0] == 'cluster-require-full-coverage': + return {'cluster-require-full-coverage': 'yes'} + + execute_command_mock.side_effect = execute_command + + return RedisCluster(*args, **kwargs) + + def test_representation(r): assert re.search('^RedisCluster<[0-9\.\:\,].+>$', str(r)) @@ -296,7 +331,7 @@ def test_pipeline_ask_redirection(): Important thing to verify is that it tries to talk to the second node. """ - r = RedisCluster(host="127.0.0.1", port=7000) + r = get_mocked_redis_client(host="127.0.0.1", port=7000) with patch.object(RedisCluster, 'parse_response') as parse_response: From d5486f3be915f97d3986bf5d852d368c285b586f Mon Sep 17 00:00:00 2001 From: Grokzen Date: Sun, 24 Mar 2019 23:27:21 +0100 Subject: [PATCH 084/263] Update sorted set tests to the latest code. 
Some still fails but most of them now pass --- tests/test_commands.py | 141 ++++++++++++++++++++++++++++++++--------- 1 file changed, 110 insertions(+), 31 deletions(-) diff --git a/tests/test_commands.py b/tests/test_commands.py index 3135b47f..38a53a16 100644 --- a/tests/test_commands.py +++ b/tests/test_commands.py @@ -15,6 +15,7 @@ from redis._compat import unichr, ascii_letters, iteritems, iterkeys, itervalues, unicode from redis.client import parse_info from redis.exceptions import ResponseError, DataError, RedisError, DataError +from redis import exceptions pytestmark = skip_if_server_version_lt('2.9.0') @@ -823,17 +824,45 @@ def test_zadd(self, r): [(b'a1', 1.0), (b'a2', 2.0), (b'a3', 3.0)] # error cases - with pytest.raises(DataError): + with pytest.raises(exceptions.DataError): r.zadd('a', {}) # cannot use both nx and xx options - with pytest.raises(DataError): + with pytest.raises(exceptions.DataError): r.zadd('a', mapping, nx=True, xx=True) # cannot use the incr options with more than one value - with pytest.raises(DataError): + with pytest.raises(exceptions.DataError): r.zadd('a', mapping, incr=True) + def test_zadd_nx(self, r): + assert r.zadd('a', {'a1': 1}) == 1 + assert r.zadd('a', {'a1': 99, 'a2': 2}, nx=True) == 1 + assert r.zrange('a', 0, -1, withscores=True) == \ + [(b'a1', 1.0), (b'a2', 2.0)] + + def test_zadd_xx(self, r): + assert r.zadd('a', {'a1': 1}) == 1 + assert r.zadd('a', {'a1': 99, 'a2': 2}, xx=True) == 0 + assert r.zrange('a', 0, -1, withscores=True) == \ + [(b'a1', 99.0)] + + def test_zadd_ch(self, r): + assert r.zadd('a', {'a1': 1}) == 1 + assert r.zadd('a', {'a1': 99, 'a2': 2}, ch=True) == 2 + assert r.zrange('a', 0, -1, withscores=True) == \ + [(b'a2', 2.0), (b'a1', 99.0)] + + def test_zadd_incr(self, r): + assert r.zadd('a', {'a1': 1}) == 1 + assert r.zadd('a', {'a1': 4.5}, incr=True) == 5.5 + + def test_zadd_incr_with_xx(self, r): + # this asks zadd to incr 'a1' only if it exists, but it clearly + # doesn't. 
Redis returns a null value in this case and so should + # redis-py + assert r.zadd('a', {'a1': 1}, xx=True, incr=True) is None + def test_zcard(self, r): r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 3}) assert r.zcard('a') == 3 @@ -842,6 +871,8 @@ def test_zcount(self, r): r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 3}) assert r.zcount('a', '-inf', '+inf') == 3 assert r.zcount('a', 1, 2) == 2 + assert r.zcount('a', '(' + str(1), 2) == 1 + assert r.zcount('a', 1, '(' + str(2)) == 1 assert r.zcount('a', 10, 20) == 0 def test_zincrby(self, r): @@ -851,6 +882,7 @@ def test_zincrby(self, r): assert r.zscore('a', 'a2') == 3.0 assert r.zscore('a', 'a3') == 8.0 + @skip_if_server_version_lt('2.8.9') def test_zlexcount(self, r): r.zadd('a', {'a': 0, 'b': 0, 'c': 0, 'd': 0, 'e': 0, 'f': 0, 'g': 0}) assert r.zlexcount('a', '-', '+') == 7 @@ -864,14 +896,6 @@ def test_zinterstore_fail_cross_slot(self, r): r.zinterstore('d', ['a', 'b', 'c']) assert re.search('ClusterCrossSlotError', str(excinfo)) - def test_zinterstore_sum(self, r): - r.zadd('a', {'a1': 1, 'a2': 1, 'a3': 1}) - r.zadd('b', {'a1': 2, 'a2': 2, 'a3': 2}) - r.zadd('c', {'a1': 6, 'a3': 5, 'a4': 4}) - assert r.zinterstore('d{foo}', ['a{foo}', 'b{foo}', 'c{foo}']) == 2 - assert r.zrange('d{foo}', 0, -1, withscores=True) == \ - [(b'a3', 8), (b'a1'), 9] - def test_zinterstore_sum(self, r): r.zadd('a', {'a1': 1, 'a2': 1, 'a3': 1}) r.zadd('b', {'a1': 2, 'a2': 2, 'a3': 2}) @@ -884,28 +908,70 @@ def test_zinterstore_max(self, r): r.zadd('a', {'a1': 1, 'a2': 1, 'a3': 1}) r.zadd('b', {'a1': 2, 'a2': 2, 'a3': 2}) r.zadd('c', {'a1': 6, 'a3': 5, 'a4': 4}) - assert r.zinterstore('d{foo}', ['a{foo}', 'b{foo}', 'c{foo}'], aggregate='MAX') == 2 - assert r.zrange('d{foo}', 0, -1, withscores=True) == \ + assert r.zinterstore('d', ['a', 'b', 'c'], aggregate='MAX') == 2 + assert r.zrange('d', 0, -1, withscores=True) == \ [(b'a3', 5), (b'a1', 6)] def test_zinterstore_min(self, r): r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 3}) r.zadd('b', {'a1': 2, 'a2': 3, 'a3': 5}) r.zadd('c', {'a1': 6, 'a3': 5, 'a4': 4}) - assert r.zinterstore('d{foo}', ['a{foo}', 'b{foo}', 'c{foo}'], aggregate='MIN') == 2 - assert r.zrange('d{foo}', 0, -1, withscores=True) == \ + assert r.zinterstore('d', ['a', 'b', 'c'], aggregate='MIN') == 2 + assert r.zrange('d', 0, -1, withscores=True) == \ [(b'a1', 1), (b'a3', 3)] def test_zinterstore_with_weight(self, r): r.zadd('a', {'a1': 1, 'a2': 1, 'a3': 1}) r.zadd('b', {'a1': 2, 'a2': 2, 'a3': 2}) r.zadd('c', {'a1': 6, 'a3': 5, 'a4': 4}) - assert r.zinterstore('d{foo}', {'a{foo}': 1, 'b{foo}': 2, 'c{foo}': 3}) == 2 - assert r.zrange('d{foo}', 0, -1, withscores=True) == \ + assert r.zinterstore('d', {'a': 1, 'b': 2, 'c': 3}) == 2 + assert r.zrange('d', 0, -1, withscores=True) == \ [(b'a3', 20), (b'a1', 23)] + @skip_if_server_version_lt('4.9.0') + def test_zpopmax(self, r): + r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 3}) + assert r.zpopmax('a') == [(b'a3', 3)] + + # with count + assert r.zpopmax('a', count=2) == \ + [(b'a2', 2), (b'a1', 1)] + + @skip_if_server_version_lt('4.9.0') + def test_zpopmin(self, r): + r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 3}) + assert r.zpopmin('a') == [(b'a1', 1)] + + # with count + assert r.zpopmin('a', count=2) == \ + [(b'a2', 2), (b'a3', 3)] + + @skip_if_server_version_lt('4.9.0') + def test_bzpopmax(self, r): + r.zadd('a', {'a1': 1, 'a2': 2}) + r.zadd('b', {'b1': 10, 'b2': 20}) + assert r.bzpopmax(['b', 'a'], timeout=1) == (b'b', b'b2', 20) + assert r.bzpopmax(['b', 'a'], timeout=1) == (b'b', b'b1', 10) + assert r.bzpopmax(['b', 'a'], timeout=1) 
== (b'a', b'a2', 2) + assert r.bzpopmax(['b', 'a'], timeout=1) == (b'a', b'a1', 1) + assert r.bzpopmax(['b', 'a'], timeout=1) is None + r.zadd('c', {'c1': 100}) + assert r.bzpopmax('c', timeout=1) == (b'c', b'c1', 100) + + @skip_if_server_version_lt('4.9.0') + def test_bzpopmin(self, r): + r.zadd('a', {'a1': 1, 'a2': 2}) + r.zadd('b', {'b1': 10, 'b2': 20}) + assert r.bzpopmin(['b', 'a'], timeout=1) == (b'b', b'b1', 10) + assert r.bzpopmin(['b', 'a'], timeout=1) == (b'b', b'b2', 20) + assert r.bzpopmin(['b', 'a'], timeout=1) == (b'a', b'a1', 1) + assert r.bzpopmin(['b', 'a'], timeout=1) == (b'a', b'a2', 2) + assert r.bzpopmin(['b', 'a'], timeout=1) is None + r.zadd('c', {'c1': 100}) + assert r.bzpopmin('c', timeout=1) == (b'c', b'c1', 100) + def test_zrange(self, r): - r.zadd('a', a1=1, a2=2, a3=3) + r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 3}) assert r.zrange('a', 0, 1) == [b'a1', b'a2'] assert r.zrange('a', 1, 2) == [b'a2', b'a3'] @@ -919,8 +985,9 @@ def test_zrange(self, r): assert r.zrange('a', 0, 1, withscores=True, score_cast_func=int) == \ [(b'a1', 1), (b'a2', 2)] + @skip_if_server_version_lt('2.8.9') def test_zrangebylex(self, r): - r.zadd('a', a=0, b=0, c=0, d=0, e=0, f=0, g=0) + r.zadd('a', {'a': 0, 'b': 0, 'c': 0, 'd': 0, 'e': 0, 'f': 0, 'g': 0}) assert r.zrangebylex('a', '-', '[c') == [b'a', b'b', b'c'] assert r.zrangebylex('a', '-', '(c') == [b'a', b'b'] assert r.zrangebylex('a', '[aaa', '(g') == \ @@ -928,8 +995,19 @@ def test_zrangebylex(self, r): assert r.zrangebylex('a', '[f', '+') == [b'f', b'g'] assert r.zrangebylex('a', '-', '+', start=3, num=2) == [b'd', b'e'] + @skip_if_server_version_lt('2.9.9') + def test_zrevrangebylex(self, r): + r.zadd('a', {'a': 0, 'b': 0, 'c': 0, 'd': 0, 'e': 0, 'f': 0, 'g': 0}) + assert r.zrevrangebylex('a', '[c', '-') == [b'c', b'b', b'a'] + assert r.zrevrangebylex('a', '(c', '-') == [b'b', b'a'] + assert r.zrevrangebylex('a', '(g', '[aaa') == \ + [b'f', b'e', b'd', b'c', b'b'] + assert r.zrevrangebylex('a', '+', '[f') == [b'g', b'f'] + assert r.zrevrangebylex('a', '+', '-', start=3, num=2) == \ + [b'd', b'c'] + def test_zrangebyscore(self, r): - r.zadd('a', a1=1, a2=2, a3=3, a4=4, a5=5) + r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 3, 'a4': 4, 'a5': 5}) assert r.zrangebyscore('a', 2, 4) == [b'a2', b'a3', b'a4'] # slicing with start/num @@ -963,6 +1041,7 @@ def test_zrem_multiple_keys(self, r): assert r.zrem('a', 'a1', 'a2') == 2 assert r.zrange('a', 0, 5) == [b'a3'] + @skip_if_server_version_lt('2.8.9') def test_zremrangebylex(self, r): r.zadd('a', {'a': 0, 'b': 0, 'c': 0, 'd': 0, 'e': 0, 'f': 0, 'g': 0}) assert r.zremrangebylex('a', '-', '[c') == 3 @@ -978,14 +1057,14 @@ def test_zremrangebyrank(self, r): assert r.zrange('a', 0, 5) == [b'a1', b'a5'] def test_zremrangebyscore(self, r): - r.zadd('a', a1=1, a2=2, a3=3, a4=4, a5=5) + r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 3, 'a4': 4, 'a5': 5}) assert r.zremrangebyscore('a', 2, 4) == 3 assert r.zrange('a', 0, -1) == [b'a1', b'a5'] assert r.zremrangebyscore('a', 2, 4) == 0 assert r.zrange('a', 0, -1) == [b'a1', b'a5'] def test_zrevrange(self, r): - r.zadd('a', a1=1, a2=2, a3=3) + r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 3}) assert r.zrevrange('a', 0, 1) == [b'a3', b'a2'] assert r.zrevrange('a', 1, 2) == [b'a2', b'a1'] @@ -1001,7 +1080,7 @@ def test_zrevrange(self, r): [(b'a3', 3.0), (b'a2', 2.0)] def test_zrevrangebyscore(self, r): - r.zadd('a', a1=1, a2=2, a3=3, a4=4, a5=5) + r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 3, 'a4': 4, 'a5': 5}) assert r.zrevrangebyscore('a', 4, 2) == [b'a4', b'a3', b'a2'] # slicing with 
start/num @@ -1041,32 +1120,32 @@ def test_zunionstore_sum(self, r): r.zadd('a', {'a1': 1, 'a2': 1, 'a3': 1}) r.zadd('b', {'a1': 2, 'a2': 2, 'a3': 2}) r.zadd('c', {'a1': 6, 'a3': 5, 'a4': 4}) - assert r.zunionstore('d{foo}', ['a{foo}', 'b{foo}', 'c{foo}']) == 4 - assert r.zrange('d{foo}', 0, -1, withscores=True) == \ + assert r.zunionstore('d', ['a', 'b', 'c']) == 4 + assert r.zrange('d', 0, -1, withscores=True) == \ [(b'a2', 3), (b'a4', 4), (b'a3', 8), (b'a1', 9)] def test_zunionstore_max(self, r): r.zadd('a', {'a1': 1, 'a2': 1, 'a3': 1}) r.zadd('b', {'a1': 2, 'a2': 2, 'a3': 2}) r.zadd('c', {'a1': 6, 'a3': 5, 'a4': 4}) - assert r.zunionstore('d{foo}', ['a{foo}', 'b{foo}', 'c{foo}'], aggregate='MAX') == 4 - assert r.zrange('d{foo}', 0, -1, withscores=True) == \ + assert r.zunionstore('d', ['a', 'b', 'c'], aggregate='MAX') == 4 + assert r.zrange('d', 0, -1, withscores=True) == \ [(b'a2', 2), (b'a4', 4), (b'a3', 5), (b'a1', 6)] def test_zunionstore_min(self, r): r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 3}) r.zadd('b', {'a1': 2, 'a2': 2, 'a3': 4}) r.zadd('c', {'a1': 6, 'a3': 5, 'a4': 4}) - assert r.zunionstore('d{foo}', ['a{foo}', 'b{foo}', 'c{foo}'], aggregate='MIN') == 4 - assert r.zrange('d{foo}', 0, -1, withscores=True) == \ + assert r.zunionstore('d', ['a', 'b', 'c'], aggregate='MIN') == 4 + assert r.zrange('d', 0, -1, withscores=True) == \ [(b'a1', 1), (b'a2', 2), (b'a3', 3), (b'a4', 4)] def test_zunionstore_with_weight(self, r): r.zadd('a', {'a1': 1, 'a2': 1, 'a3': 1}) r.zadd('b', {'a1': 2, 'a2': 2, 'a3': 2}) r.zadd('c', {'a1': 6, 'a3': 5, 'a4': 4}) - assert r.zunionstore('d{foo}', {'a{foo}': 1, 'b{foo}': 2, 'c{foo}': 3}) == 4 - assert r.zrange('d{foo}', 0, -1, withscores=True) == \ + assert r.zunionstore('d', {'a': 1, 'b': 2, 'c': 3}) == 4 + assert r.zrange('d', 0, -1, withscores=True) == \ [(b'a2', 5), (b'a4', 12), (b'a3', 20), (b'a1', 23)] # # HYPERLOGLOG TESTS From 5834df81e31a6bf7306c9ce73378186d0303cf96 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Sun, 24 Mar 2019 23:28:07 +0100 Subject: [PATCH 085/263] Fix test_keys --- tests/test_commands.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/tests/test_commands.py b/tests/test_commands.py index 38a53a16..5c9aac6f 100644 --- a/tests/test_commands.py +++ b/tests/test_commands.py @@ -318,14 +318,13 @@ def test_incrbyfloat(self, r): assert float(r['a']) == float(2.1) def test_keys(self, r): - keys = r.keys() - assert keys == [] + assert r.keys() == [] keys_with_underscores = {b'test_a', b'test_b'} keys = keys_with_underscores.union({b'testc'}) for key in keys: r[key] = 1 - assert set(r.keys(pattern='test_*')) == {b"{0}".format(k) for k in keys_with_underscores} - assert set(r.keys(pattern='test*')) == {b"{0}".format(k) for k in keys} + assert set(r.keys(pattern='test_*')) == keys_with_underscores + assert set(r.keys(pattern='test*')) == keys def test_mget(self, r): assert r.mget([]) == [] From 2c1797658bdab311e8e99ceb6b713331c1f60c48 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Sun, 24 Mar 2019 23:52:41 +0100 Subject: [PATCH 086/263] Make all sort commands into own class to make em easier to run. Add test skip markmarks for 7 sort tests that fails based on how they are currently designed to work cross slot. If a good set of keys is found to work on the redis server, they will be reintegrated. 
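A possible way to bring the skipped sort tests back without real cross-slot support is to force every key that SORT touches (the list itself plus any BY/GET/STORE pattern) into one hash slot via a shared hash tag. A minimal sketch, assuming a cluster node on 127.0.0.1:7000; the '{foo}'-tagged key names are hypothetical and chosen only so that all keys map to the same slot:

    from rediscluster import RedisCluster

    rc = RedisCluster(startup_nodes=[{"host": "127.0.0.1", "port": "7000"}],
                      decode_responses=True)

    # The shared "{foo}" hash tag forces every key into the same slot,
    # so SORT with a GET pattern is served by a single node.
    rc.rpush('a{foo}', '2', '3', '1')
    rc['user:1{foo}'] = 'u1'
    rc['user:2{foo}'] = 'u2'
    rc['user:3{foo}'] = 'u3'

    assert rc.sort('a{foo}', get='user:*{foo}') == ['u1', 'u2', 'u3']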
--- tests/test_commands.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/tests/test_commands.py b/tests/test_commands.py index 5c9aac6f..3d041c9d 100644 --- a/tests/test_commands.py +++ b/tests/test_commands.py @@ -1256,6 +1256,8 @@ def test_hvals(self, r): remote_vals = r.hvals('a') assert sorted(local_vals) == sorted(remote_vals) + +class TestRedisCommandsSort(object): # SORT def test_sort_basic(self, r): r.rpush('a', '3', '2', '1', '4') @@ -1265,6 +1267,7 @@ def test_sort_limited(self, r): r.rpush('a', '3', '2', '1', '4') assert r.sort('a', start=1, num=2) == [b'2', b'3'] + @pytest.mark.skip(reason="Sort works if done against keys in same slot") def test_sort_by(self, r): r['score:1'] = 8 r['score:2'] = 3 @@ -1272,6 +1275,7 @@ def test_sort_by(self, r): r.rpush('a', '3', '2', '1') assert r.sort('a', by='score:*') == [b'2', b'3', b'1'] + @pytest.mark.skip(reason="Sort works if done against keys in same slot") def test_sort_get(self, r): r['user:1'] = 'u1' r['user:2'] = 'u2' @@ -1279,6 +1283,7 @@ def test_sort_get(self, r): r.rpush('a', '2', '3', '1') assert r.sort('a', get='user:*') == [b'u1', b'u2', b'u3'] + @pytest.mark.skip(reason="Sort works if done against keys in same slot") def test_sort_get_multi(self, r): r['user:1'] = 'u1' r['user:2'] = 'u2' @@ -1287,6 +1292,7 @@ def test_sort_get_multi(self, r): assert r.sort('a', get=('user:*', '#')) == \ [b'u1', b'1', b'u2', b'2', b'u3', b'3'] + @pytest.mark.skip(reason="Sort works if done against keys in same slot") def test_sort_get_groups_two(self, r): r['user:1'] = 'u1' r['user:2'] = 'u2' @@ -1319,6 +1325,7 @@ def test_sort_groups_no_get(self, r): with pytest.raises(DataError): r.sort('a', groups=True) + @pytest.mark.skip(reason="Sort works if done against keys in same slot") def test_sort_groups_three_gets(self, r): r['user:1'] = 'u1' r['user:2'] = 'u2' @@ -1342,11 +1349,13 @@ def test_sort_alpha(self, r): assert r.sort('a', alpha=True) == \ [b'a', b'b', b'c', b'd', b'e'] + @pytest.mark.skip(reason="Sort works if done against keys in same slot") def test_sort_store(self, r): r.rpush('a', '2', '3', '1') assert r.sort('a', store='sorted_values') == 3 assert r.lrange('sorted_values', 0, -1) == [b'1', b'2', b'3'] + @pytest.mark.skip(reason="Sort works if done against keys in same slot") def test_sort_all_options(self, r): r['user:1:username'] = 'zeus' r['user:2:username'] = 'titan' From f8f58fe326a30d35de3b98a5ce9ecd8ff1551fbf Mon Sep 17 00:00:00 2001 From: Grokzen Date: Sun, 24 Mar 2019 23:54:04 +0100 Subject: [PATCH 087/263] Rip out the old sort client side version and revert back this method to now only work on the same-slot logic. This might be breaking change when released. --- rediscluster/client.py | 146 ----------------------------------------- 1 file changed, 146 deletions(-) diff --git a/rediscluster/client.py b/rediscluster/client.py index a2d4742a..dba80661 100644 --- a/rediscluster/client.py +++ b/rediscluster/client.py @@ -911,152 +911,6 @@ def rpoplpush(self, src, dst): return None - def sort(self, name, start=None, num=None, by=None, get=None, desc=False, alpha=False, store=None, groups=None): - """Sort and return the list, set or sorted set at ``name``. - - :start: and :num: - allow for paging through the sorted data - - :by: - allows using an external key to weight and sort the items. - Use an "*" to indicate where in the key the item value is located - - :get: - allows for returning items from external keys rather than the - sorted data itself. 
Use an "*" to indicate where int he key - the item value is located - - :desc: - allows for reversing the sort - - :alpha: - allows for sorting lexicographically rather than numerically - - :store: - allows for storing the result of the sort into the key `store` - - ClusterImpl: - A full implementation of the server side sort mechanics because many of the - options work on multiple keys that can exist on multiple servers. - """ - if (start is None and num is not None) or \ - (start is not None and num is None): - raise RedisError("RedisError: ``start`` and ``num`` must both be specified") - try: - data_type = b"{0}".format(self.type(name)) - - if data_type == b"none": - return [] - elif data_type == b"set": - data = list(self.smembers(name))[:] - elif data_type == b"list": - data = self.lrange(name, 0, -1) - else: - raise RedisClusterException("Unable to sort data type : {0}".format(data_type)) - if by is not None: - # _sort_using_by_arg mutates data so we don't - # need need a return value. - self._sort_using_by_arg(data, by, alpha) - elif not alpha: - data.sort(key=self._strtod_key_func) - else: - data.sort() - if desc: - data = data[::-1] - if not (start is None and num is None): - data = data[start:start + num] - - if get: - data = self._retrive_data_from_sort(data, get) - - if store is not None: - if data_type == b"set": - self.delete(store) - self.rpush(store, *data) - elif data_type == b"list": - self.delete(store) - self.rpush(store, *data) - else: - raise RedisClusterException("Unable to store sorted data for data type : {0}".format(data_type)) - - return len(data) - - if groups: - if not get or isinstance(get, (bytes, basestring)) or len(get) < 2: - raise DataError('when using "groups" the "get" argument ' - 'must be specified and contain at least ' - 'two keys') - n = len(get) - return list(izip(*[data[i::n] for i in range(n)])) - else: - return data - except KeyError: - return [] - - def _retrive_data_from_sort(self, data, get): - """ - Used by sort() - """ - if get is not None: - if isinstance(get, (bytes, basestring)): - get = [get] - new_data = [] - for k in data: - for g in get: - single_item = self._get_single_item(k, g) - new_data.append(single_item) - data = new_data - return data - - def _get_single_item(self, k, g): - """ - Used by sort() - """ - if getattr(k, "decode", None): - k = k.decode("utf-8") - - if '*' in g: - g = g.replace('*', k) - if '->' in g: - key, hash_key = g.split('->') - single_item = self.get(key, {}).get(hash_key) - else: - single_item = self.get(g) - elif '#' in g: - single_item = k - else: - single_item = None - return b"{0}".format(single_item) - - def _strtod_key_func(self, arg): - """ - Used by sort() - """ - return float(arg) - - def _sort_using_by_arg(self, data, by, alpha): - """ - Used by sort() - """ - if getattr(by, "decode", None): - by = by.decode("utf-8") - - def _by_key(arg): - if getattr(arg, "decode", None): - arg = arg.decode("utf-8") - - key = by.replace('*', arg) - if '->' in by: - key, hash_key = key.split('->') - v = self.hget(key, hash_key) - if alpha: - return v - else: - return float(v) - else: - return self.get(key) - data.sort(key=_by_key) - ### # Set commands From 7907f038ae5fd8382af19d2bf16e0e0d056b1e97 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Mon, 25 Mar 2019 00:07:46 +0100 Subject: [PATCH 088/263] Add skip marks for most broken set tests that requires cross slot implementation to work. 
Also fixed one broken test after zadd signature update --- tests/test_commands.py | 19 ++++++++++++++++--- 1 file changed, 16 insertions(+), 3 deletions(-) diff --git a/tests/test_commands.py b/tests/test_commands.py index 3d041c9d..91377ed7 100644 --- a/tests/test_commands.py +++ b/tests/test_commands.py @@ -856,6 +856,7 @@ def test_zadd_incr(self, r): assert r.zadd('a', {'a1': 1}) == 1 assert r.zadd('a', {'a1': 4.5}, incr=True) == 5.5 + @pytest.mark.skip(reason="Test works if done against keys in same slot") def test_zadd_incr_with_xx(self, r): # this asks zadd to incr 'a1' only if it exists, but it clearly # doesn't. Redis returns a null value in this case and so should @@ -895,6 +896,7 @@ def test_zinterstore_fail_cross_slot(self, r): r.zinterstore('d', ['a', 'b', 'c']) assert re.search('ClusterCrossSlotError', str(excinfo)) + @pytest.mark.skip(reason="Test works if done against keys in same slot") def test_zinterstore_sum(self, r): r.zadd('a', {'a1': 1, 'a2': 1, 'a3': 1}) r.zadd('b', {'a1': 2, 'a2': 2, 'a3': 2}) @@ -903,6 +905,7 @@ def test_zinterstore_sum(self, r): assert r.zrange('d', 0, -1, withscores=True) == \ [(b'a3', 8), (b'a1', 9)] + @pytest.mark.skip(reason="Test works if done against keys in same slot") def test_zinterstore_max(self, r): r.zadd('a', {'a1': 1, 'a2': 1, 'a3': 1}) r.zadd('b', {'a1': 2, 'a2': 2, 'a3': 2}) @@ -911,6 +914,7 @@ def test_zinterstore_max(self, r): assert r.zrange('d', 0, -1, withscores=True) == \ [(b'a3', 5), (b'a1', 6)] + @pytest.mark.skip(reason="Test works if done against keys in same slot") def test_zinterstore_min(self, r): r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 3}) r.zadd('b', {'a1': 2, 'a2': 3, 'a3': 5}) @@ -919,6 +923,7 @@ def test_zinterstore_min(self, r): assert r.zrange('d', 0, -1, withscores=True) == \ [(b'a1', 1), (b'a3', 3)] + @pytest.mark.skip(reason="Test works if done against keys in same slot") def test_zinterstore_with_weight(self, r): r.zadd('a', {'a1': 1, 'a2': 1, 'a3': 1}) r.zadd('b', {'a1': 2, 'a2': 2, 'a3': 2}) @@ -928,6 +933,7 @@ def test_zinterstore_with_weight(self, r): [(b'a3', 20), (b'a1', 23)] @skip_if_server_version_lt('4.9.0') + @pytest.mark.skip(reason="Test works if done against keys in same slot") def test_zpopmax(self, r): r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 3}) assert r.zpopmax('a') == [(b'a3', 3)] @@ -937,6 +943,7 @@ def test_zpopmax(self, r): [(b'a2', 2), (b'a1', 1)] @skip_if_server_version_lt('4.9.0') + @pytest.mark.skip(reason="Test works if done against keys in same slot") def test_zpopmin(self, r): r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 3}) assert r.zpopmin('a') == [(b'a1', 1)] @@ -946,6 +953,7 @@ def test_zpopmin(self, r): [(b'a2', 2), (b'a3', 3)] @skip_if_server_version_lt('4.9.0') + @pytest.mark.skip(reason="Test works if done against keys in same slot") def test_bzpopmax(self, r): r.zadd('a', {'a1': 1, 'a2': 2}) r.zadd('b', {'b1': 10, 'b2': 20}) @@ -958,6 +966,7 @@ def test_bzpopmax(self, r): assert r.bzpopmax('c', timeout=1) == (b'c', b'c1', 100) @skip_if_server_version_lt('4.9.0') + @pytest.mark.skip(reason="Test works if done against keys in same slot") def test_bzpopmin(self, r): r.zadd('a', {'a1': 1, 'a2': 2}) r.zadd('b', {'b1': 10, 'b2': 20}) @@ -1108,13 +1117,14 @@ def test_zscore(self, r): assert r.zscore('a', 'a4') is None def test_zunionstore_fail_crossslot(self, r): - r.zadd('a', a1=1, a2=1, a3=1) - r.zadd('b', a1=2, a2=2, a3=2) - r.zadd('c', a1=6, a3=5, a4=4) + r.zadd('a', {'a1': 1, 'a2': 1, 'a3': 1}) + r.zadd('b', {'a1': 2, 'a2': 2, 'a3': 2}) + r.zadd('c', {'a1': 6, 'a2': 5, 'a3': 4}) with 
pytest.raises(ResponseError) as excinfo: r.zunionstore('d', ['a', 'b', 'c']) assert re.search('ClusterCrossSlotError', str(excinfo)) + @pytest.mark.skip(reason="Test works if done against keys in same slot") def test_zunionstore_sum(self, r): r.zadd('a', {'a1': 1, 'a2': 1, 'a3': 1}) r.zadd('b', {'a1': 2, 'a2': 2, 'a3': 2}) @@ -1123,6 +1133,7 @@ def test_zunionstore_sum(self, r): assert r.zrange('d', 0, -1, withscores=True) == \ [(b'a2', 3), (b'a4', 4), (b'a3', 8), (b'a1', 9)] + @pytest.mark.skip(reason="Test works if done against keys in same slot") def test_zunionstore_max(self, r): r.zadd('a', {'a1': 1, 'a2': 1, 'a3': 1}) r.zadd('b', {'a1': 2, 'a2': 2, 'a3': 2}) @@ -1131,6 +1142,7 @@ def test_zunionstore_max(self, r): assert r.zrange('d', 0, -1, withscores=True) == \ [(b'a2', 2), (b'a4', 4), (b'a3', 5), (b'a1', 6)] + @pytest.mark.skip(reason="Test works if done against keys in same slot") def test_zunionstore_min(self, r): r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 3}) r.zadd('b', {'a1': 2, 'a2': 2, 'a3': 4}) @@ -1139,6 +1151,7 @@ def test_zunionstore_min(self, r): assert r.zrange('d', 0, -1, withscores=True) == \ [(b'a1', 1), (b'a2', 2), (b'a3', 3), (b'a4', 4)] + @pytest.mark.skip(reason="Test works if done against keys in same slot") def test_zunionstore_with_weight(self, r): r.zadd('a', {'a1': 1, 'a2': 1, 'a3': 1}) r.zadd('b', {'a1': 2, 'a2': 2, 'a3': 2}) From c671e072e4fbbf9314fbae21362d0045bb804a6c Mon Sep 17 00:00:00 2001 From: Grokzen Date: Mon, 25 Mar 2019 00:07:59 +0100 Subject: [PATCH 089/263] Fix binary string typo --- tests/test_pipeline.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_pipeline.py b/tests/test_pipeline.py index 5f01b919..963c7ca3 100644 --- a/tests/test_pipeline.py +++ b/tests/test_pipeline.py @@ -119,7 +119,7 @@ def test_exec_error_in_response(self, r): # we can't lpush to a key that's a string value, so this should # be a ResponseError exception assert isinstance(result[2], ResponseError) - assert r['c'] == 'a' + assert r['c'] == b'a' # since this isn't a transaction, the other commands after the # error are still executed From b1b4c59126b7e2191fcacc653914f839ba65c708 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Mon, 25 Mar 2019 00:19:50 +0100 Subject: [PATCH 090/263] Fix 2 broken things after last rebase --- tests/test_cluster_obj.py | 9 --------- 1 file changed, 9 deletions(-) diff --git a/tests/test_cluster_obj.py b/tests/test_cluster_obj.py index 1e442be4..938b3a89 100644 --- a/tests/test_cluster_obj.py +++ b/tests/test_cluster_obj.py @@ -479,19 +479,10 @@ def test_access_correct_slave_with_readonly_mode_client(sr): 'get_master_node_by_slot', return_value=master_value) as return_master_mock: readonly_client = RedisCluster(host="127.0.0.1", port=7000, readonly_mode=True) -<<<<<<< HEAD assert b('foo') == readonly_client.get('foo16706') readonly_client = RedisCluster.from_url(url="redis://127.0.0.1:7000/0", readonly_mode=True) assert b('foo') == readonly_client.get('foo16706') -======= - assert b'foo' == readonly_client.get('foo16706') - assert return_master_mock.call_count == 0 - - readonly_client = RedisCluster.from_url(url="redis://127.0.0.1:7000/0", readonly_mode=True) - assert b'foo' == readonly_client.get('foo16706') - assert return_master_mock.call_count == 0 ->>>>>>> Fix more byte method conversion calls def test_refresh_using_specific_nodes(r): From 389d7b4cfc2ebf2601740617d3aa413e7db3a3b0 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Wed, 10 Apr 2019 00:51:51 +0200 Subject: [PATCH 091/263] Fix test_pipeline_readonly --- 
tests/test_pipeline.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/test_pipeline.py b/tests/test_pipeline.py index 963c7ca3..72d190c1 100644 --- a/tests/test_pipeline.py +++ b/tests/test_pipeline.py @@ -541,8 +541,8 @@ def test_pipeline_readonly(self, r, ro): On readonly mode, we supports get related stuff only. """ r.set('foo71', 'a1') # we assume this key is set on 127.0.0.1:7001 - r.zadd('foo88', z1=1) # we assume this key is set on 127.0.0.1:7002 - r.zadd('foo88', z2=4) + r.zadd('foo88', {'z1': 1}) # we assume this key is set on 127.0.0.1:7002 + r.zadd('foo88', {'z2': 4}) with ro.pipeline() as readonly_pipe: readonly_pipe.get('foo71').zrange('foo88', 0, 5, withscores=True) From b9096230846d2bdd3fd5d80e579c4ed4ebe2cb2c Mon Sep 17 00:00:00 2001 From: Grokzen Date: Wed, 10 Apr 2019 00:54:46 +0200 Subject: [PATCH 092/263] Fix tests test_moved_redirection, test_moved_redirection_pipeline --- tests/test_cluster_obj.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tests/test_cluster_obj.py b/tests/test_cluster_obj.py index 938b3a89..9ec515d3 100644 --- a/tests/test_cluster_obj.py +++ b/tests/test_cluster_obj.py @@ -364,7 +364,7 @@ def test_moved_redirection(): Important thing to verify is that it tries to talk to the second node. """ - r = RedisCluster(host="127.0.0.1", port=7000) + r = get_mocked_redis_client(host="127.0.0.1", port=7000) m = Mock(autospec=True) def ask_redirect_effect(connection, *args, **options): @@ -403,7 +403,8 @@ def ok_response(connection, *args, **options): parse_response.side_effect = moved_redirect_effect - r = RedisCluster(host="127.0.0.1", port=7000) + # r = RedisCluster(host="127.0.0.1", port=7000) + r = get_mocked_redis_client(host="127.0.0.1", port=7000) p = r.pipeline() p.set("foo", "bar") assert p.execute() == ["MOCK_OK"] From 651a473300446ad934775f27f6ca305e5335ace2 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Wed, 10 Apr 2019 00:56:09 +0200 Subject: [PATCH 093/263] fix test test_access_correct_slave_with_readonly_mode_client --- tests/test_cluster_obj.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/test_cluster_obj.py b/tests/test_cluster_obj.py index 9ec515d3..632851f0 100644 --- a/tests/test_cluster_obj.py +++ b/tests/test_cluster_obj.py @@ -480,10 +480,10 @@ def test_access_correct_slave_with_readonly_mode_client(sr): 'get_master_node_by_slot', return_value=master_value) as return_master_mock: readonly_client = RedisCluster(host="127.0.0.1", port=7000, readonly_mode=True) - assert b('foo') == readonly_client.get('foo16706') + assert b'foo' == readonly_client.get('foo16706') readonly_client = RedisCluster.from_url(url="redis://127.0.0.1:7000/0", readonly_mode=True) - assert b('foo') == readonly_client.get('foo16706') + assert b'foo' == readonly_client.get('foo16706') def test_refresh_using_specific_nodes(r): From 715b9f592a6424c5280fa3fab6fe289dd942c8a0 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Wed, 10 Apr 2019 01:07:10 +0200 Subject: [PATCH 094/263] Update test methods of TestPubSubPubSubSubcommands class to fix the broken tests --- tests/test_pubsub.py | 27 ++++++++++++++------------- 1 file changed, 14 insertions(+), 13 deletions(-) diff --git a/tests/test_pubsub.py b/tests/test_pubsub.py index 556f584f..566fd78b 100644 --- a/tests/test_pubsub.py +++ b/tests/test_pubsub.py @@ -479,29 +479,30 @@ def t_run(rc): class TestPubSubPubSubSubcommands(object): - """ - Test Pub/Sub subcommands of PUBSUB - @see https://redis.io/commands/pubsub - """ - 
@skip_if_redis_py_version_lt('2.10.6') + @skip_if_server_version_lt('2.8.0') def test_pubsub_channels(self, r): - r.pubsub(ignore_subscribe_messages=True).subscribe('foo', 'bar', 'baz', 'quux') + p = r.pubsub(ignore_subscribe_messages=True) + p.subscribe('foo', 'bar', 'baz', 'quux') channels = sorted(r.pubsub_channels()) assert channels == [b'bar', b'baz', b'foo', b'quux'] - @skip_if_redis_py_version_lt('2.10.6') + @skip_if_server_version_lt('2.8.0') def test_pubsub_numsub(self, r): - r.pubsub(ignore_subscribe_messages=True).subscribe('foo', 'bar', 'baz') - r.pubsub(ignore_subscribe_messages=True).subscribe('bar', 'baz') - r.pubsub(ignore_subscribe_messages=True).subscribe('baz') + p1 = r.pubsub(ignore_subscribe_messages=True) + p1.subscribe('foo', 'bar', 'baz') + p2 = r.pubsub(ignore_subscribe_messages=True) + p2.subscribe('bar', 'baz') + p3 = r.pubsub(ignore_subscribe_messages=True) + p3.subscribe('baz') channels = [(b'foo', 1), (b'bar', 2), (b'baz', 3)] - assert channels == sorted(r.pubsub_numsub('foo', 'bar', 'baz')) + assert channels == r.pubsub_numsub('foo', 'bar', 'baz') - @skip_if_redis_py_version_lt('2.10.6') + @skip_if_server_version_lt('2.8.0') def test_pubsub_numpat(self, r): - r.pubsub(ignore_subscribe_messages=True).psubscribe('*oo', '*ar', 'b*z') + p = r.pubsub(ignore_subscribe_messages=True) + p.psubscribe('*oo', '*ar', 'b*z') assert r.pubsub_numpat() == 3 From d71822e1af0a05cdd0a41de9086a650ec1761385 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Wed, 24 Apr 2019 19:29:50 +0200 Subject: [PATCH 095/263] Update conftest.py and start to update test_commands.py to be the same in redis-py --- tests/conftest.py | 123 +++++++++++++++++++++++++++++--------- tests/test_cluster_obj.py | 13 +++- tests/test_commands.py | 48 ++++++++++++++- 3 files changed, 152 insertions(+), 32 deletions(-) diff --git a/tests/conftest.py b/tests/conftest.py index f359ccaa..23871eff 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -20,31 +20,34 @@ _REDIS_VERSIONS = {} -def get_versions(**kwargs): - """ - """ - key = json.dumps(kwargs) +def get_version(**kwargs): + params = {'host': 'localhost', 'port': 7000} + params.update(kwargs) + key = '%s:%s' % (params['host'], params['port']) if key not in _REDIS_VERSIONS: - client = _get_client(**kwargs) - _REDIS_VERSIONS[key] = {key: value['redis_version'] for key, value in client.info().items()} - return _REDIS_VERSIONS[key] + client = RedisCluster(**params) + # INFO command returns for all nodes but we only care for port 7000 + client_info = client.info() + for client_id, client_data in client_info.items(): + if '7000' in key: + _REDIS_VERSIONS[key] = client_data['redis_version'] + + client.connection_pool.disconnect() + return _REDIS_VERSIONS[key] -def _get_client(cls=None, **kwargs): - """ - """ - if not cls: - cls = RedisCluster - params = { - 'startup_nodes': [{ - 'host': '127.0.0.1', 'port': 7000 - }], - 'socket_timeout': 10, - 'decode_responses': False, - } +def _get_client(cls, request=None, **kwargs): + params = {'host': 'localhost', 'port': 7000} params.update(kwargs) - return cls(**params) + client = cls(**params) + client.flushdb() + if request: + def teardown(): + client.flushdb() + client.connection_pool.disconnect() + request.addfinalizer(teardown) + return client def _init_client(request, cls=None, **kwargs): @@ -78,13 +81,13 @@ def skip_if_not_password_protected_nodes(): def skip_if_server_version_lt(min_version): - """ - """ - versions = get_versions() - for version in versions.values(): - if StrictVersion(version) < 
StrictVersion(min_version): - return pytest.mark.skipif(True, reason="") - return pytest.mark.skipif(False, reason="") + check = StrictVersion(get_version()) < StrictVersion(min_version) + return pytest.mark.skipif(check, reason="") + + +def skip_if_server_version_gte(min_version): + check = StrictVersion(get_version()) >= StrictVersion(min_version) + return pytest.mark.skipif(check, reason="") def skip_if_redis_py_version_lt(min_version): @@ -148,3 +151,69 @@ def sr(request, *args, **kwargs): Returns a instance of RedisCluster """ return _init_client(request, reinitialize_steps=1, cls=RedisCluster, **kwargs) + + +def _gen_cluster_mock_resp(r, response): + mock_connection_pool = Mock() + connection = Mock() + response = response + connection.read_response.return_value = response + mock_connection_pool.get_connection.return_value = connection + r.connection_pool = mock_connection_pool + return r + + +@pytest.fixture() +def mock_cluster_resp_ok(request, **kwargs): + r = _get_client(RedisCluster, request, **kwargs) + return _gen_cluster_mock_resp(r, 'OK') + + +@pytest.fixture() +def mock_cluster_resp_int(request, **kwargs): + r = _get_client(RedisCluster, request, **kwargs) + return _gen_cluster_mock_resp(r, '2') + + +@pytest.fixture() +def mock_cluster_resp_info(request, **kwargs): + r = _get_client(RedisCluster, request, **kwargs) + response = ('cluster_state:ok\r\ncluster_slots_assigned:16384\r\n' + 'cluster_slots_ok:16384\r\ncluster_slots_pfail:0\r\n' + 'cluster_slots_fail:0\r\ncluster_known_nodes:7\r\n' + 'cluster_size:3\r\ncluster_current_epoch:7\r\n' + 'cluster_my_epoch:2\r\ncluster_stats_messages_sent:170262\r\n' + 'cluster_stats_messages_received:105653\r\n') + return _gen_cluster_mock_resp(r, response) + + +@pytest.fixture() +def mock_cluster_resp_nodes(request, **kwargs): + r = _get_client(RedisCluster, request, **kwargs) + response = ('c8253bae761cb1ecb2b61857d85dfe455a0fec8b 172.17.0.7:7006 ' + 'slave aa90da731f673a99617dfe930306549a09f83a6b 0 ' + '1447836263059 5 connected\n' + '9bd595fe4821a0e8d6b99d70faa660638a7612b3 172.17.0.7:7008 ' + 'master - 0 1447836264065 0 connected\n' + 'aa90da731f673a99617dfe930306549a09f83a6b 172.17.0.7:7003 ' + 'myself,master - 0 0 2 connected 5461-10922\n' + '1df047e5a594f945d82fc140be97a1452bcbf93e 172.17.0.7:7007 ' + 'slave 19efe5a631f3296fdf21a5441680f893e8cc96ec 0 ' + '1447836262556 3 connected\n' + '4ad9a12e63e8f0207025eeba2354bcf4c85e5b22 172.17.0.7:7005 ' + 'master - 0 1447836262555 7 connected 0-5460\n' + '19efe5a631f3296fdf21a5441680f893e8cc96ec 172.17.0.7:7004 ' + 'master - 0 1447836263562 3 connected 10923-16383\n' + 'fbb23ed8cfa23f17eaf27ff7d0c410492a1093d6 172.17.0.7:7002 ' + 'master,fail - 1447829446956 1447829444948 1 disconnected\n' + ) + return _gen_cluster_mock_resp(r, response) + + +@pytest.fixture() +def mock_cluster_resp_slaves(request, **kwargs): + r = _get_client(RedisCluster, request, **kwargs) + response = ("['1df047e5a594f945d82fc140be97a1452bcbf93e 172.17.0.7:7007 " + "slave 19efe5a631f3296fdf21a5441680f893e8cc96ec 0 " + "1447836789290 3 connected']") + return _gen_cluster_mock_resp(r, response) diff --git a/tests/test_cluster_obj.py b/tests/test_cluster_obj.py index 632851f0..756890dd 100644 --- a/tests/test_cluster_obj.py +++ b/tests/test_cluster_obj.py @@ -425,7 +425,13 @@ def assert_moved_redirection_on_slave(sr, connection_pool_cls, cluster_obj): 'server_type': 'slave', } - master_value = {'host': '127.0.0.1', 'name': '127.0.0.1:7000', 'port': 7000, 'server_type': 'master'} + master_value = { + 'host': '127.0.0.1', 
+ 'name': '127.0.0.1:7000', + 'port': 7000, + 'server_type': 'master', + } + with patch.object(ClusterConnectionPool, 'get_master_node_by_slot') as return_master_mock: return_master_mock.return_value = master_value assert cluster_obj.get('foo16706') == b'foo' @@ -437,10 +443,13 @@ def test_moved_redirection_on_slave_with_default_client(sr): Test that the client is redirected normally with default (readonly_mode=False) client even when we connect always to slave. """ + r = get_mocked_redis_client(host="127.0.0.1", port=7000) + assert_moved_redirection_on_slave( sr, ClusterConnectionPool, - RedisCluster(host="127.0.0.1", port=7000, reinitialize_steps=1) + # RedisCluster(host="127.0.0.1", port=7000, reinitialize_steps=1) + get_mocked_redis_client(host="127.0.0.1", port=7000, reinitialize_steps=1) ) diff --git a/tests/test_commands.py b/tests/test_commands.py index 91377ed7..01081bd7 100644 --- a/tests/test_commands.py +++ b/tests/test_commands.py @@ -7,7 +7,9 @@ import time # rediscluster imports +import rediscluster from rediscluster.exceptions import RedisClusterException, ClusterCrossSlotError +from rediscluster.utils import dict_merge from tests.conftest import skip_if_server_version_lt, skip_if_redis_py_version_lt # 3rd party imports @@ -18,15 +20,55 @@ from redis import exceptions -pytestmark = skip_if_server_version_lt('2.9.0') +@pytest.fixture() +def slowlog(request, r): + current_config = r.config_get() + old_slower_than_value = current_config['slowlog-log-slower-than'] + old_max_legnth_value = current_config['slowlog-max-len'] + + def cleanup(): + r.config_set('slowlog-log-slower-than', old_slower_than_value) + r.config_set('slowlog-max-len', old_max_legnth_value) + request.addfinalizer(cleanup) + + r.config_set('slowlog-log-slower-than', 0) + r.config_set('slowlog-max-len', 128) def redis_server_time(client): - seconds, milliseconds = list(client.time().values())[0] - timestamp = float('{0}.{1}'.format(seconds, milliseconds)) + all_clients_time = client.time() + for server_id, server_time_data in all_clients_time.items(): + if '7000' in server_id: + seconds, milliseconds = server_time_data + + timestamp = float('%s.%s' % (seconds, milliseconds)) return datetime.datetime.fromtimestamp(timestamp) +def get_stream_message(client, stream, message_id): + "Fetch a stream message and format it as a (message_id, fields) pair" + response = client.xrange(stream, min=message_id, max=message_id) + assert len(response) == 1 + return response[0] + + +# RESPONSE CALLBACKS +class TestResponseCallbacks(object): + "Tests for the response callback system" + + def test_response_callbacks(self, r): + all_response_callbacks = dict_merge( + rediscluster.RedisCluster.RESPONSE_CALLBACKS, + rediscluster.RedisCluster.CLUSTER_COMMANDS_RESPONSE_CALLBACKS, + ) + + assert r.response_callbacks == all_response_callbacks + assert id(r.response_callbacks) != id(all_response_callbacks) + r.set_response_callback('GET', lambda x: 'static') + r['a'] = 'foo' + assert r['a'] == 'static' + + class TestRedisCommands(object): @skip_if_server_version_lt('2.9.9') From 51386e9f32b8fd00f35a00f09ac7f3a1357002ed Mon Sep 17 00:00:00 2001 From: Grokzen Date: Thu, 25 Apr 2019 21:03:46 +0200 Subject: [PATCH 096/263] Add command handlers for CLIENT ID command --- rediscluster/client.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/rediscluster/client.py b/rediscluster/client.py index dba80661..5e953019 100644 --- a/rediscluster/client.py +++ b/rediscluster/client.py @@ -60,7 +60,7 @@ class RedisCluster(Redis): 
"BGREWRITEAOF", "BGSAVE", "CLIENT LIST", "CLIENT GETNAME", "CONFIG RESETSTAT", "CONFIG REWRITE", "DBSIZE", "LASTSAVE", "PING", "SAVE", "SLOWLOG LEN", "SLOWLOG RESET", "TIME", "KEYS", "CLUSTER INFO", "PUBSUB CHANNELS", - "PUBSUB NUMSUB", "PUBSUB NUMPAT", + "PUBSUB NUMSUB", "PUBSUB NUMPAT", "CLIENT ID", ], 'all-nodes'), string_keys_to_dict([ "FLUSHALL", "FLUSHDB", "SCRIPT LOAD", "SCRIPT FLUSH", "SCRIPT EXISTS", "SCAN", @@ -99,7 +99,7 @@ class RedisCluster(Redis): "BGREWRITEAOF", "BGSAVE", "CLIENT LIST", "CLIENT GETNAME", "CONFIG RESETSTAT", "CONFIG REWRITE", "DBSIZE", "LASTSAVE", "PING", "SAVE", "SLOWLOG LEN", "SLOWLOG RESET", "TIME", "SCAN", "CLUSTER INFO", 'CLUSTER ADDSLOTS', 'CLUSTER COUNT-FAILURE-REPORTS', - 'CLUSTER DELSLOTS', 'CLUSTER FAILOVER', 'CLUSTER FORGET', "FLUSHALL", "FLUSHDB", + 'CLUSTER DELSLOTS', 'CLUSTER FAILOVER', 'CLUSTER FORGET', "FLUSHALL", "FLUSHDB", "CLIENT ID", ], lambda command, res: res), string_keys_to_dict([ "SCRIPT LOAD", From a076bf438952ff88828f35c2c0c4f11856b88492 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Thu, 25 Apr 2019 21:47:40 +0200 Subject: [PATCH 097/263] Fix import error and add new skip function when we don't have a working cluster implementation --- tests/conftest.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tests/conftest.py b/tests/conftest.py index 23871eff..f2fe40a9 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -11,6 +11,7 @@ # 3rd party imports import pytest from redis import Redis +from redis.exceptions import ResponseError from distutils.version import StrictVersion # put our path in front so we can be sure we are testing locally not against the global package @@ -74,6 +75,10 @@ def teardown(): return client +def skip_for_no_cluster_impl(): + return pytest.mark.skipif(True, reason="Cluster has no or working implementation for this test") + + def skip_if_not_password_protected_nodes(): """ """ From 8615f5d9f08729dc54f98165f27faa5e6307b0c5 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Thu, 25 Apr 2019 22:42:12 +0200 Subject: [PATCH 098/263] Add new script that can be used to generate and bruteforce keys to match a given slot --- examples/generate_slot_keys.py | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) create mode 100644 examples/generate_slot_keys.py diff --git a/examples/generate_slot_keys.py b/examples/generate_slot_keys.py new file mode 100644 index 00000000..45001fa9 --- /dev/null +++ b/examples/generate_slot_keys.py @@ -0,0 +1,26 @@ +import random +import string +import sys +from rediscluster import RedisCluster + +startup_nodes = [{"host": "127.0.0.1", "port": "7000"}] + +# Note: decode_responses must be set to True when used with python3 +rc = RedisCluster(startup_nodes=startup_nodes, decode_responses=True) + +# 10 batches +batch_set = {i: [] for i in range(0, 16384)} + +# Do 100000 slot randos in each block +for j in range(0, 100000): + rando_string = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(5)) + + keyslot = rc.connection_pool.nodes.keyslot(rando_string) + + # batch_set.setdefault(keyslot) + batch_set[keyslot].append(rando_string) + +for i in range(0, 16384): + if len(batch_set[i]) > 0: + print(i, ':', batch_set[i]) + sys.exit(0) From 6883f482735b447eb36f704482ff9eea77c579ea Mon Sep 17 00:00:00 2001 From: Grokzen Date: Sun, 28 Apr 2019 22:14:12 +0200 Subject: [PATCH 099/263] Major update to test_commands and conftest.py to match the redis-py 3.0.1 version source code. All tests that is not skipped now passes. 
--- tests/conftest.py | 3 +- tests/test_commands.py | 1707 +++++++++++++++++++++++++++++++++------- 2 files changed, 1426 insertions(+), 284 deletions(-) diff --git a/tests/conftest.py b/tests/conftest.py index f2fe40a9..19ed432d 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -10,9 +10,10 @@ # 3rd party imports import pytest +from distutils.version import StrictVersion +from mock import Mock from redis import Redis from redis.exceptions import ResponseError -from distutils.version import StrictVersion # put our path in front so we can be sure we are testing locally not against the global package basepath = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) diff --git a/tests/test_commands.py b/tests/test_commands.py index 01081bd7..3e6fab94 100644 --- a/tests/test_commands.py +++ b/tests/test_commands.py @@ -10,10 +10,11 @@ import rediscluster from rediscluster.exceptions import RedisClusterException, ClusterCrossSlotError from rediscluster.utils import dict_merge -from tests.conftest import skip_if_server_version_lt, skip_if_redis_py_version_lt +from tests.conftest import skip_if_server_version_lt, skip_if_redis_py_version_lt, skip_if_server_version_gte, skip_for_no_cluster_impl # 3rd party imports import pytest +import redis from redis._compat import unichr, ascii_letters, iteritems, iterkeys, itervalues, unicode from redis.client import parse_info from redis.exceptions import ResponseError, DataError, RedisError, DataError @@ -22,7 +23,7 @@ @pytest.fixture() def slowlog(request, r): - current_config = r.config_get() + current_config = get_main_cluster_node_data(r.config_get()) old_slower_than_value = current_config['slowlog-log-slower-than'] old_max_legnth_value = current_config['slowlog-max-len'] @@ -52,6 +53,16 @@ def get_stream_message(client, stream, message_id): return response[0] +def get_main_cluster_node_data(command_result): + """ + Tries to find whatever node is running on port :7000 in the cluster resonse + """ + for node_id, node_data in command_result.items(): + if '7000' in node_id: + return node_data + return None + + # RESPONSE CALLBACKS class TestResponseCallbacks(object): "Tests for the response callback system" @@ -71,59 +82,109 @@ def test_response_callbacks(self, r): class TestRedisCommands(object): - @skip_if_server_version_lt('2.9.9') - def test_zrevrangebylex(self, r): - r.zadd('a', {'a': 0, 'b': 0, 'c': 0, 'd': 0, 'e': 0, 'f': 0, 'g': 0}) - assert r.zrevrangebylex('a', '[c', '-') == [b'c', b'b', b'a'] - assert r.zrevrangebylex('a', '(c', '-') == [b'b', b'a'] - assert r.zrevrangebylex('a', '(g', '[aaa') == \ - [b'f', b'e', b'd', b'c', b'b'] - assert r.zrevrangebylex('a', '+', '[f') == [b'g', b'f'] - assert r.zrevrangebylex('a', '+', '-', start=3, num=2) == \ - [b'd', b'c'] - def test_command_on_invalid_key_type(self, r): r.lpush('a', '1') - with pytest.raises(ResponseError): + with pytest.raises(redis.ResponseError): r['a'] # SERVER INFORMATION def test_client_list(self, r): - for server, clients in r.client_list().items(): - assert isinstance(clients[0], dict) - assert 'addr' in clients[0] - + clients = r.client_list() + client_data = get_main_cluster_node_data(clients)[0] + assert isinstance(client_data, dict) + assert 'addr' in client_data + + @skip_if_server_version_lt('5.0.0') + def test_client_list_type(self, r): + with pytest.raises(exceptions.RedisError): + r.client_list(_type='not a client type') + for client_type in ['normal', 'master', 'replica', 'pubsub']: + clients = get_main_cluster_node_data(r.client_list(_type=client_type)) + 
assert isinstance(clients, list) + + @skip_if_server_version_lt('5.0.0') + def test_client_id(self, r): + assert get_main_cluster_node_data(r.client_id()) > 0 + + @skip_if_server_version_lt('5.0.0') + def test_client_unblock(self, r): + myid = get_main_cluster_node_data(r.client_id()) + assert not r.client_unblock(myid) + assert not r.client_unblock(myid, error=True) + assert not r.client_unblock(myid, error=False) + + @skip_if_server_version_lt('2.6.9') def test_client_getname(self, r): - for server, name in r.client_getname().items(): - assert name is None + assert get_main_cluster_node_data(r.client_getname()) is None + @skip_if_server_version_lt('2.6.9') + @skip_for_no_cluster_impl() def test_client_setname(self, r): - with pytest.raises(RedisClusterException): - assert r.client_setname('redis_py_test') + assert r.client_setname('redis_py_test') + assert r.client_getname() == 'redis_py_test' + + @skip_if_server_version_lt('2.6.9') + @skip_for_no_cluster_impl() + def test_client_list_after_client_setname(self, r): + r.client_setname('redis_py_test') + clients = r.client_list() + # we don't know which client ours will be + assert 'redis_py_test' in [c['name'] for c in clients] + + @skip_if_server_version_lt('2.9.50') + def test_client_pause(self, r): + assert r.client_pause(1) + assert r.client_pause(timeout=1) + with pytest.raises(exceptions.RedisError): + r.client_pause(timeout='not an integer') def test_config_get(self, r): - for server, data in r.config_get().items(): - assert 'maxmemory' in data - assert data['maxmemory'].isdigit() + data = get_main_cluster_node_data(r.config_get()) + assert 'maxmemory' in data + assert data['maxmemory'].isdigit() def test_config_resetstat(self, r): r.ping() - for server, info in r.info().items(): - prior_commands_processed = int(info['total_commands_processed']) - assert prior_commands_processed >= 1 + + prior_commands_processed = int(get_main_cluster_node_data(r.info())['total_commands_processed']) + assert prior_commands_processed >= 1 r.config_resetstat() - for server, info in r.info().items(): - reset_commands_processed = int(info['total_commands_processed']) - assert reset_commands_processed < prior_commands_processed + reset_commands_processed = int(get_main_cluster_node_data(r.info())['total_commands_processed']) + assert reset_commands_processed < prior_commands_processed def test_config_set(self, r): - assert r.config_set('dbfilename', 'redis_py_test.rdb') - for server, config in r.config_get().items(): - assert config['dbfilename'] == 'redis_py_test.rdb' + data = get_main_cluster_node_data(r.config_get()) + rdbname = data['dbfilename'] + try: + assert r.config_set('dbfilename', 'redis_py_test.rdb') + assert get_main_cluster_node_data(r.config_get())['dbfilename'] == 'redis_py_test.rdb' + finally: + assert r.config_set('dbfilename', rdbname) + + def test_dbsize(self, r): + r['a'] = 'foo' + r['b'] = 'bar' + # Count all commands sent to the DB. 
Since we have one slave + # for every master we will look for 4 and not 2 + dbsize_sum = sum([db_size_count for node_id, db_size_count in r.dbsize().items()]) + assert dbsize_sum == 4 def test_echo(self, r): - for server, res in r.echo('foo bar').items(): - assert res == b'foo bar' + assert get_main_cluster_node_data(r.echo('foo bar')) == b'foo bar' + + def test_info(self, r): + r['a'] = 'foo' + r['b'] = 'bar' + info = get_main_cluster_node_data(r.info()) + assert isinstance(info, dict) + # We only have a "db0" in cluster mode and only one of the commands will bind to node :7000 + assert info['db0']['keys'] == 1 + # Sum all keys in all slots + keys_sum = sum([node_data.get('db0', {}).get('keys', 0) for node_id, node_data in r.info().items()]) + assert keys_sum == 4 + + def test_lastsave(self, r): + assert isinstance(get_main_cluster_node_data(r.lastsave()), datetime.datetime) def test_object(self, r): r['a'] = 'foo' @@ -135,11 +196,50 @@ def test_object(self, r): def test_ping(self, r): assert r.ping() + @skip_for_no_cluster_impl() + def test_slowlog_get(self, r, slowlog): + assert r.slowlog_reset() + unicode_string = unichr(3456) + 'abcd' + unichr(3421) + r.get(unicode_string) + slowlog = get_main_cluster_node_data(r.slowlog_get()) + assert isinstance(slowlog, list) + commands = [log['command'] for log in slowlog] + + get_command = b' '.join((b'GET', unicode_string.encode('utf-8'))) + assert get_command in commands + assert b'SLOWLOG RESET' in commands + # the order should be ['GET ', 'SLOWLOG RESET'], + # but if other clients are executing commands at the same time, there + # could be commands, before, between, or after, so just check that + # the two we care about are in the appropriate order. + assert commands.index(get_command) < commands.index(b'SLOWLOG RESET') + + # make sure other attributes are typed correctly + assert isinstance(slowlog[0]['start_time'], int) + assert isinstance(slowlog[0]['duration'], int) + + @skip_for_no_cluster_impl() + def test_slowlog_get_limit(self, r, slowlog): + assert r.slowlog_reset() + r.get('foo') + r.get('bar') + slowlog = r.slowlog_get(1) + assert isinstance(slowlog, list) + commands = [log['command'] for log in slowlog] + assert b'GET foo' not in commands + assert b'GET bar' in commands + + @skip_for_no_cluster_impl() + def test_slowlog_length(self, r, slowlog): + r.get('foo') + assert isinstance(r.slowlog_len(), int) + + @skip_if_server_version_lt('2.6.0') def test_time(self, r): - for t in r.time().values(): - assert len(t) == 2 - assert isinstance(t[0], int) - assert isinstance(t[1], int) + t = get_main_cluster_node_data(r.time()) + assert len(t) == 2 + assert isinstance(t[0], int) + assert isinstance(t[1], int) # BASIC KEY COMMANDS def test_append(self, r): @@ -148,6 +248,7 @@ def test_append(self, r): assert r.append('a', 'a2') == 4 assert r['a'] == b'a1a2' + @skip_if_server_version_lt('2.6.0') def test_bitcount(self, r): r.setbit('a', 5, True) assert r.bitcount('a') == 1 @@ -167,16 +268,64 @@ def test_bitcount(self, r): assert r.bitcount('a', 1, 1) == 1 def test_bitop_not_supported(self, r): + """ + Validate that the command is blocked in cluster mode and throws an Exception + """ r['a'] = '' with pytest.raises(RedisClusterException): r.bitop('not', 'r', 'a') + @skip_if_server_version_lt('2.6.0') + @skip_for_no_cluster_impl() + def test_bitop_not_empty_string(self, r): + r['a'] = '' + r.bitop('not', 'r', 'a') + assert r.get('r') is None + + @skip_if_server_version_lt('2.6.0') + @skip_for_no_cluster_impl() + def test_bitop_not(self, r): + 
test_str = b'\xAA\x00\xFF\x55' + correct = ~0xAA00FF55 & 0xFFFFFFFF + r['a'] = test_str + r.bitop('not', 'r', 'a') + assert int(binascii.hexlify(r['r']), 16) == correct + + @skip_if_server_version_lt('2.6.0') + @skip_for_no_cluster_impl() + def test_bitop_not_in_place(self, r): + test_str = b'\xAA\x00\xFF\x55' + correct = ~0xAA00FF55 & 0xFFFFFFFF + r['a'] = test_str + r.bitop('not', 'a', 'a') + assert int(binascii.hexlify(r['a']), 16) == correct + + @skip_if_server_version_lt('2.6.0') + @skip_for_no_cluster_impl() + def test_bitop_single_string(self, r): + test_str = b'\x01\x02\xFF' + r['a'] = test_str + r.bitop('and', 'res1', 'a') + r.bitop('or', 'res2', 'a') + r.bitop('xor', 'res3', 'a') + assert r['res1'] == test_str + assert r['res2'] == test_str + assert r['res3'] == test_str + + @skip_if_server_version_lt('2.6.0') + @skip_for_no_cluster_impl() + def test_bitop_string_operands(self, r): + r['a'] = b'\x01\x02\xFF\xFF' + r['b'] = b'\x01\x02\xFF' + r.bitop('and', 'res1', 'a', 'b') + r.bitop('or', 'res2', 'a', 'b') + r.bitop('xor', 'res3', 'a', 'b') + assert int(binascii.hexlify(r['res1']), 16) == 0x0102FF00 + assert int(binascii.hexlify(r['res2']), 16) == 0x0102FFFF + assert int(binascii.hexlify(r['res3']), 16) == 0x000000FF + @skip_if_server_version_lt('2.8.7') - @skip_if_redis_py_version_lt("2.10.2") def test_bitpos(self, r): - """ - Bitpos was added in redis-py in version 2.10.2 - """ key = 'key:bitpos' r.set(key, b'\xff\xf0\x00') assert r.bitpos(key, 0) == 12 @@ -185,20 +334,16 @@ def test_bitpos(self, r): r.set(key, b'\x00\xff\xf0') assert r.bitpos(key, 1, 0) == 8 assert r.bitpos(key, 1, 1) == 8 - r.set(key, '\x00\x00\x00') + r.set(key, b'\x00\x00\x00') assert r.bitpos(key, 1) == -1 @skip_if_server_version_lt('2.8.7') - @skip_if_redis_py_version_lt("2.10.2") def test_bitpos_wrong_arguments(self, r): - """ - Bitpos was added in redis-py in version 2.10.2 - """ key = 'key:bitpos:wrong:args' r.set(key, b'\xff\xf0\x00') - with pytest.raises(RedisError): + with pytest.raises(exceptions.RedisError): r.bitpos(key, 0, end=1) == 12 - with pytest.raises(RedisError): + with pytest.raises(exceptions.RedisError): r.bitpos(key, 7) == 12 def test_decr(self, r): @@ -231,6 +376,23 @@ def test_delitem(self, r): del r['a'] assert r.get('a') is None + @skip_if_server_version_lt('4.0.0') + def test_unlink(self, r): + assert r.unlink('a') == 0 + r['a'] = 'foo' + assert r.unlink('a') == 1 + assert r.get('a') is None + + @skip_if_server_version_lt('4.0.0') + @skip_for_no_cluster_impl() + def test_unlink_with_multiple_keys(self, r): + r['a'] = 'foo' + r['b'] = 'bar' + assert r.unlink('a', 'b') == 2 + assert r.get('a') is None + assert r.get('b') is None + + @skip_if_server_version_lt('2.6.0') def test_dump_and_restore(self, r): r['a'] = 'foo' dumped = r.dump('a') @@ -238,28 +400,22 @@ def test_dump_and_restore(self, r): r.restore('a', 0, dumped) assert r['a'] == b'foo' + @skip_if_server_version_lt('3.0.0') + def test_dump_and_restore_and_replace(self, r): + r['a'] = 'bar' + dumped = r.dump('a') + with pytest.raises(redis.ResponseError): + r.restore('a', 0, dumped) + + r.restore('a', 0, dumped, replace=True) + assert r['a'] == b'bar' + def test_exists(self, r): assert r.exists('a') == 0 - r['a'] = 'foo' - r['b'] = 'bar' - assert r.exists('a') == 1 - assert r.exists('b') == 1 - # This no longer works in cluster. 
See test_exists_fail_not_same_slots() for failing test - # assert r.exists('a', 'b') == 2 - - def test_exists_fail_not_same_slots(self, r): - """ - This test is conditioned on that the 2 keys will be in different slots - """ - key_a = 'a' - key_b = 'b' - assert r.cluster_keyslot(key_a) != r.cluster_keyslot(key_b) - r[key_a] = 'foo' - r[key_b] = 'bar' - assert r.exists('a') == 1 - assert r.exists('b') == 1 - with pytest.raises(ClusterCrossSlotError): - r.exists('a', 'b') + r['G0B96'] = 'foo' + r['TEFX5'] = 'bar' + assert r.exists('G0B96') == 1 + assert r.exists('G0B96', 'TEFX5') == 2 def test_exists_contains(self, r): assert 'a' not in r @@ -313,6 +469,10 @@ def test_getitem_raises_keyerror_for_missing_key(self, r): with pytest.raises(KeyError): r['a'] + def test_getitem_does_not_raise_keyerror_for_empty_string(self, r): + r['a'] = b"" + assert r['a'] == b"" + def test_get_set_bit(self, r): # no value assert not r.getbit('a', 5) @@ -353,6 +513,7 @@ def test_incrby(self, r): assert r.incrby('a', 4) == 5 assert r['a'] == b'5' + @skip_if_server_version_lt('2.6.0') def test_incrbyfloat(self, r): assert r.incrbyfloat('a') == 1.0 assert r['a'] == b'1' @@ -391,26 +552,28 @@ def test_msetnx(self, r): assert r[k] == v assert r.get('d') is None + @skip_if_server_version_lt('2.6.0') def test_pexpire(self, r): assert not r.pexpire('a', 60000) r['a'] = 'foo' assert r.pexpire('a', 60000) assert 0 < r.pttl('a') <= 60000 assert r.persist('a') - # redis-py tests seemed to be for older version of redis? - # redis-2.8+ returns -1 if key exists but is non-expiring: http://redis.io/commands/pttl assert r.pttl('a') == -1 + @skip_if_server_version_lt('2.6.0') def test_pexpireat_datetime(self, r): expire_at = redis_server_time(r) + datetime.timedelta(minutes=1) r['a'] = 'foo' assert r.pexpireat('a', expire_at) assert 0 < r.pttl('a') <= 61000 + @skip_if_server_version_lt('2.6.0') def test_pexpireat_no_key(self, r): expire_at = redis_server_time(r) + datetime.timedelta(minutes=1) assert not r.pexpireat('a', expire_at) + @skip_if_server_version_lt('2.6.0') def test_pexpireat_unixtime(self, r): expire_at = redis_server_time(r) + datetime.timedelta(minutes=1) r['a'] = 'foo' @@ -418,17 +581,33 @@ def test_pexpireat_unixtime(self, r): assert r.pexpireat('a', expire_at_seconds) assert 0 < r.pttl('a') <= 61000 + @skip_if_server_version_lt('2.6.0') def test_psetex(self, r): assert r.psetex('a', 1000, 'value') assert r['a'] == b'value' assert 0 < r.pttl('a') <= 1000 + @skip_if_server_version_lt('2.6.0') def test_psetex_timedelta(self, r): expire_at = datetime.timedelta(milliseconds=1000) assert r.psetex('a', expire_at, 'value') assert r['a'] == b'value' assert 0 < r.pttl('a') <= 1000 + @skip_if_server_version_lt('2.6.0') + def test_pttl(self, r): + assert not r.pexpire('a', 10000) + r['a'] = '1' + assert r.pexpire('a', 10000) + assert 0 < r.pttl('a') <= 10000 + assert r.persist('a') + assert r.pttl('a') == -1 + + @skip_if_server_version_lt('2.8.0') + def test_pttl_no_key(self, r): + "PTTL on servers 2.8 and after return -2 when the key doesn't exist" + assert r.pttl('a') == -2 + def test_randomkey(self, r): assert r.randomkey() is None for key in ('a', 'b', 'c'): @@ -441,15 +620,6 @@ def test_rename(self, r): assert r.get('a') is None assert r['b'] == b'1' - with pytest.raises(ResponseError) as ex: - r.rename("foo", "foo") - assert unicode(ex.value).startswith("source and destination objects are the same") - - assert r.get("foo") is None - with pytest.raises(ResponseError) as ex: - r.rename("foo", "bar") - assert 
unicode(ex.value).startswith("no such key") - def test_renamenx(self, r): r['a'] = '1' r['b'] = '2' @@ -457,14 +627,13 @@ def test_renamenx(self, r): assert r['a'] == b'1' assert r['b'] == b'2' - assert r.renamenx('a', 'c') - assert r['c'] == b'1' - + @skip_if_server_version_lt('2.6.0') def test_set_nx(self, r): assert r.set('a', '1', nx=True) assert not r.set('a', '2', nx=True) assert r['a'] == b'1' + @skip_if_server_version_lt('2.6.0') def test_set_xx(self, r): assert not r.set('a', '1', xx=True) assert r.get('a') is None @@ -472,27 +641,32 @@ def test_set_xx(self, r): assert r.set('a', '2', xx=True) assert r.get('a') == b'2' + @skip_if_server_version_lt('2.6.0') def test_set_px(self, r): assert r.set('a', '1', px=10000) assert r['a'] == b'1' assert 0 < r.pttl('a') <= 10000 assert 0 < r.ttl('a') <= 10 + @skip_if_server_version_lt('2.6.0') def test_set_px_timedelta(self, r): expire_at = datetime.timedelta(milliseconds=1000) assert r.set('a', '1', px=expire_at) assert 0 < r.pttl('a') <= 1000 assert 0 < r.ttl('a') <= 1 + @skip_if_server_version_lt('2.6.0') def test_set_ex(self, r): assert r.set('a', '1', ex=10) assert 0 < r.ttl('a') <= 10 + @skip_if_server_version_lt('2.6.0') def test_set_ex_timedelta(self, r): expire_at = datetime.timedelta(seconds=60) assert r.set('a', '1', ex=expire_at) assert 0 < r.ttl('a') <= 60 + @skip_if_server_version_lt('2.6.0') def test_set_multipleoptions(self, r): r['a'] = 'val' assert r.set('a', '1', xx=True, px=10000) @@ -527,6 +701,18 @@ def test_substr(self, r): assert r.substr('a', 3, 5) == b'345' assert r.substr('a', 3, -2) == b'345678' + def test_ttl(self, r): + r['a'] = '1' + assert r.expire('a', 10) + assert 0 < r.ttl('a') <= 10 + assert r.persist('a') + assert r.ttl('a') == -1 + + @skip_if_server_version_lt('2.8.0') + def test_ttl_nokey(self, r): + "TTL on servers 2.8 and after return -2 when the key doesn't exist" + assert r.ttl('a') == -2 + def test_type(self, r): assert r.type('a') == b'none' r['a'] = '1' @@ -543,35 +729,43 @@ def test_type(self, r): # LIST COMMANDS def test_blpop(self, r): - r.rpush('a{foo}', '1', '2') - r.rpush('b{foo}', '3', '4') - assert r.blpop(['b{foo}', 'a{foo}'], timeout=1) == (b'b{foo}', b'3') - assert r.blpop(['b{foo}', 'a{foo}'], timeout=1) == (b'b{foo}', b'4') - assert r.blpop(['b{foo}', 'a{foo}'], timeout=1) == (b'a{foo}', b'1') - assert r.blpop(['b{foo}', 'a{foo}'], timeout=1) == (b'a{foo}', b'2') - assert r.blpop(['b{foo}', 'a{foo}'], timeout=1) is None - r.rpush('c{foo}', '1') - assert r.blpop('c{foo}', timeout=1) == (b'c{foo}', b'1') + """ + Generated keys for slot + 16299: ['0J8KD', '822JO', '8TJPT', 'HD644', 'SKUCM', 'N4N5Z', 'NRSWJ'] + """ + r.rpush('0J8KD', '1', '2') + r.rpush('822JO', '3', '4') + assert r.blpop(['822JO', '0J8KD'], timeout=1) == (b'822JO', b'3') + assert r.blpop(['822JO', '0J8KD'], timeout=1) == (b'822JO', b'4') + assert r.blpop(['822JO', '0J8KD'], timeout=1) == (b'0J8KD', b'1') + assert r.blpop(['822JO', '0J8KD'], timeout=1) == (b'0J8KD', b'2') + assert r.blpop(['822JO', '0J8KD'], timeout=1) is None + r.rpush('c', '1') + assert r.blpop('c', timeout=1) == (b'c', b'1') def test_brpop(self, r): - r.rpush('a{foo}', '1', '2') - r.rpush('b{foo}', '3', '4') - assert r.brpop(['b{foo}', 'a{foo}'], timeout=1) == (b'b{foo}', b'4') - assert r.brpop(['b{foo}', 'a{foo}'], timeout=1) == (b'b{foo}', b'3') - assert r.brpop(['b{foo}', 'a{foo}'], timeout=1) == (b'a{foo}', b'2') - assert r.brpop(['b{foo}', 'a{foo}'], timeout=1) == (b'a{foo}', b'1') - assert r.brpop(['b{foo}', 'a{foo}'], timeout=1) is None - 
r.rpush('c{foo}', '1') - assert r.brpop('c{foo}', timeout=1) == (b'c{foo}', b'1') + """ + Generated keys for slot + 16299: ['0J8KD', '822JO', '8TJPT', 'HD644', 'SKUCM', 'N4N5Z', 'NRSWJ'] + """ + r.rpush('0J8KD', '1', '2') + r.rpush('822JO', '3', '4') + assert r.brpop(['822JO', '0J8KD'], timeout=1) == (b'822JO', b'4') + assert r.brpop(['822JO', '0J8KD'], timeout=1) == (b'822JO', b'3') + assert r.brpop(['822JO', '0J8KD'], timeout=1) == (b'0J8KD', b'2') + assert r.brpop(['822JO', '0J8KD'], timeout=1) == (b'0J8KD', b'1') + assert r.brpop(['822JO', '0J8KD'], timeout=1) is None + r.rpush('c', '1') + assert r.brpop('c', timeout=1) == (b'c', b'1') def test_brpoplpush(self, r): - r.rpush('a{foo}', '1', '2') - r.rpush('b{foo}', '3', '4') - assert r.brpoplpush('a{foo}', 'b{foo}') == b'2' - assert r.brpoplpush('a{foo}', 'b{foo}') == b'1' - assert r.brpoplpush('a{foo}', 'b{foo}', timeout=1) is None - assert r.lrange('a{foo}', 0, -1) == [] - assert r.lrange('b{foo}', 0, -1) == [b'1', b'2', b'3', b'4'] + r.rpush('a', '1', '2') + r.rpush('b', '3', '4') + assert r.brpoplpush('a', 'b') == b'2' + assert r.brpoplpush('a', 'b') == b'1' + assert r.brpoplpush('a', 'b', timeout=1) is None + assert r.lrange('a', 0, -1) == [] + assert r.lrange('b', 0, -1) == [b'1', b'2', b'3', b'4'] def test_brpoplpush_empty_string(self, r): r.rpush('a', '') @@ -672,55 +866,41 @@ def test_rpushx(self, r): assert r.lrange('a', 0, -1) == [b'1', b'2', b'3', b'4'] # SCAN COMMANDS - @pytest.mark.skip(reason="WIP") + @skip_if_server_version_lt('2.8.0') def test_scan(self, r): + """ + Generated keys for slot + 0 : ['GQ5KU', 'IFWJL', 'X582D'] + """ + r.set('GQ5KU', 1) + r.set('IFWJL', 2) + r.set('X582D', 3) + cursor, keys = get_main_cluster_node_data(r.scan()) + assert cursor == 0 + assert set(keys) == {b'GQ5KU', b'IFWJL', b'X582D'} + _, keys = get_main_cluster_node_data(r.scan(match='GQ5KU')) + assert set(keys) == {b'GQ5KU'} + + @skip_if_server_version_lt('2.8.0') + def test_scan_iter(self, r): r.set('a', 1) r.set('b', 2) r.set('c', 3) - keys = [] - for result in r.scan().values(): - cursor, partial_keys = result - assert cursor == 0 - keys += partial_keys - - assert set(keys) == {b'a', b'b', b'c'} - - keys = [] - for result in r.scan(match='a').values(): - cursor, partial_keys = result - assert cursor == 0 - keys += partial_keys - assert set(keys) == {b'a'} - - @pytest.mark.skip(reason="WIP") - def test_scan_iter(self, r): - alphabet = 'abcdefghijklmnopqrstuvwABCDEFGHIJKLMNOPQRSTUVW' - for i, c in enumerate(alphabet): - r.set(c, i) keys = list(r.scan_iter()) - expected_result = [b"{0}".format(c) for c in alphabet] - assert set(keys) == set(expected_result) - + assert set(keys) == {b'a', b'b', b'c'} keys = list(r.scan_iter(match='a')) assert set(keys) == {b'a'} - r.set('Xa', 1) - r.set('Xb', 2) - r.set('Xc', 3) - keys = list(r.scan_iter('X*', count=1000)) - assert len(keys) == 3 - assert set(keys) == {b'Xa', b'Xb', b'Xc'} - - @pytest.mark.skip(reason="WIP") + @skip_if_server_version_lt('2.8.0') def test_sscan(self, r): r.sadd('a', 1, 2, 3) cursor, members = r.sscan('a') assert cursor == 0 - assert set(members) == {b'a', b'2', b'3'} + assert set(members) == {b'1', b'2', b'3'} _, members = r.sscan('a', match=b'1') assert set(members) == {b'1'} - @pytest.mark.skip(reason="WIP") + @skip_if_server_version_lt('2.8.0') def test_sscan_iter(self, r): r.sadd('a', 1, 2, 3) members = list(r.sscan_iter('a')) @@ -728,7 +908,7 @@ def test_sscan_iter(self, r): members = list(r.sscan_iter('a', match=b'1')) assert set(members) == {b'1'} - 
@pytest.mark.skip(reason="WIP") + @skip_if_server_version_lt('2.8.0') def test_hscan(self, r): r.hmset('a', {'a': 1, 'b': 2, 'c': 3}) cursor, dic = r.hscan('a') @@ -737,7 +917,7 @@ def test_hscan(self, r): _, dic = r.hscan('a', match='a') assert dic == {b'a': b'1'} - @pytest.mark.skip(reason="WIP") + @skip_if_server_version_lt('2.8.0') def test_hscan_iter(self, r): r.hmset('a', {'a': 1, 'b': 2, 'c': 3}) dic = dict(r.hscan_iter('a')) @@ -745,18 +925,18 @@ def test_hscan_iter(self, r): dic = dict(r.hscan_iter('a', match='a')) assert dic == {b'a': b'1'} - @pytest.mark.skip(reason="WIP") + @skip_if_server_version_lt('2.8.0') def test_zscan(self, r): - r.zadd('a', 1, 'a', 2, 'b', 3, 'c') + r.zadd('a', {'a': 1, 'b': 2, 'c': 3}) cursor, pairs = r.zscan('a') assert cursor == 0 - assert set(pairs) == {(b'a', 1), (b'b, 2'), (b'c', 3)} + assert set(pairs) == {(b'a', 1), (b'b', 2), (b'c', 3)} _, pairs = r.zscan('a', match='a') - assert set(pairs == {(b'a', 1)}) + assert set(pairs) == {(b'a', 1)} - @pytest.mark.skip(reason="WIP") + @skip_if_server_version_lt('2.8.0') def test_zscan_iter(self, r): - r.zadd('a', 1, 'a', 2, 'b', 3, 'c') + r.zadd('a', {'a': 1, 'b': 2, 'c': 3}) pairs = list(r.zscan_iter('a')) assert set(pairs) == {(b'a', 1), (b'b', 2), (b'c', 3)} pairs = list(r.zscan_iter('a', match='a')) @@ -764,7 +944,7 @@ def test_zscan_iter(self, r): # SET COMMANDS def test_sadd(self, r): - members = set([b'1', b'2', b'3']) + members = {b'1', b'2', b'3'} r.sadd('a', *members) assert r.smembers('a') == members @@ -773,35 +953,32 @@ def test_scard(self, r): assert r.scard('a') == 3 def test_sdiff(self, r): - r.sadd('a{foo}', '1', '2', '3') - assert r.sdiff('a{foo}', 'b{foo}') == set([b'1', b'2', b'3']) - r.sadd('b{foo}', '2', '3') - assert r.sdiff('a{foo}', 'b{foo}') == set([b'1']) + r.sadd('a', '1', '2', '3') + assert r.sdiff('a', 'b') == {b'1', b'2', b'3'} + r.sadd('b', '2', '3') + assert r.sdiff('a', 'b') == {b'1'} def test_sdiffstore(self, r): - r.sadd('a{foo}', '1', '2', '3') - assert r.sdiffstore('c{foo}', 'a{foo}', 'b{foo}') == 3 - assert r.smembers('c{foo}') == set([b'1', b'2', b'3']) - r.sadd('b{foo}', '2', '3') - assert r.sdiffstore('c{foo}', 'a{foo}', 'b{foo}') == 1 - assert r.smembers('c{foo}') == set([b'1']) - - # Diff:s that return empty set should not fail - r.sdiffstore('d{foo}', 'e{foo}') == 0 + r.sadd('a', '1', '2', '3') + assert r.sdiffstore('c', 'a', 'b') == 3 + assert r.smembers('c') == {b'1', b'2', b'3'} + r.sadd('b', '2', '3') + assert r.sdiffstore('c', 'a', 'b') == 1 + assert r.smembers('c') == {b'1'} def test_sinter(self, r): - r.sadd('a{foo}', '1', '2', '3') - assert r.sinter('a{foo}', 'b{foo}') == set() - r.sadd('b{foo}', '2', '3') - assert r.sinter('a{foo}', 'b{foo}') == set([b'2', b'3']) + r.sadd('a', '1', '2', '3') + assert r.sinter('a', 'b') == set() + r.sadd('b', '2', '3') + assert r.sinter('a', 'b') == {b'2', b'3'} def test_sinterstore(self, r): - r.sadd('a{foo}', '1', '2', '3') - assert r.sinterstore('c{foo}', 'a{foo}', 'b{foo}') == 0 - assert r.smembers('c{foo}') == set() - r.sadd('b{foo}', '2', '3') - assert r.sinterstore('c{foo}', 'a{foo}', 'b{foo}') == 2 - assert r.smembers('c{foo}') == set([b'2', b'3']) + r.sadd('a', '1', '2', '3') + assert r.sinterstore('c', 'a', 'b') == 0 + assert r.smembers('c') == set() + r.sadd('b', '2', '3') + assert r.sinterstore('c', 'a', 'b') == 2 + assert r.smembers('c') == {b'2', b'3'} def test_sismember(self, r): r.sadd('a', '1', '2', '3') @@ -815,11 +992,11 @@ def test_smembers(self, r): assert r.smembers('a') == {b'1', b'2', b'3'} def 
test_smove(self, r): - r.sadd('a{foo}', 'a1', 'a2') - r.sadd('b{foo}', 'b1', 'b2') - assert r.smove('a{foo}', 'b{foo}', 'a1') - assert r.smembers('a{foo}') == {b'a2'} - assert r.smembers('b{foo}') == {b'b1', b'b2', b'a1'} + r.sadd('a', 'a1', 'a2') + r.sadd('b', 'b1', 'b2') + assert r.smove('a', 'b', 'a1') + assert r.smembers('a') == {b'a2'} + assert r.smembers('b') == {b'b1', b'b2', b'a1'} def test_spop(self, r): s = [b'1', b'2', b'3'] @@ -828,11 +1005,23 @@ def test_spop(self, r): assert value in s assert r.smembers('a') == set(s) - {value} + def test_spop_multi_value(self, r): + s = [b'1', b'2', b'3'] + r.sadd('a', *s) + values = r.spop('a', 2) + assert len(values) == 2 + + for value in values: + assert value in s + + assert r.spop('a', 1) == list(set(s) - set(values)) + def test_srandmember(self, r): s = [b'1', b'2', b'3'] r.sadd('a', *s) assert r.srandmember('a') in s + @skip_if_server_version_lt('2.6.0') def test_srandmember_multi_value(self, r): s = [b'1', b'2', b'3'] r.sadd('a', *s) @@ -844,18 +1033,18 @@ def test_srem(self, r): r.sadd('a', '1', '2', '3', '4') assert r.srem('a', '5') == 0 assert r.srem('a', '2', '4') == 2 - assert r.smembers('a') == set([b'1', b'3']) + assert r.smembers('a') == {b'1', b'3'} def test_sunion(self, r): - r.sadd('a{foo}', '1', '2') - r.sadd('b{foo}', '2', '3') - assert r.sunion('a{foo}', 'b{foo}') == set([b'1', b'2', b'3']) + r.sadd('a', '1', '2') + r.sadd('b', '2', '3') + assert r.sunion('a', 'b') == {b'1', b'2', b'3'} def test_sunionstore(self, r): - r.sadd('a{foo}', '1', '2') - r.sadd('b{foo}', '2', '3') - assert r.sunionstore('c{foo}', 'a{foo}', 'b{foo}') == 3 - assert r.smembers('c{foo}') == set([b'1', b'2', b'3']) + r.sadd('a', '1', '2') + r.sadd('b', '2', '3') + assert r.sunionstore('c', 'a', 'b') == 3 + assert r.smembers('c') == {b'1', b'2', b'3'} # SORTED SET COMMANDS def test_zadd(self, r): @@ -898,8 +1087,12 @@ def test_zadd_incr(self, r): assert r.zadd('a', {'a1': 1}) == 1 assert r.zadd('a', {'a1': 4.5}, incr=True) == 5.5 - @pytest.mark.skip(reason="Test works if done against keys in same slot") + @skip_for_no_cluster_impl() def test_zadd_incr_with_xx(self, r): + """ + Generated keys for slot + 0 : ['60ZE7', '8I2EQ', 'R8H1V', 'NJP6N', '0VI0A', '0CEIC', 'MV75A', 'TMKD9'] + """ # this asks zadd to incr 'a1' only if it exists, but it clearly # doesn't. 
Redis returns a null value in this case and so should # redis-py @@ -930,95 +1123,113 @@ def test_zlexcount(self, r): assert r.zlexcount('a', '-', '+') == 7 assert r.zlexcount('a', '[b', '[f') == 5 - def test_zinterstore_fail_cross_slot(self, r): - r.zadd('a', {'a1': 1, 'a2': 1, 'a3': 1}) - r.zadd('a', {'a1': 2, 'a2': 2, 'a3': 2}) - r.zadd('a', {'a1': 6, 'a2': 5, 'a3': 4}) - with pytest.raises(ResponseError) as excinfo: - r.zinterstore('d', ['a', 'b', 'c']) - assert re.search('ClusterCrossSlotError', str(excinfo)) - - @pytest.mark.skip(reason="Test works if done against keys in same slot") def test_zinterstore_sum(self, r): - r.zadd('a', {'a1': 1, 'a2': 1, 'a3': 1}) - r.zadd('b', {'a1': 2, 'a2': 2, 'a3': 2}) - r.zadd('c', {'a1': 6, 'a3': 5, 'a4': 4}) - assert r.zinterstore('d', ['a', 'b', 'c']) == 2 - assert r.zrange('d', 0, -1, withscores=True) == \ + """ + Generated keys for slot + 0 : ['60ZE7', '8I2EQ', 'R8H1V', 'NJP6N', '0VI0A', '0CEIC', 'MV75A', 'TMKD9'] + """ + r.zadd('60ZE7', {'a1': 1, 'a2': 1, 'a3': 1}) + r.zadd('8I2EQ', {'a1': 2, 'a2': 2, 'a3': 2}) + r.zadd('R8H1V', {'a1': 6, 'a3': 5, 'a4': 4}) + assert r.zinterstore('NJP6N', ['60ZE7', '8I2EQ', 'R8H1V']) == 2 + assert r.zrange('NJP6N', 0, -1, withscores=True) == \ [(b'a3', 8), (b'a1', 9)] - @pytest.mark.skip(reason="Test works if done against keys in same slot") def test_zinterstore_max(self, r): - r.zadd('a', {'a1': 1, 'a2': 1, 'a3': 1}) - r.zadd('b', {'a1': 2, 'a2': 2, 'a3': 2}) - r.zadd('c', {'a1': 6, 'a3': 5, 'a4': 4}) - assert r.zinterstore('d', ['a', 'b', 'c'], aggregate='MAX') == 2 - assert r.zrange('d', 0, -1, withscores=True) == \ + """ + Generated keys for slot + 0 : ['60ZE7', '8I2EQ', 'R8H1V', 'NJP6N', '0VI0A', '0CEIC', 'MV75A', 'TMKD9'] + """ + r.zadd('60ZE7', {'a1': 1, 'a2': 1, 'a3': 1}) + r.zadd('8I2EQ', {'a1': 2, 'a2': 2, 'a3': 2}) + r.zadd('R8H1V', {'a1': 6, 'a3': 5, 'a4': 4}) + assert r.zinterstore('NJP6N', ['60ZE7', '8I2EQ', 'R8H1V'], aggregate='MAX') == 2 + assert r.zrange('NJP6N', 0, -1, withscores=True) == \ [(b'a3', 5), (b'a1', 6)] - @pytest.mark.skip(reason="Test works if done against keys in same slot") def test_zinterstore_min(self, r): - r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 3}) - r.zadd('b', {'a1': 2, 'a2': 3, 'a3': 5}) - r.zadd('c', {'a1': 6, 'a3': 5, 'a4': 4}) - assert r.zinterstore('d', ['a', 'b', 'c'], aggregate='MIN') == 2 - assert r.zrange('d', 0, -1, withscores=True) == \ + """ + Generated keys for slot + 0 : ['60ZE7', '8I2EQ', 'R8H1V', 'NJP6N', '0VI0A', '0CEIC', 'MV75A', 'TMKD9'] + """ + r.zadd('60ZE7', {'a1': 1, 'a2': 2, 'a3': 3}) + r.zadd('8I2EQ', {'a1': 2, 'a2': 3, 'a3': 5}) + r.zadd('R8H1V', {'a1': 6, 'a3': 5, 'a4': 4}) + assert r.zinterstore('NJP6N', ['60ZE7', '8I2EQ', 'R8H1V'], aggregate='MIN') == 2 + assert r.zrange('NJP6N', 0, -1, withscores=True) == \ [(b'a1', 1), (b'a3', 3)] - @pytest.mark.skip(reason="Test works if done against keys in same slot") def test_zinterstore_with_weight(self, r): - r.zadd('a', {'a1': 1, 'a2': 1, 'a3': 1}) - r.zadd('b', {'a1': 2, 'a2': 2, 'a3': 2}) - r.zadd('c', {'a1': 6, 'a3': 5, 'a4': 4}) - assert r.zinterstore('d', {'a': 1, 'b': 2, 'c': 3}) == 2 - assert r.zrange('d', 0, -1, withscores=True) == \ + """ + Generated keys for slot + 0 : ['60ZE7', '8I2EQ', 'R8H1V', 'NJP6N', '0VI0A', '0CEIC', 'MV75A', 'TMKD9'] + """ + r.zadd('60ZE7', {'a1': 1, 'a2': 1, 'a3': 1}) + r.zadd('8I2EQ', {'a1': 2, 'a2': 2, 'a3': 2}) + r.zadd('R8H1V', {'a1': 6, 'a3': 5, 'a4': 4}) + assert r.zinterstore('NJP6N', {'60ZE7': 1, '8I2EQ': 2, 'R8H1V': 3}) == 2 + assert r.zrange('NJP6N', 0, -1, 
withscores=True) == \ [(b'a3', 20), (b'a1', 23)] @skip_if_server_version_lt('4.9.0') - @pytest.mark.skip(reason="Test works if done against keys in same slot") def test_zpopmax(self, r): - r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 3}) - assert r.zpopmax('a') == [(b'a3', 3)] + """ + Generated keys for slot + 0 : ['60ZE7', '8I2EQ', 'R8H1V', 'NJP6N', '0VI0A', '0CEIC', 'MV75A', 'TMKD9'] + """ + r.zadd('60ZE7', {'a1': 1, 'a2': 2, 'a3': 3}) + assert r.zpopmax('60ZE7') == [(b'a3', 3)] # with count - assert r.zpopmax('a', count=2) == \ + assert r.zpopmax('60ZE7', count=2) == \ [(b'a2', 2), (b'a1', 1)] @skip_if_server_version_lt('4.9.0') - @pytest.mark.skip(reason="Test works if done against keys in same slot") def test_zpopmin(self, r): - r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 3}) - assert r.zpopmin('a') == [(b'a1', 1)] + """ + Generated keys for slot + 0 : ['60ZE7', '8I2EQ', 'R8H1V', 'NJP6N', '0VI0A', '0CEIC', 'MV75A', 'TMKD9'] + """ + r.zadd('60ZE7', {'a1': 1, 'a2': 2, 'a3': 3}) + assert r.zpopmin('60ZE7') == [(b'a1', 1)] # with count - assert r.zpopmin('a', count=2) == \ + assert r.zpopmin('60ZE7', count=2) == \ [(b'a2', 2), (b'a3', 3)] @skip_if_server_version_lt('4.9.0') - @pytest.mark.skip(reason="Test works if done against keys in same slot") + @skip_for_no_cluster_impl() def test_bzpopmax(self, r): - r.zadd('a', {'a1': 1, 'a2': 2}) - r.zadd('b', {'b1': 10, 'b2': 20}) - assert r.bzpopmax(['b', 'a'], timeout=1) == (b'b', b'b2', 20) - assert r.bzpopmax(['b', 'a'], timeout=1) == (b'b', b'b1', 10) - assert r.bzpopmax(['b', 'a'], timeout=1) == (b'a', b'a2', 2) - assert r.bzpopmax(['b', 'a'], timeout=1) == (b'a', b'a1', 1) - assert r.bzpopmax(['b', 'a'], timeout=1) is None - r.zadd('c', {'c1': 100}) - assert r.bzpopmax('c', timeout=1) == (b'c', b'c1', 100) + """ + Generated keys for slot + 0 : ['60ZE7', '8I2EQ', 'R8H1V', 'NJP6N', '0VI0A', '0CEIC', 'MV75A', 'TMKD9'] + """ + r.zadd('60ZE7', {'a1': 1, 'a2': 2}) + r.zadd('8I2EQ', {'b1': 10, 'b2': 20}) + assert r.bzpopmax(['8I2EQ', '60ZE7'], timeout=1) == (b'b', b'b2', 20) + assert r.bzpopmax(['8I2EQ', '60ZE7'], timeout=1) == (b'b', b'b1', 10) + assert r.bzpopmax(['8I2EQ', '60ZE7'], timeout=1) == (b'a', b'a2', 2) + assert r.bzpopmax(['8I2EQ', '60ZE7'], timeout=1) == (b'a', b'a1', 1) + assert r.bzpopmax(['8I2EQ', '60ZE7'], timeout=1) is None + r.zadd('R8H1V', {'c1': 100}) + assert r.bzpopmax('R8H1V', timeout=1) == (b'c', b'c1', 100) @skip_if_server_version_lt('4.9.0') - @pytest.mark.skip(reason="Test works if done against keys in same slot") + @skip_for_no_cluster_impl() def test_bzpopmin(self, r): - r.zadd('a', {'a1': 1, 'a2': 2}) - r.zadd('b', {'b1': 10, 'b2': 20}) - assert r.bzpopmin(['b', 'a'], timeout=1) == (b'b', b'b1', 10) - assert r.bzpopmin(['b', 'a'], timeout=1) == (b'b', b'b2', 20) - assert r.bzpopmin(['b', 'a'], timeout=1) == (b'a', b'a1', 1) - assert r.bzpopmin(['b', 'a'], timeout=1) == (b'a', b'a2', 2) - assert r.bzpopmin(['b', 'a'], timeout=1) is None - r.zadd('c', {'c1': 100}) - assert r.bzpopmin('c', timeout=1) == (b'c', b'c1', 100) + """ + Generated keys for slot + 0 : ['60ZE7', '8I2EQ', 'R8H1V', 'NJP6N', '0VI0A', '0CEIC', 'MV75A', 'TMKD9'] + """ + r.zadd('60ZE7', {'a1': 1, 'a2': 2}) + r.zadd('8I2EQ', {'b1': 10, 'b2': 20}) + assert r.bzpopmin(['8I2EQ', '60ZE7'], timeout=1) == (b'b', b'b1', 10) + assert r.bzpopmin(['8I2EQ', '60ZE7'], timeout=1) == (b'b', b'b2', 20) + assert r.bzpopmin(['8I2EQ', '60ZE7'], timeout=1) == (b'a', b'a1', 1) + assert r.bzpopmin(['8I2EQ', '60ZE7'], timeout=1) == (b'a', b'a2', 2) + assert r.bzpopmin(['8I2EQ', '60ZE7'], 
timeout=1) is None + r.zadd('R8H1V', {'c1': 100}) + assert r.bzpopmin('R8H1V', timeout=1) == (b'c', b'c1', 100) def test_zrange(self, r): r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 3}) @@ -1158,51 +1369,56 @@ def test_zscore(self, r): assert r.zscore('a', 'a2') == 2.0 assert r.zscore('a', 'a4') is None - def test_zunionstore_fail_crossslot(self, r): - r.zadd('a', {'a1': 1, 'a2': 1, 'a3': 1}) - r.zadd('b', {'a1': 2, 'a2': 2, 'a3': 2}) - r.zadd('c', {'a1': 6, 'a2': 5, 'a3': 4}) - with pytest.raises(ResponseError) as excinfo: - r.zunionstore('d', ['a', 'b', 'c']) - assert re.search('ClusterCrossSlotError', str(excinfo)) - - @pytest.mark.skip(reason="Test works if done against keys in same slot") def test_zunionstore_sum(self, r): - r.zadd('a', {'a1': 1, 'a2': 1, 'a3': 1}) - r.zadd('b', {'a1': 2, 'a2': 2, 'a3': 2}) - r.zadd('c', {'a1': 6, 'a3': 5, 'a4': 4}) - assert r.zunionstore('d', ['a', 'b', 'c']) == 4 - assert r.zrange('d', 0, -1, withscores=True) == \ + """ + Generated keys for slot + 0 : ['60ZE7', '8I2EQ', 'R8H1V', 'NJP6N', '0VI0A', '0CEIC', 'MV75A', 'TMKD9'] + """ + r.zadd('60ZE7', {'a1': 1, 'a2': 1, 'a3': 1}) + r.zadd('8I2EQ', {'a1': 2, 'a2': 2, 'a3': 2}) + r.zadd('R8H1V', {'a1': 6, 'a3': 5, 'a4': 4}) + assert r.zunionstore('NJP6N', ['60ZE7', '8I2EQ', 'R8H1V']) == 4 + assert r.zrange('NJP6N', 0, -1, withscores=True) == \ [(b'a2', 3), (b'a4', 4), (b'a3', 8), (b'a1', 9)] - @pytest.mark.skip(reason="Test works if done against keys in same slot") def test_zunionstore_max(self, r): - r.zadd('a', {'a1': 1, 'a2': 1, 'a3': 1}) - r.zadd('b', {'a1': 2, 'a2': 2, 'a3': 2}) - r.zadd('c', {'a1': 6, 'a3': 5, 'a4': 4}) - assert r.zunionstore('d', ['a', 'b', 'c'], aggregate='MAX') == 4 - assert r.zrange('d', 0, -1, withscores=True) == \ + """ + Generated keys for slot + 0 : ['60ZE7', '8I2EQ', 'R8H1V', 'NJP6N', '0VI0A', '0CEIC', 'MV75A', 'TMKD9'] + """ + r.zadd('60ZE7', {'a1': 1, 'a2': 1, 'a3': 1}) + r.zadd('8I2EQ', {'a1': 2, 'a2': 2, 'a3': 2}) + r.zadd('R8H1V', {'a1': 6, 'a3': 5, 'a4': 4}) + assert r.zunionstore('NJP6N', ['60ZE7', '8I2EQ', 'R8H1V'], aggregate='MAX') == 4 + assert r.zrange('NJP6N', 0, -1, withscores=True) == \ [(b'a2', 2), (b'a4', 4), (b'a3', 5), (b'a1', 6)] - @pytest.mark.skip(reason="Test works if done against keys in same slot") def test_zunionstore_min(self, r): - r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 3}) - r.zadd('b', {'a1': 2, 'a2': 2, 'a3': 4}) - r.zadd('c', {'a1': 6, 'a3': 5, 'a4': 4}) - assert r.zunionstore('d', ['a', 'b', 'c'], aggregate='MIN') == 4 - assert r.zrange('d', 0, -1, withscores=True) == \ + """ + Generated keys for slot + 0 : ['60ZE7', '8I2EQ', 'R8H1V', 'NJP6N', '0VI0A', '0CEIC', 'MV75A', 'TMKD9'] + """ + r.zadd('60ZE7', {'a1': 1, 'a2': 2, 'a3': 3}) + r.zadd('8I2EQ', {'a1': 2, 'a2': 2, 'a3': 4}) + r.zadd('R8H1V', {'a1': 6, 'a3': 5, 'a4': 4}) + assert r.zunionstore('NJP6N', ['60ZE7', '8I2EQ', 'R8H1V'], aggregate='MIN') == 4 + assert r.zrange('NJP6N', 0, -1, withscores=True) == \ [(b'a1', 1), (b'a2', 2), (b'a3', 3), (b'a4', 4)] - @pytest.mark.skip(reason="Test works if done against keys in same slot") def test_zunionstore_with_weight(self, r): - r.zadd('a', {'a1': 1, 'a2': 1, 'a3': 1}) - r.zadd('b', {'a1': 2, 'a2': 2, 'a3': 2}) - r.zadd('c', {'a1': 6, 'a3': 5, 'a4': 4}) - assert r.zunionstore('d', {'a': 1, 'b': 2, 'c': 3}) == 4 - assert r.zrange('d', 0, -1, withscores=True) == \ + """ + Generated keys for slot + 0 : ['60ZE7', '8I2EQ', 'R8H1V', 'NJP6N', '0VI0A', '0CEIC', 'MV75A', 'TMKD9'] + """ + r.zadd('60ZE7', {'a1': 1, 'a2': 1, 'a3': 1}) + r.zadd('8I2EQ', {'a1': 2, 'a2': 
2, 'a3': 2}) + r.zadd('R8H1V', {'a1': 6, 'a3': 5, 'a4': 4}) + assert r.zunionstore('NJP6N', {'60ZE7': 1, '8I2EQ': 2, 'R8H1V': 3}) == 4 + assert r.zrange('NJP6N', 0, -1, withscores=True) == \ [(b'a2', 5), (b'a4', 12), (b'a3', 20), (b'a1', 23)] -# # HYPERLOGLOG TESTS + # HYPERLOGLOG TESTS + @skip_if_server_version_lt('2.8.9') def test_pfadd(self, r): members = {b'1', b'2', b'3'} assert r.pfadd('a', *members) == 1 @@ -1220,6 +1436,7 @@ def test_pfcount(self, r): assert r.pfcount('b') == len(members_b) assert r.pfcount('a', 'b') == len(members_b.union(members)) + @skip_if_server_version_lt('2.8.9') def test_pfmerge(self, r): mema = {b'1', b'2', b'3'} memb = {b'2', b'3', b'4'} @@ -1272,6 +1489,7 @@ def test_hincrby(self, r): assert r.hincrby('a', '1', amount=2) == 3 assert r.hincrby('a', '1', amount=-2) == 1 + @skip_if_server_version_lt('2.6.0') def test_hincrbyfloat(self, r): assert r.hincrbyfloat('a', '1') == 1.0 assert r.hincrbyfloat('a', '1') == 2.0 @@ -1311,6 +1529,929 @@ def test_hvals(self, r): remote_vals = r.hvals('a') assert sorted(local_vals) == sorted(remote_vals) + @skip_if_server_version_lt('3.2.0') + def test_hstrlen(self, r): + r.hmset('a', {'1': '22', '2': '333'}) + assert r.hstrlen('a', '1') == 2 + assert r.hstrlen('a', '2') == 3 + + # SORT + def test_sort_basic(self, r): + r.rpush('a', '3', '2', '1', '4') + assert r.sort('a') == [b'1', b'2', b'3', b'4'] + + def test_sort_limited(self, r): + r.rpush('a', '3', '2', '1', '4') + assert r.sort('a', start=1, num=2) == [b'2', b'3'] + + @skip_for_no_cluster_impl() + def test_sort_by(self, r): + r['score:1'] = 8 + r['score:2'] = 3 + r['score:3'] = 5 + r.rpush('a', '3', '2', '1') + assert r.sort('a', by='score:*') == [b'2', b'3', b'1'] + + @skip_for_no_cluster_impl() + def test_sort_get(self, r): + r['user:1'] = 'u1' + r['user:2'] = 'u2' + r['user:3'] = 'u3' + r.rpush('a', '2', '3', '1') + assert r.sort('a', get='user:*') == [b'u1', b'u2', b'u3'] + + @skip_for_no_cluster_impl() + def test_sort_get_multi(self, r): + r['user:1'] = 'u1' + r['user:2'] = 'u2' + r['user:3'] = 'u3' + r.rpush('a', '2', '3', '1') + assert r.sort('a', get=('user:*', '#')) == \ + [b'u1', b'1', b'u2', b'2', b'u3', b'3'] + + @skip_for_no_cluster_impl() + def test_sort_get_groups_two(self, r): + r['user:1'] = 'u1' + r['user:2'] = 'u2' + r['user:3'] = 'u3' + r.rpush('a', '2', '3', '1') + assert r.sort('a', get=('user:*', '#'), groups=True) == \ + [(b'u1', b'1'), (b'u2', b'2'), (b'u3', b'3')] + + def test_sort_groups_string_get(self, r): + r['user:1'] = 'u1' + r['user:2'] = 'u2' + r['user:3'] = 'u3' + r.rpush('a', '2', '3', '1') + with pytest.raises(exceptions.DataError): + r.sort('a', get='user:*', groups=True) + + def test_sort_groups_just_one_get(self, r): + r['user:1'] = 'u1' + r['user:2'] = 'u2' + r['user:3'] = 'u3' + r.rpush('a', '2', '3', '1') + with pytest.raises(exceptions.DataError): + r.sort('a', get=['user:*'], groups=True) + + def test_sort_groups_no_get(self, r): + r['user:1'] = 'u1' + r['user:2'] = 'u2' + r['user:3'] = 'u3' + r.rpush('a', '2', '3', '1') + with pytest.raises(exceptions.DataError): + r.sort('a', groups=True) + + @skip_for_no_cluster_impl() + def test_sort_groups_three_gets(self, r): + r['user:1'] = 'u1' + r['user:2'] = 'u2' + r['user:3'] = 'u3' + r['door:1'] = 'd1' + r['door:2'] = 'd2' + r['door:3'] = 'd3' + r.rpush('a', '2', '3', '1') + assert r.sort('a', get=('user:*', 'door:*', '#'), groups=True) == \ + [ + (b'u1', b'd1', b'1'), + (b'u2', b'd2', b'2'), + (b'u3', b'd3', b'3') + ] + + def test_sort_desc(self, r): + r.rpush('a', '2', '3', 
'1') + assert r.sort('a', desc=True) == [b'3', b'2', b'1'] + + def test_sort_alpha(self, r): + r.rpush('a', 'e', 'c', 'b', 'd', 'a') + assert r.sort('a', alpha=True) == \ + [b'a', b'b', b'c', b'd', b'e'] + + def test_sort_store(self, r): + """ + Generated keys for slot + 0 : ['60ZE7', '8I2EQ', 'R8H1V', 'NJP6N', '0VI0A', '0CEIC', 'MV75A', 'TMKD9'] + """ + r.rpush('60ZE7', '2', '3', '1') + assert r.sort('60ZE7', store='8I2EQ') == 3 + assert r.lrange('8I2EQ', 0, -1) == [b'1', b'2', b'3'] + + @skip_for_no_cluster_impl() + def test_sort_all_options(self, r): + """ + Generated keys for slot + 0 : ['60ZE7', '8I2EQ', 'R8H1V', 'NJP6N', '0VI0A', '0CEIC', 'MV75A', 'TMKD9'] + """ + r['user:1:username'] = 'zeus' + r['user:2:username'] = 'titan' + r['user:3:username'] = 'hermes' + r['user:4:username'] = 'hercules' + r['user:5:username'] = 'apollo' + r['user:6:username'] = 'athena' + r['user:7:username'] = 'hades' + r['user:8:username'] = 'dionysus' + + r['user:1:favorite_drink'] = 'yuengling' + r['user:2:favorite_drink'] = 'rum' + r['user:3:favorite_drink'] = 'vodka' + r['user:4:favorite_drink'] = 'milk' + r['user:5:favorite_drink'] = 'pinot noir' + r['user:6:favorite_drink'] = 'water' + r['user:7:favorite_drink'] = 'gin' + r['user:8:favorite_drink'] = 'apple juice' + + r.rpush('gods', '5', '8', '3', '1', '2', '7', '6', '4') + num = r.sort('gods', start=2, num=4, by='user:*:username', + get='user:*:favorite_drink', desc=True, alpha=True, + store='sorted') + assert num == 4 + assert r.lrange('sorted', 0, 10) == \ + [b'vodka', b'milk', b'gin', b'apple juice'] + + def test_sort_issue_924(self, r): + # Tests for issue https://github.com/andymccurdy/redis-py/issues/924 + r.execute_command('SADD', 'issue#924', 1) + r.execute_command('SORT', 'issue#924') + + @skip_for_no_cluster_impl() + def test_cluster_addslots(self, mock_cluster_resp_ok): + assert mock_cluster_resp_ok.cluster('ADDSLOTS', 1) is True + + @skip_for_no_cluster_impl() + def test_cluster_count_failure_reports(self, mock_cluster_resp_int): + assert isinstance(mock_cluster_resp_int.cluster( + 'COUNT-FAILURE-REPORTS', 'node'), int) + + @skip_for_no_cluster_impl() + def test_cluster_countkeysinslot(self, mock_cluster_resp_int): + assert isinstance(mock_cluster_resp_int.cluster( + 'COUNTKEYSINSLOT', 2), int) + + @skip_for_no_cluster_impl() + def test_cluster_delslots(self, mock_cluster_resp_ok): + assert mock_cluster_resp_ok.cluster('DELSLOTS', 1) is True + + @skip_for_no_cluster_impl() + def test_cluster_failover(self, mock_cluster_resp_ok): + assert mock_cluster_resp_ok.cluster('FAILOVER', 1) is True + + @skip_for_no_cluster_impl() + def test_cluster_forget(self, mock_cluster_resp_ok): + assert mock_cluster_resp_ok.cluster('FORGET', 1) is True + + @skip_for_no_cluster_impl() + def test_cluster_info(self, mock_cluster_resp_info): + assert isinstance(mock_cluster_resp_info.cluster('info'), dict) + + @skip_for_no_cluster_impl() + def test_cluster_keyslot(self, mock_cluster_resp_int): + assert isinstance(mock_cluster_resp_int.cluster( + 'keyslot', 'asdf'), int) + + @skip_for_no_cluster_impl() + def test_cluster_meet(self, mock_cluster_resp_ok): + assert mock_cluster_resp_ok.cluster('meet', 'ip', 'port', 1) is True + + @skip_for_no_cluster_impl() + def test_cluster_nodes(self, mock_cluster_resp_nodes): + assert isinstance(mock_cluster_resp_nodes.cluster('nodes'), dict) + + @skip_for_no_cluster_impl() + def test_cluster_replicate(self, mock_cluster_resp_ok): + assert mock_cluster_resp_ok.cluster('replicate', 'nodeid') is True + + 
@skip_for_no_cluster_impl() + def test_cluster_reset(self, mock_cluster_resp_ok): + assert mock_cluster_resp_ok.cluster('reset', 'hard') is True + + @skip_for_no_cluster_impl() + def test_cluster_saveconfig(self, mock_cluster_resp_ok): + assert mock_cluster_resp_ok.cluster('saveconfig') is True + + @skip_for_no_cluster_impl() + def test_cluster_setslot(self, mock_cluster_resp_ok): + assert mock_cluster_resp_ok.cluster('setslot', 1, + 'IMPORTING', 'nodeid') is True + + @skip_for_no_cluster_impl() + def test_cluster_slaves(self, mock_cluster_resp_slaves): + assert isinstance(mock_cluster_resp_slaves.cluster( + 'slaves', 'nodeid'), dict) + + # GEO COMMANDS + @skip_if_server_version_lt('3.2.0') + def test_geoadd(self, r): + values = (2.1909389952632, 41.433791470673, 'place1') +\ + (2.1873744593677, 41.406342043777, 'place2') + + assert r.geoadd('barcelona', *values) == 2 + assert r.zcard('barcelona') == 2 + + @skip_if_server_version_lt('3.2.0') + def test_geoadd_invalid_params(self, r): + with pytest.raises(exceptions.RedisError): + r.geoadd('barcelona', *(1, 2)) + + @skip_if_server_version_lt('3.2.0') + def test_geodist(self, r): + values = (2.1909389952632, 41.433791470673, 'place1') +\ + (2.1873744593677, 41.406342043777, 'place2') + + assert r.geoadd('barcelona', *values) == 2 + assert r.geodist('barcelona', 'place1', 'place2') == 3067.4157 + + @skip_if_server_version_lt('3.2.0') + def test_geodist_units(self, r): + values = (2.1909389952632, 41.433791470673, 'place1') +\ + (2.1873744593677, 41.406342043777, 'place2') + + r.geoadd('barcelona', *values) + assert r.geodist('barcelona', 'place1', 'place2', 'km') == 3.0674 + + @skip_if_server_version_lt('3.2.0') + def test_geodist_missing_one_member(self, r): + values = (2.1909389952632, 41.433791470673, 'place1') + r.geoadd('barcelona', *values) + assert r.geodist('barcelona', 'place1', 'missing_member', 'km') is None + + @skip_if_server_version_lt('3.2.0') + def test_geodist_invalid_units(self, r): + with pytest.raises(exceptions.RedisError): + assert r.geodist('x', 'y', 'z', 'inches') + + @skip_if_server_version_lt('3.2.0') + def test_geohash(self, r): + values = (2.1909389952632, 41.433791470673, 'place1') +\ + (2.1873744593677, 41.406342043777, 'place2') + + r.geoadd('barcelona', *values) + assert r.geohash('barcelona', 'place1', 'place2') ==\ + ['sp3e9yg3kd0', 'sp3e9cbc3t0'] + + @skip_if_server_version_lt('3.2.0') + def test_geopos(self, r): + values = (2.1909389952632, 41.433791470673, 'place1') +\ + (2.1873744593677, 41.406342043777, 'place2') + + r.geoadd('barcelona', *values) + # redis uses 52 bits precision, hereby small errors may be introduced. 
+ assert r.geopos('barcelona', 'place1', 'place2') ==\ + [(2.19093829393386841, 41.43379028184083523), + (2.18737632036209106, 41.40634178640635099)] + + @skip_if_server_version_lt('4.0.0') + def test_geopos_no_value(self, r): + assert r.geopos('barcelona', 'place1', 'place2') == [None, None] + + @skip_if_server_version_lt('3.2.0') + @skip_if_server_version_gte('4.0.0') + def test_old_geopos_no_value(self, r): + assert r.geopos('barcelona', 'place1', 'place2') == [] + + @skip_if_server_version_lt('3.2.0') + def test_georadius(self, r): + values = (2.1909389952632, 41.433791470673, 'place1') +\ + (2.1873744593677, 41.406342043777, 'place2') + + r.geoadd('barcelona', *values) + assert r.georadius('barcelona', 2.191, 41.433, 1000) == ['place1'] + + @skip_if_server_version_lt('3.2.0') + def test_georadius_no_values(self, r): + values = (2.1909389952632, 41.433791470673, 'place1') +\ + (2.1873744593677, 41.406342043777, 'place2') + + r.geoadd('barcelona', *values) + assert r.georadius('barcelona', 1, 2, 1000) == [] + + @skip_if_server_version_lt('3.2.0') + def test_georadius_units(self, r): + values = (2.1909389952632, 41.433791470673, 'place1') +\ + (2.1873744593677, 41.406342043777, 'place2') + + r.geoadd('barcelona', *values) + assert r.georadius('barcelona', 2.191, 41.433, 1, unit='km') ==\ + ['place1'] + + @skip_if_server_version_lt('3.2.0') + def test_georadius_with(self, r): + values = (2.1909389952632, 41.433791470673, 'place1') +\ + (2.1873744593677, 41.406342043777, 'place2') + + r.geoadd('barcelona', *values) + + # test a bunch of combinations to test the parse response + # function. + assert r.georadius('barcelona', 2.191, 41.433, 1, unit='km', + withdist=True, withcoord=True, withhash=True) ==\ + [['place1', 0.0881, 3471609698139488, + (2.19093829393386841, 41.43379028184083523)]] + + assert r.georadius('barcelona', 2.191, 41.433, 1, unit='km', + withdist=True, withcoord=True) ==\ + [['place1', 0.0881, + (2.19093829393386841, 41.43379028184083523)]] + + assert r.georadius('barcelona', 2.191, 41.433, 1, unit='km', + withhash=True, withcoord=True) ==\ + [['place1', 3471609698139488, + (2.19093829393386841, 41.43379028184083523)]] + + # test no values. 
+ assert r.georadius('barcelona', 2, 1, 1, unit='km', + withdist=True, withcoord=True, withhash=True) == [] + + @skip_if_server_version_lt('3.2.0') + def test_georadius_count(self, r): + values = (2.1909389952632, 41.433791470673, 'place1') +\ + (2.1873744593677, 41.406342043777, 'place2') + + r.geoadd('barcelona', *values) + assert r.georadius('barcelona', 2.191, 41.433, 3000, count=1) ==\ + ['place1'] + + @skip_if_server_version_lt('3.2.0') + def test_georadius_sort(self, r): + values = (2.1909389952632, 41.433791470673, 'place1') +\ + (2.1873744593677, 41.406342043777, 'place2') + + r.geoadd('barcelona', *values) + assert r.georadius('barcelona', 2.191, 41.433, 3000, sort='ASC') ==\ + ['place1', 'place2'] + assert r.georadius('barcelona', 2.191, 41.433, 3000, sort='DESC') ==\ + ['place2', 'place1'] + + @skip_if_server_version_lt('3.2.0') + def test_georadius_store(self, r): + """ + Generated keys for slot + 0 : ['60ZE7', '8I2EQ'] + """ + values = (2.1909389952632, 41.433791470673, 'place1') +\ + (2.1873744593677, 41.406342043777, 'place2') + + r.geoadd('60ZE7', *values) + r.georadius('60ZE7', 2.191, 41.433, 1000, store='8I2EQ') + assert r.zrange('8I2EQ', 0, -1) == [b'place1'] + + @skip_if_server_version_lt('3.2.0') + def test_georadius_store_dist(self, r): + """ + Generated keys for slot + 0 : ['60ZE7', '8I2EQ'] + """ + values = (2.1909389952632, 41.433791470673, 'place1') +\ + (2.1873744593677, 41.406342043777, 'place2') + + r.geoadd('60ZE7', *values) + r.georadius('60ZE7', 2.191, 41.433, 1000, + store_dist='8I2EQ') + # instead of save the geo score, the distance is saved. + assert r.zscore('8I2EQ', 'place1') == 88.05060698409301 + + @skip_if_server_version_lt('3.2.0') + def test_georadiusmember(self, r): + values = (2.1909389952632, 41.433791470673, 'place1') +\ + (2.1873744593677, 41.406342043777, 'place2') + + r.geoadd('barcelona', *values) + assert r.georadiusbymember('barcelona', 'place1', 4000) ==\ + ['place2', 'place1'] + assert r.georadiusbymember('barcelona', 'place1', 10) == ['place1'] + + assert r.georadiusbymember('barcelona', 'place1', 4000, + withdist=True, withcoord=True, + withhash=True) ==\ + [['place2', 3067.4157, 3471609625421029, + (2.187376320362091, 41.40634178640635)], + ['place1', 0.0, 3471609698139488, + (2.1909382939338684, 41.433790281840835)]] + + @skip_if_server_version_lt('5.0.0') + @skip_for_no_cluster_impl() + def test_xack(self, r): + stream = 'stream' + group = 'group' + consumer = 'consumer' + # xack on a stream that doesn't exist + assert r.xack(stream, group, '0-0') == 0 + + m1 = r.xadd(stream, {'one': 'one'}) + m2 = r.xadd(stream, {'two': 'two'}) + m3 = r.xadd(stream, {'three': 'three'}) + + # xack on a group that doesn't exist + assert r.xack(stream, group, m1) == 0 + + r.xgroup_create(stream, group, 0) + r.xreadgroup(group, consumer, streams={stream: 0}) + # xack returns the number of ack'd elements + assert r.xack(stream, group, m1) == 1 + assert r.xack(stream, group, m2, m3) == 2 + + @skip_if_server_version_lt('5.0.0') + @skip_for_no_cluster_impl() + def test_xadd(self, r): + stream = 'stream' + message_id = r.xadd(stream, {'foo': 'bar'}) + assert re.match(br'[0-9]+\-[0-9]+', message_id) + + # explicit message id + message_id = b'9999999999999999999-0' + assert message_id == r.xadd(stream, {'foo': 'bar'}, id=message_id) + + # with maxlen, the list evicts the first message + r.xadd(stream, {'foo': 'bar'}, maxlen=2, approximate=False) + assert r.xlen(stream) == 2 + + @skip_if_server_version_lt('5.0.0') + @skip_for_no_cluster_impl() + def 
test_xclaim(self, r): + stream = 'stream' + group = 'group' + consumer1 = 'consumer1' + consumer2 = 'consumer2' + + message_id = r.xadd(stream, {'john': 'wick'}) + message = get_stream_message(r, stream, message_id) + r.xgroup_create(stream, group, 0) + + # trying to claim a message that isn't already pending doesn't + # do anything + response = r.xclaim(stream, group, consumer2, + min_idle_time=0, message_ids=(message_id,)) + assert response == [] + + # read the group as consumer1 to initially claim the messages + r.xreadgroup(group, consumer1, streams={stream: 0}) + + # claim the message as consumer2 + response = r.xclaim(stream, group, consumer2, + min_idle_time=0, message_ids=(message_id,)) + assert response[0] == message + + # reclaim the message as consumer1, but use the justid argument + # which only returns message ids + assert r.xclaim(stream, group, consumer1, + min_idle_time=0, message_ids=(message_id,), + justid=True) == [message_id] + + @skip_if_server_version_lt('5.0.0') + @skip_for_no_cluster_impl() + def test_xdel(self, r): + stream = 'stream' + + # deleting from an empty stream doesn't do anything + assert r.xdel(stream, 1) == 0 + + m1 = r.xadd(stream, {'foo': 'bar'}) + m2 = r.xadd(stream, {'foo': 'bar'}) + m3 = r.xadd(stream, {'foo': 'bar'}) + + # xdel returns the number of deleted elements + assert r.xdel(stream, m1) == 1 + assert r.xdel(stream, m2, m3) == 2 + + @skip_if_server_version_lt('5.0.0') + @skip_for_no_cluster_impl() + def test_xgroup_create(self, r): + # tests xgroup_create and xinfo_groups + stream = 'stream' + group = 'group' + r.xadd(stream, {'foo': 'bar'}) + + # no group is setup yet, no info to obtain + assert r.xinfo_groups(stream) == [] + + assert r.xgroup_create(stream, group, 0) + expected = [{ + 'name': group.encode(), + 'consumers': 0, + 'pending': 0, + 'last-delivered-id': b'0-0' + }] + assert r.xinfo_groups(stream) == expected + + @skip_if_server_version_lt('5.0.0') + @skip_for_no_cluster_impl() + def test_xgroup_create_mkstream(self, r): + # tests xgroup_create and xinfo_groups + stream = 'stream' + group = 'group' + + # an error is raised if a group is created on a stream that + # doesn't already exist + with pytest.raises(exceptions.ResponseError): + r.xgroup_create(stream, group, 0) + + # however, with mkstream=True, the underlying stream is created + # automatically + assert r.xgroup_create(stream, group, 0, mkstream=True) + expected = [{ + 'name': group.encode(), + 'consumers': 0, + 'pending': 0, + 'last-delivered-id': b'0-0' + }] + assert r.xinfo_groups(stream) == expected + + @skip_if_server_version_lt('5.0.0') + @skip_for_no_cluster_impl() + def test_xgroup_delconsumer(self, r): + stream = 'stream' + group = 'group' + consumer = 'consumer' + r.xadd(stream, {'foo': 'bar'}) + r.xadd(stream, {'foo': 'bar'}) + r.xgroup_create(stream, group, 0) + + # a consumer that hasn't yet read any messages doesn't do anything + assert r.xgroup_delconsumer(stream, group, consumer) == 0 + + # read all messages from the group + r.xreadgroup(group, consumer, streams={stream: 0}) + + # deleting the consumer should return 2 pending messages + assert r.xgroup_delconsumer(stream, group, consumer) == 2 + + @skip_if_server_version_lt('5.0.0') + @skip_for_no_cluster_impl() + def test_xgroup_destroy(self, r): + stream = 'stream' + group = 'group' + r.xadd(stream, {'foo': 'bar'}) + + # destroying a nonexistent group returns False + assert not r.xgroup_destroy(stream, group) + + r.xgroup_create(stream, group, 0) + assert r.xgroup_destroy(stream, group) + + 
@skip_if_server_version_lt('5.0.0') + @skip_for_no_cluster_impl() + def test_xgroup_setid(self, r): + stream = 'stream' + group = 'group' + message_id = r.xadd(stream, {'foo': 'bar'}) + + r.xgroup_create(stream, group, 0) + # advance the last_delivered_id to the message_id + r.xgroup_setid(stream, group, message_id) + expected = [{ + 'name': group.encode(), + 'consumers': 0, + 'pending': 0, + 'last-delivered-id': message_id + }] + assert r.xinfo_groups(stream) == expected + + @skip_if_server_version_lt('5.0.0') + @skip_for_no_cluster_impl() + def test_xinfo_consumers(self, r): + stream = 'stream' + group = 'group' + consumer1 = 'consumer1' + consumer2 = 'consumer2' + r.xadd(stream, {'foo': 'bar'}) + + r.xgroup_create(stream, group, 0) + r.xreadgroup(group, consumer1, streams={stream: 0}) + r.xreadgroup(group, consumer2, streams={stream: 0}) + info = r.xinfo_consumers(stream, group) + assert len(info) == 2 + expected = [ + {'name': consumer1.encode(), 'pending': 1}, + {'name': consumer2.encode(), 'pending': 0}, + ] + + # we can't determine the idle time, so just make sure it's an int + assert isinstance(info[0].pop('idle'), (int, long)) + assert isinstance(info[1].pop('idle'), (int, long)) + assert info == expected + + @skip_if_server_version_lt('5.0.0') + @skip_for_no_cluster_impl() + def test_xinfo_stream(self, r): + stream = 'stream' + m1 = r.xadd(stream, {'foo': 'bar'}) + m2 = r.xadd(stream, {'foo': 'bar'}) + info = r.xinfo_stream(stream) + + assert info['length'] == 2 + assert info['first-entry'] == get_stream_message(r, stream, m1) + assert info['last-entry'] == get_stream_message(r, stream, m2) + + @skip_if_server_version_lt('5.0.0') + @skip_for_no_cluster_impl() + def test_xlen(self, r): + stream = 'stream' + assert r.xlen(stream) == 0 + r.xadd(stream, {'foo': 'bar'}) + r.xadd(stream, {'foo': 'bar'}) + assert r.xlen(stream) == 2 + + @skip_if_server_version_lt('5.0.0') + @skip_for_no_cluster_impl() + def test_xpending(self, r): + stream = 'stream' + group = 'group' + consumer1 = 'consumer1' + consumer2 = 'consumer2' + m1 = r.xadd(stream, {'foo': 'bar'}) + m2 = r.xadd(stream, {'foo': 'bar'}) + r.xgroup_create(stream, group, 0) + + # xpending on a group that has no consumers yet + expected = { + 'pending': 0, + 'min': None, + 'max': None, + 'consumers': [] + } + assert r.xpending(stream, group) == expected + + # read 1 message from the group with each consumer + r.xreadgroup(group, consumer1, streams={stream: 0}, count=1) + r.xreadgroup(group, consumer2, streams={stream: m1}, count=1) + + expected = { + 'pending': 2, + 'min': m1, + 'max': m2, + 'consumers': [ + {'name': consumer1.encode(), 'pending': 1}, + {'name': consumer2.encode(), 'pending': 1}, + ] + } + assert r.xpending(stream, group) == expected + + @skip_if_server_version_lt('5.0.0') + @skip_for_no_cluster_impl() + def test_xpending_range(self, r): + stream = 'stream' + group = 'group' + consumer1 = 'consumer1' + consumer2 = 'consumer2' + m1 = r.xadd(stream, {'foo': 'bar'}) + m2 = r.xadd(stream, {'foo': 'bar'}) + r.xgroup_create(stream, group, 0) + + # xpending range on a group that has no consumers yet + assert r.xpending_range(stream, group) == [] + + # read 1 message from the group with each consumer + r.xreadgroup(group, consumer1, streams={stream: 0}, count=1) + r.xreadgroup(group, consumer2, streams={stream: m1}, count=1) + + response = r.xpending_range(stream, group) + assert len(response) == 2 + assert response[0]['message_id'] == m1 + assert response[0]['consumer'] == consumer1.encode() + assert 
response[1]['message_id'] == m2 + assert response[1]['consumer'] == consumer2.encode() + + @skip_if_server_version_lt('5.0.0') + @skip_for_no_cluster_impl() + def test_xrange(self, r): + stream = 'stream' + m1 = r.xadd(stream, {'foo': 'bar'}) + m2 = r.xadd(stream, {'foo': 'bar'}) + m3 = r.xadd(stream, {'foo': 'bar'}) + m4 = r.xadd(stream, {'foo': 'bar'}) + + def get_ids(results): + return [result[0] for result in results] + + results = r.xrange(stream, min=m1) + assert get_ids(results) == [m1, m2, m3, m4] + + results = r.xrange(stream, min=m2, max=m3) + assert get_ids(results) == [m2, m3] + + results = r.xrange(stream, max=m3) + assert get_ids(results) == [m1, m2, m3] + + results = r.xrange(stream, max=m2, count=1) + assert get_ids(results) == [m1] + + @skip_if_server_version_lt('5.0.0') + @skip_for_no_cluster_impl() + def test_xread(self, r): + stream = 'stream' + m1 = r.xadd(stream, {'foo': 'bar'}) + m2 = r.xadd(stream, {'bing': 'baz'}) + + expected = [ + [ + stream, + [ + get_stream_message(r, stream, m1), + get_stream_message(r, stream, m2), + ] + ] + ] + # xread starting at 0 returns both messages + assert r.xread(streams={stream: 0}) == expected + + expected = [ + [ + stream, + [ + get_stream_message(r, stream, m1), + ] + ] + ] + # xread starting at 0 and count=1 returns only the first message + assert r.xread(streams={stream: 0}, count=1) == expected + + expected = [ + [ + stream, + [ + get_stream_message(r, stream, m2), + ] + ] + ] + # xread starting at m1 returns only the second message + assert r.xread(streams={stream: m1}) == expected + + # xread starting at the last message returns an empty list + assert r.xread(streams={stream: m2}) == [] + + @skip_if_server_version_lt('5.0.0') + @skip_for_no_cluster_impl() + def test_xreadgroup(self, r): + stream = 'stream' + group = 'group' + consumer = 'consumer' + m1 = r.xadd(stream, {'foo': 'bar'}) + m2 = r.xadd(stream, {'bing': 'baz'}) + r.xgroup_create(stream, group, 0) + + expected = [ + [ + stream, + [ + get_stream_message(r, stream, m1), + get_stream_message(r, stream, m2), + ] + ] + ] + # xread starting at 0 returns both messages + assert r.xreadgroup(group, consumer, streams={stream: 0}) == expected + + r.xgroup_destroy(stream, group) + r.xgroup_create(stream, group, 0) + + expected = [ + [ + stream, + [ + get_stream_message(r, stream, m1), + ] + ] + ] + # xread starting at 0 and count=1 returns only the first message + assert r.xreadgroup(group, consumer, streams={stream: 0}, count=1) == \ + expected + + r.xgroup_destroy(stream, group) + r.xgroup_create(stream, group, 0) + + expected = [ + [ + stream, + [ + get_stream_message(r, stream, m2), + ] + ] + ] + # xread starting at m1 returns only the second message + assert r.xreadgroup(group, consumer, streams={stream: m1}) == expected + + r.xgroup_destroy(stream, group) + r.xgroup_create(stream, group, 0) + + # xread starting at the last message returns an empty message list + expected = [ + [ + stream, + [] + ] + ] + assert r.xreadgroup(group, consumer, streams={stream: m2}) == expected + + @skip_if_server_version_lt('5.0.0') + @skip_for_no_cluster_impl() + def test_xrevrange(self, r): + stream = 'stream' + m1 = r.xadd(stream, {'foo': 'bar'}) + m2 = r.xadd(stream, {'foo': 'bar'}) + m3 = r.xadd(stream, {'foo': 'bar'}) + m4 = r.xadd(stream, {'foo': 'bar'}) + + def get_ids(results): + return [result[0] for result in results] + + results = r.xrevrange(stream, max=m4) + assert get_ids(results) == [m4, m3, m2, m1] + + results = r.xrevrange(stream, max=m3, min=m2) + assert get_ids(results) 
== [m3, m2] + + results = r.xrevrange(stream, min=m3) + assert get_ids(results) == [m4, m3] + + results = r.xrevrange(stream, min=m2, count=1) + assert get_ids(results) == [m4] + + @skip_if_server_version_lt('5.0.0') + @skip_for_no_cluster_impl() + def test_xtrim(self, r): + stream = 'stream' + + # trimming an empty key doesn't do anything + assert r.xtrim(stream, 1000) == 0 + + r.xadd(stream, {'foo': 'bar'}) + r.xadd(stream, {'foo': 'bar'}) + r.xadd(stream, {'foo': 'bar'}) + r.xadd(stream, {'foo': 'bar'}) + + # trimming an amount large than the number of messages + # doesn't do anything + assert r.xtrim(stream, 5, approximate=False) == 0 + + # 1 message is trimmed + assert r.xtrim(stream, 3, approximate=False) == 1 + + def test_bitfield_operations(self, r): + # comments show affected bits + bf = r.bitfield('a') + resp = (bf + .set('u8', 8, 255) # 00000000 11111111 + .get('u8', 0) # 00000000 + .get('u4', 8) # 1111 + .get('u4', 12) # 1111 + .get('u4', 13) # 111 0 + .execute()) + assert resp == [0, 0, 15, 15, 14] + + # .set() returns the previous value... + resp = (bf + .set('u8', 4, 1) # 0000 0001 + .get('u16', 0) # 00000000 00011111 + .set('u16', 0, 0) # 00000000 00000000 + .execute()) + assert resp == [15, 31, 31] + + # incrby adds to the value + resp = (bf + .incrby('u8', 8, 254) # 00000000 11111110 + .incrby('u8', 8, 1) # 00000000 11111111 + .get('u16', 0) # 00000000 11111111 + .execute()) + assert resp == [254, 255, 255] + + # Verify overflow protection works as a method: + r.delete('a') + resp = (bf + .set('u8', 8, 254) # 00000000 11111110 + .overflow('fail') + .incrby('u8', 8, 2) # incrby 2 would overflow, None returned + .incrby('u8', 8, 1) # 00000000 11111111 + .incrby('u8', 8, 1) # incrby 1 would overflow, None returned + .get('u16', 0) # 00000000 11111111 + .execute()) + assert resp == [0, None, 255, None, 255] + + # Verify overflow protection works as arg to incrby: + r.delete('a') + resp = (bf + .set('u8', 8, 255) # 00000000 11111111 + .incrby('u8', 8, 1) # 00000000 00000000 wrap default + .set('u8', 8, 255) # 00000000 11111111 + .incrby('u8', 8, 1, 'FAIL') # 00000000 11111111 fail + .incrby('u8', 8, 1) # 00000000 11111111 still fail + .get('u16', 0) # 00000000 11111111 + .execute()) + assert resp == [0, 0, 0, None, None, 255] + + # test default default_overflow + r.delete('a') + bf = r.bitfield('a', default_overflow='FAIL') + resp = (bf + .set('u8', 8, 255) # 00000000 11111111 + .incrby('u8', 8, 1) # 00000000 11111111 fail default + .get('u16', 0) # 00000000 11111111 + .execute()) + assert resp == [0, None, 255] + + @skip_if_server_version_lt('4.0.0') + def test_memory_usage(self, r): + r.set('foo', 'bar') + assert isinstance(r.memory_usage('foo'), int) + class TestRedisCommandsSort(object): # SORT From dfa78eef1598f96e4a1f6546ed5cd32a4340dfc0 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Sun, 28 Apr 2019 22:33:10 +0200 Subject: [PATCH 100/263] Add output of redis-py version when running travis-ci to allow for verify that we are working on the correct version of the upstream lib --- .travis.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.travis.yml b/.travis.yml index 376c0df0..1e206456 100644 --- a/.travis.yml +++ b/.travis.yml @@ -20,6 +20,7 @@ install: - pip install -r dev-requirements.txt - pip install -e . 
- "if [[ $HIREDIS == '1' ]]; then pip install hiredis; fi" + - "pip freeze | grep redis" env: # Redis 3.0 & HIREDIS - HIREDIS=0 REDIS_VERSION=3.0 From ccc1d5d0fb73e89415cd1d8dd4f9086e1dfc0c2c Mon Sep 17 00:00:00 2001 From: Grokzen Date: Sun, 28 Apr 2019 22:37:35 +0200 Subject: [PATCH 101/263] python 3.7 is still not possible and nightly is still the correct 3.7 python version to use --- .travis.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 1e206456..ffed6369 100644 --- a/.travis.yml +++ b/.travis.yml @@ -7,7 +7,6 @@ python: - "3.4" - "3.5" - "3.6" - - "3.7" - "nightly" services: - redis-server From bb7581f62be8d13223e45746cea39e88a7ab671a Mon Sep 17 00:00:00 2001 From: Grokzen Date: Sun, 28 Apr 2019 22:41:10 +0200 Subject: [PATCH 102/263] Print out all installed pythoon packages --- .travis.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.travis.yml b/.travis.yml index ffed6369..8b0165c6 100644 --- a/.travis.yml +++ b/.travis.yml @@ -20,6 +20,7 @@ install: - pip install -e . - "if [[ $HIREDIS == '1' ]]; then pip install hiredis; fi" - "pip freeze | grep redis" + - "pip freeze" env: # Redis 3.0 & HIREDIS - HIREDIS=0 REDIS_VERSION=3.0 From 3a57b3f418c708da760fcb706194b424109006b4 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Sun, 28 Apr 2019 22:52:21 +0200 Subject: [PATCH 103/263] Add version block to spop and bitfield opertion that do not work on redis 3.0.x line --- tests/test_commands.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/test_commands.py b/tests/test_commands.py index 3e6fab94..6b50f73f 100644 --- a/tests/test_commands.py +++ b/tests/test_commands.py @@ -1005,6 +1005,7 @@ def test_spop(self, r): assert value in s assert r.smembers('a') == set(s) - {value} + @skip_if_server_version_lt('3.2.0') def test_spop_multi_value(self, r): s = [b'1', b'2', b'3'] r.sadd('a', *s) @@ -2385,6 +2386,7 @@ def test_xtrim(self, r): # 1 message is trimmed assert r.xtrim(stream, 3, approximate=False) == 1 + @skip_if_server_version_lt('3.2.0') def test_bitfield_operations(self, r): # comments show affected bits bf = r.bitfield('a') From 67f5125ac1763841c16ab8474be640edf1e58f5b Mon Sep 17 00:00:00 2001 From: Grokzen Date: Sun, 28 Apr 2019 22:52:35 +0200 Subject: [PATCH 104/263] Install pysnooper as a default dev python package --- dev-requirements.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/dev-requirements.txt b/dev-requirements.txt index 57454c80..93a5bca0 100644 --- a/dev-requirements.txt +++ b/dev-requirements.txt @@ -9,3 +9,4 @@ tox python-coveralls ptpdb ptpython +pysnooper From b3d6d74b791969154fcc056eb2457abec9f1dd39 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Sun, 28 Apr 2019 23:35:40 +0200 Subject: [PATCH 105/263] Fix all usages of _get_client() calls to use the correct Client class --- tests/conftest.py | 2 +- tests/test_cluster_obj.py | 12 ++++++------ tests/test_pipeline.py | 2 +- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/tests/conftest.py b/tests/conftest.py index 19ed432d..8d2166a3 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -137,7 +137,7 @@ def s(*args, **kwargs): """ Create a RedisCluster instance with 'init_slot_cache' set to false """ - s = _get_client(init_slot_cache=False, **kwargs) + s = _get_client(RedisCluster, init_slot_cache=False, **kwargs) assert s.connection_pool.nodes.slots == {} assert s.connection_pool.nodes.nodes == {} return s diff --git a/tests/test_cluster_obj.py b/tests/test_cluster_obj.py index 756890dd..3d0161bd 100644 --- a/tests/test_cluster_obj.py +++ 
b/tests/test_cluster_obj.py @@ -79,7 +79,7 @@ def test_blocked_strict_redis_args(): assert c.connection_pool.connection_kwargs["socket_timeout"] == ClusterConnectionPool.RedisClusterDefaultTimeout with pytest.raises(RedisClusterException) as ex: - _get_client(db=1) + _get_client(RedisCluster, db=1) assert unicode(ex.value).startswith("Argument 'db' is not possible to use in cluster mode") @@ -91,14 +91,14 @@ def test_password_procted_nodes(): startup_nodes = [{"host": "127.0.0.1", "port": "7000"}] password_protected_startup_nodes = [{"host": "127.0.0.1", "port": "7100"}] with pytest.raises(RedisClusterException) as ex: - _get_client(startup_nodes=password_protected_startup_nodes) + _get_client(RedisCluster, startup_nodes=password_protected_startup_nodes) assert unicode(ex.value).startswith("ERROR sending 'cluster slots' command to redis server:") - _get_client(startup_nodes=password_protected_startup_nodes, password='password_is_protected') + _get_client(RedisCluster, startup_nodes=password_protected_startup_nodes, password='password_is_protected') with pytest.raises(RedisClusterException) as ex: - _get_client(startup_nodes=startup_nodes, password='password_is_protected') + _get_client(RedisCluster, startup_nodes=startup_nodes, password='password_is_protected') assert unicode(ex.value).startswith("ERROR sending 'cluster slots' command to redis server:") - _get_client(startup_nodes=startup_nodes) + _get_client(RedisCluster, startup_nodes=startup_nodes) def test_host_port_startup_node(): @@ -116,7 +116,7 @@ def test_empty_startup_nodes(): Test that exception is raised when empty providing empty startup_nodes """ with pytest.raises(RedisClusterException) as ex: - _get_client(init_slot_cache=False, startup_nodes=[]) + _get_client(RedisCluster, init_slot_cache=False, startup_nodes=[]) assert unicode(ex.value).startswith("No startup nodes provided"), unicode(ex.value) diff --git a/tests/test_pipeline.py b/tests/test_pipeline.py index 72d190c1..e6b7d405 100644 --- a/tests/test_pipeline.py +++ b/tests/test_pipeline.py @@ -352,7 +352,7 @@ def test_redis_cluster_pipeline(self): """ Test that we can use a pipeline with the RedisCluster class """ - r = _get_client(cls=None) + r = _get_client(RedisCluster) with r.pipeline(transaction=False) as pipe: pipe.get("foobar") From 87096fdf91b15363e7c264f3daf6bad0ace551e9 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Sun, 28 Apr 2019 23:51:54 +0200 Subject: [PATCH 106/263] Fix test_representation and test_empty_startupnodes tests --- tests/test_cluster_obj.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/test_cluster_obj.py b/tests/test_cluster_obj.py index 3d0161bd..5ff41e70 100644 --- a/tests/test_cluster_obj.py +++ b/tests/test_cluster_obj.py @@ -67,7 +67,7 @@ def execute_command(self, *args, **kwargs): def test_representation(r): - assert re.search('^RedisCluster<[0-9\.\:\,].+>$', str(r)) + assert re.search('^RedisCluster<[a-z0-9\.\:\,].+>$', str(r)) def test_blocked_strict_redis_args(): @@ -116,7 +116,7 @@ def test_empty_startup_nodes(): Test that exception is raised when empty providing empty startup_nodes """ with pytest.raises(RedisClusterException) as ex: - _get_client(RedisCluster, init_slot_cache=False, startup_nodes=[]) + r = RedisCluster(startup_nodes=[]) assert unicode(ex.value).startswith("No startup nodes provided"), unicode(ex.value) From a3b5adf85164ac25bc3c180beb2cd03984a6278d Mon Sep 17 00:00:00 2001 From: Grokzen Date: Mon, 29 Apr 2019 12:52:54 +0200 Subject: [PATCH 107/263] Pytest xfail for pubsub tests that is 
not supported fully in cluster mode --- tests/test_pubsub.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/tests/test_pubsub.py b/tests/test_pubsub.py index 566fd78b..4d49d626 100644 --- a/tests/test_pubsub.py +++ b/tests/test_pubsub.py @@ -481,6 +481,7 @@ def t_run(rc): class TestPubSubPubSubSubcommands(object): @skip_if_server_version_lt('2.8.0') + @pytest.mark.xfail(reason="Pubsub is not fully supported in cluster mode") def test_pubsub_channels(self, r): p = r.pubsub(ignore_subscribe_messages=True) p.subscribe('foo', 'bar', 'baz', 'quux') @@ -488,6 +489,7 @@ def test_pubsub_channels(self, r): assert channels == [b'bar', b'baz', b'foo', b'quux'] @skip_if_server_version_lt('2.8.0') + @pytest.mark.xfail(reason="Pubsub is not fully supported in cluster mode") def test_pubsub_numsub(self, r): p1 = r.pubsub(ignore_subscribe_messages=True) p1.subscribe('foo', 'bar', 'baz') @@ -500,6 +502,7 @@ def test_pubsub_numsub(self, r): assert channels == r.pubsub_numsub('foo', 'bar', 'baz') @skip_if_server_version_lt('2.8.0') + @pytest.mark.xfail(reason="Pubsub is not fully supported in cluster mode") def test_pubsub_numpat(self, r): p = r.pubsub(ignore_subscribe_messages=True) p.psubscribe('*oo', '*ar', 'b*z') @@ -509,6 +512,7 @@ def test_pubsub_numpat(self, r): class TestPubSubPings(object): @skip_if_server_version_lt('3.0.0') + @pytest.mark.xfail(reason="Pubsub is not fully supported in cluster mode") def test_send_pubsub_ping(self, r): p = r.pubsub(ignore_subscribe_messages=True) p.subscribe('foo') @@ -518,7 +522,7 @@ def test_send_pubsub_ping(self, r): pattern=None) @skip_if_server_version_lt('3.0.0') - @pytest.mark.xfail(reason="Pattern pubsub do not work currently") + @pytest.mark.xfail(reason="Pattern pubsub is not fully supported in cluster mode") def test_send_pubsub_ping_message(self, r): p = r.pubsub(ignore_subscribe_messages=True) p.subscribe('foo') From 29924049f57bc92f8e48d0a69c2e8a58769b0c85 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Mon, 29 Apr 2019 14:55:33 +0200 Subject: [PATCH 108/263] Only run bitfield test on redis version >= 3.2.0 --- tests/test_pipeline.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/test_pipeline.py b/tests/test_pipeline.py index e6b7d405..aa1617d2 100644 --- a/tests/test_pipeline.py +++ b/tests/test_pipeline.py @@ -8,7 +8,7 @@ from rediscluster.client import RedisCluster from rediscluster.connection import ClusterConnectionPool, ClusterReadOnlyConnectionPool from rediscluster.exceptions import RedisClusterException -from tests.conftest import _get_client +from tests.conftest import _get_client, skip_if_server_version_lt # 3rd party imports import pytest @@ -285,6 +285,7 @@ def test_exec_error_in_no_transaction_pipeline_unicode_command(self, r): assert r[key] == b'1' + @skip_if_server_version_lt('3.2.0') def test_pipeline_with_bitfield(self, r): with r.pipeline() as pipe: pipe.set('a', '1') From a70d220c894541bc9189a54b9e330c6e66e74404 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Tue, 30 Apr 2019 22:29:53 +0200 Subject: [PATCH 109/263] Update test_scripting to match upstream redis-py 3.0.1 --- tests/test_scripting.py | 37 ++++++++++++++++++++++--------------- 1 file changed, 22 insertions(+), 15 deletions(-) diff --git a/tests/test_scripting.py b/tests/test_scripting.py index 968fdc61..ca5afc21 100644 --- a/tests/test_scripting.py +++ b/tests/test_scripting.py @@ -93,45 +93,51 @@ def test_script_loading(self, r): def test_script_object(self, r): r.set('a', 2) multiply = r.register_script(multiply_script) - 
# test evalsha fail -> script load + retry + precalculated_sha = multiply.sha + assert precalculated_sha + assert r.script_exists(multiply.sha) == [False] + # Test second evalsha block (after NoScriptError) assert multiply(keys=['a'], args=[3]) == 6 - assert multiply.sha + # At this point, the script should be loaded assert r.script_exists(multiply.sha) == [True] - # test first evalsha + # Test that the precalculated sha matches the one from redis + assert multiply.sha == precalculated_sha + # Test first evalsha block assert multiply(keys=['a'], args=[3]) == 6 - @pytest.mark.xfail(reason="Not Yet Implemented") + @pytest.mark.xfail(reason="Script object not supported in cluster") def test_script_object_in_pipeline(self, r): multiply = r.register_script(multiply_script) - assert not multiply.sha + precalculated_sha = multiply.sha + assert precalculated_sha pipe = r.pipeline() pipe.set('a', 2) pipe.get('a') multiply(keys=['a'], args=[3], client=pipe) - # even though the pipeline wasn't executed yet, we made sure the - # script was loaded and got a valid sha - assert multiply.sha - assert r.script_exists(multiply.sha) == [True] + assert r.script_exists(multiply.sha) == [False] # [SET worked, GET 'a', result of multiple script] assert pipe.execute() == [True, b'2', 6] + # The script should have been loaded by pipe.execute() + assert r.script_exists(multiply.sha) == [True] + # The precalculated sha should have been the correct one + assert multiply.sha == precalculated_sha # purge the script from redis's cache and re-run the pipeline - # the multiply script object knows it's sha, so it shouldn't get - # reloaded until pipe.execute() + # the multiply script should be reloaded by pipe.execute() r.script_flush() pipe = r.pipeline() pipe.set('a', 2) pipe.get('a') - assert multiply.sha multiply(keys=['a'], args=[3], client=pipe) assert r.script_exists(multiply.sha) == [False] # [SET worked, GET 'a', result of multiple script] assert pipe.execute() == [True, b'2', 6] + assert r.script_exists(multiply.sha) == [True] - @pytest.mark.xfail(reason="Not Yet Implemented") + @pytest.mark.xfail(reason="LUA is not supported in cluster") def test_eval_msgpack_pipeline_error_in_lua(self, r): msgpack_hello = r.register_script(msgpack_hello_script) - assert not msgpack_hello.sha + assert msgpack_hello.sha pipe = r.pipeline() @@ -141,8 +147,9 @@ def test_eval_msgpack_pipeline_error_in_lua(self, r): msgpack_hello(args=[msgpack_message_1], client=pipe) - assert r.script_exists(msgpack_hello.sha) == [True] + assert r.script_exists(msgpack_hello.sha) == [False] assert pipe.execute()[0] == b'hello Joe' + assert r.script_exists(msgpack_hello.sha) == [True] msgpack_hello_broken = r.register_script(msgpack_hello_script_broken) From f8b7db519315fb893fd94d7b45fada979d618d04 Mon Sep 17 00:00:00 2001 From: James Ward Date: Wed, 6 Feb 2019 01:25:33 -0500 Subject: [PATCH 110/263] add test to support both kinds of cluster slots errors --- tests/test_node_manager.py | 24 +++++++++++++++++++++--- 1 file changed, 21 insertions(+), 3 deletions(-) diff --git a/tests/test_node_manager.py b/tests/test_node_manager.py index 5972d59d..cd4fecc2 100644 --- a/tests/test_node_manager.py +++ b/tests/test_node_manager.py @@ -14,7 +14,7 @@ from mock import patch, Mock from redis import Redis from redis._compat import unicode -from redis import ConnectionError +from redis import ConnectionError, ResponseError pytestmark = skip_if_server_version_lt('2.9.0') @@ -282,11 +282,29 @@ def test_cluster_slots_error(): with patch.object(RedisCluster, 
'execute_command') as execute_command_mock: execute_command_mock.side_effect = Exception("foobar") - n = NodeManager(startup_nodes=[{}]) + n = NodeManager(startup_nodes=[{"host": "127.0.0.1", "port": 7000}]) - with pytest.raises(RedisClusterException): + with pytest.raises(RedisClusterException) as e: n.initialize() + assert "ERROR sending 'cluster slots' command" in unicode(e) + + +def test_cluster_slots_error_expected_responseerror(): + """ + Check that exception is not raised if initialize can't execute + 'CLUSTER SLOTS' command but can hit other nodes. + """ + with patch.object(StrictRedis, 'execute_command') as execute_command_mock: + execute_command_mock.side_effect = ResponseError("MASTERDOWN") + + n = NodeManager(startup_nodes=[{"host": "127.0.0.1", "port": 7000}]) + + with pytest.raises(RedisClusterException) as e: + n.initialize() + + assert 'Redis Cluster cannot be connected' in unicode(e) + def test_set_node(): """ From f6e93525bc902993a6b85dd84e2be0a1c1b41cfa Mon Sep 17 00:00:00 2001 From: James Ward Date: Wed, 6 Feb 2019 01:26:09 -0500 Subject: [PATCH 111/263] handle ResponseError gracefully in python 2 and 3 in Python 3 the responserror doesn't have a `message` value on it. instead, it just needs to be cast to a string fixes #278 --- rediscluster/nodemanager.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/rediscluster/nodemanager.py b/rediscluster/nodemanager.py index 2c69e38d..e46bd6b1 100644 --- a/rediscluster/nodemanager.py +++ b/rediscluster/nodemanager.py @@ -165,7 +165,8 @@ def initialize(self): continue except ResponseError as e: # Isn't a cluster connection, so it won't parse these exceptions automatically - if 'CLUSTERDOWN' in e.message or 'MASTERDOWN' in e.message: + message = e.__str__() + if 'CLUSTERDOWN' in message or 'MASTERDOWN' in message: continue else: raise RedisClusterException("ERROR sending 'cluster slots' command to redis server: {0}".format(node)) From 41afbc907ac1d3acd0dd561677ba7a27ded91ef7 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Tue, 23 Jul 2019 21:51:57 +0200 Subject: [PATCH 112/263] Fixed wrong classes that was patches to induce mocked exceptions --- tests/test_node_manager.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/test_node_manager.py b/tests/test_node_manager.py index cd4fecc2..c8dbcb90 100644 --- a/tests/test_node_manager.py +++ b/tests/test_node_manager.py @@ -279,7 +279,7 @@ def test_cluster_slots_error(): Check that exception is raised if initialize can't execute 'CLUSTER SLOTS' command. """ - with patch.object(RedisCluster, 'execute_command') as execute_command_mock: + with patch.object(Redis, 'execute_command') as execute_command_mock: execute_command_mock.side_effect = Exception("foobar") n = NodeManager(startup_nodes=[{"host": "127.0.0.1", "port": 7000}]) @@ -295,7 +295,7 @@ def test_cluster_slots_error_expected_responseerror(): Check that exception is not raised if initialize can't execute 'CLUSTER SLOTS' command but can hit other nodes. 
""" - with patch.object(StrictRedis, 'execute_command') as execute_command_mock: + with patch.object(Redis, 'execute_command') as execute_command_mock: execute_command_mock.side_effect = ResponseError("MASTERDOWN") n = NodeManager(startup_nodes=[{"host": "127.0.0.1", "port": 7000}]) From b6ebf53a2f38906be38260a570e35f5708ae67d0 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Sun, 11 Aug 2019 17:02:15 +0200 Subject: [PATCH 113/263] Add line in readme about the supported redis-py version range for this major release --- README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.md b/README.md index 432b8b2f..27b430ff 100644 --- a/README.md +++ b/README.md @@ -30,6 +30,8 @@ Latest stable release from pypi $ pip install redis-py-cluster ``` +This major version of `redis-py-cluster` supports `redis-py>=3.0.0,<3.1.0`. + ## Usage example From de944503981eeb17c5e741e345debdc1b91b96c4 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Sun, 11 Aug 2019 17:03:16 +0200 Subject: [PATCH 114/263] Update range of year for copyright --- LICENSE | 2 +- README.md | 2 +- docs/License.txt | 2 +- docs/license.rst | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/LICENSE b/LICENSE index 66ccb488..f2a09d18 100644 --- a/LICENSE +++ b/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2014-2018 Johan Andersson +Copyright (c) 2014-2019 Johan Andersson Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation diff --git a/README.md b/README.md index 27b430ff..925942f0 100644 --- a/README.md +++ b/README.md @@ -56,7 +56,7 @@ True ## License & Authors -Copyright (c) 2013-2018 Johan Andersson +Copyright (c) 2013-2019 Johan Andersson MIT (See docs/License.txt file) diff --git a/docs/License.txt b/docs/License.txt index bf0afb13..ceabc499 100644 --- a/docs/License.txt +++ b/docs/License.txt @@ -1,4 +1,4 @@ -Copyright (c) 2014-2018 Johan Andersson +Copyright (c) 2014-2019 Johan Andersson Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation diff --git a/docs/license.rst b/docs/license.rst index 305c9087..d023468c 100644 --- a/docs/license.rst +++ b/docs/license.rst @@ -1,7 +1,7 @@ Licensing --------- -Copyright (c) 2013-2018 Johan Andersson +Copyright (c) 2013-2019 Johan Andersson MIT (See docs/License.txt file) From c6bf328a533f3dc02320ea61f592d371bf3f97ce Mon Sep 17 00:00:00 2001 From: Grokzen Date: Sun, 11 Aug 2019 17:08:52 +0200 Subject: [PATCH 115/263] Remove gitter link as that chatt room is no longer in use --- README.md | 2 -- 1 file changed, 2 deletions(-) diff --git a/README.md b/README.md index 925942f0..ce98a95d 100644 --- a/README.md +++ b/README.md @@ -4,8 +4,6 @@ This client provides a client for redis cluster that was added in redis 3.0. This project is a port of `redis-rb-cluster` by antirez, with alot of added functionality. 
The original source can be found at https://github.com/antirez/redis-rb-cluster -Gitter chat room: [![Gitter](https://badges.gitter.im/Grokzen/redis-py-cluster.svg)](https://gitter.im/Grokzen/redis-py-cluster?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge) - [![Build Status](https://travis-ci.org/Grokzen/redis-py-cluster.svg?branch=master)](https://travis-ci.org/Grokzen/redis-py-cluster) [![Coverage Status](https://coveralls.io/repos/Grokzen/redis-py-cluster/badge.png)](https://coveralls.io/r/Grokzen/redis-py-cluster) [![PyPI version](https://badge.fury.io/py/redis-py-cluster.svg)](http://badge.fury.io/py/redis-py-cluster) From 2543b1adbc409008469c4924365c3a509c0a6aa0 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Sun, 11 Aug 2019 17:32:57 +0200 Subject: [PATCH 116/263] Remove two broken tests since readonly from clients was introduced. Updated variabels in get_mocked_redis_client to avoid overwriting variables between inner and outer scope --- tests/test_cluster_obj.py | 60 ++------------------------------------- 1 file changed, 3 insertions(+), 57 deletions(-) diff --git a/tests/test_cluster_obj.py b/tests/test_cluster_obj.py index 5ff41e70..02bbc8ff 100644 --- a/tests/test_cluster_obj.py +++ b/tests/test_cluster_obj.py @@ -38,8 +38,8 @@ def get_mocked_redis_client(*args, **kwargs): on different installations and machines. """ with patch.object(Redis, 'execute_command') as execute_command_mock: - def execute_command(self, *args, **kwargs): - if args[0] == 'slots': + def execute_command(self, *_args, **_kwargs): + if _args[0] == 'slots': mock_cluster_slots = [ [ 0, 5460, @@ -58,7 +58,7 @@ def execute_command(self, *args, **kwargs): ] ] return mock_cluster_slots - elif args[0] == 'cluster-require-full-coverage': + elif _args[0] == 'cluster-require-full-coverage': return {'cluster-require-full-coverage': 'yes'} execute_command_mock.side_effect = execute_command @@ -410,60 +410,6 @@ def ok_response(connection, *args, **options): assert p.execute() == ["MOCK_OK"] -def assert_moved_redirection_on_slave(sr, connection_pool_cls, cluster_obj): - """ - """ - # we assume this key is set on 127.0.0.1:7000(7003) - sr.set('foo16706', 'foo') - time.sleep(1) - - with patch.object(connection_pool_cls, 'get_node_by_slot') as return_slave_mock: - return_slave_mock.return_value = { - 'name': '127.0.0.1:7004', - 'host': '127.0.0.1', - 'port': 7004, - 'server_type': 'slave', - } - - master_value = { - 'host': '127.0.0.1', - 'name': '127.0.0.1:7000', - 'port': 7000, - 'server_type': 'master', - } - - with patch.object(ClusterConnectionPool, 'get_master_node_by_slot') as return_master_mock: - return_master_mock.return_value = master_value - assert cluster_obj.get('foo16706') == b'foo' - assert return_master_mock.call_count == 1 - - -def test_moved_redirection_on_slave_with_default_client(sr): - """ - Test that the client is redirected normally with default - (readonly_mode=False) client even when we connect always to slave. - """ - r = get_mocked_redis_client(host="127.0.0.1", port=7000) - - assert_moved_redirection_on_slave( - sr, - ClusterConnectionPool, - # RedisCluster(host="127.0.0.1", port=7000, reinitialize_steps=1) - get_mocked_redis_client(host="127.0.0.1", port=7000, reinitialize_steps=1) - ) - - -def test_moved_redirection_on_slave_with_readonly_mode_client(sr): - """ - Ditto with READONLY mode. 
- """ - assert_moved_redirection_on_slave( - sr, - ClusterReadOnlyConnectionPool, - RedisCluster(host="127.0.0.1", port=7000, readonly_mode=True, reinitialize_steps=1) - ) - - def test_access_correct_slave_with_readonly_mode_client(sr): """ Test that the client can get value normally with readonly mode From 025d75655814969abf03f49265405290ab500216 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Sun, 11 Aug 2019 17:51:20 +0200 Subject: [PATCH 117/263] Remove old parallel docs about threaded pipelines and add a updated section about how it is implemented right now. Fixes #250 --- docs/index.rst | 1 - docs/pipelines.rst | 13 ++++++++++- docs/threads.rst | 57 ---------------------------------------------- 3 files changed, 12 insertions(+), 59 deletions(-) delete mode 100644 docs/threads.rst diff --git a/docs/index.rst b/docs/index.rst index 9041cbcf..70396bb5 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -116,7 +116,6 @@ The Usage Guide commands limitations-and-differences pipelines - threads pubsub readonly-mode diff --git a/docs/pipelines.rst b/docs/pipelines.rst index c092b604..942cd7f4 100644 --- a/docs/pipelines.rst +++ b/docs/pipelines.rst @@ -70,7 +70,18 @@ Packing Commands When issuing only a single command, there is only one network round trip to be made. But what if you issue 100 pipelined commands? In a single-instance redis configuration, you still only need to make one network hop. The commands are packed into a single request and the server responds with all the data for those requests in a single response. But with redis cluster, those keys could be spread out over many different nodes. -The client is responsible for figuring out which commands map to which nodes. Let's say for example that your 100 pipelined commands need to route to 3 different nodes? The first thing the client does is break out the commands that go to each node, so it only has 3 network requests to make instead of 100. +The client is responsible for figuring out which commands map to which nodes. Let's say for example that your 100 pipelined commands need to route to 3 different nodes? The first thing the client does is break out the commands that go to each node, so it only has 3 network requests to make instead of 100. + + +Parallel execution of pipeline +------------------------------ + +In older version of `redis-py-cluster`, there was a thread implementation that helped to increaes the performance of running pipelines by running the connections and execution of all commands to all nodes in the pipeline in paralell. This implementation was later removed in favor of a much simpler and faster implementation. + +In this new implementation we execute everything in the same thread, but we do all the writing to all sockets in order to each different server and then start to wait for them in sequence until all of them is complete. There is no real need to run them in parralell since we still have to wait for a thread join of all parralell executions before the code can continue, so we can wait in sequence for all of them to complete. This is not the absolute fastest implementation, but it much simpler to implement and maintain and cause less issues becuase there is no threads or other parallel ipmlementation that will use some overhead and add complexity to the method. + +This feature is implemented by default and will be used in all pipeline requests. 
+ Transactions and WATCH diff --git a/docs/threads.rst b/docs/threads.rst deleted file mode 100644 index 790db8c7..00000000 --- a/docs/threads.rst +++ /dev/null @@ -1,57 +0,0 @@ -Threaded Pipeline -================= - -Redis cluster optionally supports parallel execution of pipelined commands to reduce latency of pipelined requests via threads. - - -Rationale ---------- - -When pipelining a bunch of commands to the cluster, many of the commands may be routed to different nodes in the cluster. The client-server design in redis-cluster dictates that the client communicates directly with each node in the cluster rather than treating each node as a homogenous group. - -The advantage to this design is that a smart client can communicate with the cluster with the same latency characteristics as it might communicate with a single-instance redis cluster. But only if the client can communicate with each node in parallel. - - - -Parallel network i/o using threads ----------------------------------- - -That's pretty good. But we are still issuing those 3 network requests in serial order. The code loops through each node and issues a request, then gets the response, then issues the next one. - -We improve the situation by using python threads, making each request in parallel over the network. Now we are only as slow as the slowest single request. - -### Disabling Threads -You can disable threaded execution either in the class constructor: - -.. code-block:: python - - r = rediscluster.RedisCluster( ... pipeline_use_threads=False) #true by default - pipe = r.pipeline() - -Or you can disable it on a case by case basis as you instantiate the pipeline object. - -.. code-block:: python - - pipe = r.pipeline(use_threads=False) - -The later example always overrides if explicitly set. Otherwise, it falls back on the value passed to the RedisCluster constructor. - - - -Footnote: Gevent ----------------- - -Python offers something even more lightweight and efficient than threads to perform tasks in parallel: GEVENT. - -You can read up more about gevent here: http://www.gevent.org/ - -If you want to try to get the benefits of gevent in redis-py-cluster, you can monkey patch your code with the following lines at the very beginning of your application: - -.. code-block:: python - - import os - os.environ["GEVENT_RESOLVER"] = "ares" - import gevent.monkey - gevent.monkey.patch_all() - -This will patch the python socket code, threaded libraries, and dns resolution into a single threaded application substituting coroutines for parallel threads. From 3fb29e90ff61aa71465431f70712e7aa4f4fb83c Mon Sep 17 00:00:00 2001 From: Grokzen Date: Sun, 11 Aug 2019 17:57:51 +0200 Subject: [PATCH 118/263] Update some text and reformat layout of some text blocks in index.rst --- docs/index.rst | 32 +++++++++++++++++++++++++------- 1 file changed, 25 insertions(+), 7 deletions(-) diff --git a/docs/index.rst b/docs/index.rst index 70396bb5..5b330c91 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -6,8 +6,9 @@ Welcome to redis-py-cluster's documentation! ============================================ -This project is a port of `redis-rb-cluster` by antirez, with a lot of added functionality. The original source can be found at https://github.com/antirez/redis-rb-cluster. +This project is a port of `redis-rb-cluster` by antirez, with a lot of added functionality. +The original source can be found at https://github.com/antirez/redis-rb-cluster. The source code is `available on github`_. 
@@ -52,18 +53,29 @@ Small sample script that shows how to get started with RedisCluster. It can also >>> print(rc.get("foo")) 'bar' - .. note:: Python 3 - Since Python 3 changed to Unicode strings from Python 2's ASCII, the return type of *most* commands will be binary strings, unless the class is instantiated with the option ``decode_responses=True``. In this case, the responses will be Python 3 strings (Unicode). For the init argument `decode_responses`, when set to False, redis-py-cluster will not attempt to decode the responses it receives. In Python 3, this means the responses will be of type `bytes`. In Python 2, they will be native strings (`str`). If `decode_responses` is set to True, for Python 3 responses will be `str`, for Python 2 they will be `unicode`. + Since Python 3 changed to Unicode strings from Python 2's ASCII, the return type of *most* commands will be binary strings, + unless the class is instantiated with the option ``decode_responses=True``. + + In this case, the responses will be Python 3 strings (Unicode). + + For the init argument `decode_responses`, when set to False, redis-py-cluster will not attempt to decode the responses it receives. + + In Python 3, this means the responses will be of type `bytes`. In Python 2, they will be native strings (`str`). + + If `decode_responses` is set to True, for Python 3 responses will be `str`, for Python 2 they will be `unicode`. + + Dependencies & supported python versions ---------------------------------------- -- Python: redis >= `2.10.2`, <= `2.10.5` is required. - Older versions in the `2.10.x` series can work but using the latest one is allways recommended. +It is always recommended to use the latest version of the dependencies of this project. + +- Redis-py: 'redis>=3.0.0,<3.1.0' is required in this major version of this cluster lib. - Optional Python: hiredis >= `0.2.0`. Older versions might work but is not tested. -- A working Redis cluster based on version >= `3.0.0` is required. Only `3.0.x` releases is supported. +- A working Redis cluster based on version `>=3.0.0` is required. @@ -78,7 +90,13 @@ Supported python versions .. note:: Python 3.4.0 - A segfault was found when running `redis-py` in python `3.4.0` that was introduced into the codebase in python `3.4.0`. Because of this both `redis-py` and `redis-py-cluster` will not work when running with `3.4.0`. This lib has decided to block the lib from execution on `3.4.0` and you will get a exception when trying to import the code. The only solution is to use python `3.4.1` or some other higher minor version in the `3.4` series. + A segfault was found when running `redis-py` in python `3.4.0` that was introduced into the codebase in python `3.4.0`. + + Because of this both `redis-py` and `redis-py-cluster` will not work when running with `3.4.0`. + + This lib has decided to block the lib from execution on `3.4.0` and you will get a exception when trying to import the code. + + The only solution is to use python `3.4.1` or some other higher minor version in the `3.4` series. From f14e7735d8cd8d1c1adeb2415161cc86a3a00558 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Mon, 12 Aug 2019 00:29:53 +0200 Subject: [PATCH 119/263] Add a better exception message to get_master_node_by_slot() in the case where a full cluster is not initialied yet. 
Fixes #288 --- rediscluster/connection.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/rediscluster/connection.py b/rediscluster/connection.py index 62a89c91..c8798740 100644 --- a/rediscluster/connection.py +++ b/rediscluster/connection.py @@ -308,7 +308,7 @@ def get_connection_by_slot(self, slot): try: return self.get_connection_by_node(self.get_node_by_slot(slot)) - except KeyError: + except (KeyError, RedisClusterException) as exc: return self.get_random_connection() def get_connection_by_node(self, node): @@ -331,7 +331,12 @@ def get_connection_by_node(self, node): def get_master_node_by_slot(self, slot): """ """ - return self.nodes.slots[slot][0] + try: + return self.nodes.slots[slot][0] + except KeyError as ke: + raise RedisClusterException('Slot "{slot}" not covered by the cluster. "skip_full_coverage_check={skip_full_coverage_check}"'.format( + slot=slot, skip_full_coverage_check=self.nodes._skip_full_coverage_check, + )) def get_node_by_slot(self, slot, *args, **kwargs): """ From d5a1b703b08ea6c3dd32cd654f9f087618b1e26c Mon Sep 17 00:00:00 2001 From: Grokzen Date: Mon, 12 Aug 2019 00:35:13 +0200 Subject: [PATCH 120/263] Add python 3.7 to the compatible python version classifier list --- setup.py | 1 + 1 file changed, 1 insertion(+) diff --git a/setup.py b/setup.py index 4d8fcca0..00d2d82f 100644 --- a/setup.py +++ b/setup.py @@ -60,6 +60,7 @@ 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', + 'Programming Language :: Python :: 3.7', 'Environment :: Web Environment', 'Operating System :: POSIX', 'License :: OSI Approved :: MIT License', From db5235dfa229469a98f721a1b536555afc103d93 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Mon, 12 Aug 2019 00:37:00 +0200 Subject: [PATCH 121/263] Update default REDIS_VERSION in Makefile to be 5.0.5 --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 0c2e1766..563494de 100644 --- a/Makefile +++ b/Makefile @@ -216,7 +216,7 @@ ifndef REDIS_TRIB_RB endif ifndef REDIS_VERSION - REDIS_VERSION=4.0.10 + REDIS_VERSION=5.0.5 endif export REDIS_CLUSTER_NODE1_CONF From c46913b169e5dc5df615024b8d3d7bcf34230bdd Mon Sep 17 00:00:00 2001 From: Grokzen Date: Mon, 12 Aug 2019 00:51:29 +0200 Subject: [PATCH 122/263] Minor updates to index.rst to make text flow and read a bit better. Add some minor additional details to some parts to clearify some certain things. --- docs/index.rst | 34 +++++++++++++++++++++------------- 1 file changed, 21 insertions(+), 13 deletions(-) diff --git a/docs/index.rst b/docs/index.rst index 5b330c91..49e38c1a 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -10,7 +10,7 @@ This project is a port of `redis-rb-cluster` by antirez, with a lot of added fun The original source can be found at https://github.com/antirez/redis-rb-cluster. -The source code is `available on github`_. +The source code for this project is `available on github`_. .. _available on github: http://github.com/grokzen/redis-py-cluster @@ -33,10 +33,12 @@ or from source code -Usage example +Basic usage example ------------- -Small sample script that shows how to get started with RedisCluster. It can also be found in the file `exmaples/basic.py` +Small sample script that shows how to get started with RedisCluster. It can also be found in the file `exmaples/basic.py`. + +Additional code examples of more advance functionality can be found in the `examples/` folder in the source code git repo. .. 
code-block:: python @@ -68,8 +70,8 @@ Small sample script that shows how to get started with RedisCluster. It can also -Dependencies & supported python versions ----------------------------------------- +Library Dependencies +-------------------- It is always recommended to use the latest version of the dependencies of this project. @@ -82,15 +84,19 @@ It is always recommended to use the latest version of the dependencies of this p Supported python versions ------------------------- -- 2.7 +Python versions should follow the same supported python versions as specificed by the upstream package `redis-py`, based on what major version(s) that is specified. + +If this library supports more then one major version line of `redis-py`, then the supported python versions must include the set of supported python versions by all major version lines. + +- 2.7.x - 3.4.1+ (See note) -- 3.5 -- 3.6 -- 3.7 +- 3.5.x +- 3.6.x +- 3.7.x .. note:: Python 3.4.0 - A segfault was found when running `redis-py` in python `3.4.0` that was introduced into the codebase in python `3.4.0`. + A segfault was found when running `redis-py` in python `3.4.0` that was introduced into the codebase in python itself in the version `3.4.0`. Because of this both `redis-py` and `redis-py-cluster` will not work when running with `3.4.0`. @@ -98,14 +104,16 @@ Supported python versions The only solution is to use python `3.4.1` or some other higher minor version in the `3.4` series. + When python `3.8.0` is released and when it is added to as a supported pythoon version, python 3.4.x will be removed from supported versions and this hard block will be removed from the source code. + -Regarding duplicate pypi and python naming ------------------------------------------- +Regarding duplicate package name on pypi +---------------------------------------- It has been found that the python module name that is used in this library (rediscluster) is already shared with a similar but older project. -This lib will not change the naming of the module to something else to prevent collisions between the libs. +This lib will `NOT` change the naming of the module to something else to prevent collisions between the libs. My reasoning for this is the following From a59784941c997792d2348d7242243042f6e0c50f Mon Sep 17 00:00:00 2001 From: Grokzen Date: Mon, 12 Aug 2019 01:29:29 +0200 Subject: [PATCH 123/263] Write release notes for 2.0.0 release and added upgrade instructions for the release --- docs/release-notes.rst | 19 +++++++++++++++++++ docs/upgrading.rst | 14 ++++++++++++++ 2 files changed, 33 insertions(+) diff --git a/docs/release-notes.rst b/docs/release-notes.rst index e0c89498..c5579bdf 100644 --- a/docs/release-notes.rst +++ b/docs/release-notes.rst @@ -1,6 +1,25 @@ Release Notes ============= +2.0.0 (Aug 12, 2019) + +Specific changes to redis-py-cluster is mentioned below here. + + * Update entire code base to now support all redis-py version in the 3.0.x version line. Any future redis-py version will be supported at a later time. + * Major update to all tests to mirror the code of the same tests from redis-py + * Dropped support for the 2.10.6 redis-py release. 
+ * Add pythoncodestyle lint validation check to travis-ci runs to check for proper linting before accepting PR:s + * Class StrictRedisCluster was renamed to RedisCluster + * Class StrictRedis has been removed to mirror upstream class structure + * Class StrictClusterPipeline was renamed to ClusterPipeline + * Fixed travis-ci tests not running properly on python 3.7 + * Fixed documentation regarding threads in pipelines + * Update lit of command callbacks and parsers. Added in "CLIENT ID" + * Removed custom implementation of SORT and revert back to use same-slot mechanism for that command. + * Added better exception message to get_master_node_by_slot command to help the user understand the error. + * Improved the exception object message parsing when running on python3 + + 1.3.6 (Nov 16, 2018) -------------------- diff --git a/docs/upgrading.rst b/docs/upgrading.rst index 66d20c7d..fa7cedd5 100644 --- a/docs/upgrading.rst +++ b/docs/upgrading.rst @@ -3,6 +3,20 @@ Upgrading redis-py-cluster This document describes what must be done when upgrading between different versions to ensure that code still works. +1.3.x --> 2.0.0 +--------------- + +Redis-py upstream package dependency has now been updated to be any of the releases in the major version line 3.0.x. This means that you must upgrade your dependency from 2.10.6 to the latest version. Several internal components have been updated to reflect the code from 3.0.x. + +Class StrictRedisCluster was renamed to RedisCluster. All usages of this class must be updated. + +Class StrictRedis has been removed to mirror upstream class structure. + +Class StrictClusterPipeline was renamed to ClusterPipeline. + +Method SORT has been changed back to only allow to be executed if keys is in the same slot. No more client side parsing and handling of the keys and values. + + 1.3.2 --> Next Release ---------------------- From 50894a6225f120c5b7089d1f46c578af4bb766dc Mon Sep 17 00:00:00 2001 From: Grokzen Date: Sat, 14 Sep 2019 18:26:48 +0200 Subject: [PATCH 124/263] Add line to readme about stabilty of master branch --- README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.md b/README.md index ce98a95d..1ac694be 100644 --- a/README.md +++ b/README.md @@ -6,6 +6,8 @@ This project is a port of `redis-rb-cluster` by antirez, with alot of added func [![Build Status](https://travis-ci.org/Grokzen/redis-py-cluster.svg?branch=master)](https://travis-ci.org/Grokzen/redis-py-cluster) [![Coverage Status](https://coveralls.io/repos/Grokzen/redis-py-cluster/badge.png)](https://coveralls.io/r/Grokzen/redis-py-cluster) [![PyPI version](https://badge.fury.io/py/redis-py-cluster.svg)](http://badge.fury.io/py/redis-py-cluster) +The branch `master` will always contain the latest unstable/development code that has been merged from Pull Requests. Use the latest commit from master branch on your own risk, there is no guarantees of compatibility or stability of non tagged commits on the master branch. Only tagged releases on the `master` branch is considered stable for use. + # Documentation From 2fb0b1815901882749e62fa09d919756366a7914 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Sat, 14 Sep 2019 18:35:09 +0200 Subject: [PATCH 125/263] Update the requirements for redis-py to allow for any release up to and including the latest 3.3.x release of redis-py. 
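The 1.3.x --> 2.0.0 upgrade notes above amount to a couple of mechanical renames on the application side. A minimal before/after sketch of the change (assuming a cluster node reachable on 127.0.0.1:7000; adjust the startup nodes to your own environment):

.. code-block:: python

    # redis-py-cluster 1.3.x
    from rediscluster import StrictRedisCluster

    rc = StrictRedisCluster(startup_nodes=[{"host": "127.0.0.1", "port": "7000"}])
    pipe = rc.pipeline()  # was a StrictClusterPipeline

    # redis-py-cluster 2.0.0
    from rediscluster import RedisCluster

    rc = RedisCluster(startup_nodes=[{"host": "127.0.0.1", "port": "7000"}])
    pipe = rc.pipeline()  # now a ClusterPipeline
    pipe.set("foo", "bar")
    pipe.get("foo")
    print(pipe.execute())  # [True, b'bar']

Apart from the renamed classes the calling code stays the same; only the imports and any isinstance checks against the old class names need updating.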
Fixes #326 --- README.md | 2 +- docs/release-notes.rst | 4 ++++ requirements.txt | 2 +- setup.py | 2 +- 4 files changed, 7 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 1ac694be..213532fe 100644 --- a/README.md +++ b/README.md @@ -30,7 +30,7 @@ Latest stable release from pypi $ pip install redis-py-cluster ``` -This major version of `redis-py-cluster` supports `redis-py>=3.0.0,<3.1.0`. +This major version of `redis-py-cluster` supports `redis-py>=3.0.0,<3.4.0`. diff --git a/docs/release-notes.rst b/docs/release-notes.rst index c5579bdf..a9f630bb 100644 --- a/docs/release-notes.rst +++ b/docs/release-notes.rst @@ -1,6 +1,10 @@ Release Notes ============= +2.1.0 (xxx yy, 2019) + + * Updated redis-py compatbile version to support any version in the major version 3.0.x, 3.1.x, 3.2.x, 3.3.x. (#326) + 2.0.0 (Aug 12, 2019) Specific changes to redis-py-cluster is mentioned below here. diff --git a/requirements.txt b/requirements.txt index 53e431b5..82404bae 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1 +1 @@ -redis>=3.0.0,<3.1.0 +redis>=3.0.0,<3.4.0 diff --git a/setup.py b/setup.py index 00d2d82f..b62ed733 100644 --- a/setup.py +++ b/setup.py @@ -32,7 +32,7 @@ url='http://github.com/grokzen/redis-py-cluster', license='MIT', install_requires=[ - 'redis>=3.0.0,<3.1.0' + 'redis>=3.0.0,<3.4.0' ], python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*", extras_require={ From 17c4e068c5756f0b32f5cebaa3b48cf7f0f702b4 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Sat, 14 Sep 2019 18:43:29 +0200 Subject: [PATCH 126/263] Add python 3.7 to travis-ci tests --- .travis.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.travis.yml b/.travis.yml index 8b0165c6..35cdc2ca 100644 --- a/.travis.yml +++ b/.travis.yml @@ -7,6 +7,7 @@ python: - "3.4" - "3.5" - "3.6" + - "3.7" - "nightly" services: - redis-server From 3e26a85cfbae79c56ee459f2f4746080c30beade Mon Sep 17 00:00:00 2001 From: Grokzen Date: Sat, 14 Sep 2019 18:47:56 +0200 Subject: [PATCH 127/263] Bump version of lib to 2.1.0 that will be the future package version --- rediscluster/__init__.py | 4 ++-- setup.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/rediscluster/__init__.py b/rediscluster/__init__.py index 7bcc442a..72798f6e 100644 --- a/rediscluster/__init__.py +++ b/rediscluster/__init__.py @@ -15,7 +15,7 @@ setattr(redis, "ClusterPipeline", ClusterPipeline) # Major, Minor, Fix version -__version__ = (2, 0, 0) +__version__ = (2, 1, 0) def int_or_str(value): try: @@ -24,7 +24,7 @@ def int_or_str(value): return value -__version__ = '2.0.0' +__version__ = '2.1.0' VERSION = tuple(map(int_or_str, __version__.split('.'))) if sys.version_info[0:3] == (3, 4, 0): diff --git a/setup.py b/setup.py index b62ed733..ffbc2408 100644 --- a/setup.py +++ b/setup.py @@ -20,7 +20,7 @@ setup( name="redis-py-cluster", - version="2.0.0", + version="2.1.0", description="Library for communicating with Redis Clusters. Built on top of redis-py lib", long_description=readme + '\n\n' + history, long_description_content_type="text/markdown", From 5f6b8ab65f32207ad27db1537bf7f269fdda4c8b Mon Sep 17 00:00:00 2001 From: Grokzen Date: Sat, 14 Sep 2019 18:58:36 +0200 Subject: [PATCH 128/263] Remove the usage of Token class. Fixed import error in connection.py. 
Travis-ci tests now works but still has plenty of errors --- rediscluster/client.py | 15 +++++++-------- rediscluster/nodemanager.py | 2 +- 2 files changed, 8 insertions(+), 9 deletions(-) diff --git a/rediscluster/client.py b/rediscluster/client.py index ff77648c..dd8f0ee8 100644 --- a/rediscluster/client.py +++ b/rediscluster/client.py @@ -36,7 +36,6 @@ # 3rd party imports from redis import Redis from redis.client import list_or_args, parse_info -from redis.connection import Token from redis._compat import iteritems, basestring, izip, nativestr, long from redis.exceptions import RedisError, ResponseError, TimeoutError, DataError, ConnectionError, BusyLoadingError @@ -536,7 +535,7 @@ def cluster_failover(self, node_id, option): Sends to specefied node """ assert option.upper() in ('FORCE', 'TAKEOVER') # TODO: change this option handling - return self.execute_command('CLUSTER FAILOVER', Token(option)) + return self.execute_command('CLUSTER FAILOVER', option) def cluster_info(self): """ @@ -587,7 +586,7 @@ def cluster_reset(self, node_id, soft=True): Sends to specefied node """ - return self.execute_command('CLUSTER RESET', Token('SOFT' if soft else 'HARD'), node_id=node_id) + return self.execute_command('CLUSTER RESET', b'SOFT' if soft else b'HARD', node_id=node_id) def cluster_reset_all_nodes(self, soft=True): """ @@ -601,7 +600,7 @@ def cluster_reset_all_nodes(self, soft=True): return [ self.execute_command( 'CLUSTER RESET', - Token('SOFT' if soft else 'HARD'), + 'SOFT' if soft else 'HARD', node_id=node['id'], ) for node in self.cluster_nodes() @@ -637,9 +636,9 @@ def cluster_setslot(self, node_id, slot_id, state, bind_to_node_id=None): Sends to specefied node """ if state.upper() in ('IMPORTING', 'MIGRATING', 'NODE') and node_id is not None: - return self.execute_command('CLUSTER SETSLOT', slot_id, Token(state), node_id) + return self.execute_command('CLUSTER SETSLOT', slot_id, state, node_id) elif state.upper() == 'STABLE': - return self.execute_command('CLUSTER SETSLOT', slot_id, Token('STABLE')) + return self.execute_command('CLUSTER SETSLOT', slot_id, 'STABLE') else: raise RedisError('Invalid slot state: {0}'.format(state)) @@ -695,9 +694,9 @@ def scan_iter(self, match=None, count=None): pieces = ['SCAN', cursors[node]] if match is not None: - pieces.extend([Token('MATCH'), match]) + pieces.extend([b'MATCH', match]) if count is not None: - pieces.extend([Token('COUNT'), count]) + pieces.extend([b'COUNT', count]) conn.send_command(*pieces) diff --git a/rediscluster/nodemanager.py b/rediscluster/nodemanager.py index e46bd6b1..90f95693 100644 --- a/rediscluster/nodemanager.py +++ b/rediscluster/nodemanager.py @@ -9,7 +9,7 @@ # 3rd party imports from redis import Redis -from redis._compat import unicode, bytes, long, basestring +from redis._compat import unicode, long, basestring from redis.connection import Encoder from redis import ConnectionError, TimeoutError, ResponseError From b13613344700799d71b01624a9273ea24a0beadb Mon Sep 17 00:00:00 2001 From: Grokzen Date: Sun, 22 Sep 2019 21:27:43 +0200 Subject: [PATCH 129/263] Update test functions in test_pubsub.py to match latest redis-py code --- tests/test_pubsub.py | 128 ++++++++++++++++++++++++++++++++++++++----- 1 file changed, 113 insertions(+), 15 deletions(-) diff --git a/tests/test_pubsub.py b/tests/test_pubsub.py index 4d49d626..5d113769 100644 --- a/tests/test_pubsub.py +++ b/tests/test_pubsub.py @@ -216,6 +216,54 @@ def test_ignore_individual_subscribe_messages(self, r): assert message is None assert p.subscribed is False + 
@pytest.mark.xfail(reason="Pubsub is not fully supported in cluster mode") + def test_sub_unsub_resub_channels(self, r): + kwargs = make_subscribe_test_data(r.pubsub(), 'channel') + self._test_sub_unsub_resub(**kwargs) + + @pytest.mark.xfail(reason="Pubsub is not fully supported in cluster mode") + def test_sub_unsub_resub_patterns(self, r): + kwargs = make_subscribe_test_data(r.pubsub(), 'pattern') + self._test_sub_unsub_resub(**kwargs) + + @pytest.mark.xfail(reason="Pubsub is not fully supported in cluster mode") + def _test_sub_unsub_resub(self, p, sub_type, unsub_type, sub_func, + unsub_func, keys): + # https://github.com/andymccurdy/redis-py/issues/764 + key = keys[0] + sub_func(key) + unsub_func(key) + sub_func(key) + assert p.subscribed is True + assert wait_for_message(p) == make_message(sub_type, key, 1) + assert wait_for_message(p) == make_message(unsub_type, key, 0) + assert wait_for_message(p) == make_message(sub_type, key, 1) + assert p.subscribed is True + + @pytest.mark.xfail(reason="Pubsub is not fully supported in cluster mode") + def test_sub_unsub_all_resub_channels(self, r): + kwargs = make_subscribe_test_data(r.pubsub(), 'channel') + self._test_sub_unsub_all_resub(**kwargs) + + @pytest.mark.xfail(reason="Pubsub is not fully supported in cluster mode") + def test_sub_unsub_all_resub_patterns(self, r): + kwargs = make_subscribe_test_data(r.pubsub(), 'pattern') + self._test_sub_unsub_all_resub(**kwargs) + + @pytest.mark.xfail(reason="Pubsub is not fully supported in cluster mode") + def _test_sub_unsub_all_resub(self, p, sub_type, unsub_type, sub_func, + unsub_func, keys): + # https://github.com/andymccurdy/redis-py/issues/764 + key = keys[0] + sub_func(key) + unsub_func() + sub_func(key) + assert p.subscribed is True + assert wait_for_message(p) == make_message(sub_type, key, 1) + assert wait_for_message(p) == make_message(unsub_type, key, 0) + assert wait_for_message(p) == make_message(sub_type, key, 1) + assert p.subscribed is True + class TestPubSubMessages(object): """ @@ -237,8 +285,9 @@ def message_handler(self, message): def test_published_message_to_channel(self): node = self.get_strict_redis_node(7000) - p = node.pubsub(ignore_subscribe_messages=True) + p = node.pubsub() p.subscribe('foo') + assert wait_for_message(p) == make_message('subscribe', 'foo', 1) assert node.publish('foo', 'test message') == 1 @@ -271,9 +320,11 @@ def test_publish_message_to_channel_other_server(self): @pytest.mark.xfail(reason="Pattern pubsub do not work currently") def test_published_message_to_pattern(self, r): - p = r.pubsub(ignore_subscribe_messages=True) + p = r.pubsub() p.subscribe('foo') p.psubscribe('f*') + assert wait_for_message(p) == make_message('subscribe', 'foo', 1) + assert wait_for_message(p) == make_message('psubscribe', 'f*', 2) # 1 to pattern, 1 to channel assert r.publish('foo', 'test message') == 2 @@ -294,6 +345,7 @@ def test_published_message_to_pattern(self, r): def test_channel_message_handler(self, r): p = r.pubsub(ignore_subscribe_messages=True) p.subscribe(foo=self.message_handler) + assert wait_for_message(p) is None assert r.publish('foo', 'test message') == 1 assert wait_for_message(p) is None assert self.message == make_message('message', 'foo', 'test message') @@ -302,6 +354,7 @@ def test_channel_message_handler(self, r): def test_pattern_message_handler(self, r): p = r.pubsub(ignore_subscribe_messages=True) p.psubscribe(**{'f*': self.message_handler}) + assert wait_for_message(p) is None assert r.publish('foo', 'test message') == 1 assert 
wait_for_message(p) is None assert self.message == make_message('pmessage', 'foo', 'test message', @@ -314,6 +367,8 @@ def test_unicode_channel_message_handler(self, r): channels = {channel: self.message_handler} print(channels) p.subscribe(**channels) + assert wait_for_message(p) is None + assert wait_for_message(p) is None assert r.publish(channel, 'test message') == 1 assert wait_for_message(p) is None assert self.message == make_message('message', channel, 'test message') @@ -372,19 +427,23 @@ def test_pattern_subscribe_unsubscribe(self, o): assert wait_for_message(p) == self.make_message('punsubscribe', self.pattern, 0) - def test_channel_publish(self, o): - p = o.pubsub(ignore_subscribe_messages=True) + def test_channel_publish(self, r): + p = r.pubsub() p.subscribe(self.channel) - o.publish(self.channel, self.data) + assert wait_for_message(p) == self.make_message('subscribe', + self.channel, 1) + r.publish(self.channel, self.data) assert wait_for_message(p) == self.make_message('message', self.channel, self.data) @pytest.mark.xfail(reason="Pattern pubsub do not work currently") - def test_pattern_publish(self, o): - p = o.pubsub(ignore_subscribe_messages=True) + def test_pattern_publish(self, r): + p = r.pubsub() p.psubscribe(self.pattern) - o.publish(self.channel, self.data) + assert wait_for_message(p) == self.make_message('psubscribe', + self.pattern, 1) + r.publish(self.channel, self.data) assert wait_for_message(p) == self.make_message('pmessage', self.channel, self.data, @@ -399,6 +458,7 @@ def test_channel_message_handler(self, o): self.data) # test that we reconnected to the correct channel + self.message = None p.connection.disconnect() assert wait_for_message(p) is None # should reconnect new_data = self.data + 'new data' @@ -411,6 +471,7 @@ def test_channel_message_handler(self, o): def test_pattern_message_handler(self, o): p = o.pubsub(ignore_subscribe_messages=True) p.psubscribe(**{self.pattern: self.message_handler}) + assert wait_for_message(p) is None o.publish(self.channel, self.data) assert wait_for_message(p) is None assert self.message == self.make_message('pmessage', self.channel, @@ -418,6 +479,7 @@ def test_pattern_message_handler(self, o): pattern=self.pattern) # test that we reconnected to the correct pattern + self.message = None p.connection.disconnect() assert wait_for_message(p) is None # should reconnect new_data = self.data + 'new data' @@ -478,25 +540,35 @@ def t_run(rc): print("Error: unable to start thread") -class TestPubSubPubSubSubcommands(object): +class TestPubSubSubcommands(object): @skip_if_server_version_lt('2.8.0') @pytest.mark.xfail(reason="Pubsub is not fully supported in cluster mode") def test_pubsub_channels(self, r): - p = r.pubsub(ignore_subscribe_messages=True) + p = r.pubsub() p.subscribe('foo', 'bar', 'baz', 'quux') + for i in range(4): + assert wait_for_message(p)['type'] == 'subscribe' channels = sorted(r.pubsub_channels()) - assert channels == [b'bar', b'baz', b'foo', b'quux'] + # assert channels == [b'bar', b'baz', b'foo', b'quux'] + if channels != [b'bar', b'baz', b'foo', b'quux']: + import pdb + pdb.set_trace() @skip_if_server_version_lt('2.8.0') @pytest.mark.xfail(reason="Pubsub is not fully supported in cluster mode") def test_pubsub_numsub(self, r): - p1 = r.pubsub(ignore_subscribe_messages=True) + p1 = r.pubsub() p1.subscribe('foo', 'bar', 'baz') - p2 = r.pubsub(ignore_subscribe_messages=True) + for i in range(3): + assert wait_for_message(p1)['type'] == 'subscribe' + p2 = r.pubsub() p2.subscribe('bar', 'baz') - p3 = 
r.pubsub(ignore_subscribe_messages=True) + for i in range(2): + assert wait_for_message(p2)['type'] == 'subscribe' + p3 = r.pubsub() p3.subscribe('baz') + assert wait_for_message(p3)['type'] == 'subscribe' channels = [(b'foo', 1), (b'bar', 2), (b'baz', 3)] assert channels == r.pubsub_numsub('foo', 'bar', 'baz') @@ -504,8 +576,10 @@ def test_pubsub_numsub(self, r): @skip_if_server_version_lt('2.8.0') @pytest.mark.xfail(reason="Pubsub is not fully supported in cluster mode") def test_pubsub_numpat(self, r): - p = r.pubsub(ignore_subscribe_messages=True) + p = r.pubsub() p.psubscribe('*oo', '*ar', 'b*z') + for i in range(3): + assert wait_for_message(p)['type'] == 'psubscribe' assert r.pubsub_numpat() == 3 @@ -530,3 +604,27 @@ def test_send_pubsub_ping_message(self, r): assert wait_for_message(p) == make_message(type='pong', channel=None, data='hello world', pattern=None) + + +class TestPubSubConnectionKilled(object): + + @skip_if_server_version_lt('3.0.0') + @pytest.mark.xfail(reason="Pubsub is not fully supported in cluster mode") + def test_connection_error_raised_when_connection_dies(self, r): + p = r.pubsub() + p.subscribe('foo') + assert wait_for_message(p) == make_message('subscribe', 'foo', 1) + for client in r.client_list(): + if client['cmd'] == 'subscribe': + r.client_kill_filter(_id=client['id']) + with pytest.raises(ConnectionError): + wait_for_message(p) + + +class TestPubSubTimeouts(object): + @pytest.mark.xfail(reason="Pubsub is not fully supported in cluster mode") + def test_get_message_with_timeout_returns_none(self, r): + p = r.pubsub() + p.subscribe('foo') + assert wait_for_message(p) == make_message('subscribe', 'foo', 1) + assert p.get_message(timeout=0.01) is None From 1e1591f0742701fdf21676291a11491ea67b1338 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Sun, 22 Sep 2019 21:31:56 +0200 Subject: [PATCH 130/263] Add the brand new test_multiprocessing.py test file --- tests/test_multiprocessing.py | 166 ++++++++++++++++++++++++++++++++++ 1 file changed, 166 insertions(+) create mode 100644 tests/test_multiprocessing.py diff --git a/tests/test_multiprocessing.py b/tests/test_multiprocessing.py new file mode 100644 index 00000000..17bdcc26 --- /dev/null +++ b/tests/test_multiprocessing.py @@ -0,0 +1,166 @@ +import pytest +import multiprocessing +import contextlib + +import redis +from redis.connection import Connection, ConnectionPool +from redis.exceptions import ConnectionError + +from .conftest import _get_client + + +@contextlib.contextmanager +def exit_callback(callback, *args): + try: + yield + finally: + callback(*args) + + +class TestMultiprocessing(object): + # Test connection sharing between forks. + # See issue #1085 for details. + + # use a multi-connection client as that's the only type that is + # actuall fork/process-safe + @pytest.fixture() + def r(self, request): + return _get_client( + redis.Redis, + request=request, + single_connection_client=False) + + def test_close_connection_in_child(self): + """ + A connection owned by a parent and closed by a child doesn't + destroy the file descriptors so a parent can still use it. + """ + conn = Connection() + conn.send_command('ping') + assert conn.read_response() == b'PONG' + + def target(conn): + conn.send_command('ping') + assert conn.read_response() == b'PONG' + conn.disconnect() + + proc = multiprocessing.Process(target=target, args=(conn,)) + proc.start() + proc.join(3) + assert proc.exitcode is 0 + + # The connection was created in the parent but disconnected in the + # child. 
The child called socket.close() but did not call + # socket.shutdown() because it wasn't the "owning" process. + # Therefore the connection still works in the parent. + conn.send_command('ping') + assert conn.read_response() == b'PONG' + + def test_close_connection_in_parent(self): + """ + A connection owned by a parent is unusable by a child if the parent + (the owning process) closes the connection. + """ + conn = Connection() + conn.send_command('ping') + assert conn.read_response() == b'PONG' + + def target(conn, ev): + ev.wait() + # the parent closed the connection. because it also created the + # connection, the connection is shutdown and the child + # cannot use it. + with pytest.raises(ConnectionError): + conn.send_command('ping') + + ev = multiprocessing.Event() + proc = multiprocessing.Process(target=target, args=(conn, ev)) + proc.start() + + conn.disconnect() + ev.set() + + proc.join(3) + assert proc.exitcode is 0 + + @pytest.mark.parametrize('max_connections', [1, 2, None]) + def test_pool(self, max_connections): + """ + A child will create its own connections when using a pool created + by a parent. + """ + pool = ConnectionPool.from_url('redis://localhost', + max_connections=max_connections) + + conn = pool.get_connection('ping') + main_conn_pid = conn.pid + with exit_callback(pool.release, conn): + conn.send_command('ping') + assert conn.read_response() == b'PONG' + + def target(pool): + with exit_callback(pool.disconnect): + conn = pool.get_connection('ping') + assert conn.pid != main_conn_pid + with exit_callback(pool.release, conn): + assert conn.send_command('ping') is None + assert conn.read_response() == b'PONG' + + proc = multiprocessing.Process(target=target, args=(pool,)) + proc.start() + proc.join(3) + assert proc.exitcode is 0 + + # Check that connection is still alive after fork process has exited + # and disconnected the connections in its pool + conn = pool.get_connection('ping') + with exit_callback(pool.release, conn): + assert conn.send_command('ping') is None + assert conn.read_response() == b'PONG' + + @pytest.mark.parametrize('max_connections', [1, 2, None]) + def test_close_pool_in_main(self, max_connections): + """ + A child process that uses the same pool as its parent isn't affected + when the parent disconnects all connections within the pool. 
+ """ + pool = ConnectionPool.from_url('redis://localhost', + max_connections=max_connections) + + conn = pool.get_connection('ping') + assert conn.send_command('ping') is None + assert conn.read_response() == b'PONG' + + def target(pool, disconnect_event): + conn = pool.get_connection('ping') + with exit_callback(pool.release, conn): + assert conn.send_command('ping') is None + assert conn.read_response() == b'PONG' + disconnect_event.wait() + assert conn.send_command('ping') is None + assert conn.read_response() == b'PONG' + + ev = multiprocessing.Event() + + proc = multiprocessing.Process(target=target, args=(pool, ev)) + proc.start() + + pool.disconnect() + ev.set() + proc.join(3) + assert proc.exitcode is 0 + + def test_redis_client(self, r): + "A redis client created in a parent can also be used in a child" + assert r.ping() is True + + def target(client): + assert client.ping() is True + del client + + proc = multiprocessing.Process(target=target, args=(r,)) + proc.start() + proc.join(3) + assert proc.exitcode is 0 + + assert r.ping() is True From b69f2f74f746fba59fa85e7bc68994307fd1b86d Mon Sep 17 00:00:00 2001 From: Grokzen Date: Sun, 22 Sep 2019 21:35:39 +0200 Subject: [PATCH 131/263] Add new test method test_monitor.py --- tests/test_monitor.py | 58 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 58 insertions(+) create mode 100644 tests/test_monitor.py diff --git a/tests/test_monitor.py b/tests/test_monitor.py new file mode 100644 index 00000000..09ec21bd --- /dev/null +++ b/tests/test_monitor.py @@ -0,0 +1,58 @@ +from __future__ import unicode_literals +from redis._compat import unicode + + +def wait_for_command(client, monitor, command): + # issue a command with a key name that's local to this process. + # if we find a command with our key before the command we're waiting + # for, something went wrong + key = '__REDIS-PY-%s__' % str(client.client_id()) + client.get(key) + while True: + monitor_response = monitor.next_command() + if command in monitor_response['command']: + return monitor_response + if key in monitor_response['command']: + return None + + +class TestPipeline(object): + def test_wait_command_not_found(self, r): + "Make sure the wait_for_command func works when command is not found" + with r.monitor() as m: + response = wait_for_command(r, m, 'nothing') + assert response is None + + def test_response_values(self, r): + with r.monitor() as m: + r.ping() + response = wait_for_command(r, m, 'PING') + assert isinstance(response['time'], float) + assert response['db'] == 9 + assert response['client_type'] in ('tcp', 'unix') + assert isinstance(response['client_address'], unicode) + assert isinstance(response['client_port'], unicode) + assert response['command'] == 'PING' + + def test_command_with_quoted_key(self, r): + with r.monitor() as m: + r.get('foo"bar') + response = wait_for_command(r, m, 'GET foo"bar') + assert response['command'] == 'GET foo"bar' + + def test_command_with_binary_data(self, r): + with r.monitor() as m: + byte_string = b'foo\x92' + r.get(byte_string) + response = wait_for_command(r, m, 'GET foo\\x92') + assert response['command'] == 'GET foo\\x92' + + def test_lua_script(self, r): + with r.monitor() as m: + script = 'return redis.call("GET", "foo")' + assert r.eval(script, 0) is None + response = wait_for_command(r, m, 'GET foo') + assert response['command'] == 'GET foo' + assert response['client_type'] == 'lua' + assert response['client_address'] == 'lua' + assert response['client_port'] == '' From 
55e0497f259a50b3449f42b6d7c5f84f223b969c Mon Sep 17 00:00:00 2001 From: Grokzen Date: Sun, 22 Sep 2019 21:56:31 +0200 Subject: [PATCH 132/263] Add new test file test_encoding.py --- tests/test_encoding.py | 69 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 69 insertions(+) create mode 100644 tests/test_encoding.py diff --git a/tests/test_encoding.py b/tests/test_encoding.py new file mode 100644 index 00000000..3f430064 --- /dev/null +++ b/tests/test_encoding.py @@ -0,0 +1,69 @@ +from __future__ import unicode_literals +import pytest +import redis + +from redis._compat import unichr, unicode +from .conftest import _get_client + + +class TestEncoding(object): + @pytest.fixture() + def r(self, request): + return _get_client(redis.Redis, request=request, decode_responses=True) + + def test_simple_encoding(self, r): + unicode_string = unichr(3456) + 'abcd' + unichr(3421) + r['unicode-string'] = unicode_string + cached_val = r['unicode-string'] + assert isinstance(cached_val, unicode) + assert unicode_string == cached_val + + def test_list_encoding(self, r): + unicode_string = unichr(3456) + 'abcd' + unichr(3421) + result = [unicode_string, unicode_string, unicode_string] + r.rpush('a', *result) + assert r.lrange('a', 0, -1) == result + + +class TestEncodingErrors(object): + def test_ignore(self, request): + r = _get_client(redis.Redis, request=request, decode_responses=True, + encoding_errors='ignore') + r.set('a', b'foo\xff') + assert r.get('a') == 'foo' + + def test_replace(self, request): + r = _get_client(redis.Redis, request=request, decode_responses=True, + encoding_errors='replace') + r.set('a', b'foo\xff') + assert r.get('a') == 'foo\ufffd' + + +class TestCommandsAreNotEncoded(object): + @pytest.fixture() + def r(self, request): + return _get_client(redis.Redis, request=request, encoding='utf-16') + + def test_basic_command(self, r): + r.set('hello', 'world') + + +class TestInvalidUserInput(object): + def test_boolean_fails(self, r): + with pytest.raises(redis.DataError): + r.set('a', True) + + def test_none_fails(self, r): + with pytest.raises(redis.DataError): + r.set('a', None) + + def test_user_type_fails(self, r): + class Foo(object): + def __str__(self): + return 'Foo' + + def __unicode__(self): + return 'Foo' + + with pytest.raises(redis.DataError): + r.set('a', Foo()) From 98610a646c47c343dcdd0209b76bb83a39555929 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Sun, 22 Sep 2019 21:57:38 +0200 Subject: [PATCH 133/263] Add new test file test_lock.py --- tests/test_lock.py | 204 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 204 insertions(+) create mode 100644 tests/test_lock.py diff --git a/tests/test_lock.py b/tests/test_lock.py new file mode 100644 index 00000000..f92afeca --- /dev/null +++ b/tests/test_lock.py @@ -0,0 +1,204 @@ +import pytest +import time + +from redis.exceptions import LockError, LockNotOwnedError +from redis.client import Redis +from redis.lock import Lock +from .conftest import _get_client + + +class TestLock(object): + @pytest.fixture() + def r_decoded(self, request): + return _get_client(Redis, request=request, decode_responses=True) + + def get_lock(self, redis, *args, **kwargs): + kwargs['lock_class'] = Lock + return redis.lock(*args, **kwargs) + + def test_lock(self, r): + lock = self.get_lock(r, 'foo') + assert lock.acquire(blocking=False) + assert r.get('foo') == lock.local.token + assert r.ttl('foo') == -1 + lock.release() + assert r.get('foo') is None + + def test_lock_token(self, r): + lock = self.get_lock(r, 'foo') + assert 
lock.acquire(blocking=False, token='test') + assert r.get('foo') == b'test' + assert lock.local.token == b'test' + assert r.ttl('foo') == -1 + lock.release() + assert r.get('foo') is None + assert lock.local.token is None + + def test_locked(self, r): + lock = self.get_lock(r, 'foo') + assert lock.locked() is False + lock.acquire(blocking=False) + assert lock.locked() is True + lock.release() + assert lock.locked() is False + + def _test_owned(self, client): + lock = self.get_lock(client, 'foo') + assert lock.owned() is False + lock.acquire(blocking=False) + assert lock.owned() is True + lock.release() + assert lock.owned() is False + + lock2 = self.get_lock(client, 'foo') + assert lock.owned() is False + assert lock2.owned() is False + lock2.acquire(blocking=False) + assert lock.owned() is False + assert lock2.owned() is True + lock2.release() + assert lock.owned() is False + assert lock2.owned() is False + + def test_owned(self, r): + self._test_owned(r) + + def test_owned_with_decoded_responses(self, r_decoded): + self._test_owned(r_decoded) + + def test_competing_locks(self, r): + lock1 = self.get_lock(r, 'foo') + lock2 = self.get_lock(r, 'foo') + assert lock1.acquire(blocking=False) + assert not lock2.acquire(blocking=False) + lock1.release() + assert lock2.acquire(blocking=False) + assert not lock1.acquire(blocking=False) + lock2.release() + + def test_timeout(self, r): + lock = self.get_lock(r, 'foo', timeout=10) + assert lock.acquire(blocking=False) + assert 8 < r.ttl('foo') <= 10 + lock.release() + + def test_float_timeout(self, r): + lock = self.get_lock(r, 'foo', timeout=9.5) + assert lock.acquire(blocking=False) + assert 8 < r.pttl('foo') <= 9500 + lock.release() + + def test_blocking_timeout(self, r): + lock1 = self.get_lock(r, 'foo') + assert lock1.acquire(blocking=False) + lock2 = self.get_lock(r, 'foo', blocking_timeout=0.2) + start = time.time() + assert not lock2.acquire() + assert (time.time() - start) > 0.2 + lock1.release() + + def test_context_manager(self, r): + # blocking_timeout prevents a deadlock if the lock can't be acquired + # for some reason + with self.get_lock(r, 'foo', blocking_timeout=0.2) as lock: + assert r.get('foo') == lock.local.token + assert r.get('foo') is None + + def test_context_manager_raises_when_locked_not_acquired(self, r): + r.set('foo', 'bar') + with pytest.raises(LockError): + with self.get_lock(r, 'foo', blocking_timeout=0.1): + pass + + def test_high_sleep_raises_error(self, r): + "If sleep is higher than timeout, it should raise an error" + with pytest.raises(LockError): + self.get_lock(r, 'foo', timeout=1, sleep=2) + + def test_releasing_unlocked_lock_raises_error(self, r): + lock = self.get_lock(r, 'foo') + with pytest.raises(LockError): + lock.release() + + def test_releasing_lock_no_longer_owned_raises_error(self, r): + lock = self.get_lock(r, 'foo') + lock.acquire(blocking=False) + # manually change the token + r.set('foo', 'a') + with pytest.raises(LockNotOwnedError): + lock.release() + # even though we errored, the token is still cleared + assert lock.local.token is None + + def test_extend_lock(self, r): + lock = self.get_lock(r, 'foo', timeout=10) + assert lock.acquire(blocking=False) + assert 8000 < r.pttl('foo') <= 10000 + assert lock.extend(10) + assert 16000 < r.pttl('foo') <= 20000 + lock.release() + + def test_extend_lock_float(self, r): + lock = self.get_lock(r, 'foo', timeout=10.0) + assert lock.acquire(blocking=False) + assert 8000 < r.pttl('foo') <= 10000 + assert lock.extend(10.0) + assert 16000 < r.pttl('foo') <= 
20000 + lock.release() + + def test_extending_unlocked_lock_raises_error(self, r): + lock = self.get_lock(r, 'foo', timeout=10) + with pytest.raises(LockError): + lock.extend(10) + + def test_extending_lock_with_no_timeout_raises_error(self, r): + lock = self.get_lock(r, 'foo') + assert lock.acquire(blocking=False) + with pytest.raises(LockError): + lock.extend(10) + lock.release() + + def test_extending_lock_no_longer_owned_raises_error(self, r): + lock = self.get_lock(r, 'foo', timeout=10) + assert lock.acquire(blocking=False) + r.set('foo', 'a') + with pytest.raises(LockNotOwnedError): + lock.extend(10) + + def test_reacquire_lock(self, r): + lock = self.get_lock(r, 'foo', timeout=10) + assert lock.acquire(blocking=False) + assert r.pexpire('foo', 5000) + assert r.pttl('foo') <= 5000 + assert lock.reacquire() + assert 8000 < r.pttl('foo') <= 10000 + lock.release() + + def test_reacquiring_unlocked_lock_raises_error(self, r): + lock = self.get_lock(r, 'foo', timeout=10) + with pytest.raises(LockError): + lock.reacquire() + + def test_reacquiring_lock_with_no_timeout_raises_error(self, r): + lock = self.get_lock(r, 'foo') + assert lock.acquire(blocking=False) + with pytest.raises(LockError): + lock.reacquire() + lock.release() + + def test_reacquiring_lock_no_longer_owned_raises_error(self, r): + lock = self.get_lock(r, 'foo', timeout=10) + assert lock.acquire(blocking=False) + r.set('foo', 'a') + with pytest.raises(LockNotOwnedError): + lock.reacquire() + + +class TestLockClassSelection(object): + def test_lock_class_argument(self, r): + class MyLock(object): + def __init__(self, *args, **kwargs): + + pass + lock = r.lock('foo', lock_class=MyLock) + assert type(lock) == MyLock From 6ee67ad7140b4727b0da229725d176242c654bce Mon Sep 17 00:00:00 2001 From: Claudio Freire Date: Tue, 1 Oct 2019 16:34:43 -0300 Subject: [PATCH 134/263] Fixed bug preventing reinitialization after fixing MOVED errors --- docs/release-notes.rst | 1 + rediscluster/nodemanager.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/docs/release-notes.rst b/docs/release-notes.rst index a9f630bb..b2c7b2e4 100644 --- a/docs/release-notes.rst +++ b/docs/release-notes.rst @@ -4,6 +4,7 @@ Release Notes 2.1.0 (xxx yy, 2019) * Updated redis-py compatbile version to support any version in the major version 3.0.x, 3.1.x, 3.2.x, 3.3.x. 
(#326) + * Fixed bug preventing reinitialization after getting MOVED errors 2.0.0 (Aug 12, 2019) diff --git a/rediscluster/nodemanager.py b/rediscluster/nodemanager.py index 90f95693..9e2d15aa 100644 --- a/rediscluster/nodemanager.py +++ b/rediscluster/nodemanager.py @@ -240,7 +240,7 @@ def initialize(self): self.reinitialize_counter = 0 def increment_reinitialize_counter(self, ct=1): - for i in range(1, ct): + for i in range(min(ct, self.reinitialize_steps)): self.reinitialize_counter += 1 if self.reinitialize_counter % self.reinitialize_steps == 0: self.initialize() From c6b770d16be84583869e43ad7a26e3758d9d2829 Mon Sep 17 00:00:00 2001 From: Kyle Hersey Date: Fri, 13 Sep 2019 20:14:30 +0000 Subject: [PATCH 135/263] fix(XREAD, XREADGROUP): fix key lookup for stream read commands --- rediscluster/client.py | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/rediscluster/client.py b/rediscluster/client.py index dd8f0ee8..64188e43 100644 --- a/rediscluster/client.py +++ b/rediscluster/client.py @@ -10,6 +10,7 @@ # rediscluster imports from .connection import ( ClusterConnectionPool, + ClusterCrossSlotError, ClusterReadOnlyConnectionPool, ClusterWithReadReplicasConnectionPool, SSLClusterConnection, @@ -308,7 +309,17 @@ def _determine_slot(self, *args): keys = args[3: 3 + numkeys] slots = {self.connection_pool.nodes.keyslot(key) for key in keys} if len(slots) != 1: - raise RedisClusterException("{0} - all keys must map to the same key slot".format(command)) + raise ClusterCrossSlotError("Keys in request don't hash to the same slot") + return slots.pop() + + if command in ['XREADGROUP', 'XREAD']: + tokens = {args[i].value: i for i in range(len(args)) if type(args[i]) == Token} + keys_ids = list(args[tokens['STREAMS'] + 1: ]) + idx_split = len(keys_ids) // 2 + keys = keys_ids[: idx_split] + slots = {self.connection_pool.nodes.keyslot(key) for key in keys} + if len(slots) != 1: + raise ClusterCrossSlotError("Keys in request don't hash to the same slot") return slots.pop() key = args[1] From e1ef5ba59d5c3cd27d406758611c6b4d80288e96 Mon Sep 17 00:00:00 2001 From: Kyle Hersey Date: Fri, 13 Sep 2019 21:26:42 +0000 Subject: [PATCH 136/263] fix(tests): ClusterCrossSlotError => RedisClusterError to please travis --- rediscluster/client.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/rediscluster/client.py b/rediscluster/client.py index 64188e43..191148d3 100644 --- a/rediscluster/client.py +++ b/rediscluster/client.py @@ -10,7 +10,6 @@ # rediscluster imports from .connection import ( ClusterConnectionPool, - ClusterCrossSlotError, ClusterReadOnlyConnectionPool, ClusterWithReadReplicasConnectionPool, SSLClusterConnection, @@ -309,7 +308,7 @@ def _determine_slot(self, *args): keys = args[3: 3 + numkeys] slots = {self.connection_pool.nodes.keyslot(key) for key in keys} if len(slots) != 1: - raise ClusterCrossSlotError("Keys in request don't hash to the same slot") + raise RedisClusterException("{0} - all keys must map to the same key slot".format(command)) return slots.pop() if command in ['XREADGROUP', 'XREAD']: @@ -319,7 +318,7 @@ def _determine_slot(self, *args): keys = keys_ids[: idx_split] slots = {self.connection_pool.nodes.keyslot(key) for key in keys} if len(slots) != 1: - raise ClusterCrossSlotError("Keys in request don't hash to the same slot") + raise RedisClusterException("{0} - all keys must map to the same key slot".format(command)) return slots.pop() key = args[1] From f73cf42739203bbd3228976cd64a61655565ce25 Mon Sep 17 00:00:00 2001 
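The two stream fixes above teach _determine_slot to locate the keys of XREAD and XREADGROUP and to reject key sets that span slots. A hedged sketch of the constraint they enforce, assuming `rc` is an already connected RedisCluster client and using a shared hash tag so both stream keys land in the same slot:

```python
# Sketch only. The {orders} hash tag forces both stream keys into one slot,
# which is what the XREAD/XREADGROUP slot lookup above requires.
rc.xadd("{orders}:eu", {"id": "1"})
rc.xadd("{orders}:us", {"id": "2"})

# Read both streams from the beginning; keys hashing to different slots would
# raise RedisClusterException ("all keys must map to the same key slot").
entries = rc.xread({"{orders}:eu": "0", "{orders}:us": "0"})
print(entries)
```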
From: Alan Li Date: Thu, 14 Nov 2019 18:06:02 -0800 Subject: [PATCH 137/263] fixed some basic grammar and spelling --- CONTRIBUTING.md | 10 +++++----- README.md | 4 ++-- docs/commands.rst | 4 ++-- docs/disclaimer.rst | 4 ++-- docs/limitations-and-differences.rst | 8 ++++---- docs/pipelines.rst | 2 +- docs/upgrading.rst | 18 +++++++++--------- tests/test_cluster_connection_pool.py | 2 +- 8 files changed, 26 insertions(+), 26 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index d3e51f8a..1fbfd164 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -7,13 +7,13 @@ All CI tests must pass (Travis-CI) Follow the code quality standards described in this file. -You are responsible for ensuring the code is mergable and fix any issues that can occur if other code was merged before your code. +You are responsible for ensuring the code is mergeable and fix any issues that can occur if other code was merged before your code. -Allways ensure docs is up to date based on your changes. If docs is missing and you think it should exists you are responsible to write it. +Always ensure docs is up to date based on your changes. If docs is missing and you think it should exists you are responsible to write it. For all PR you should do/include the following - A line about the change in the `CHANGES` file Add it in the section `Next release`, create it if needed. - - If you change something already implemented, for example add/remove argument you should add a line in `docs/Upgrading.md` describing how to migrate existing code from the old to the new code. Add it in the section `Next release`, create it if needed. + - If you change something already implemented, for example adding/removing an argument you should add a line in `docs/Upgrading.md` describing how to migrate existing code from the old to the new code. Add it in the section `Next release`, create it if needed. - Add yourself to `docs/Authors` file (This is optional if you want) @@ -53,10 +53,10 @@ print("foobar {barfoo} {qwert}".format( I (Johan/Grokzen) have been allowed (by andymccurdy) explicitly to use all test code that already exists inside `redis-py` lib. If possible you should reuse code that exists in there. -All code should aim to have 100% test coverage. This is just a target and not a requirements. +All code should aim to have 100% test coverage. This is just a target and not a requirement. All new features must implement tests to show that it works as intended. -All implemented tests must pass on all supported python versions. List of supported versions can be found in the `README.md`. +All implemented tests must pass on all supported python versions. List of supported versions can be found in `README.md`. All tests should be assumed to work against the test environment that is implemented when running in `travis-ci`. Currently that means 6 nodes in the cluster, 3 masters, 3 slaves, using port `7000-7005` and the node on port `7000` must be accessible on `127.0.0.1` diff --git a/README.md b/README.md index 213532fe..a461e29f 100644 --- a/README.md +++ b/README.md @@ -2,7 +2,7 @@ This client provides a client for redis cluster that was added in redis 3.0. -This project is a port of `redis-rb-cluster` by antirez, with alot of added functionality. The original source can be found at https://github.com/antirez/redis-rb-cluster +This project is a port of `redis-rb-cluster` by antirez, with a lot of added functionality. 
The original source can be found at https://github.com/antirez/redis-rb-cluster [![Build Status](https://travis-ci.org/Grokzen/redis-py-cluster.svg?branch=master)](https://travis-ci.org/Grokzen/redis-py-cluster) [![Coverage Status](https://coveralls.io/repos/Grokzen/redis-py-cluster/badge.png)](https://coveralls.io/r/Grokzen/redis-py-cluster) [![PyPI version](https://badge.fury.io/py/redis-py-cluster.svg)](http://badge.fury.io/py/redis-py-cluster) @@ -18,7 +18,7 @@ This Readme contains a reduced version of the full documentation. Upgrading instructions between each released version can be found [here](docs/upgrading.rst) -Changelog for next release and all older releasess can be found [here](docs/release-notes.rst) +Changelog for next release and all older releases can be found [here](docs/release-notes.rst) diff --git a/docs/commands.rst b/docs/commands.rst index 42f26931..b618a423 100644 --- a/docs/commands.rst +++ b/docs/commands.rst @@ -1,7 +1,7 @@ Implemented commands ==================== -This will describe all changes that RedisCluster have done to make a command to work in a cluster environment. +This will describe all changes made in RedisCluster enable a command for a clustered environment. If a command is not listed here then the default implementation from `Redis` in the `redis-py` library is used. @@ -64,7 +64,7 @@ This following commands will be sent to the master nodes in the cluster. - script flush - the result is `True` if the command succeeds on all master nodes, else `False` - script exists - the result is an array of booleans. An entry is `True` only if the script exists on all the master nodes. -The following commands will be sent to the sever that matches the specefied key. +The following commands will be sent to the sever that matches the specified key. - hscan - hscan_iter diff --git a/docs/disclaimer.rst b/docs/disclaimer.rst index 02e61ce0..fcccf0d5 100644 --- a/docs/disclaimer.rst +++ b/docs/disclaimer.rst @@ -3,8 +3,8 @@ Disclaimer Both Redis cluster and redis-py-cluster is considered stable and production ready. -But this depends on what you are going to use clustering for. In the simple use cases with SET/GET and other single key functions there is not issues. If you require multi key functinoality or pipelines then you must be very careful when developing because they work slightly different from the normal redis server. +But this depends on what you are going to use clustering for. In the simple use cases with SET/GET and other single key functions there is not issues. If you require multi key functionality or pipelines then you must be very careful when developing because they work slightly different from the normal redis server. If you require advance features like pubsub or scripting, this lib and redis do not handle that kind of use-cases very well. You either need to develop a custom solution yourself or use a non clustered redis server for that. -Finally, this lib itself is very stable and i know of atleast 2 companies that use this in production with high loads and big cluster sizes. +Finally, this lib itself is very stable and I know of at least 2 companies that use this in production with high loads and big cluster sizes. 
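The limitations and pipelines documents edited next describe how a cluster pipeline still queues commands and returns their results in submit order, even though the commands fan out to several nodes. A minimal usage sketch, assuming `rc` is an existing RedisCluster client:

```python
# Illustrative only; not part of the patch.
pipe = rc.pipeline()
pipe.set("foo", 1)
pipe.incr("foo")
pipe.get("foo")

# Results come back in the order the commands were queued, regardless of which
# node each command was sent to.
print(pipe.execute())  # e.g. [True, 2, b'2']
```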
diff --git a/docs/limitations-and-differences.rst b/docs/limitations-and-differences.rst index 7b37e62d..a2e78ee8 100644 --- a/docs/limitations-and-differences.rst +++ b/docs/limitations-and-differences.rst @@ -3,13 +3,13 @@ Limitations and differences This will compare against `redis-py` -There is alot of differences that have to be taken into consideration when using redis cluster. +There is a lot of differences that have to be taken into consideration when using redis cluster. -Any method that can operate on multiple keys have to be reimplemented in the client and in some cases that is not possible to do. In general any method that is overriden in RedisCluster have lost the ability of being atomic. +Any method that can operate on multiple keys have to be reimplemented in the client and in some cases that is not possible to do. In general any method that is overridden in RedisCluster have lost the ability of being atomic. -Pipelines do not work the same way in a cluster. In `Redis` it batch all commands so that they can be executed at the same time when requested. But with RedisCluster pipelines will send the command directly to the server when it is called, but it will still store the result internally and return the same data from .execute(). This is done so that the code still behaves like a pipeline and no code will break. A better solution will be implemented in the future. +Pipelines do not work the same way in a cluster. In `Redis` it batches all commands so that they can be executed at the same time when requested. But with RedisCluster pipelines will send the command directly to the server when it is called, but it will still store the result internally and return the same data from .execute(). This is done so that the code still behaves like a pipeline and no code will break. A better solution will be implemented in the future. -Alot of methods will behave very different when using RedisCluster. Some methods send the same request to all servers and return the result in another format then `Redis` do. Some methods is blocked because they do not work / is not implemented / is dangerous to use in redis cluster. +A lot of methods will behave very different when using RedisCluster. Some methods send the same request to all servers and return the result in another format than `Redis` does. Some methods are blocked because they do not work / are not implemented / are dangerous to use in redis cluster. Some of the commands are only partially supported when using RedisCluster. The commands ``zinterstore`` and ``zunionstore`` are only supported if all the keys map to the same key slot in the cluster. This can be achieved by namespacing related keys with a prefix followed by a bracketed common key. Example: diff --git a/docs/pipelines.rst b/docs/pipelines.rst index 1c0f7ac8..5ab9bdd8 100644 --- a/docs/pipelines.rst +++ b/docs/pipelines.rst @@ -42,7 +42,7 @@ After playing around with pipelines and thinking about possible solutions that c Why can't we reuse the pipeline code in `redis-py`? In short it is almost the same reason why code from the normal redis client can't be reused in a cluster environment and that is because of the slots system. Redis cluster consist of a number of slots that is distributed across a number of servers and each key belongs in one of these slots. -In the normal pipeline implementation in `redis-py` we can batch send all the commands and send them to the server at once, thus speeding up the code by not issuing many requests one after another. 
We can say that we have defined and guaranteed execution order becuase of this. +In the normal pipeline implementation in `redis-py` we can batch send all the commands and send them to the server at once, thus speeding up the code by not issuing many requests one after another. We can say that we have defined and guaranteed execution order because of this. One problem that appears when you want to do pipelines in a cluster environment is that you can't have guaranteed execution order in the same way as a single server pipeline. The problem is that because you can queue a command to any key, we will end up in most of the cases having to talk to 2 or more nodes in the cluster to execute the pipeline. The problem with that is that there is no single place/node/way to send the pipeline and redis will sort everything out by itself via some internal mechanisms. Because of that when we build a pipeline for a cluster we have to build several smaller pipelines that we each send to the designated node in the cluster. diff --git a/docs/upgrading.rst b/docs/upgrading.rst index fa7cedd5..18a93ca3 100644 --- a/docs/upgrading.rst +++ b/docs/upgrading.rst @@ -14,7 +14,7 @@ Class StrictRedis has been removed to mirror upstream class structure. Class StrictClusterPipeline was renamed to ClusterPipeline. -Method SORT has been changed back to only allow to be executed if keys is in the same slot. No more client side parsing and handling of the keys and values. +Method SORT has been changed back to only allow execution if keys are in the same slot. No more client side parsing and handling of the keys and values. 1.3.2 --> Next Release @@ -26,25 +26,25 @@ If you created the `StrictRedisCluster` (or `RedisCluster`) instance via the `fr 1.3.1 --> 1.3.2 --------------- -If your redis instance is configured to not have the `CONFIG ...` commands enabled due to security reasons you need to pass this into the client object `skip_full_coverage_check=True`. Benefits is that the client class no longer requires the `CONFIG ...` commands to be enabled on the server. Downsides is that you can't use the option in your redis server and still use the same feature in this client. +If your redis instance is configured to not have the `CONFIG ...` commands enabled due to security reasons you need to pass this into the client object `skip_full_coverage_check=True`. Benefits are that the client class no longer requires the `CONFIG ...` commands to be enabled on the server. A downside is that you can't use the option in your redis server and still use the same feature in this client. 1.3.0 --> 1.3.1 --------------- -Method `scan_iter` was rebuilt becuase it was broken and did not perform as expected. If you are using this method you should be carefull with this new implementation and test it through before using it. The expanded testing for that method indicates it should work without problems. If you find any issues with the new method please open a issue on github. +Method `scan_iter` was rebuilt because it was broken and did not perform as expected. If you are using this method you should be careful with this new implementation and test it through before using it. The expanded testing for that method indicates it should work without problems. If you find any issues with the new method please open a issue on github. -A major refactoring was performed in the pipeline system that improved error handling and reliability of execution. 
It also simplified the code alot to make it easier to understand and continue to develop in the future. Becuase of this major refactoring you should really test throuhg your pipeline code to ensure that none of your code is broken because of this refactoring. +A major refactoring was performed in the pipeline system that improved error handling and reliability of execution. It also simplified the code, making it easier to understand and to continue development in the future. Because of this major refactoring you should thoroughly test your pipeline code to ensure that none of your code is broken. 1.2.0 --> Next release ---------------------- -Class RedisClusterMgt has been removed. You should use the `CLUSTER ...` methods that exists in the `StrictRedisCluster` client class. +Class RedisClusterMgt has been removed. You should use the `CLUSTER ...` methods that exist in the `StrictRedisCluster` client class. -Method `cluster_delslots` changed argument specification from `self, node_id, *slots` to `self, *slots` and changed the behaviour of the method to now automatically determine the slot_id based on the current cluster structure and where each slot that you want to delete is loated. +Method `cluster_delslots` changed argument specification from `self, node_id, *slots` to `self, *slots` and changed the behaviour of the method to now automatically determine the slot_id based on the current cluster structure and where each slot that you want to delete is loaded. Method pfcount no longer has custom logic and exceptions to prevent CROSSSLOT errors. If method is used with different slots then a regular CROSSSLOT error (rediscluster.exceptions.ClusterCrossSlotError) will be returned. @@ -59,7 +59,7 @@ Also discontinue passing `use_threads` flag to the pipeline() method. In 1.1.0 and prior, you could use `pipeline_use_threads` flag to tell the client to perform queries to the different nodes in parallel via threads. We exposed this as a flag because using threads might have been risky and we wanted people to be able to disable it if needed. -With this release we figured out how to get parallelization of the commands without the need for threads. We write to all the nodes before reading from them, essentially multiplexing the connections (but without the need for complicated socket multiplexing). We found this approach to be faster and more scalable as more nodes are added to the cluster. +With this release we figured out how parallelize commands without the need for threads. We write to all the nodes before reading from them, essentially multiplexing the connections (but without the need for complicated socket multiplexing). We found this approach to be faster and more scalable as more nodes are added to the cluster. That means we don't need the `pipeline_use_threads` flag anymore, or the `use_threads` flag that could be passed into the instantiation of the pipeline object itself. @@ -99,9 +99,9 @@ Added optional `max_connections_per_node` parameter to `ClusterConnectionPool` w Reinitialize on `MOVED` errors will not run on every error but instead on every 25 error to avoid excessive cluster reinitialize when used in multiple threads and resharding at the same time. If you want to go back to the old behaviour with reinitialize on every error you should pass in `reinitialize_steps=1` to the client constructor. If you want to increase or decrease the intervall of this new behaviour you should set `reinitialize_steps` in the client constructor to a value that you want. 
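A short sketch of the knob described in the paragraph above, assuming a reachable cluster node on 127.0.0.1:7000: reinitialize_steps=1 restores the old refresh-the-slot-cache-on-every-MOVED behaviour, while the default only refreshes every 25th MOVED error.

```python
# Sketch only, not part of the patch.
from rediscluster import RedisCluster

rc = RedisCluster(
    startup_nodes=[{"host": "127.0.0.1", "port": "7000"}],
    reinitialize_steps=1,  # refresh the slot cache on every MOVED error
)
```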
-Pipelines in general have recieved alot of attention so if you are using pipelines in your code, ensure that you test the new code out alot before using it to make sure it still works as you expect. +Pipelines in general have received a lot of attention so if you are using pipelines in your code, ensure that you test the new code out a lot before using it to make sure it still works as you expect. -The entire client code should now be safer to use in a threaded environment. Some race conditions was found and have now been fixed and it should prevent the code from behaving wierd during reshard operations. +The entire client code should now be safer to use in a threaded environment. Some race conditions was found and have now been fixed and it should prevent the code from behaving weird during reshard operations. diff --git a/tests/test_cluster_connection_pool.py b/tests/test_cluster_connection_pool.py index c586d1c1..125d6b16 100644 --- a/tests/test_cluster_connection_pool.py +++ b/tests/test_cluster_connection_pool.py @@ -47,7 +47,7 @@ def get_pool(self, connection_kwargs=None, max_connections=None, max_connections def test_in_use_not_exists(self): """ - Test that if for some reason, the node that it tries to get the connectino for + Test that if for some reason, the node that it tries to get the connection for do not exists in the _in_use_connection variable. """ pool = self.get_pool() From 27392f931b9ac994ea5f48dad95328ce6fa1ffe6 Mon Sep 17 00:00:00 2001 From: Alan Li Date: Thu, 14 Nov 2019 19:03:32 -0800 Subject: [PATCH 138/263] Update CONTRIBUTING.md --- CONTRIBUTING.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 1fbfd164..dc4b6488 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -7,11 +7,11 @@ All CI tests must pass (Travis-CI) Follow the code quality standards described in this file. -You are responsible for ensuring the code is mergeable and fix any issues that can occur if other code was merged before your code. +You are responsible for ensuring the code is mergeable and for fixing any issues that can occur if other code was merged before your code. -Always ensure docs is up to date based on your changes. If docs is missing and you think it should exists you are responsible to write it. +Always ensure docs are up to date based on your changes. If docs are missing and you think it should exists you are responsible for writing it. -For all PR you should do/include the following +For all PRs you should do/include the following - A line about the change in the `CHANGES` file Add it in the section `Next release`, create it if needed. - If you change something already implemented, for example adding/removing an argument you should add a line in `docs/Upgrading.md` describing how to migrate existing code from the old to the new code. Add it in the section `Next release`, create it if needed. - Add yourself to `docs/Authors` file (This is optional if you want) From 8fd3632024455d8ea29799454810c9cf51125737 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Mon, 13 Jan 2020 15:40:39 +0100 Subject: [PATCH 139/263] Remove support for python 3.4 & 3.5. 
Added versions 3.8 to supported version --- setup.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/setup.py b/setup.py index ffbc2408..665ddccf 100644 --- a/setup.py +++ b/setup.py @@ -34,7 +34,7 @@ install_requires=[ 'redis>=3.0.0,<3.4.0' ], - python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*", + python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4, !=3.5", extras_require={ 'hiredis': [ "hiredis>=0.1.3", @@ -57,10 +57,9 @@ 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', - 'Programming Language :: Python :: 3.4', - 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', + 'Programming Language :: Python :: 3.8', 'Environment :: Web Environment', 'Operating System :: POSIX', 'License :: OSI Approved :: MIT License', From 3dec875a910e7d58789cb8cdc4c4f07d4d1e750e Mon Sep 17 00:00:00 2001 From: Grokzen Date: Mon, 13 Jan 2020 15:48:02 +0100 Subject: [PATCH 140/263] Updated the supported python versions to now not support 3.4 & 3.5 and added 3.8 as supported. Updated some docs that was there for some of the now unsupported versions --- .travis.yml | 3 +-- docs/index.rst | 21 ++++----------------- docs/upgrading.rst | 6 ++++++ tox.ini | 24 +++++++++--------------- 4 files changed, 20 insertions(+), 34 deletions(-) diff --git a/.travis.yml b/.travis.yml index 35cdc2ca..7fd934a2 100644 --- a/.travis.yml +++ b/.travis.yml @@ -4,10 +4,9 @@ language: python cache: pip python: - "2.7" - - "3.4" - - "3.5" - "3.6" - "3.7" + - "3.8" - "nightly" services: - redis-server diff --git a/docs/index.rst b/docs/index.rst index 49e38c1a..3721c6eb 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -88,23 +88,10 @@ Python versions should follow the same supported python versions as specificed b If this library supports more then one major version line of `redis-py`, then the supported python versions must include the set of supported python versions by all major version lines. -- 2.7.x -- 3.4.1+ (See note) -- 3.5.x -- 3.6.x -- 3.7.x - -.. note:: Python 3.4.0 - - A segfault was found when running `redis-py` in python `3.4.0` that was introduced into the codebase in python itself in the version `3.4.0`. - - Because of this both `redis-py` and `redis-py-cluster` will not work when running with `3.4.0`. - - This lib has decided to block the lib from execution on `3.4.0` and you will get a exception when trying to import the code. - - The only solution is to use python `3.4.1` or some other higher minor version in the `3.4` series. - - When python `3.8.0` is released and when it is added to as a supported pythoon version, python 3.4.x will be removed from supported versions and this hard block will be removed from the source code. +- 2.7 +- 3.6 +- 3.7 +- 3.8 diff --git a/docs/upgrading.rst b/docs/upgrading.rst index 18a93ca3..ffae3ef6 100644 --- a/docs/upgrading.rst +++ b/docs/upgrading.rst @@ -3,6 +3,12 @@ Upgrading redis-py-cluster This document describes what must be done when upgrading between different versions to ensure that code still works. +2.0.0 --> 2.1.0 +--------------- + +Python3 version must now be one of 3.6, 3.7, 3.8 + + 1.3.x --> 2.0.0 --------------- diff --git a/tox.ini b/tox.ini index 0c3125fa..5300e4d6 100644 --- a/tox.ini +++ b/tox.ini @@ -4,7 +4,7 @@ # install tox" and then run "tox" from this directory. 
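After this patch the supported interpreters are 2.7 and 3.6 through 3.8, as the python_requires change above encodes. A purely illustrative runtime guard mirroring that rule:

```python
# Illustrative only; the real enforcement is the python_requires line in setup.py.
import sys

if sys.version_info[0] == 3 and sys.version_info[:2] < (3, 6):
    raise RuntimeError("redis-py-cluster 2.1.0 supports Python 2.7 and 3.6+")
```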
[tox] -envlist = py27, py34, py35, py36, py37, hi27, hi34, hi35, hi36, hi37, flake8-py34, flake8-py27 +envlist = py27, py36, py37, py38, hi27, hi36, hi37, hi38, flake8-py34, flake8-py27 [testenv] deps = -r{toxinidir}/dev-requirements.txt @@ -16,18 +16,6 @@ deps = -r{toxinidir}/dev-requirements.txt hiredis == 0.2.0 -[testenv:hi34] -basepython = python3.4 -deps = - -r{toxinidir}/dev-requirements.txt - hiredis == 0.2.0 - -[testenv:hi35] -basepython = python3.5 -deps = - -r{toxinidir}/dev-requirements.txt - hiredis == 0.2.0 - [testenv:hi36] basepython = python3.6 deps = @@ -40,8 +28,14 @@ deps = -r{toxinidir}/dev-requirements.txt hiredis == 0.2.0 -[testenv:flake8-py34] -basepython= python3.4 +[testenv:hi38] +basepython = python3.8 +deps = + -r{toxinidir}/dev-requirements.txt + hiredis == 0.2.0 + +[testenv:flake8-py36] +basepython= python3.6 deps = flake8==2.2.5 commands = flake8 --show-source --exclude=.venv,.tox,dist,docs,build,.git --ignore=E501,E731,E402 . From 847d3778406c239606768c765bc578ca63260cdc Mon Sep 17 00:00:00 2001 From: Grokzen Date: Mon, 13 Jan 2020 16:24:41 +0100 Subject: [PATCH 141/263] Update tests for test_encoding.py file to match version in redis-py --- tests/test_encoding.py | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/tests/test_encoding.py b/tests/test_encoding.py index 3f430064..05766d14 100644 --- a/tests/test_encoding.py +++ b/tests/test_encoding.py @@ -2,8 +2,10 @@ import pytest import redis +from rediscluster import RedisCluster + from redis._compat import unichr, unicode -from .conftest import _get_client +from .conftest import _get_client, _init_client class TestEncoding(object): @@ -18,7 +20,8 @@ def test_simple_encoding(self, r): assert isinstance(cached_val, unicode) assert unicode_string == cached_val - def test_list_encoding(self, r): + def test_list_encoding(self, request): + r = _init_client(request, cls=RedisCluster, decode_responses=True) unicode_string = unichr(3456) + 'abcd' + unichr(3421) result = [unicode_string, unicode_string, unicode_string] r.rpush('a', *result) @@ -27,14 +30,12 @@ def test_list_encoding(self, r): class TestEncodingErrors(object): def test_ignore(self, request): - r = _get_client(redis.Redis, request=request, decode_responses=True, - encoding_errors='ignore') + r = _init_client(request, cls=RedisCluster, decode_responses=True, encoding_errors='ignore') r.set('a', b'foo\xff') assert r.get('a') == 'foo' def test_replace(self, request): - r = _get_client(redis.Redis, request=request, decode_responses=True, - encoding_errors='replace') + r = _init_client(request, cls=RedisCluster, decode_responses=True, encoding_errors='replace') r.set('a', b'foo\xff') assert r.get('a') == 'foo\ufffd' From b1c87a3259f77a00ae9dce698236439868a3c911 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Mon, 13 Jan 2020 16:33:08 +0100 Subject: [PATCH 142/263] Update and fix broken tests for test_lock.py --- tests/test_lock.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/tests/test_lock.py b/tests/test_lock.py index f92afeca..6a7f794b 100644 --- a/tests/test_lock.py +++ b/tests/test_lock.py @@ -1,10 +1,12 @@ import pytest import time +from rediscluster import RedisCluster + from redis.exceptions import LockError, LockNotOwnedError from redis.client import Redis from redis.lock import Lock -from .conftest import _get_client +from .conftest import _get_client, _init_client class TestLock(object): @@ -60,10 +62,12 @@ def _test_owned(self, client): assert lock.owned() is False assert lock2.owned() is 
False - def test_owned(self, r): + def test_owned(self, request): + r = _init_client(request, cls=RedisCluster, decode_responses=False) self._test_owned(r) - def test_owned_with_decoded_responses(self, r_decoded): + def test_owned_with_decoded_responses(self, request): + r_decoded = _init_client(request, cls=RedisCluster, decode_responses=True) self._test_owned(r_decoded) def test_competing_locks(self, r): From 98189eb2b1273b1c0b8bb1b0db8f9fe1d1ad004e Mon Sep 17 00:00:00 2001 From: Grokzen Date: Tue, 21 Jan 2020 18:20:29 +0100 Subject: [PATCH 143/263] Use CaseInsenitiveDict object for response_callbacks in the client --- rediscluster/client.py | 32 +++++++++++++++++++++++++++++++- 1 file changed, 31 insertions(+), 1 deletion(-) diff --git a/rediscluster/client.py b/rediscluster/client.py index 191148d3..799dfeb9 100644 --- a/rediscluster/client.py +++ b/rediscluster/client.py @@ -40,6 +40,33 @@ from redis.exceptions import RedisError, ResponseError, TimeoutError, DataError, ConnectionError, BusyLoadingError +class CaseInsensitiveDict(dict): + "Case insensitive dict implementation. Assumes string keys only." + + def __init__(self, data): + for k, v in iteritems(data): + self[k.upper()] = v + + def __contains__(self, k): + return super(CaseInsensitiveDict, self).__contains__(k.upper()) + + def __delitem__(self, k): + super(CaseInsensitiveDict, self).__delitem__(k.upper()) + + def __getitem__(self, k): + return super(CaseInsensitiveDict, self).__getitem__(k.upper()) + + def get(self, k, default=None): + return super(CaseInsensitiveDict, self).get(k.upper(), default) + + def __setitem__(self, k, v): + super(CaseInsensitiveDict, self).__setitem__(k.upper(), v) + + def update(self, data): + data = CaseInsensitiveDict(data) + super(CaseInsensitiveDict, self).update(data) + + class RedisCluster(Redis): """ If a command is implemented over the one in Redis then it requires some changes compared to @@ -218,7 +245,7 @@ def __init__(self, host=None, port=None, startup_nodes=None, max_connections=Non self.refresh_table_asap = False self.nodes_flags = self.__class__.NODES_FLAGS.copy() self.result_callbacks = self.__class__.RESULT_CALLBACKS.copy() - self.response_callbacks = self.__class__.RESPONSE_CALLBACKS.copy() + self.response_callbacks = CaseInsensitiveDict(self.__class__.RESPONSE_CALLBACKS) self.response_callbacks = dict_merge(self.response_callbacks, self.CLUSTER_COMMANDS_RESPONSE_CALLBACKS) self.read_from_replicas = read_from_replicas @@ -671,6 +698,9 @@ def cluster_slots(self): ########## # All methods that must have custom implementation + def client_kill_filter(self, _id=None, _type=None, addr=None, skipme=None): + raise NotImplementedError('Method not yet implemented') + def _parse_scan(self, response, **options): """ Borrowed from redis-py::client.py From c9a7eeadafd7a611a13d1596cfc9d329bc6312f8 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Tue, 21 Jan 2020 18:44:47 +0100 Subject: [PATCH 144/263] Update test_pubsub.py to the same code as redis-py. Blocked out all test methods as pubsub is not really supported in cluster mode and is discouraged to be used in a cluster environment. 
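For reference, the pattern applied throughout this file is a plain pytest xfail marker on each pubsub test, so the code path is still exercised while a failure is reported as expected instead of breaking the run. A minimal sketch of that pattern, assuming the cluster client fixture `r` from conftest; the test name and channel are illustrative and not part of this change:

    import pytest

    @pytest.mark.xfail(reason="Pubsub is not fully supported in cluster mode")
    def test_publish_roundtrip(r):
        p = r.pubsub(ignore_subscribe_messages=True)
        p.subscribe('foo')
        # publish returns the number of subscribers that received the message
        assert r.publish('foo', 'hello') == 1
        # poll briefly; subscribe confirmations are filtered out by the flag above
        message = None
        for _ in range(10):
            message = p.get_message(timeout=0.1)
            if message is not None:
                break
        assert message is not None
        assert message['data'] == b'hello'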
--- tests/test_pubsub.py | 123 ++++++++++++++++++++++++++++++------------- 1 file changed, 85 insertions(+), 38 deletions(-) diff --git a/tests/test_pubsub.py b/tests/test_pubsub.py index 5d113769..4b254b6d 100644 --- a/tests/test_pubsub.py +++ b/tests/test_pubsub.py @@ -19,7 +19,7 @@ from .conftest import _get_client from .conftest import skip_if_server_version_lt, skip_if_redis_py_version_lt -def wait_for_message(pubsub, timeout=0.5, ignore_subscribe_messages=False): +def wait_for_message(pubsub, timeout=0.1, ignore_subscribe_messages=False): now = time.time() timeout = now + timeout while now < timeout: @@ -60,7 +60,7 @@ def make_subscribe_test_data(pubsub, type): 'unsub_func': pubsub.punsubscribe, 'keys': ['f*', 'b*', 'uni' + unichr(4456) + '*'] } - assert False, 'invalid subscribe type: {0}'.format(type) + assert False, 'invalid subscribe type: %s' % type class TestPubSubSubscribeUnsubscribe(object): @@ -82,15 +82,17 @@ def _test_subscribe_unsubscribe(self, p, sub_type, unsub_type, sub_func, unsub_f i = len(keys) - 1 - i assert wait_for_message(p) == make_message(unsub_type, key, i) + @pytest.mark.xfail(reason="Pubsub is not fully supported in cluster mode") def test_channel_subscribe_unsubscribe(self, r): kwargs = make_subscribe_test_data(r.pubsub(), 'channel') self._test_subscribe_unsubscribe(**kwargs) + @pytest.mark.xfail(reason="Pubsub is not fully supported in cluster mode") def test_pattern_subscribe_unsubscribe(self, r): kwargs = make_subscribe_test_data(r.pubsub(), 'pattern') self._test_subscribe_unsubscribe(**kwargs) - def _test_resubscribe_on_reconnection(self, p, sub_type, sub_func, keys, *args, **kwargs): + def _test_resubscribe_on_reconnection(self, p, sub_type, unsub_type, sub_func, unsub_func, keys): for key in keys: assert sub_func(key) is None @@ -105,7 +107,7 @@ def _test_resubscribe_on_reconnection(self, p, sub_type, sub_func, keys, *args, # note, we may not re-subscribe to channels in exactly the same order # so we have to do some extra checks to make sure we got them all messages = [] - for i, _ in enumerate(keys): + for i in range(len(keys)): messages.append(wait_for_message(p)) unique_channels = set() @@ -121,10 +123,12 @@ def _test_resubscribe_on_reconnection(self, p, sub_type, sub_func, keys, *args, for channel in unique_channels: assert channel in keys + @pytest.mark.xfail(reason="Pubsub is not fully supported in cluster mode") def test_resubscribe_to_channels_on_reconnection(self, r): kwargs = make_subscribe_test_data(r.pubsub(), 'channel') self._test_resubscribe_on_reconnection(**kwargs) + @pytest.mark.xfail(reason="Pubsub is not fully supported in cluster mode") def test_resubscribe_to_patterns_on_reconnection(self, r): kwargs = make_subscribe_test_data(r.pubsub(), 'pattern') self._test_resubscribe_on_reconnection(**kwargs) @@ -173,14 +177,17 @@ def _test_subscribed_property(self, p, sub_type, unsub_type, sub_func, unsub_fun # now we're finally unsubscribed assert p.subscribed is False + @pytest.mark.xfail(reason="Pubsub is not fully supported in cluster mode") def test_subscribe_property_with_channels(self, r): kwargs = make_subscribe_test_data(r.pubsub(), 'channel') self._test_subscribed_property(**kwargs) + @pytest.mark.xfail(reason="Pubsub is not fully supported in cluster mode") def test_subscribe_property_with_patterns(self, r): kwargs = make_subscribe_test_data(r.pubsub(), 'pattern') self._test_subscribed_property(**kwargs) + @pytest.mark.xfail(reason="Pubsub is not fully supported in cluster mode") def test_ignore_all_subscribe_messages(self, r): p 
= r.pubsub(ignore_subscribe_messages=True) @@ -198,6 +205,25 @@ def test_ignore_all_subscribe_messages(self, r): assert wait_for_message(p) is None assert p.subscribed is False + @pytest.mark.xfail(reason="Pubsub is not fully supported in cluster mode") + def test_ignore_individual_subscribe_messages(self, r): + p = r.pubsub(ignore_subscribe_messages=True) + + checks = ( + (p.subscribe, 'foo'), + (p.unsubscribe, 'foo'), + # (p.psubscribe, 'f*'), + # (p.punsubscribe, 'f*'), + ) + + assert p.subscribed is False + for func, channel in checks: + assert func(channel) is None + assert p.subscribed is True + assert wait_for_message(p) is None + assert p.subscribed is False + + @pytest.mark.xfail(reason="Pubsub is not fully supported in cluster mode") def test_ignore_individual_subscribe_messages(self, r): p = r.pubsub() @@ -227,8 +253,7 @@ def test_sub_unsub_resub_patterns(self, r): self._test_sub_unsub_resub(**kwargs) @pytest.mark.xfail(reason="Pubsub is not fully supported in cluster mode") - def _test_sub_unsub_resub(self, p, sub_type, unsub_type, sub_func, - unsub_func, keys): + def _test_sub_unsub_resub(self, p, sub_type, unsub_type, sub_func, unsub_func, keys): # https://github.com/andymccurdy/redis-py/issues/764 key = keys[0] sub_func(key) @@ -251,8 +276,7 @@ def test_sub_unsub_all_resub_patterns(self, r): self._test_sub_unsub_all_resub(**kwargs) @pytest.mark.xfail(reason="Pubsub is not fully supported in cluster mode") - def _test_sub_unsub_all_resub(self, p, sub_type, unsub_type, sub_func, - unsub_func, keys): + def _test_sub_unsub_all_resub(self, p, sub_type, unsub_type, sub_func, unsub_func, keys): # https://github.com/andymccurdy/redis-py/issues/764 key = keys[0] sub_func(key) @@ -283,22 +307,18 @@ def setup_method(self, *args): def message_handler(self, message): self.message = message - def test_published_message_to_channel(self): - node = self.get_strict_redis_node(7000) - p = node.pubsub() + @pytest.mark.xfail(reason="Pubsub is not fully supported in cluster mode") + def test_published_message_to_channel(self, r): + p = r.pubsub() p.subscribe('foo') assert wait_for_message(p) == make_message('subscribe', 'foo', 1) - - assert node.publish('foo', 'test message') == 1 + assert r.publish('foo', 'test message') == 1 message = wait_for_message(p) assert isinstance(message, dict) assert message == make_message('message', 'foo', 'test message') - # Cleanup pubsub connections - p.close() - - @pytest.mark.xfail(reason="This test is buggy and fails randomly") + @pytest.mark.xfail(reason="Pubsub is not fully supported in cluster mode") def test_publish_message_to_channel_other_server(self): """ Test that pubsub still works across the cluster on different nodes @@ -318,7 +338,7 @@ def test_publish_message_to_channel_other_server(self): # Cleanup pubsub connections p.close() - @pytest.mark.xfail(reason="Pattern pubsub do not work currently") + @pytest.mark.xfail(reason="Pubsub is not fully supported in cluster mode") def test_published_message_to_pattern(self, r): p = r.pubsub() p.subscribe('foo') @@ -342,6 +362,7 @@ def test_published_message_to_pattern(self, r): assert message2 in expected assert message1 != message2 + @pytest.mark.xfail(reason="Pubsub is not fully supported in cluster mode") def test_channel_message_handler(self, r): p = r.pubsub(ignore_subscribe_messages=True) p.subscribe(foo=self.message_handler) @@ -350,7 +371,7 @@ def test_channel_message_handler(self, r): assert wait_for_message(p) is None assert self.message == make_message('message', 'foo', 'test message') - 
@pytest.mark.xfail(reason="Pattern pubsub do not work currently") + @pytest.mark.xfail(reason="Pubsub is not fully supported in cluster mode") def test_pattern_message_handler(self, r): p = r.pubsub(ignore_subscribe_messages=True) p.psubscribe(**{'f*': self.message_handler}) @@ -360,30 +381,38 @@ def test_pattern_message_handler(self, r): assert self.message == make_message('pmessage', 'foo', 'test message', pattern='f*') - @pytest.mark.xfail(reason="Pattern pubsub do not work currently") + @pytest.mark.xfail(reason="Pubsub is not fully supported in cluster mode") def test_unicode_channel_message_handler(self, r): p = r.pubsub(ignore_subscribe_messages=True) channel = 'uni' + unichr(4456) + 'code' channels = {channel: self.message_handler} - print(channels) p.subscribe(**channels) assert wait_for_message(p) is None - assert wait_for_message(p) is None assert r.publish(channel, 'test message') == 1 assert wait_for_message(p) is None assert self.message == make_message('message', channel, 'test message') - @pytest.mark.xfail(reason="Pattern pubsub do not work currently") + @pytest.mark.xfail(reason="Pubsub is not fully supported in cluster mode") def test_unicode_pattern_message_handler(self, r): p = r.pubsub(ignore_subscribe_messages=True) pattern = 'uni' + unichr(4456) + '*' channel = 'uni' + unichr(4456) + 'code' p.psubscribe(**{pattern: self.message_handler}) + assert wait_for_message(p) is None assert r.publish(channel, 'test message') == 1 assert wait_for_message(p) is None assert self.message == make_message('pmessage', channel, 'test message', pattern=pattern) + @pytest.mark.xfail(reason="Pubsub is not fully supported in cluster mode") + def test_get_message_without_subscribe(self, r): + p = r.pubsub() + with pytest.raises(RuntimeError) as info: + p.get_message() + expect = ('connection not set: ' + 'did you forget to call subscribe() or psubscribe()?') + assert expect in info.exconly() + class TestPubSubAutoDecoding(object): "These tests only validate that we get unicode values back" @@ -406,8 +435,13 @@ def setup_method(self, *args): def message_handler(self, message): self.message = message - def test_channel_subscribe_unsubscribe(self, o): - p = o.pubsub() + @pytest.fixture() + def r(self, request): + return _get_client(redis.Redis, request=request, decode_responses=True) + + @pytest.mark.xfail(reason="Pubsub is not fully supported in cluster mode") + def test_channel_subscribe_unsubscribe(self, r): + p = r.pubsub() p.subscribe(self.channel) assert wait_for_message(p) == self.make_message('subscribe', self.channel, 1) @@ -416,9 +450,9 @@ def test_channel_subscribe_unsubscribe(self, o): assert wait_for_message(p) == self.make_message('unsubscribe', self.channel, 0) - @pytest.mark.xfail(reason="Pattern pubsub do not work currently") - def test_pattern_subscribe_unsubscribe(self, o): - p = o.pubsub() + @pytest.mark.xfail(reason="Pubsub is not fully supported in cluster mode") + def test_pattern_subscribe_unsubscribe(self, r): + p = r.pubsub() p.psubscribe(self.pattern) assert wait_for_message(p) == self.make_message('psubscribe', self.pattern, 1) @@ -427,6 +461,7 @@ def test_pattern_subscribe_unsubscribe(self, o): assert wait_for_message(p) == self.make_message('punsubscribe', self.pattern, 0) + @pytest.mark.xfail(reason="Pubsub is not fully supported in cluster mode") def test_channel_publish(self, r): p = r.pubsub() p.subscribe(self.channel) @@ -437,7 +472,7 @@ def test_channel_publish(self, r): self.channel, self.data) - @pytest.mark.xfail(reason="Pattern pubsub do not work 
currently") + @pytest.mark.xfail(reason="Pubsub is not fully supported in cluster mode") def test_pattern_publish(self, r): p = r.pubsub() p.psubscribe(self.pattern) @@ -449,10 +484,12 @@ def test_pattern_publish(self, r): self.data, pattern=self.pattern) - def test_channel_message_handler(self, o): - p = o.pubsub(ignore_subscribe_messages=True) + @pytest.mark.xfail(reason="Pubsub is not fully supported in cluster mode") + def test_channel_message_handler(self, r): + p = r.pubsub(ignore_subscribe_messages=True) p.subscribe(**{self.channel: self.message_handler}) - o.publish(self.channel, self.data) + assert wait_for_message(p) is None + r.publish(self.channel, self.data) assert wait_for_message(p) is None assert self.message == self.make_message('message', self.channel, self.data) @@ -462,17 +499,17 @@ def test_channel_message_handler(self, o): p.connection.disconnect() assert wait_for_message(p) is None # should reconnect new_data = self.data + 'new data' - o.publish(self.channel, new_data) + r.publish(self.channel, new_data) assert wait_for_message(p) is None assert self.message == self.make_message('message', self.channel, new_data) - @pytest.mark.xfail(reason="Pattern pubsub do not work currently") - def test_pattern_message_handler(self, o): - p = o.pubsub(ignore_subscribe_messages=True) + @pytest.mark.xfail(reason="Pubsub is not fully supported in cluster mode") + def test_pattern_message_handler(self, r): + p = r.pubsub(ignore_subscribe_messages=True) p.psubscribe(**{self.pattern: self.message_handler}) assert wait_for_message(p) is None - o.publish(self.channel, self.data) + r.publish(self.channel, self.data) assert wait_for_message(p) is None assert self.message == self.make_message('pmessage', self.channel, self.data, @@ -483,12 +520,22 @@ def test_pattern_message_handler(self, o): p.connection.disconnect() assert wait_for_message(p) is None # should reconnect new_data = self.data + 'new data' - o.publish(self.channel, new_data) + r.publish(self.channel, new_data) assert wait_for_message(p) is None assert self.message == self.make_message('pmessage', self.channel, new_data, pattern=self.pattern) + @pytest.mark.xfail(reason="Pubsub is not fully supported in cluster mode") + def test_context_manager(self, r): + with r.pubsub() as pubsub: + pubsub.subscribe('foo') + assert pubsub.connection is not None + + assert pubsub.connection is None + assert pubsub.channels == {} + assert pubsub.patterns == {} + class TestPubSubRedisDown(object): From eeb19beac529ecdddf8aec99f51b9755958252bd Mon Sep 17 00:00:00 2001 From: Grokzen Date: Tue, 21 Jan 2020 19:49:18 +0100 Subject: [PATCH 145/263] Update and fix tests for test_multiprocessing.py Now properly uses cluster nodes and connection fetching methods to make tests pass --- tests/test_multiprocessing.py | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/tests/test_multiprocessing.py b/tests/test_multiprocessing.py index 17bdcc26..e0c39c94 100644 --- a/tests/test_multiprocessing.py +++ b/tests/test_multiprocessing.py @@ -3,7 +3,7 @@ import contextlib import redis -from redis.connection import Connection, ConnectionPool +from rediscluster.connection import ClusterConnection, ClusterConnectionPool from redis.exceptions import ConnectionError from .conftest import _get_client @@ -35,7 +35,7 @@ def test_close_connection_in_child(self): A connection owned by a parent and closed by a child doesn't destroy the file descriptors so a parent can still use it. 
""" - conn = Connection() + conn = ClusterConnection(port=7000) conn.send_command('ping') assert conn.read_response() == b'PONG' @@ -61,7 +61,7 @@ def test_close_connection_in_parent(self): A connection owned by a parent is unusable by a child if the parent (the owning process) closes the connection. """ - conn = Connection() + conn = ClusterConnection(port=7000) conn.send_command('ping') assert conn.read_response() == b'PONG' @@ -89,10 +89,10 @@ def test_pool(self, max_connections): A child will create its own connections when using a pool created by a parent. """ - pool = ConnectionPool.from_url('redis://localhost', + pool = ClusterConnectionPool.from_url('redis://localhost:7000', max_connections=max_connections) - conn = pool.get_connection('ping') + conn = pool.get_random_connection() main_conn_pid = conn.pid with exit_callback(pool.release, conn): conn.send_command('ping') @@ -100,7 +100,7 @@ def test_pool(self, max_connections): def target(pool): with exit_callback(pool.disconnect): - conn = pool.get_connection('ping') + conn = pool.get_random_connection() assert conn.pid != main_conn_pid with exit_callback(pool.release, conn): assert conn.send_command('ping') is None @@ -113,7 +113,7 @@ def target(pool): # Check that connection is still alive after fork process has exited # and disconnected the connections in its pool - conn = pool.get_connection('ping') + conn = pool.get_random_connection() with exit_callback(pool.release, conn): assert conn.send_command('ping') is None assert conn.read_response() == b'PONG' @@ -124,15 +124,15 @@ def test_close_pool_in_main(self, max_connections): A child process that uses the same pool as its parent isn't affected when the parent disconnects all connections within the pool. """ - pool = ConnectionPool.from_url('redis://localhost', + pool = ClusterConnectionPool.from_url('redis://localhost:7000', max_connections=max_connections) - conn = pool.get_connection('ping') + conn = pool.get_random_connection() assert conn.send_command('ping') is None assert conn.read_response() == b'PONG' def target(pool, disconnect_event): - conn = pool.get_connection('ping') + conn = pool.get_random_connection() with exit_callback(pool.release, conn): assert conn.send_command('ping') is None assert conn.read_response() == b'PONG' From 9a04696ab04a2ee3548f0d57388fae37b7444007 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Tue, 21 Jan 2020 23:42:58 +0100 Subject: [PATCH 146/263] Fix all broken geo commands that was broken for byte strings in the expected result output --- tests/test_commands.py | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/tests/test_commands.py b/tests/test_commands.py index 6b50f73f..4c41fbc1 100644 --- a/tests/test_commands.py +++ b/tests/test_commands.py @@ -1815,7 +1815,7 @@ def test_georadius(self, r): (2.1873744593677, 41.406342043777, 'place2') r.geoadd('barcelona', *values) - assert r.georadius('barcelona', 2.191, 41.433, 1000) == ['place1'] + assert r.georadius(b'barcelona', 2.191, 41.433, 1000) == [b'place1'] @skip_if_server_version_lt('3.2.0') def test_georadius_no_values(self, r): @@ -1831,8 +1831,8 @@ def test_georadius_units(self, r): (2.1873744593677, 41.406342043777, 'place2') r.geoadd('barcelona', *values) - assert r.georadius('barcelona', 2.191, 41.433, 1, unit='km') ==\ - ['place1'] + assert r.georadius(b'barcelona', 2.191, 41.433, 1, unit='km') ==\ + [b'place1'] @skip_if_server_version_lt('3.2.0') def test_georadius_with(self, r): @@ -1845,17 +1845,17 @@ def test_georadius_with(self, r): 
# function. assert r.georadius('barcelona', 2.191, 41.433, 1, unit='km', withdist=True, withcoord=True, withhash=True) ==\ - [['place1', 0.0881, 3471609698139488, + [[b'place1', 0.0881, 3471609698139488, (2.19093829393386841, 41.43379028184083523)]] assert r.georadius('barcelona', 2.191, 41.433, 1, unit='km', withdist=True, withcoord=True) ==\ - [['place1', 0.0881, + [[b'place1', 0.0881, (2.19093829393386841, 41.43379028184083523)]] assert r.georadius('barcelona', 2.191, 41.433, 1, unit='km', withhash=True, withcoord=True) ==\ - [['place1', 3471609698139488, + [[b'place1', 3471609698139488, (2.19093829393386841, 41.43379028184083523)]] # test no values. @@ -1868,8 +1868,8 @@ def test_georadius_count(self, r): (2.1873744593677, 41.406342043777, 'place2') r.geoadd('barcelona', *values) - assert r.georadius('barcelona', 2.191, 41.433, 3000, count=1) ==\ - ['place1'] + assert r.georadius(b'barcelona', 2.191, 41.433, 3000, count=1) ==\ + [b'place1'] @skip_if_server_version_lt('3.2.0') def test_georadius_sort(self, r): @@ -1878,9 +1878,9 @@ def test_georadius_sort(self, r): r.geoadd('barcelona', *values) assert r.georadius('barcelona', 2.191, 41.433, 3000, sort='ASC') ==\ - ['place1', 'place2'] + [b'place1', b'place2'] assert r.georadius('barcelona', 2.191, 41.433, 3000, sort='DESC') ==\ - ['place2', 'place1'] + [b'place2', b'place1'] @skip_if_server_version_lt('3.2.0') def test_georadius_store(self, r): @@ -1917,15 +1917,15 @@ def test_georadiusmember(self, r): r.geoadd('barcelona', *values) assert r.georadiusbymember('barcelona', 'place1', 4000) ==\ - ['place2', 'place1'] - assert r.georadiusbymember('barcelona', 'place1', 10) == ['place1'] + [b'place2', b'place1'] + assert r.georadiusbymember('barcelona', 'place1', 10) == [b'place1'] assert r.georadiusbymember('barcelona', 'place1', 4000, withdist=True, withcoord=True, withhash=True) ==\ - [['place2', 3067.4157, 3471609625421029, + [[b'place2', 3067.4157, 3471609625421029, (2.187376320362091, 41.40634178640635)], - ['place1', 0.0, 3471609698139488, + [b'place1', 0.0, 3471609698139488, (2.1909382939338684, 41.433790281840835)]] @skip_if_server_version_lt('5.0.0') From 5bb3770ef21be67853ada10d5e754d154da02f19 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Wed, 22 Jan 2020 00:11:34 +0100 Subject: [PATCH 147/263] Update tests inside test_monitor.py to be same as upstream. 
Blocked out methods that is not yet working --- tests/test_monitor.py | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/tests/test_monitor.py b/tests/test_monitor.py index 09ec21bd..42ed702b 100644 --- a/tests/test_monitor.py +++ b/tests/test_monitor.py @@ -1,5 +1,9 @@ from __future__ import unicode_literals from redis._compat import unicode +from .conftest import skip_if_server_version_lt + +# 3rd party imports +import pytest def wait_for_command(client, monitor, command): @@ -17,12 +21,16 @@ def wait_for_command(client, monitor, command): class TestPipeline(object): + @skip_if_server_version_lt('5.0.0') + @pytest.mark.xfail(reason="Monitor feature not yet implemented") def test_wait_command_not_found(self, r): "Make sure the wait_for_command func works when command is not found" with r.monitor() as m: response = wait_for_command(r, m, 'nothing') assert response is None + @skip_if_server_version_lt('5.0.0') + @pytest.mark.xfail(reason="Monitor feature not yet implemented") def test_response_values(self, r): with r.monitor() as m: r.ping() @@ -34,12 +42,16 @@ def test_response_values(self, r): assert isinstance(response['client_port'], unicode) assert response['command'] == 'PING' + @skip_if_server_version_lt('5.0.0') + @pytest.mark.xfail(reason="Monitor feature not yet implemented") def test_command_with_quoted_key(self, r): with r.monitor() as m: r.get('foo"bar') response = wait_for_command(r, m, 'GET foo"bar') assert response['command'] == 'GET foo"bar' + @skip_if_server_version_lt('5.0.0') + @pytest.mark.xfail(reason="Monitor feature not yet implemented") def test_command_with_binary_data(self, r): with r.monitor() as m: byte_string = b'foo\x92' @@ -47,6 +59,8 @@ def test_command_with_binary_data(self, r): response = wait_for_command(r, m, 'GET foo\\x92') assert response['command'] == 'GET foo\\x92' + @skip_if_server_version_lt('5.0.0') + @pytest.mark.xfail(reason="Monitor feature not yet implemented") def test_lua_script(self, r): with r.monitor() as m: script = 'return redis.call("GET", "foo")' From f485d8ecc2a2c22732787057112f38555b44ace7 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Fri, 24 Jan 2020 14:27:21 +0100 Subject: [PATCH 148/263] Commented out BlockingConnectionPool tests. This will be brought back into the fold in a future PR merge. Added some minor linting fixes to other code parts. 
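The module-level get_pool() helper mirrors the method of the same name on TestConnectionPool so that code outside that class, such as the rewritten SSL cert_reqs tests, can build a ClusterConnectionPool as well. A rough usage sketch, assuming a cluster node listening on 127.0.0.1:7000 as the rest of this file does (the PING round trip is illustrative, not part of the patch):

    from rediscluster.connection import ClusterConnection, ClusterConnectionPool

    pool = ClusterConnectionPool(
        connection_class=ClusterConnection,
        startup_nodes=[{"host": "127.0.0.1", "port": 7000}],
        max_connections=4,
    )
    connection = pool.get_random_connection()
    try:
        connection.send_command('PING')
        assert connection.read_response() == b'PONG'
    finally:
        pool.release(connection)
    pool.disconnect()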
--- tests/test_cluster_connection_pool.py | 30 +++++++++++----- tests/test_cluster_obj.py | 50 +++++++++++++-------------- 2 files changed, 47 insertions(+), 33 deletions(-) diff --git a/tests/test_cluster_connection_pool.py b/tests/test_cluster_connection_pool.py index 125d6b16..1553afdb 100644 --- a/tests/test_cluster_connection_pool.py +++ b/tests/test_cluster_connection_pool.py @@ -32,6 +32,18 @@ def __init__(self, host="localhost", port=7000, socket_timeout=None, **kwargs): self.socket_timeout = socket_timeout +def get_pool(connection_kwargs=None, max_connections=None, max_connections_per_node=None, connection_class=DummyConnection, init_slot_cache=True): + connection_kwargs = connection_kwargs or {} + pool = ClusterConnectionPool( + connection_class=connection_class, + max_connections=max_connections, + max_connections_per_node=max_connections_per_node, + startup_nodes=[{"host": "127.0.0.1", "port": 7000}], + init_slot_cache=init_slot_cache, + **connection_kwargs) + return pool + + class TestConnectionPool(object): def get_pool(self, connection_kwargs=None, max_connections=None, max_connections_per_node=None, connection_class=DummyConnection, init_slot_cache=True): @@ -256,6 +268,7 @@ def test_get_node_by_slot_random(self): assert actual_ports == expected_ports +@pytest.mark.xfail(reason="Blocking connection pool is not supported in this cluster client") class TestBlockingConnectionPool(object): def get_pool(self, connection_kwargs=None, max_connections=10, timeout=20): connection_kwargs = connection_kwargs or {} @@ -554,17 +567,18 @@ def test_defaults(self): @pytest.mark.skipif(not ssl_available, reason="SSL not installed") def test_cert_reqs_options(self): + """ + rediss://[[username]:[password]]@localhost:6379/0 + """ import ssl - pool = redis.ConnectionPool.from_url('rediss://?ssl_cert_reqs=none') - assert pool.get_connection('_').cert_reqs == ssl.CERT_NONE + pool = get_pool().from_url('rediss://localhost:7000?ssl_cert_reqs=none') + assert pool.get_random_connection().cert_reqs == ssl.CERT_NONE - pool = redis.ConnectionPool.from_url( - 'rediss://?ssl_cert_reqs=optional') - assert pool.get_connection('_').cert_reqs == ssl.CERT_OPTIONAL + pool = get_pool().from_url('rediss://localhost:7000?ssl_cert_reqs=optional') + assert pool.get_random_connection().cert_reqs == ssl.CERT_OPTIONAL - pool = redis.ConnectionPool.from_url( - 'rediss://?ssl_cert_reqs=required') - assert pool.get_connection('_').cert_reqs == ssl.CERT_REQUIRED + pool = get_pool().from_url('rediss://localhost:7000?ssl_cert_reqs=required') + assert pool.get_random_connection().cert_reqs == ssl.CERT_REQUIRED class TestConnection(object): diff --git a/tests/test_cluster_obj.py b/tests/test_cluster_obj.py index 02bbc8ff..fc1b6a1e 100644 --- a/tests/test_cluster_obj.py +++ b/tests/test_cluster_obj.py @@ -75,8 +75,8 @@ def test_blocked_strict_redis_args(): Some arguments should explicitly be blocked because they will not work in a cluster setup """ params = {'startup_nodes': [{'host': '127.0.0.1', 'port': 7000}]} - c = RedisCluster(**params) - assert c.connection_pool.connection_kwargs["socket_timeout"] == ClusterConnectionPool.RedisClusterDefaultTimeout + cluster = RedisCluster(**params) + assert cluster.connection_pool.connection_kwargs["socket_timeout"] == ClusterConnectionPool.RedisClusterDefaultTimeout with pytest.raises(RedisClusterException) as ex: _get_client(RedisCluster, db=1) @@ -105,10 +105,10 @@ def test_host_port_startup_node(): """ Test that it is possible to use host & port arguments as startup node args """ - 
h = "192.168.0.1" - p = 7000 - c = RedisCluster(host=h, port=p, init_slot_cache=False) - assert {"host": h, "port": p} in c.connection_pool.nodes.startup_nodes + host = "192.168.0.1" + port = 7000 + cluster = RedisCluster(host=host, port=port, init_slot_cache=False) + assert {"host": host, "port": port} in cluster.connection_pool.nodes.startup_nodes def test_empty_startup_nodes(): @@ -132,15 +132,19 @@ def test_custom_connectionpool(): """ Test that a custom connection pool will be used by RedisCluster """ - h = "192.168.0.1" - p = 7001 - pool = DummyConnectionPool(host=h, port=p, connection_class=DummyConnection, - startup_nodes=[{'host': h, 'port': p}], - init_slot_cache=False) - c = RedisCluster(connection_pool=pool, init_slot_cache=False) - assert c.connection_pool is pool - assert c.connection_pool.connection_class == DummyConnection - assert {"host": h, "port": p} in c.connection_pool.nodes.startup_nodes + host = "192.168.0.1" + port = 7001 + pool = DummyConnectionPool( + host=host, + port=port, + connection_class=DummyConnection, + startup_nodes=[{'host': host, 'port': port}], + init_slot_cache=False, + ) + cluster = RedisCluster(connection_pool=pool, init_slot_cache=False) + assert cluster.connection_pool is pool + assert cluster.connection_pool.connection_class == DummyConnection + assert {"host": host, "port": port} in cluster.connection_pool.nodes.startup_nodes @patch('rediscluster.nodemanager.Redis', new=MagicMock()) @@ -148,10 +152,10 @@ def test_skip_full_coverage_check(): """ Test if the cluster_require_full_coverage NodeManager method was not called with the flag activated """ - c = RedisCluster("192.168.0.1", 7001, init_slot_cache=False, skip_full_coverage_check=True) - c.connection_pool.nodes.cluster_require_full_coverage = MagicMock() - c.connection_pool.nodes.initialize() - assert not c.connection_pool.nodes.cluster_require_full_coverage.called + cluster = RedisCluster("192.168.0.1", 7001, init_slot_cache=False, skip_full_coverage_check=True) + cluster.connection_pool.nodes.cluster_require_full_coverage = MagicMock() + cluster.connection_pool.nodes.initialize() + assert not cluster.connection_pool.nodes.cluster_require_full_coverage.called def test_blocked_commands(r): @@ -303,9 +307,7 @@ def test_ask_redirection(): 'port': 7001, 'name': '127.0.0.1:7001' } - with patch.object(RedisCluster, - 'parse_response') as parse_response: - + with patch.object(RedisCluster, 'parse_response') as parse_response: host_ip = find_node_ip_based_on_port(r, '7001') def ask_redirect_effect(connection, *args, **options): @@ -332,9 +334,7 @@ def test_pipeline_ask_redirection(): Important thing to verify is that it tries to talk to the second node. 
""" r = get_mocked_redis_client(host="127.0.0.1", port=7000) - with patch.object(RedisCluster, - 'parse_response') as parse_response: - + with patch.object(RedisCluster, 'parse_response') as parse_response: def response(connection, *args, **options): def response(connection, *args, **options): def response(connection, *args, **options): From a9a3ee5e30de68b1d769492d00d0816a404e94d0 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Wed, 19 Feb 2020 15:05:11 +0100 Subject: [PATCH 149/263] Update redis upstream requirement to allow up to any release in the 3.4.x track #fixes #355 --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 665ddccf..1603c29c 100644 --- a/setup.py +++ b/setup.py @@ -32,7 +32,7 @@ url='http://github.com/grokzen/redis-py-cluster', license='MIT', install_requires=[ - 'redis>=3.0.0,<3.4.0' + 'redis>=3.0.0,<3.5.0' ], python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4, !=3.5", extras_require={ From 50af2683f33dcfc3266bcaf5c36ab8277451b942 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Wed, 19 Feb 2020 15:06:04 +0100 Subject: [PATCH 150/263] Update requirements.yml file to allow for any release in 3.4.x track --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 82404bae..debda9a9 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1 +1 @@ -redis>=3.0.0,<3.4.0 +redis>=3.0.0,<3.5.0 From 7d519096013cbb8f2f681f152920322147d7d0f8 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Wed, 19 Feb 2020 15:07:48 +0100 Subject: [PATCH 151/263] Update readme with supported redis-py versions --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index a461e29f..371b56bd 100644 --- a/README.md +++ b/README.md @@ -30,7 +30,7 @@ Latest stable release from pypi $ pip install redis-py-cluster ``` -This major version of `redis-py-cluster` supports `redis-py>=3.0.0,<3.4.0`. +This major version of `redis-py-cluster` supports `redis-py >=3.0.0, <3.5.0`. From db7666bf0144b4ef9d4438967547a109b8a4b4a9 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Mon, 2 Mar 2020 23:05:44 +0100 Subject: [PATCH 152/263] Add new ACL tests. Update conftest to use the new pytest helper methods. Solve issue with skip_unless_arch_bits pytest check. Minor updates to solve some test issues in several test methods. 
--- rediscluster/client.py | 2 +- tests/conftest.py | 41 +++++ tests/test_commands.py | 379 ++++++++++++++++++++++++++++++++++++----- 3 files changed, 378 insertions(+), 44 deletions(-) diff --git a/rediscluster/client.py b/rediscluster/client.py index 799dfeb9..71286766 100644 --- a/rediscluster/client.py +++ b/rediscluster/client.py @@ -246,7 +246,7 @@ def __init__(self, host=None, port=None, startup_nodes=None, max_connections=Non self.nodes_flags = self.__class__.NODES_FLAGS.copy() self.result_callbacks = self.__class__.RESULT_CALLBACKS.copy() self.response_callbacks = CaseInsensitiveDict(self.__class__.RESPONSE_CALLBACKS) - self.response_callbacks = dict_merge(self.response_callbacks, self.CLUSTER_COMMANDS_RESPONSE_CALLBACKS) + self.response_callbacks = CaseInsensitiveDict(dict_merge(self.response_callbacks, self.CLUSTER_COMMANDS_RESPONSE_CALLBACKS)) self.read_from_replicas = read_from_replicas @classmethod diff --git a/tests/conftest.py b/tests/conftest.py index 8d2166a3..1093a82a 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -20,6 +20,40 @@ sys.path.insert(1, basepath) _REDIS_VERSIONS = {} +REDIS_INFO = {} + +default_redis_url = "redis://127.0.0.1:7001" + + +def pytest_addoption(parser): + parser.addoption( + '--redis-url', + default=default_redis_url, + action="store", + help="Redis connection string, defaults to `%(default)s`", + ) + + +def _get_info(redis_url): + """ + customized for a cluster environment + """ + client = RedisCluster.from_url(redis_url) + info = client.info() + for node_name, node_data in info.items(): + if '7001' in node_name: + info = node_data + client.connection_pool.disconnect() + return info + + +def pytest_sessionstart(session): + redis_url = session.config.getoption("--redis-url") + info = _get_info(redis_url) + version = info["redis_version"] + arch_bits = info["arch_bits"] + REDIS_INFO["version"] = version + REDIS_INFO["arch_bits"] = arch_bits def get_version(**kwargs): @@ -223,3 +257,10 @@ def mock_cluster_resp_slaves(request, **kwargs): "slave 19efe5a631f3296fdf21a5441680f893e8cc96ec 0 " "1447836789290 3 connected']") return _gen_cluster_mock_resp(r, response) + + +def skip_unless_arch_bits(arch_bits): + return pytest.mark.skipif( + REDIS_INFO["arch_bits"] != arch_bits, + reason="server is not {}-bit".format(arch_bits), + ) diff --git a/tests/test_commands.py b/tests/test_commands.py index 4c41fbc1..91f3f8b6 100644 --- a/tests/test_commands.py +++ b/tests/test_commands.py @@ -10,7 +10,7 @@ import rediscluster from rediscluster.exceptions import RedisClusterException, ClusterCrossSlotError from rediscluster.utils import dict_merge -from tests.conftest import skip_if_server_version_lt, skip_if_redis_py_version_lt, skip_if_server_version_gte, skip_for_no_cluster_impl +from tests.conftest import skip_if_server_version_lt, skip_if_redis_py_version_lt, skip_if_server_version_gte, skip_for_no_cluster_impl, skip_unless_arch_bits # 3rd party imports import pytest @@ -23,7 +23,7 @@ @pytest.fixture() def slowlog(request, r): - current_config = get_main_cluster_node_data(r.config_get()) + current_config = r.config_get() old_slower_than_value = current_config['slowlog-log-slower-than'] old_max_legnth_value = current_config['slowlog-max-len'] @@ -79,6 +79,9 @@ def test_response_callbacks(self, r): r['a'] = 'foo' assert r['a'] == 'static' + def test_case_insensitive_command_names(self, r): + assert r.response_callbacks['del'] == r.response_callbacks['DEL'] + class TestRedisCommands(object): @@ -87,7 +90,181 @@ def test_command_on_invalid_key_type(self, 
r): with pytest.raises(redis.ResponseError): r['a'] - # SERVER INFORMATION + # SERVER INFORMATION + @skip_if_server_version_lt('5.9.101') + def test_acl_cat_no_category(self, r): + categories = r.acl_cat() + assert isinstance(categories, list) + assert 'read' in categories + + @skip_if_server_version_lt('5.9.101') + def test_acl_cat_with_category(self, r): + commands = r.acl_cat('read') + assert isinstance(commands, list) + assert 'get' in commands + + @skip_if_server_version_lt('5.9.101') + def test_acl_deluser(self, r, request): + username = 'redis-py-user' + + def teardown(): + r.acl_deluser(username) + + request.addfinalizer(teardown) + + assert r.acl_deluser(username) == 0 + assert r.acl_setuser(username, enabled=False, reset=True) + assert r.acl_deluser(username) == 1 + + @skip_if_server_version_lt('5.9.101') + def test_acl_genpass(self, r): + password = r.acl_genpass() + assert isinstance(password, basestring) + + @skip_if_server_version_lt('5.9.101') + def test_acl_getuser_setuser(self, r, request): + username = 'redis-py-user' + + def teardown(): + r.acl_deluser(username) + request.addfinalizer(teardown) + + # test enabled=False + assert r.acl_setuser(username, enabled=False, reset=True) + assert r.acl_getuser(username) == { + 'categories': ['-@all'], + 'commands': [], + 'enabled': False, + 'flags': ['off'], + 'keys': [], + 'passwords': [], + } + + # test nopass=True + assert r.acl_setuser(username, enabled=True, reset=True, nopass=True) + assert r.acl_getuser(username) == { + 'categories': ['-@all'], + 'commands': [], + 'enabled': True, + 'flags': ['on', 'nopass'], + 'keys': [], + 'passwords': [], + } + + # test all args + assert r.acl_setuser(username, enabled=True, reset=True, + passwords=['+pass1', '+pass2'], + categories=['+set', '+@hash', '-geo'], + commands=['+get', '+mget', '-hset'], + keys=['cache:*', 'objects:*']) + acl = r.acl_getuser(username) + assert set(acl['categories']) == set(['-@all', '+@set', '+@hash']) + assert set(acl['commands']) == set(['+get', '+mget', '-hset']) + assert acl['enabled'] is True + assert acl['flags'] == ['on'] + assert set(acl['keys']) == set([b'cache:*', b'objects:*']) + assert len(acl['passwords']) == 2 + + # test reset=False keeps existing ACL and applies new ACL on top + assert r.acl_setuser(username, enabled=True, reset=True, + passwords=['+pass1'], + categories=['+@set'], + commands=['+get'], + keys=['cache:*']) + assert r.acl_setuser(username, enabled=True, + passwords=['+pass2'], + categories=['+@hash'], + commands=['+mget'], + keys=['objects:*']) + acl = r.acl_getuser(username) + assert set(acl['categories']) == set(['-@all', '+@set', '+@hash']) + assert set(acl['commands']) == set(['+get', '+mget']) + assert acl['enabled'] is True + assert acl['flags'] == ['on'] + assert set(acl['keys']) == set([b'cache:*', b'objects:*']) + assert len(acl['passwords']) == 2 + + # test removal of passwords + assert r.acl_setuser(username, enabled=True, reset=True, + passwords=['+pass1', '+pass2']) + assert len(r.acl_getuser(username)['passwords']) == 2 + assert r.acl_setuser(username, enabled=True, + passwords=['-pass2']) + assert len(r.acl_getuser(username)['passwords']) == 1 + + # Resets and tests that hashed passwords are set properly. 
+ hashed_password = ('5e884898da28047151d0e56f8dc629' + '2773603d0d6aabbdd62a11ef721d1542d8') + assert r.acl_setuser(username, enabled=True, reset=True, + hashed_passwords=['+' + hashed_password]) + acl = r.acl_getuser(username) + assert acl['passwords'] == [hashed_password] + + # test removal of hashed passwords + assert r.acl_setuser(username, enabled=True, reset=True, + hashed_passwords=['+' + hashed_password], + passwords=['+pass1']) + assert len(r.acl_getuser(username)['passwords']) == 2 + assert r.acl_setuser(username, enabled=True, + hashed_passwords=['-' + hashed_password]) + assert len(r.acl_getuser(username)['passwords']) == 1 + + @skip_if_server_version_lt('5.9.101') + def test_acl_list(self, r, request): + username = 'redis-py-user' + + def teardown(): + r.acl_deluser(username) + request.addfinalizer(teardown) + + assert r.acl_setuser(username, enabled=False, reset=True) + users = r.acl_list() + assert 'user %s off -@all' % username in users + + @skip_if_server_version_lt('5.9.101') + def test_acl_setuser_categories_without_prefix_fails(self, r, request): + username = 'redis-py-user' + + def teardown(): + r.acl_deluser(username) + request.addfinalizer(teardown) + + with pytest.raises(exceptions.DataError): + r.acl_setuser(username, categories=['list']) + + @skip_if_server_version_lt('5.9.101') + def test_acl_setuser_commands_without_prefix_fails(self, r, request): + username = 'redis-py-user' + + def teardown(): + r.acl_deluser(username) + request.addfinalizer(teardown) + + with pytest.raises(exceptions.DataError): + r.acl_setuser(username, commands=['get']) + + @skip_if_server_version_lt('5.9.101') + def test_acl_setuser_add_passwords_and_nopass_fails(self, r, request): + username = 'redis-py-user' + + def teardown(): + r.acl_deluser(username) + request.addfinalizer(teardown) + + with pytest.raises(exceptions.DataError): + r.acl_setuser(username, passwords='+mypass', nopass=True) + + @skip_if_server_version_lt('5.9.101') + def test_acl_users(self, r): + users = r.acl_users() + assert isinstance(users, list) + assert len(users) > 0 + + @skip_if_server_version_lt('5.9.101') + def test_acl_whoami(self, r): + username = r.acl_whoami() + assert isinstance(username, basestring) + def test_client_list(self, r): clients = r.client_list() client_data = get_main_cluster_node_data(clients)[0] @@ -123,6 +300,83 @@ def test_client_setname(self, r): assert r.client_setname('redis_py_test') assert r.client_getname() == 'redis_py_test' + @skip_if_server_version_lt('2.6.9') + @skip_for_no_cluster_impl() + def test_client_kill(self, r, r2): + r.client_setname('redis-py-c1') + r2.client_setname('redis-py-c2') + clients = [client for client in r.client_list() + if client.get('name') in ['redis-py-c1', 'redis-py-c2']] + assert len(clients) == 2 + + clients_by_name = dict([(client.get('name'), client) + for client in clients]) + + client_addr = clients_by_name['redis-py-c2'].get('addr') + assert r.client_kill(client_addr) is True + + clients = [client for client in r.client_list() + if client.get('name') in ['redis-py-c1', 'redis-py-c2']] + assert len(clients) == 1 + assert clients[0].get('name') == 'redis-py-c1' + + @skip_if_server_version_lt('2.8.12') + @skip_for_no_cluster_impl() + def test_client_kill_filter_invalid_params(self, r): + # empty + with pytest.raises(exceptions.DataError): + r.client_kill_filter() + + # invalid skipme + with pytest.raises(exceptions.DataError): + r.client_kill_filter(skipme="yeah") + + # invalid type + with pytest.raises(exceptions.DataError): + 
r.client_kill_filter(_type="caster") + + @skip_if_server_version_lt('2.8.12') + @skip_for_no_cluster_impl() + def test_client_kill_filter_by_id(self, r, r2): + r.client_setname('redis-py-c1') + r2.client_setname('redis-py-c2') + clients = [client for client in r.client_list() + if client.get('name') in ['redis-py-c1', 'redis-py-c2']] + assert len(clients) == 2 + + clients_by_name = dict([(client.get('name'), client) + for client in clients]) + + client_2_id = clients_by_name['redis-py-c2'].get('id') + resp = r.client_kill_filter(_id=client_2_id) + assert resp == 1 + + clients = [client for client in r.client_list() + if client.get('name') in ['redis-py-c1', 'redis-py-c2']] + assert len(clients) == 1 + assert clients[0].get('name') == 'redis-py-c1' + + @skip_if_server_version_lt('2.8.12') + @skip_for_no_cluster_impl() + def test_client_kill_filter_by_addr(self, r, r2): + r.client_setname('redis-py-c1') + r2.client_setname('redis-py-c2') + clients = [client for client in r.client_list() + if client.get('name') in ['redis-py-c1', 'redis-py-c2']] + assert len(clients) == 2 + + clients_by_name = dict([(client.get('name'), client) + for client in clients]) + + client_2_addr = clients_by_name['redis-py-c2'].get('addr') + resp = r.client_kill_filter(addr=client_2_addr) + assert resp == 1 + + clients = [client for client in r.client_list() + if client.get('name') in ['redis-py-c1', 'redis-py-c2']] + assert len(clients) == 1 + assert clients[0].get('name') == 'redis-py-c1' + @skip_if_server_version_lt('2.6.9') @skip_for_no_cluster_impl() def test_client_list_after_client_setname(self, r): @@ -145,7 +399,6 @@ def test_config_get(self, r): def test_config_resetstat(self, r): r.ping() - prior_commands_processed = int(get_main_cluster_node_data(r.info())['total_commands_processed']) assert prior_commands_processed >= 1 r.config_resetstat() @@ -267,6 +520,7 @@ def test_bitcount(self, r): assert r.bitcount('a', -2, -1) == 2 assert r.bitcount('a', 1, 1) == 1 + # TODO: Move this method to a more generic solution/method that tests the blocked nodes flags feature def test_bitop_not_supported(self, r): """ Validate that the command is blocked in cluster mode and throws an Exception @@ -1005,7 +1259,6 @@ def test_spop(self, r): assert value in s assert r.smembers('a') == set(s) - {value} - @skip_if_server_version_lt('3.2.0') def test_spop_multi_value(self, r): s = [b'1', b'2', b'3'] r.sadd('a', *s) @@ -1812,10 +2065,11 @@ def test_old_geopos_no_value(self, r): @skip_if_server_version_lt('3.2.0') def test_georadius(self, r): values = (2.1909389952632, 41.433791470673, 'place1') +\ - (2.1873744593677, 41.406342043777, 'place2') + (2.1873744593677, 41.406342043777, b'\x80place2') r.geoadd('barcelona', *values) - assert r.georadius(b'barcelona', 2.191, 41.433, 1000) == [b'place1'] + assert r.georadius('barcelona', 2.191, 41.433, 1000) == [b'place1'] + assert r.georadius('barcelona', 2.187, 41.406, 1000) == [b'\x80place2'] @skip_if_server_version_lt('3.2.0') def test_georadius_no_values(self, r): @@ -1834,6 +2088,7 @@ def test_georadius_units(self, r): assert r.georadius(b'barcelona', 2.191, 41.433, 1, unit='km') ==\ [b'place1'] + @skip_unless_arch_bits(64) @skip_if_server_version_lt('3.2.0') def test_georadius_with(self, r): values = (2.1909389952632, 41.433791470673, 'place1') +\ @@ -1913,17 +2168,17 @@ def test_georadius_store_dist(self, r): @skip_if_server_version_lt('3.2.0') def test_georadiusmember(self, r): values = (2.1909389952632, 41.433791470673, 'place1') +\ - (2.1873744593677, 41.406342043777, 'place2') 
+ (2.1873744593677, 41.406342043777, b'\x80place2') r.geoadd('barcelona', *values) assert r.georadiusbymember('barcelona', 'place1', 4000) ==\ - [b'place2', b'place1'] + [b'\x80place2', b'place1'] assert r.georadiusbymember('barcelona', 'place1', 10) == [b'place1'] assert r.georadiusbymember('barcelona', 'place1', 4000, withdist=True, withcoord=True, withhash=True) ==\ - [[b'place2', 3067.4157, 3471609625421029, + [[b'\x80place2', 3067.4157, 3471609625421029, (2.187376320362091, 41.40634178640635)], [b'place1', 0.0, 3471609698139488, (2.1909382939338684, 41.433790281840835)]] @@ -1984,7 +2239,7 @@ def test_xclaim(self, r): assert response == [] # read the group as consumer1 to initially claim the messages - r.xreadgroup(group, consumer1, streams={stream: 0}) + r.xreadgroup(group, consumer1, streams={stream: '>'}) # claim the message as consumer2 response = r.xclaim(stream, group, consumer2, @@ -1997,6 +2252,32 @@ def test_xclaim(self, r): min_idle_time=0, message_ids=(message_id,), justid=True) == [message_id] + @skip_if_server_version_lt('5.0.0') + @skip_for_no_cluster_impl() + def test_xclaim_trimmed(self, r): + # xclaim should not raise an exception if the item is not there + stream = 'stream' + group = 'group' + + r.xgroup_create(stream, group, id="$", mkstream=True) + + # add a couple of new items + sid1 = r.xadd(stream, {"item": 0}) + sid2 = r.xadd(stream, {"item": 0}) + + # read them from consumer1 + r.xreadgroup(group, 'consumer1', {stream: ">"}) + + # add a 3rd and trim the stream down to 2 items + r.xadd(stream, {"item": 3}, maxlen=2, approximate=False) + + # xclaim them from consumer2 + # the item that is still in the stream should be returned + item = r.xclaim(stream, group, 'consumer2', 0, [sid1, sid2]) + assert len(item) == 2 + assert item[0] == (None, None) + assert item[1][0] == sid2 + @skip_if_server_version_lt('5.0.0') @skip_for_no_cluster_impl() def test_xdel(self, r): @@ -2070,7 +2351,7 @@ def test_xgroup_delconsumer(self, r): assert r.xgroup_delconsumer(stream, group, consumer) == 0 # read all messages from the group - r.xreadgroup(group, consumer, streams={stream: 0}) + r.xreadgroup(group, consumer, streams={stream: '>'}) # deleting the consumer should return 2 pending messages assert r.xgroup_delconsumer(stream, group, consumer) == 2 @@ -2114,15 +2395,17 @@ def test_xinfo_consumers(self, r): consumer1 = 'consumer1' consumer2 = 'consumer2' r.xadd(stream, {'foo': 'bar'}) + r.xadd(stream, {'foo': 'bar'}) + r.xadd(stream, {'foo': 'bar'}) r.xgroup_create(stream, group, 0) - r.xreadgroup(group, consumer1, streams={stream: 0}) - r.xreadgroup(group, consumer2, streams={stream: 0}) + r.xreadgroup(group, consumer1, streams={stream: '>'}, count=1) + r.xreadgroup(group, consumer2, streams={stream: '>'}) info = r.xinfo_consumers(stream, group) assert len(info) == 2 expected = [ {'name': consumer1.encode(), 'pending': 1}, - {'name': consumer2.encode(), 'pending': 0}, + {'name': consumer2.encode(), 'pending': 2}, ] # we can't determine the idle time, so just make sure it's an int @@ -2172,8 +2455,8 @@ def test_xpending(self, r): assert r.xpending(stream, group) == expected # read 1 message from the group with each consumer - r.xreadgroup(group, consumer1, streams={stream: 0}, count=1) - r.xreadgroup(group, consumer2, streams={stream: m1}, count=1) + r.xreadgroup(group, consumer1, streams={stream: '>'}, count=1) + r.xreadgroup(group, consumer2, streams={stream: '>'}, count=1) expected = { 'pending': 2, @@ -2198,13 +2481,14 @@ def test_xpending_range(self, r): r.xgroup_create(stream, 
group, 0) # xpending range on a group that has no consumers yet - assert r.xpending_range(stream, group) == [] + assert r.xpending_range(stream, group, min='-', max='+', count=5) == [] # read 1 message from the group with each consumer - r.xreadgroup(group, consumer1, streams={stream: 0}, count=1) - r.xreadgroup(group, consumer2, streams={stream: m1}, count=1) + r.xreadgroup(group, consumer1, streams={stream: '>'}, count=1) + r.xreadgroup(group, consumer2, streams={stream: '>'}, count=1) - response = r.xpending_range(stream, group) + response = r.xpending_range(stream, group, + min='-', max='+', count=5) assert len(response) == 2 assert response[0]['message_id'] == m1 assert response[0]['consumer'] == consumer1.encode() @@ -2291,7 +2575,7 @@ def test_xreadgroup(self, r): expected = [ [ - stream, + stream.encode(), [ get_stream_message(r, stream, m1), get_stream_message(r, stream, m2), @@ -2299,48 +2583,57 @@ def test_xreadgroup(self, r): ] ] # xread starting at 0 returns both messages - assert r.xreadgroup(group, consumer, streams={stream: 0}) == expected + assert r.xreadgroup(group, consumer, streams={stream: '>'}) == expected r.xgroup_destroy(stream, group) r.xgroup_create(stream, group, 0) expected = [ [ - stream, + stream.encode(), [ get_stream_message(r, stream, m1), ] ] ] - # xread starting at 0 and count=1 returns only the first message - assert r.xreadgroup(group, consumer, streams={stream: 0}, count=1) == \ - expected + # xread with count=1 returns only the first message + assert r.xreadgroup(group, consumer, + streams={stream: '>'}, count=1) == expected r.xgroup_destroy(stream, group) - r.xgroup_create(stream, group, 0) - expected = [ - [ - stream, - [ - get_stream_message(r, stream, m2), - ] - ] - ] - # xread starting at m1 returns only the second message - assert r.xreadgroup(group, consumer, streams={stream: m1}) == expected + # create the group using $ as the last id meaning subsequent reads + # will only find messages added after this + r.xgroup_create(stream, group, '$') + expected = [] + # xread starting after the last message returns an empty message list + assert r.xreadgroup(group, consumer, streams={stream: '>'}) == expected + + # xreadgroup with noack does not have any items in the PEL r.xgroup_destroy(stream, group) - r.xgroup_create(stream, group, 0) + r.xgroup_create(stream, group, '0') + assert len(r.xreadgroup(group, consumer, streams={stream: '>'}, + noack=True)[0][1]) == 2 + # now there should be nothing pending + assert len(r.xreadgroup(group, consumer, + streams={stream: '0'})[0][1]) == 0 - # xread starting at the last message returns an empty message list + r.xgroup_destroy(stream, group) + r.xgroup_create(stream, group, '0') + # delete all the messages in the stream expected = [ [ - stream, - [] + stream.encode(), + [ + (m1, {}), + (m2, {}), + ] ] ] - assert r.xreadgroup(group, consumer, streams={stream: m2}) == expected + r.xreadgroup(group, consumer, streams={stream: '>'}) + r.xtrim(stream, 0) + assert r.xreadgroup(group, consumer, streams={stream: '0'}) == expected @skip_if_server_version_lt('5.0.0') @skip_for_no_cluster_impl() From 347672b53ea5c4af9e828e28d76c0ea5269c3d58 Mon Sep 17 00:00:00 2001 From: Alan Li Date: Fri, 15 Nov 2019 17:07:08 -0800 Subject: [PATCH 153/263] initial implementation --- rediscluster/connection.py | 136 +++++++++++++++++++++++++++++++++++-- 1 file changed, 132 insertions(+), 4 deletions(-) diff --git a/rediscluster/connection.py b/rediscluster/connection.py index 677bdace..08baf077 100644 --- a/rediscluster/connection.py +++ 
b/rediscluster/connection.py @@ -7,6 +7,7 @@ import threading from contextlib import contextmanager from itertools import chain +from queue import LifoQueue, Full, Empty # rediscluster imports from .nodemanager import NodeManager @@ -110,7 +111,7 @@ def __init__(self, startup_nodes=None, init_slot_cache=True, connection_class=No :nodemanager_follow_cluster: The node manager will during initialization try the last set of nodes that it was operating on. This will allow the client to drift along side the cluster - if the cluster nodes move around alot. + if the cluster nodes move around a lot. """ if connection_class is None: connection_class = ClusterConnection @@ -180,7 +181,7 @@ def _checkpid(self): with self._check_lock: if self.pid == os.getpid(): # another thread already did the work while we waited - # on the lockself. + # on the lock. return self.disconnect() self.reset() @@ -230,7 +231,7 @@ def make_connection(self, node): self._created_connections_per_node[node['name']] += 1 connection = self.connection_class(host=node["host"], port=node["port"], **self.connection_kwargs) - # Must store node in the connection to make it eaiser to track + # Must store node in the connection to make it easier to track connection.node = node return connection @@ -281,7 +282,7 @@ def get_random_connection(self): """ Open new connection to random redis server. """ - # TODO: Should this open a new random connection or shuld it look if there is any + # TODO: Should this open a new random connection or should it look if there is any # open available connections and return that instead? for node in self.nodes.random_startup_node_ittr(): connection = self.get_connection_by_node(node) @@ -343,6 +344,133 @@ def get_node_by_slot(self, slot, *args, **kwargs): return self.get_master_node_by_slot(slot) +class ClusterBlockingConnectionPool(ClusterConnectionPool): + """ + Thread-safe blocking connection pool for Redis Cluster:: + + >>> from rediscluster.client import RedisCluster + >>> client = RedisCluster(connection_pool=ClusterBlockingConnectionPool()) + + It performs the same function as the default + ``:py:class: ~rediscluster.connection.ClusterConnectionPool`` implementation, in that, + it maintains a pool of reusable connections to a redis cluster that can be shared by + multiple redis clients (safely across threads if required). + + The difference is that, in the event that a client tries to get a + connection from the pool when all of connections are in use, rather than + raising a ``:py:class: ~rediscluster.exceptions.RedisClusterException`` (as the default + ``:py:class: ~rediscluster.connection.ClusterConnectionPool`` implementation does), it + makes the client wait ("blocks") for a specified number of seconds until + a connection becomes available. + + Use ``max_connections`` to increase / decrease the pool size:: + + >>> pool = ClusterBlockingConnectionPool(max_connections=10) + + Use ``timeout`` to tell it either how many seconds to wait for a connection + to become available, or to block forever: + + # Block forever. + >>> pool = ClusterBlockingConnectionPool(timeout=None) + + # Raise a ``ConnectionError`` after five seconds if a connection is + # not available. 
+ >>> pool = ClusterBlockingConnectionPool(timeout=5) + """ + def __init__(self, startup_nodes=None, init_slot_cache=True, connection_class=None, + max_connections=None, max_connections_per_node=False, reinitialize_steps=None, + skip_full_coverage_check=False, nodemanager_follow_cluster=False, + timeout=20, queue_class=LifoQueue, **connection_kwargs): + + self.queue_class = queue_class + self.timeout = timeout + + super(ClusterBlockingConnectionPool, self).__init__( + startup_nodes=startup_nodes, + init_slot_cache=init_slot_cache, + connection_class=connection_class, + max_connections=max_connections, + max_connections_per_node=max_connections_per_node, + reinitialize_steps=reinitialize_steps, + skip_full_coverage_check=skip_full_coverage_check, + nodemanager_follow_cluster=nodemanager_follow_cluster, + **connection_kwargs + ) + + def reset(self): + self.pid = os.getpid() + self._check_lock = threading.Lock() + + # Create and fill up a thread safe queue with ``None`` values. + # We will use ``None`` to denote when to create a new connection rather than to reuse. + self.pool = self.queue_class(self.max_connections) + while True: + try: + self.pool.put_nowait(None) + except Full: + break + + # Keep a list of actual connection instances so that we can + # disconnect them later. + self._connections_by_node = {} # Dict(Node, Set) + + def make_connection(self, node): + """ Create a new connection """ + connection = self.connection_class(host=node["host"], port=node["port"], **self.connection_kwargs) + self._connections_by_node.setdefault(node["name"], set()).add(connection) + connection.node = node + return connection + + def get_connection(self, command_name, *keys, **options): + if command_name != "pubsub": + raise RedisClusterException("Only 'pubsub' commands can be used by get_connection()") + + channel = options.pop('channel', None) + + if not channel: + # find random startup node and try to get connection again + return self.get_random_connection() + + slot = self.nodes.keyslot(channel) + node = self.get_master_node_by_slot(slot) + + self._checkpid() + connection = None + try: + connection = self.pool.get(block=True, timeout=self.timeout) + except Empty: + # Note that this is not caught by the redis cluster client and will be + # raised unless handled by application code. + raise ConnectionError("No connection available") + + if connection is None: + connection = self.make_connection() + + return connection + + def release(self, connection): + """ + Releases the connection back to the pool + """ + self._checkpid() + if connection.pid != self.pid: + return + + # Put the connection back into the pool. + try: + self.pool.put_nowait(connection) + except Full: + # perhaps the pool has been reset() after a fork? regardless, + # we don't want this connection + pass + + def disconnect(self): + "Disconnects all connections in the pool." 
+ for node in self._connections_by_node: + for connection in self._connections_by_node[node["name"]]: + connection.disconnect() + + class ClusterReadOnlyConnectionPool(ClusterConnectionPool): """ Readonly connection pool for rediscluster From 7731db5f94c7a982e4c874a7cd2b29ca5c09bf2b Mon Sep 17 00:00:00 2001 From: Alan Li Date: Fri, 15 Nov 2019 20:27:32 -0800 Subject: [PATCH 154/263] moved blocking queues to node level --- rediscluster/connection.py | 45 +++++++++++++++++++++++++------------- 1 file changed, 30 insertions(+), 15 deletions(-) diff --git a/rediscluster/connection.py b/rediscluster/connection.py index 08baf077..2f73b2a3 100644 --- a/rediscluster/connection.py +++ b/rediscluster/connection.py @@ -8,6 +8,7 @@ from contextlib import contextmanager from itertools import chain from queue import LifoQueue, Full, Empty +from collections import defaultdict # rediscluster imports from .nodemanager import NodeManager @@ -397,27 +398,34 @@ def __init__(self, startup_nodes=None, init_slot_cache=True, connection_class=No **connection_kwargs ) - def reset(self): - self.pid = os.getpid() - self._check_lock = threading.Lock() - + def blocking_pool_factory(self): # Create and fill up a thread safe queue with ``None`` values. # We will use ``None`` to denote when to create a new connection rather than to reuse. - self.pool = self.queue_class(self.max_connections) + pool = self.queue_class(self.max_connections_per_node) while True: try: - self.pool.put_nowait(None) + pool.put_nowait(None) except Full: break + return pool + + def reset(self): + self.pid = os.getpid() + self._check_lock = threading.Lock() + self.pool_by_node = defaultdict(self.blocking_pool_factory) # Keep a list of actual connection instances so that we can # disconnect them later. - self._connections_by_node = {} # Dict(Node, Set) + self._connections = [] def make_connection(self, node): """ Create a new connection """ + if len(self._connections) >= self.max_connections: + # todo ayl: sleep here? + raise RedisClusterException("Too many total connections to cluster") + connection = self.connection_class(host=node["host"], port=node["port"], **self.connection_kwargs) - self._connections_by_node.setdefault(node["name"], set()).add(connection) + self._connections.append(connection) connection.node = node return connection @@ -430,17 +438,25 @@ def get_connection(self, command_name, *keys, **options): if not channel: # find random startup node and try to get connection again return self.get_random_connection() + return self.get_connection_by_node( + self.get_master_node_by_slot( + self.nodes.keyslot(channel) + ) + ) - slot = self.nodes.keyslot(channel) - node = self.get_master_node_by_slot(slot) - + def get_connection_by_node(self, node): + """ + Get a connection by node + """ self._checkpid() connection = None try: - connection = self.pool.get(block=True, timeout=self.timeout) + connection = self.pool_by_node[node["name"]].get(block=True, timeout=self.timeout) except Empty: # Note that this is not caught by the redis cluster client and will be # raised unless handled by application code. + + # ``ConnectionError`` is raised when timeout is hit on the queue. raise ConnectionError("No connection available") if connection is None: @@ -458,7 +474,7 @@ def release(self, connection): # Put the connection back into the pool. try: - self.pool.put_nowait(connection) + self.pool_by_node[connection.node["name"]].put_nowait(connection) except Full: # perhaps the pool has been reset() after a fork? 
regardless, # we don't want this connection @@ -466,8 +482,7 @@ def release(self, connection): def disconnect(self): "Disconnects all connections in the pool." - for node in self._connections_by_node: - for connection in self._connections_by_node[node["name"]]: + for connection in self._connections: connection.disconnect() From b66cf3fad41cfa7f503c6f161c77ec0bd31026e7 Mon Sep 17 00:00:00 2001 From: Alan Li Date: Sun, 17 Nov 2019 18:27:21 -0800 Subject: [PATCH 155/263] fixed max connection implementation --- rediscluster/connection.py | 23 +++++++++++++++-------- 1 file changed, 15 insertions(+), 8 deletions(-) diff --git a/rediscluster/connection.py b/rediscluster/connection.py index 2f73b2a3..a36749c9 100644 --- a/rediscluster/connection.py +++ b/rediscluster/connection.py @@ -401,7 +401,7 @@ def __init__(self, startup_nodes=None, init_slot_cache=True, connection_class=No def blocking_pool_factory(self): # Create and fill up a thread safe queue with ``None`` values. # We will use ``None`` to denote when to create a new connection rather than to reuse. - pool = self.queue_class(self.max_connections_per_node) + pool = self.queue_class(self.max_connections) while True: try: pool.put_nowait(None) @@ -409,10 +409,21 @@ def blocking_pool_factory(self): break return pool + def get_pool(self, node): + return self._pool_by_node[node["name"]] \ + if self.max_connections_per_node or node is None else self._group_pool + def reset(self): self.pid = os.getpid() self._check_lock = threading.Lock() - self.pool_by_node = defaultdict(self.blocking_pool_factory) + self._pool_by_node = None + self._group_pool = None + + # use conditional to minimize overhead in initializing queue + if self.max_connections_per_node: + self._pool_by_node = defaultdict(self.blocking_pool_factory) + else: + self._group_pool = self.blocking_pool_factory() # Keep a list of actual connection instances so that we can # disconnect them later. @@ -420,10 +431,6 @@ def reset(self): def make_connection(self, node): """ Create a new connection """ - if len(self._connections) >= self.max_connections: - # todo ayl: sleep here? - raise RedisClusterException("Too many total connections to cluster") - connection = self.connection_class(host=node["host"], port=node["port"], **self.connection_kwargs) self._connections.append(connection) connection.node = node @@ -451,7 +458,7 @@ def get_connection_by_node(self, node): self._checkpid() connection = None try: - connection = self.pool_by_node[node["name"]].get(block=True, timeout=self.timeout) + connection = self.get_pool(node=node).get(block=True, timeout=self.timeout) except Empty: # Note that this is not caught by the redis cluster client and will be # raised unless handled by application code. @@ -474,7 +481,7 @@ def release(self, connection): # Put the connection back into the pool. try: - self.pool_by_node[connection.node["name"]].put_nowait(connection) + self.get_pool(connection.node).put_nowait(connection) except Full: # perhaps the pool has been reset() after a fork? 
regardless, # we don't want this connection From 0dc1b288a004cb354f7a5699ca3ac5eb81385161 Mon Sep 17 00:00:00 2001 From: Alan Li Date: Sun, 17 Nov 2019 20:27:34 -0800 Subject: [PATCH 156/263] added tests and fixed issue where connections from different nodes were being reused --- rediscluster/connection.py | 21 +++- tests/test_cluster_connection_pool.py | 160 +++++++++++++++++++++++++- 2 files changed, 176 insertions(+), 5 deletions(-) diff --git a/rediscluster/connection.py b/rediscluster/connection.py index a36749c9..a066b78a 100644 --- a/rediscluster/connection.py +++ b/rediscluster/connection.py @@ -369,7 +369,7 @@ class ClusterBlockingConnectionPool(ClusterConnectionPool): >>> pool = ClusterBlockingConnectionPool(max_connections=10) Use ``timeout`` to tell it either how many seconds to wait for a connection - to become available, or to block forever: + to become available when accessing the queue, or to block forever: # Block forever. >>> pool = ClusterBlockingConnectionPool(timeout=None) @@ -457,8 +457,14 @@ def get_connection_by_node(self, node): """ self._checkpid() connection = None + connections_to_other_nodes = [] + pool = self.get_pool(node=node) try: - connection = self.get_pool(node=node).get(block=True, timeout=self.timeout) + connection = pool.get(block=True, timeout=self.timeout) + while connection is not None and connection.node != node: + connections_to_other_nodes.append(connection) + connection = self.get_pool(node=node).get(block=True, timeout=self.timeout) + except Empty: # Note that this is not caught by the redis cluster client and will be # raised unless handled by application code. @@ -466,6 +472,17 @@ def get_connection_by_node(self, node): # ``ConnectionError`` is raised when timeout is hit on the queue. raise ConnectionError("No connection available") + # Put all the connections belonging to other nodes back, + # disconnecting the ones we fail to return. 
+ for idx, connection in enumerate(connections_to_other_nodes): + try: + pool.put(connection, timeout=self.timeout) + except Full: + for lost_connection in connections_to_other_nodes[idx:]: + self._connections.remove(lost_connection) + lost_connection.disconnect() + break + if connection is None: connection = self.make_connection() diff --git a/tests/test_cluster_connection_pool.py b/tests/test_cluster_connection_pool.py index 1553afdb..a8677b51 100644 --- a/tests/test_cluster_connection_pool.py +++ b/tests/test_cluster_connection_pool.py @@ -8,7 +8,7 @@ # rediscluster imports from rediscluster.connection import ( - ClusterConnectionPool, ClusterReadOnlyConnectionPool, + ClusterConnectionPool, ClusterBlockingConnectionPool, ClusterReadOnlyConnectionPool, ClusterConnection, UnixDomainSocketConnection) from rediscluster.exceptions import RedisClusterException from tests.conftest import skip_if_server_version_lt @@ -108,7 +108,7 @@ def test_reuse_previously_released_connection(self): def test_repr_contains_db_info_tcp(self): """ - Note: init_slot_cache muts be set to false otherwise it will try to + Note: init_slot_cache must be set to false otherwise it will try to query the test server for data and then it can't be predicted reliably """ connection_kwargs = {'host': 'localhost', 'port': 7000} @@ -120,7 +120,7 @@ def test_repr_contains_db_info_tcp(self): def test_repr_contains_db_info_unix(self): """ - Note: init_slot_cache muts be set to false otherwise it will try to + Note: init_slot_cache must be set to false otherwise it will try to query the test server for data and then it can't be predicted reliably """ connection_kwargs = {'path': '/abc', 'db': 1} @@ -190,6 +190,160 @@ def test_master_node_by_slot(self): node['port'] = 7002 +class TestClusterBlockingConnectionPool(object): + def get_pool(self, connection_kwargs=None, max_connections=None, max_connections_per_node=None, + connection_class=DummyConnection, init_slot_cache=True, timeout=20): + connection_kwargs = connection_kwargs or {} + pool = ClusterBlockingConnectionPool( + startup_nodes=[{"host": "127.0.0.1", "port": 7000}], + init_slot_cache=init_slot_cache, + connection_class=connection_class, + max_connections=max_connections, + max_connections_per_node=max_connections_per_node, + timeout=timeout, + **connection_kwargs) + return pool + + def test_connection_creation(self): + connection_kwargs = {'foo': 'bar', 'biz': 'baz'} + pool = self.get_pool(connection_kwargs=connection_kwargs) + connection = pool.get_connection_by_node({"host": "127.0.0.1", "port": 7000}) + assert isinstance(connection, DummyConnection) + assert connection.kwargs == connection_kwargs + + def test_multiple_connections(self): + pool = self.get_pool() + c1 = pool.get_connection_by_node({"host": "127.0.0.1", "port": 7000}) + c2 = pool.get_connection_by_node({"host": "127.0.0.1", "port": 7001}) + assert c1 != c2 + + def test_connection_pool_blocks_until_timeout(self): + "When out of connections, block for timeout seconds, then raise" + pool = self.get_pool(max_connections=1, timeout=0.1) + pool.get_connection("pubsub") + + start = time.time() + with pytest.raises(redis.ConnectionError): + pool.get_connection("pubsub") + # we should have waited at least 0.1 seconds + assert time.time() - start >= 0.1 + + def test_max_per_node_connection_pool_blocks_until_timeout(self): + "When out of connections, block for timeout seconds, then raise" + pool = self.get_pool(max_connections=1, max_connections_per_node=True, timeout=0.1) + pool.get_connection_by_node({"host": 
"127.0.0.1", "port": 7000}) + + start = time.time() + with pytest.raises(redis.ConnectionError): + pool.get_connection_by_node({"host": "127.0.0.1", "port": 7000}) + # we should have waited at least 0.1 seconds + assert time.time() - start >= 0.1 + + # shouldn't error out as this is another node + pool.get_connection_by_node({"host": "127.0.0.1", "port": 7001}) + + start = time.time() + with pytest.raises(redis.ConnectionError): + pool.get_connection_by_node({"host": "127.0.0.1", "port": 7001}) + # we should have waited at least 0.1 seconds + assert time.time() - start >= 0.1 + + def test_connection_pool_blocks_until_another_connection_released(self): + """ + When out of connections, block until another connection is released + to the pool + """ + pool = self.get_pool(max_connections=1, timeout=2) + c1 = pool.get_connection_by_node({"host": "127.0.0.1", "port": 7000}) + + def target(): + time.sleep(0.1) + pool.release(c1) + + Thread(target=target).start() + start = time.time() + pool.get_connection_by_node({"host": "127.0.0.1", "port": 7000}) + assert time.time() - start >= 0.1 + + def test_max_per_node_connection_pool_blocks_until_another_connection_released(self): + """ + When out of connections, block until another connection is released + to the pool + """ + pool = self.get_pool(max_connections=1, max_connections_per_node=True, timeout=2) + c1 = pool.get_connection_by_node({"host": "127.0.0.1", "port": 7000}) + + def release_connection_after_sleep(*args, **kwargs): + def inner(connection): + time.sleep(0.1) + pool.release(connection) + return inner + + Thread(target=release_connection_after_sleep(c1)).start() + start = time.time() + + # different node so should not block + c2 = pool.get_connection_by_node({"host": "127.0.0.1", "port": 7001}) + assert time.time() - start < 0.1 + + pool.get_connection_by_node({"host": "127.0.0.1", "port": 7000}) + assert time.time() - start >= 0.1 + + Thread(target=release_connection_after_sleep(c2)).start() + start = time.time() + pool.get_connection_by_node({"host": "127.0.0.1", "port": 7001}) + assert time.time() - start >= 0.1 + + def test_reuse_previously_released_connection(self): + # Test that behaviour for re-use is the same whatever metric we block on. 
+ def reuse_previously_released_connections_test(enable_max_connections_per_node): + pool = self.get_pool(max_connections_per_node=enable_max_connections_per_node) + c1 = pool.get_connection_by_node({"host": "127.0.0.1", "port": 7000}) + pool.release(c1) + + # release c2 back in and make sure c3 still picks the connection with the correct node + c2 = pool.get_connection_by_node({"host": "127.0.0.1", "port": 7001}) + pool.release(c2) + + c3 = pool.get_connection_by_node({"host": "127.0.0.1", "port": 7000}) + c4 = pool.get_connection_by_node({"host": "127.0.0.1", "port": 7001}) + c5 = pool.get_connection_by_node({"host": "127.0.0.1", "port": 7000}) + + # ensure reuse + assert c1 == c3 + assert c2 == c4 + + # check that re-use policy is not naive + assert c5 != c1 # also expresses that c5 does not equal c3 + + reuse_previously_released_connections_test(enable_max_connections_per_node=True) + reuse_previously_released_connections_test(enable_max_connections_per_node=False) + + def test_repr_contains_db_info_tcp(self): + """ + Note: init_slot_cache must be set to false otherwise it will try to + query the test server for data and then it can't be predicted reliably + """ + connection_kwargs = {'host': 'localhost', 'port': 7000} + pool = self.get_pool(connection_kwargs=connection_kwargs, + connection_class=ClusterConnection, + init_slot_cache=False) + expected = 'ClusterBlockingConnectionPool>' + assert repr(pool) == expected + + def test_repr_contains_db_info_unix(self): + """ + Note: init_slot_cache must be set to false otherwise it will try to + query the test server for data and then it can't be predicted reliably + """ + connection_kwargs = {'path': '/abc', 'db': 1} + pool = self.get_pool(connection_kwargs=connection_kwargs, + connection_class=UnixDomainSocketConnection, + init_slot_cache=False) + expected = 'ClusterBlockingConnectionPool>' + assert repr(pool) == expected + + class TestReadOnlyConnectionPool(object): def get_pool(self, connection_kwargs=None, max_connections=None, init_slot_cache=True, startup_nodes=None): startup_nodes = startup_nodes or [{'host': '127.0.0.1', 'port': 7000}] From a16199fc28fd3b73a78fa77359bf9fedacd9935d Mon Sep 17 00:00:00 2001 From: Alan Li Date: Mon, 18 Nov 2019 10:53:27 -0800 Subject: [PATCH 157/263] fixed issue with passing parameter to threaded function --- tests/test_cluster_connection_pool.py | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/tests/test_cluster_connection_pool.py b/tests/test_cluster_connection_pool.py index a8677b51..648da5a9 100644 --- a/tests/test_cluster_connection_pool.py +++ b/tests/test_cluster_connection_pool.py @@ -273,13 +273,11 @@ def test_max_per_node_connection_pool_blocks_until_another_connection_released(s pool = self.get_pool(max_connections=1, max_connections_per_node=True, timeout=2) c1 = pool.get_connection_by_node({"host": "127.0.0.1", "port": 7000}) - def release_connection_after_sleep(*args, **kwargs): - def inner(connection): - time.sleep(0.1) - pool.release(connection) - return inner + def release_connection_after_sleep(connection): + time.sleep(0.1) + pool.release(connection) - Thread(target=release_connection_after_sleep(c1)).start() + Thread(target=release_connection_after_sleep, args=(c1,)).start() start = time.time() # different node so should not block @@ -289,7 +287,7 @@ def inner(connection): pool.get_connection_by_node({"host": "127.0.0.1", "port": 7000}) assert time.time() - start >= 0.1 - Thread(target=release_connection_after_sleep(c2)).start() + 
Thread(target=release_connection_after_sleep, args=(c2,)).start() start = time.time() pool.get_connection_by_node({"host": "127.0.0.1", "port": 7001}) assert time.time() - start >= 0.1 From d24d961061cebd28f7b4725e5af959ab0670705b Mon Sep 17 00:00:00 2001 From: Alan Li Date: Mon, 18 Nov 2019 11:01:00 -0800 Subject: [PATCH 158/263] use local variable instead of property --- rediscluster/connection.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rediscluster/connection.py b/rediscluster/connection.py index a066b78a..438463c0 100644 --- a/rediscluster/connection.py +++ b/rediscluster/connection.py @@ -463,7 +463,7 @@ def get_connection_by_node(self, node): connection = pool.get(block=True, timeout=self.timeout) while connection is not None and connection.node != node: connections_to_other_nodes.append(connection) - connection = self.get_pool(node=node).get(block=True, timeout=self.timeout) + connection = pool.get(block=True, timeout=self.timeout) except Empty: # Note that this is not caught by the redis cluster client and will be From 5e51b3616f710385b1fe1370d5e323de10b1bbab Mon Sep 17 00:00:00 2001 From: Alan Li Date: Mon, 18 Nov 2019 11:40:59 -0800 Subject: [PATCH 159/263] fix issue when queue is full with connections from other nodes --- rediscluster/connection.py | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/rediscluster/connection.py b/rediscluster/connection.py index 438463c0..e950d807 100644 --- a/rediscluster/connection.py +++ b/rediscluster/connection.py @@ -466,11 +466,19 @@ def get_connection_by_node(self, node): connection = pool.get(block=True, timeout=self.timeout) except Empty: - # Note that this is not caught by the redis cluster client and will be - # raised unless handled by application code. - - # ``ConnectionError`` is raised when timeout is hit on the queue. - raise ConnectionError("No connection available") + # queue is full of connections to other nodes + if not self.max_connections_per_node and len(connections_to_other_nodes) == self.max_connections: + # is the earliest released / longest un-used connection + connection_to_clear = connections_to_other_nodes.pop() + self._connections.remove(connection_to_clear) + connection_to_clear.disconnect() + connection = None # get a new connection + else: + # Note that this is not caught by the redis cluster client and will be + # raised unless handled by application code. + + # ``ConnectionError`` is raised when timeout is hit on the queue. + raise ConnectionError("No connection available") # Put all the connections belonging to other nodes back, # disconnecting the ones we fail to return. 
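The pattern that the preceding patches (153 through 159) build up is a blocking pool backed by a LifoQueue pre-filled with ``None`` sentinels: popping a sentinel means a connection may be created lazily, popping a real connection means it is reused, and a ``queue.Empty`` raised after ``timeout`` seconds means the pool is exhausted. The standalone sketch below illustrates only that queue pattern; the names ``SimpleBlockingPool`` and ``make_conn`` are invented for illustration and are not part of the rediscluster API.

import queue

class SimpleBlockingPool:
    # Illustrative sketch of the pre-filled LifoQueue pattern, not library code.
    def __init__(self, make_conn, max_connections=10, timeout=20):
        self.make_conn = make_conn      # factory that creates a real connection
        self.timeout = timeout
        self.pool = queue.LifoQueue(max_connections)
        while True:
            try:
                # ``None`` marks a free slot whose connection is created lazily.
                self.pool.put_nowait(None)
            except queue.Full:
                break

    def get(self):
        try:
            conn = self.pool.get(block=True, timeout=self.timeout)
        except queue.Empty:
            # Exhausting the pool surfaces an error to the caller instead of
            # silently creating extra connections.
            raise ConnectionError("No connection available")
        return conn if conn is not None else self.make_conn()

    def release(self, conn):
        try:
            self.pool.put_nowait(conn)  # hand the slot back to any waiting caller
        except queue.Full:
            pass                        # pool was reset meanwhile; drop the connection

With ``max_connections=1`` a second ``get()`` blocks until the first caller invokes ``release()``, which is the behaviour the tests added in patch 156 assert with their sleep-and-release threads.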
From 70914fb4e62fccd596ff50708956b92f29ac3575 Mon Sep 17 00:00:00 2001 From: Alan Li Date: Mon, 18 Nov 2019 11:44:38 -0800 Subject: [PATCH 160/263] remove redundant literal from conditional --- rediscluster/connection.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rediscluster/connection.py b/rediscluster/connection.py index e950d807..293a5144 100644 --- a/rediscluster/connection.py +++ b/rediscluster/connection.py @@ -467,7 +467,7 @@ def get_connection_by_node(self, node): except Empty: # queue is full of connections to other nodes - if not self.max_connections_per_node and len(connections_to_other_nodes) == self.max_connections: + if len(connections_to_other_nodes) == self.max_connections: # is the earliest released / longest un-used connection connection_to_clear = connections_to_other_nodes.pop() self._connections.remove(connection_to_clear) From 16bd4267e5ed12fb8c774179ff8a0b7aa6ab5990 Mon Sep 17 00:00:00 2001 From: Alan Li Date: Mon, 18 Nov 2019 18:01:29 -0800 Subject: [PATCH 161/263] fixed spacing issue --- rediscluster/connection.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rediscluster/connection.py b/rediscluster/connection.py index 293a5144..c38cb376 100644 --- a/rediscluster/connection.py +++ b/rediscluster/connection.py @@ -515,7 +515,7 @@ def release(self, connection): def disconnect(self): "Disconnects all connections in the pool." for connection in self._connections: - connection.disconnect() + connection.disconnect() class ClusterReadOnlyConnectionPool(ClusterConnectionPool): From 24d957c9997a1648fc3ba8fd7c2037fd337111d8 Mon Sep 17 00:00:00 2001 From: Alan Li Date: Wed, 20 Nov 2019 13:33:26 -0800 Subject: [PATCH 162/263] Fixed errors caught by unit tests --- rediscluster/connection.py | 23 +++++++++++++---------- tests/test_cluster_connection_pool.py | 6 +++++- 2 files changed, 18 insertions(+), 11 deletions(-) diff --git a/rediscluster/connection.py b/rediscluster/connection.py index c38cb376..41fb8c99 100644 --- a/rediscluster/connection.py +++ b/rediscluster/connection.py @@ -416,14 +416,16 @@ def get_pool(self, node): def reset(self): self.pid = os.getpid() self._check_lock = threading.Lock() - self._pool_by_node = None - self._group_pool = None - # use conditional to minimize overhead in initializing queue - if self.max_connections_per_node: - self._pool_by_node = defaultdict(self.blocking_pool_factory) - else: - self._group_pool = self.blocking_pool_factory() + """ + We could use a conditional branch on ``max_connections_per_node`` to see which pool to initialize, + but ClusterConnectionPool calls ConnectionPool init which has no concept of ``max_connections_per_node``, + and also performs ``reset()``. This will lead to an attribute error. + + This could suggest removing inheritance from ConnectionPool, but initializing both should not add much overhead. + """ + self._pool_by_node = defaultdict(self.blocking_pool_factory) + self._group_pool = self.blocking_pool_factory() # Keep a list of actual connection instances so that we can # disconnect them later. @@ -456,6 +458,7 @@ def get_connection_by_node(self, node): Get a connection by node """ self._checkpid() + self.nodes.set_node_name(node) connection = None connections_to_other_nodes = [] pool = self.get_pool(node=node) @@ -482,9 +485,9 @@ def get_connection_by_node(self, node): # Put all the connections belonging to other nodes back, # disconnecting the ones we fail to return. 
- for idx, connection in enumerate(connections_to_other_nodes): + for idx, other_connection in enumerate(connections_to_other_nodes): try: - pool.put(connection, timeout=self.timeout) + pool.put_nowait(other_connection) except Full: for lost_connection in connections_to_other_nodes[idx:]: self._connections.remove(lost_connection) @@ -492,7 +495,7 @@ def get_connection_by_node(self, node): break if connection is None: - connection = self.make_connection() + connection = self.make_connection(node) return connection diff --git a/tests/test_cluster_connection_pool.py b/tests/test_cluster_connection_pool.py index 648da5a9..1f61bf72 100644 --- a/tests/test_cluster_connection_pool.py +++ b/tests/test_cluster_connection_pool.py @@ -191,8 +191,12 @@ def test_master_node_by_slot(self): class TestClusterBlockingConnectionPool(object): - def get_pool(self, connection_kwargs=None, max_connections=None, max_connections_per_node=None, + def get_pool(self, connection_kwargs=None, max_connections=100, max_connections_per_node=None, connection_class=DummyConnection, init_slot_cache=True, timeout=20): + ''' + Setting max_connections default to 100 instead of None (which === 2**31) like in ClusterConnectionPool as + BlockingClusterConnectionPool takes time to setup a queue containing max_connections num of elements + ''' connection_kwargs = connection_kwargs or {} pool = ClusterBlockingConnectionPool( startup_nodes=[{"host": "127.0.0.1", "port": 7000}], From c0a0765938d090493e957f45b9b97ef0097d6595 Mon Sep 17 00:00:00 2001 From: Alan Li Date: Wed, 20 Nov 2019 13:37:26 -0800 Subject: [PATCH 163/263] fixed linter errors --- rediscluster/connection.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/rediscluster/connection.py b/rediscluster/connection.py index 41fb8c99..df27069c 100644 --- a/rediscluster/connection.py +++ b/rediscluster/connection.py @@ -418,10 +418,10 @@ def reset(self): self._check_lock = threading.Lock() """ - We could use a conditional branch on ``max_connections_per_node`` to see which pool to initialize, + We could use a conditional branch on ``max_connections_per_node`` to see which pool to initialize, but ClusterConnectionPool calls ConnectionPool init which has no concept of ``max_connections_per_node``, and also performs ``reset()``. This will lead to an attribute error. - + This could suggest removing inheritance from ConnectionPool, but initializing both should not add much overhead. """ self._pool_by_node = defaultdict(self.blocking_pool_factory) From 711d3a9367a899594565a24df4406fbd21796ef3 Mon Sep 17 00:00:00 2001 From: Alan Li Date: Thu, 21 Nov 2019 15:56:54 -0800 Subject: [PATCH 164/263] update docstring style --- rediscluster/connection.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rediscluster/connection.py b/rediscluster/connection.py index df27069c..f19c7e5f 100644 --- a/rediscluster/connection.py +++ b/rediscluster/connection.py @@ -516,7 +516,7 @@ def release(self, connection): pass def disconnect(self): - "Disconnects all connections in the pool." 
+ """Disconnects all connections in the pool.""" for connection in self._connections: connection.disconnect() From 17e4b1c65b0831e58880731eab6500d43a0a3b8e Mon Sep 17 00:00:00 2001 From: Alan Li Date: Thu, 21 Nov 2019 16:03:20 -0800 Subject: [PATCH 165/263] fixed import location --- rediscluster/connection.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/rediscluster/connection.py b/rediscluster/connection.py index f19c7e5f..ed0b82c8 100644 --- a/rediscluster/connection.py +++ b/rediscluster/connection.py @@ -7,7 +7,6 @@ import threading from contextlib import contextmanager from itertools import chain -from queue import LifoQueue, Full, Empty from collections import defaultdict # rediscluster imports @@ -19,7 +18,7 @@ ) # 3rd party imports -from redis._compat import nativestr +from redis._compat import nativestr, LifoQueue, Full, Empty from redis.client import dict_merge from redis.connection import ConnectionPool, Connection, DefaultParser, SSLConnection from redis.exceptions import ConnectionError From f4b2c46218b5664a21f302ec13fc2ec529d4b586 Mon Sep 17 00:00:00 2001 From: Alan Li Date: Mon, 2 Dec 2019 15:01:05 -0800 Subject: [PATCH 166/263] readability and safety changes --- rediscluster/connection.py | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/rediscluster/connection.py b/rediscluster/connection.py index ed0b82c8..20535941 100644 --- a/rediscluster/connection.py +++ b/rediscluster/connection.py @@ -363,7 +363,7 @@ class ClusterBlockingConnectionPool(ClusterConnectionPool): makes the client wait ("blocks") for a specified number of seconds until a connection becomes available. - Use ``max_connections`` to increase / decrease the pool size:: + Use ``max_connections`` to set the pool size:: >>> pool = ClusterBlockingConnectionPool(max_connections=10) @@ -380,9 +380,8 @@ class ClusterBlockingConnectionPool(ClusterConnectionPool): def __init__(self, startup_nodes=None, init_slot_cache=True, connection_class=None, max_connections=None, max_connections_per_node=False, reinitialize_steps=None, skip_full_coverage_check=False, nodemanager_follow_cluster=False, - timeout=20, queue_class=LifoQueue, **connection_kwargs): + timeout=20, **connection_kwargs): - self.queue_class = queue_class self.timeout = timeout super(ClusterBlockingConnectionPool, self).__init__( @@ -397,10 +396,10 @@ def __init__(self, startup_nodes=None, init_slot_cache=True, connection_class=No **connection_kwargs ) - def blocking_pool_factory(self): + def _blocking_pool_factory(self): # Create and fill up a thread safe queue with ``None`` values. # We will use ``None`` to denote when to create a new connection rather than to reuse. - pool = self.queue_class(self.max_connections) + pool = LifoQueue(self.max_connections) while True: try: pool.put_nowait(None) @@ -408,7 +407,7 @@ def blocking_pool_factory(self): break return pool - def get_pool(self, node): + def _get_pool(self, node): return self._pool_by_node[node["name"]] \ if self.max_connections_per_node or node is None else self._group_pool @@ -423,8 +422,8 @@ def reset(self): This could suggest removing inheritance from ConnectionPool, but initializing both should not add much overhead. """ - self._pool_by_node = defaultdict(self.blocking_pool_factory) - self._group_pool = self.blocking_pool_factory() + self._pool_by_node = defaultdict(self._blocking_pool_factory) + self._group_pool = self._blocking_pool_factory() # Keep a list of actual connection instances so that we can # disconnect them later. 
@@ -460,7 +459,7 @@ def get_connection_by_node(self, node): self.nodes.set_node_name(node) connection = None connections_to_other_nodes = [] - pool = self.get_pool(node=node) + pool = self._get_pool(node=node) try: connection = pool.get(block=True, timeout=self.timeout) while connection is not None and connection.node != node: @@ -508,7 +507,7 @@ def release(self, connection): # Put the connection back into the pool. try: - self.get_pool(connection.node).put_nowait(connection) + self._get_pool(connection.node).put_nowait(connection) except Full: # perhaps the pool has been reset() after a fork? regardless, # we don't want this connection From 9a468043d6cfde2a4cdb04879b89d86b02c65697 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Tue, 3 Mar 2020 12:29:13 +0100 Subject: [PATCH 167/263] Update code and tests and small pieces to make current test suite work when running on redis-py 3.4.x series. Still some code and work to port over from those releases, but this work initially at least --- rediscluster/connection.py | 14 ++---- rediscluster/pipeline.py | 15 +++++-- setup.cfg | 2 +- tests/test_cluster_connection_pool.py | 63 +++++++++++++++++++++++---- tests/test_multiprocessing.py | 10 ++--- tests/test_pipeline.py | 30 +++++++++++-- 6 files changed, 102 insertions(+), 32 deletions(-) diff --git a/rediscluster/connection.py b/rediscluster/connection.py index 20535941..92122c96 100644 --- a/rediscluster/connection.py +++ b/rediscluster/connection.py @@ -20,7 +20,7 @@ # 3rd party imports from redis._compat import nativestr, LifoQueue, Full, Empty from redis.client import dict_merge -from redis.connection import ConnectionPool, Connection, DefaultParser, SSLConnection +from redis.connection import ConnectionPool, Connection, DefaultParser, SSLConnection, UnixDomainSocketConnection from redis.exceptions import ConnectionError @@ -40,7 +40,6 @@ class ClusterParser(DefaultParser): class ClusterConnection(Connection): "Manages TCP communication to and from a Redis server" - description_format = "ClusterConnection" def __init__(self, *args, **kwargs): self.readonly = kwargs.pop('readonly', False) @@ -68,7 +67,6 @@ class SSLClusterConnection(SSLConnection): pool = ClusterConnectionPool(connection_class=SSLClusterConnection, ...) 
client = RedisCluster(connection_pool=pool) """ - description_format = "SSLClusterConnection" def __init__(self, **kwargs): self.readonly = kwargs.pop('readonly', False) @@ -87,13 +85,7 @@ def on_connect(self): if nativestr(self.read_response()) != 'OK': raise ConnectionError('READONLY command failed') - - -class UnixDomainSocketConnection(Connection): - """ - """ - description_format = "ClusterUnixDomainSocketConnection" - + class ClusterConnectionPool(ConnectionPool): """ @@ -160,7 +152,7 @@ def __repr__(self): return "{0}<{1}>".format( type(self).__name__, - ", ".join([self.connection_class.description_format % dict(node, **self.connection_kwargs) for node in nodes]) + ", ".join([repr(self.connection_class(**self.connection_kwargs)) for node in nodes]) ) def reset(self): diff --git a/rediscluster/pipeline.py b/rediscluster/pipeline.py index 29ec8793..6782a3e2 100644 --- a/rediscluster/pipeline.py +++ b/rediscluster/pipeline.py @@ -53,15 +53,24 @@ def __exit__(self, exc_type, exc_value, traceback): self.reset() def __del__(self): - """ - """ - self.reset() + try: + self.reset() + except Exception: + pass def __len__(self): """ """ return len(self.command_stack) + def __nonzero__(self): + "Pipeline instances should always evaluate to True on Python 2.7" + return True + + def __bool__(self): + "Pipeline instances should always evaluate to True on Python 3+" + return True + def execute_command(self, *args, **kwargs): """ """ diff --git a/setup.cfg b/setup.cfg index 85215dcb..d2fd5837 100644 --- a/setup.cfg +++ b/setup.cfg @@ -6,4 +6,4 @@ license_file = LICENSE [pycodestyle] show-source = 1 -exclude = .venv,.tox,dist,docs,build,*.egg +exclude = .venv,.tox,dist,docs,build,*.egg,redis_install diff --git a/tests/test_cluster_connection_pool.py b/tests/test_cluster_connection_pool.py index 1f61bf72..faf2caa2 100644 --- a/tests/test_cluster_connection_pool.py +++ b/tests/test_cluster_connection_pool.py @@ -111,11 +111,16 @@ def test_repr_contains_db_info_tcp(self): Note: init_slot_cache must be set to false otherwise it will try to query the test server for data and then it can't be predicted reliably """ - connection_kwargs = {'host': 'localhost', 'port': 7000} + connection_kwargs = { + 'host': 'localhost', + 'port': 7000, + 'db': 1, + 'client_name': 'test-client' + } pool = self.get_pool(connection_kwargs=connection_kwargs, connection_class=ClusterConnection, init_slot_cache=False) - expected = 'ClusterConnectionPool>' + expected = 'ClusterConnectionPool>' assert repr(pool) == expected def test_repr_contains_db_info_unix(self): @@ -123,11 +128,15 @@ def test_repr_contains_db_info_unix(self): Note: init_slot_cache must be set to false otherwise it will try to query the test server for data and then it can't be predicted reliably """ - connection_kwargs = {'path': '/abc', 'db': 1} + connection_kwargs = { + 'path': '/abc', + 'db': 1, + 'client_name': 'test-client' + } pool = self.get_pool(connection_kwargs=connection_kwargs, connection_class=UnixDomainSocketConnection, init_slot_cache=False) - expected = 'ClusterConnectionPool>' + expected = 'ClusterConnectionPool>' assert repr(pool) == expected def test_get_connection_by_key(self): @@ -326,11 +335,16 @@ def test_repr_contains_db_info_tcp(self): Note: init_slot_cache must be set to false otherwise it will try to query the test server for data and then it can't be predicted reliably """ - connection_kwargs = {'host': 'localhost', 'port': 7000} + connection_kwargs = { + 'host': 'localhost', + 'port': 7000, + 'db': 0, + 'client_name': 'test-client' + 
} pool = self.get_pool(connection_kwargs=connection_kwargs, connection_class=ClusterConnection, init_slot_cache=False) - expected = 'ClusterBlockingConnectionPool>' + expected = 'ClusterBlockingConnectionPool>' assert repr(pool) == expected def test_repr_contains_db_info_unix(self): @@ -338,11 +352,15 @@ def test_repr_contains_db_info_unix(self): Note: init_slot_cache must be set to false otherwise it will try to query the test server for data and then it can't be predicted reliably """ - connection_kwargs = {'path': '/abc', 'db': 1} + connection_kwargs = { + 'path': '/abc', + 'db': 1, + 'client_name': 'test-client', + } pool = self.get_pool(connection_kwargs=connection_kwargs, connection_class=UnixDomainSocketConnection, init_slot_cache=False) - expected = 'ClusterBlockingConnectionPool>' + expected = 'ClusterBlockingConnectionPool>' assert repr(pool) == expected @@ -357,14 +375,22 @@ def get_pool(self, connection_kwargs=None, max_connections=None, init_slot_cache **connection_kwargs) return pool + @pytest.mark.xfail(reason="Broken, needs repair") def test_repr_contains_db_info_readonly(self): """ Note: init_slot_cache must be set to false otherwise it will try to query the test server for data and then it can't be predicted reliably """ + connection_kwargs = { + 'db': 0, + } pool = self.get_pool( + connection_kwargs=connection_kwargs, init_slot_cache=False, - startup_nodes=[{"host": "127.0.0.1", "port": 7000}, {"host": "127.0.0.2", "port": 7001}], + startup_nodes=[ + {"host": "127.0.0.1", "port": 7000}, + {"host": "127.0.0.2", "port": 7001}, + ], ) expected = 'ClusterReadOnlyConnectionPool, ClusterConnection>' assert repr(pool) == expected @@ -505,6 +531,7 @@ def test_defaults(self): 'host': 'localhost', 'port': 6379, 'db': 0, + 'username': None, 'password': None, } @@ -515,6 +542,7 @@ def test_hostname(self): 'host': 'myhost', 'port': 6379, 'db': 0, + 'username': None, 'password': None, } @@ -526,6 +554,7 @@ def test_quoted_hostname(self): 'host': 'my / host +=+', 'port': 6379, 'db': 0, + 'username': None, 'password': None, } @@ -536,6 +565,7 @@ def test_port(self): 'host': 'localhost', 'port': 6380, 'db': 0, + 'username': None, 'password': None, } @@ -546,6 +576,7 @@ def test_password(self): 'host': 'localhost', 'port': 6379, 'db': 0, + 'username': None, 'password': 'mypassword', } @@ -558,6 +589,7 @@ def test_quoted_password(self): 'host': 'localhost', 'port': 6379, 'db': 0, + 'username': None, 'password': '/mypass/+ word=$+', } @@ -569,6 +601,7 @@ def test_quoted_path(self): assert pool.connection_kwargs == { 'path': '/my/path/to/../+_+=$ocket', 'db': 0, + 'username': None, 'password': 'mypassword', } @@ -579,6 +612,7 @@ def test_db_as_argument(self): 'host': 'localhost', 'port': 6379, 'db': 1, + 'username': None, 'password': None, } @@ -589,6 +623,7 @@ def test_db_in_path(self): 'host': 'localhost', 'port': 6379, 'db': 2, + 'username': None, 'password': None, } @@ -600,6 +635,7 @@ def test_db_in_querystring(self): 'host': 'localhost', 'port': 6379, 'db': 3, + 'username': None, 'password': None, } @@ -614,6 +650,7 @@ def test_extra_typed_querystring_options(self): 'host': 'localhost', 'port': 6379, 'db': 2, + 'username': None, 'socket_timeout': 20.0, 'socket_connect_timeout': 10.0, 'retry_on_timeout': True, @@ -640,6 +677,7 @@ def test_extra_querystring_options(self): 'host': 'localhost', 'port': 6379, 'db': 0, + 'username': None, 'password': None, 'a': '1', 'b': '2' @@ -656,6 +694,7 @@ def test_client_creates_connection_pool(self): 'host': 'myhost', 'port': 6379, 'db': 0, + 
'username': None, 'password': None, } @@ -667,6 +706,7 @@ def test_defaults(self): assert pool.connection_kwargs == { 'path': '/socket', 'db': 0, + 'username': None, 'password': None, } @@ -676,6 +716,7 @@ def test_password(self): assert pool.connection_kwargs == { 'path': '/socket', 'db': 0, + 'username': None, 'password': 'mypassword', } @@ -685,6 +726,7 @@ def test_db_as_argument(self): assert pool.connection_kwargs == { 'path': '/socket', 'db': 1, + 'username': None, 'password': None, } @@ -694,6 +736,7 @@ def test_db_in_querystring(self): assert pool.connection_kwargs == { 'path': '/socket', 'db': 2, + 'username': None, 'password': None, } @@ -703,6 +746,7 @@ def test_extra_querystring_options(self): assert pool.connection_kwargs == { 'path': '/socket', 'db': 0, + 'username': None, 'password': None, 'a': '1', 'b': '2' @@ -718,6 +762,7 @@ def test_defaults(self): 'host': 'localhost', 'port': 6379, 'db': 0, + 'username': None, 'password': None, } diff --git a/tests/test_multiprocessing.py b/tests/test_multiprocessing.py index e0c39c94..6faa7eb6 100644 --- a/tests/test_multiprocessing.py +++ b/tests/test_multiprocessing.py @@ -47,7 +47,7 @@ def target(conn): proc = multiprocessing.Process(target=target, args=(conn,)) proc.start() proc.join(3) - assert proc.exitcode is 0 + assert proc.exitcode == 0 # The connection was created in the parent but disconnected in the # child. The child called socket.close() but did not call @@ -81,7 +81,7 @@ def target(conn, ev): ev.set() proc.join(3) - assert proc.exitcode is 0 + assert proc.exitcode == 0 @pytest.mark.parametrize('max_connections', [1, 2, None]) def test_pool(self, max_connections): @@ -109,7 +109,7 @@ def target(pool): proc = multiprocessing.Process(target=target, args=(pool,)) proc.start() proc.join(3) - assert proc.exitcode is 0 + assert proc.exitcode == 0 # Check that connection is still alive after fork process has exited # and disconnected the connections in its pool @@ -148,7 +148,7 @@ def target(pool, disconnect_event): pool.disconnect() ev.set() proc.join(3) - assert proc.exitcode is 0 + assert proc.exitcode == 0 def test_redis_client(self, r): "A redis client created in a parent can also be used in a child" @@ -161,6 +161,6 @@ def target(client): proc = multiprocessing.Process(target=target, args=(r,)) proc.start() proc.join(3) - assert proc.exitcode is 0 + assert proc.exitcode == 0 assert r.ping() is True diff --git a/tests/test_pipeline.py b/tests/test_pipeline.py index aa1617d2..a85ee459 100644 --- a/tests/test_pipeline.py +++ b/tests/test_pipeline.py @@ -20,6 +20,10 @@ class TestPipeline(object): """ """ + def test_pipeline_is_true(self, r): + "Ensure pipeline instances are not false-y" + with r.pipeline() as pipe: + assert pipe def test_pipeline(self, r): with r.pipeline() as pipe: @@ -42,17 +46,14 @@ def test_pipeline_length(self, r): with r.pipeline() as pipe: # Initially empty. assert len(pipe) == 0 - assert not pipe # Fill 'er up! pipe.set('a', 'a1').set('b', 'b1').set('c', 'c1') assert len(pipe) == 3 - assert pipe # Execute calls reset(), so empty once again. 
pipe.execute() assert len(pipe) == 0 - assert not pipe def test_pipeline_no_transaction(self, r): with r.pipeline(transaction=False) as pipe: @@ -218,6 +219,20 @@ def test_watch_failure(self, r): assert not pipe.watching + @pytest.mark.xfail(reason="unsupported command: watch") + def test_watch_failure_in_empty_transaction(self, r): + r['a'] = 1 + r['b'] = 2 + + with r.pipeline() as pipe: + pipe.watch('a', 'b') + r['b'] = 3 + pipe.multi() + with pytest.raises(redis.WatchError): + pipe.execute() + + assert not pipe.watching + @pytest.mark.xfail(reason="unsupported command: watch") def test_unwatch(self, r): r['a'] = 1 @@ -256,6 +271,15 @@ def my_transaction(pipe): assert result == [True] assert r['c'] == b'4' + @pytest.mark.xfail(reason="unsupported command: watch") + def test_transaction_callable_returns_value_from_callable(self, r): + def callback(pipe): + # No need to do anything here since we only want the return value + return 'a' + + res = r.transaction(callback, 'my-key', value_from_callable=True) + assert res == 'a' + def test_exec_error_in_no_transaction_pipeline(self, r): r['a'] = 1 with r.pipeline(transaction=False) as pipe: From 9a51556d42970600b15ecdc0d84110144f52cab6 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Tue, 3 Mar 2020 13:32:08 +0100 Subject: [PATCH 168/263] Lint many imports and all command names inside NODE_FLAGS and RESPONSE_CALLBACKS to be easier to overview and read --- rediscluster/client.py | 169 ++++++++++++++++++++++++++++++++--------- 1 file changed, 135 insertions(+), 34 deletions(-) diff --git a/rediscluster/client.py b/rediscluster/client.py index 71286766..863cf7d9 100644 --- a/rediscluster/client.py +++ b/rediscluster/client.py @@ -15,29 +15,41 @@ SSLClusterConnection, ) from .exceptions import ( - RedisClusterException, AskError, MovedError, ClusterDownError, - ClusterError, TryAgainError + AskError, + ClusterDownError, + ClusterError, + MovedError, + RedisClusterException, + TryAgainError, ) from .pubsub import ClusterPubSub from .utils import ( + blocked_command, bool_ok, - string_keys_to_dict, + clusterdown_wrapper, dict_merge, - blocked_command, - merge_result, first_key, - clusterdown_wrapper, - parse_cluster_slots, + merge_result, parse_cluster_nodes, + parse_cluster_slots, parse_pubsub_channels, - parse_pubsub_numsub, parse_pubsub_numpat, + parse_pubsub_numsub, + string_keys_to_dict, ) + # 3rd party imports from redis import Redis from redis.client import list_or_args, parse_info from redis._compat import iteritems, basestring, izip, nativestr, long -from redis.exceptions import RedisError, ResponseError, TimeoutError, DataError, ConnectionError, BusyLoadingError +from redis.exceptions import ( + BusyLoadingError, + ConnectionError, + DataError, + RedisError, + ResponseError, + TimeoutError, +) class CaseInsensitiveDict(dict): @@ -76,23 +88,60 @@ class RedisCluster(Redis): NODES_FLAGS = dict_merge( string_keys_to_dict([ - "CLIENT SETNAME", "SENTINEL GET-MASTER-ADDR-BY-NAME", 'SENTINEL MASTER', 'SENTINEL MASTERS', - 'SENTINEL MONITOR', 'SENTINEL REMOVE', 'SENTINEL SENTINELS', 'SENTINEL SET', - 'SENTINEL SLAVES', 'SHUTDOWN', 'SLAVEOF', 'SCRIPT KILL', - 'MOVE', 'BITOP', + 'BITOP', + 'CLIENT SETNAME', + 'MOVE', + 'SCRIPT KILL', + 'SENTINEL GET-MASTER-ADDR-BY-NAME', + 'SENTINEL MASTER', + 'SENTINEL MASTERS', + 'SENTINEL MONITOR', + 'SENTINEL REMOVE', + 'SENTINEL SENTINELS', + 'SENTINEL SET', + 'SENTINEL SLAVES', + 'SHUTDOWN', + 'SLAVEOF', ], 'blocked'), string_keys_to_dict([ - "ECHO", "CONFIG GET", "CONFIG SET", "SLOWLOG GET", "CLIENT KILL", "INFO", - 
"BGREWRITEAOF", "BGSAVE", "CLIENT LIST", "CLIENT GETNAME", "CONFIG RESETSTAT", - "CONFIG REWRITE", "DBSIZE", "LASTSAVE", "PING", "SAVE", "SLOWLOG LEN", "SLOWLOG RESET", - "TIME", "KEYS", "CLUSTER INFO", "PUBSUB CHANNELS", - "PUBSUB NUMSUB", "PUBSUB NUMPAT", "CLIENT ID", + "BGREWRITEAOF", + "BGSAVE", + "CLIENT GETNAME", + "CLIENT ID", + "CLIENT KILL", + "CLIENT LIST", + "CLUSTER INFO", + "CONFIG GET", + "CONFIG RESETSTAT", + "CONFIG REWRITE", + "CONFIG SET", + "DBSIZE", + "ECHO", + "INFO", + "KEYS", + "LASTSAVE", + "PING", + "PUBSUB CHANNELS", + "PUBSUB NUMPAT", + "PUBSUB NUMSUB", + "SAVE", + "SLOWLOG GET", + "SLOWLOG LEN", + "SLOWLOG RESET", + "TIME", ], 'all-nodes'), string_keys_to_dict([ - "FLUSHALL", "FLUSHDB", "SCRIPT LOAD", "SCRIPT FLUSH", "SCRIPT EXISTS", "SCAN", + "FLUSHALL", + "FLUSHDB", + "SCAN", + "SCRIPT EXISTS", + "SCRIPT FLUSH", + "SCRIPT LOAD", ], 'all-masters'), string_keys_to_dict([ - "RANDOMKEY", "CLUSTER NODES", "CLUSTER SLOTS", + "CLUSTER NODES", + "CLUSTER SLOTS", + "RANDOMKEY", ], 'random'), string_keys_to_dict([ "CLUSTER COUNTKEYSINSLOT", @@ -103,29 +152,78 @@ class RedisCluster(Redis): # Not complete, but covers the major ones # https://redis.io/commands READ_COMMANDS = [ - "BITPOS", "BITCOUNT", + "BITCOUNT", + "BITPOS", "EXISTS", - "GEOHASH", "GEOPOS", "GEODIST", "GEORADIUS", "GEORADIUSBYMEMBER", - "GET", "GETBIT", "GETRANGE", - "HEXISTS", "HGET", "HGETALL", "HKEYS", "HLEN", "HMGET", "HSTRLEN", "HVALS", + "GEODIST", + "GEOHASH", + "GEOPOS", + "GEORADIUS", + "GEORADIUSBYMEMBER", + "GET", + "GETBIT", + "GETRANGE", + "HEXISTS", + "HGET", + "HGETALL", + "HKEYS", + "HLEN", + "HMGET", + "HSTRLEN", + "HVALS", "KEYS", - "LINDEX", "LLEN", "LRANGE", + "LINDEX", + "LLEN", + "LRANGE", "MGET", "PTTL", "RANDOMKEY", - "SCARD", "SDIFF", "SINTER", "SISMEMBER", "SMEMBERS", "SRANDMEMBER", - "STRLEN", "SUNION", + "SCARD", + "SDIFF", + "SINTER", + "SISMEMBER", + "SMEMBERS", + "SRANDMEMBER", + "STRLEN", + "SUNION", "TTL", - "ZCARD", "ZCOUNT", "ZRANGE", "ZSCORE" + "ZCARD", + "ZCOUNT", + "ZRANGE", + "ZSCORE" ] RESULT_CALLBACKS = dict_merge( string_keys_to_dict([ - "ECHO", "CONFIG GET", "CONFIG SET", "SLOWLOG GET", "CLIENT KILL", "INFO", - "BGREWRITEAOF", "BGSAVE", "CLIENT LIST", "CLIENT GETNAME", "CONFIG RESETSTAT", - "CONFIG REWRITE", "DBSIZE", "LASTSAVE", "PING", "SAVE", "SLOWLOG LEN", "SLOWLOG RESET", - "TIME", "SCAN", "CLUSTER INFO", 'CLUSTER ADDSLOTS', 'CLUSTER COUNT-FAILURE-REPORTS', - 'CLUSTER DELSLOTS', 'CLUSTER FAILOVER', 'CLUSTER FORGET', "FLUSHALL", "FLUSHDB", "CLIENT ID", + "BGREWRITEAOF", + "BGSAVE", + "CLIENT GETNAME", + "CLIENT ID", + "CLIENT KILL", + "CLIENT LIST", + "CLUSTER INFO", + "CONFIG GET", + "CONFIG RESETSTAT", + "CONFIG REWRITE", + "CONFIG SET", + "DBSIZE", + "ECHO", + "FLUSHALL", + "FLUSHDB", + "INFO", + "LASTSAVE", + "PING", + "SAVE", + "SCAN", + "SLOWLOG GET", + "SLOWLOG LEN", + "SLOWLOG RESET", + "TIME", + 'CLUSTER ADDSLOTS', + 'CLUSTER COUNT-FAILURE-REPORTS', + 'CLUSTER DELSLOTS', + 'CLUSTER FAILOVER', + 'CLUSTER FORGET', ], lambda command, res: res), string_keys_to_dict([ "SCRIPT LOAD", @@ -140,7 +238,10 @@ class RedisCluster(Redis): "KEYS", ], merge_result), string_keys_to_dict([ - "SSCAN", "HSCAN", "ZSCAN", "RANDOMKEY", + "HSCAN", + "RANDOMKEY", + "SSCAN", + "ZSCAN", ], first_key), string_keys_to_dict([ "PUBSUB CHANNELS", From 2ec370693f16c27bdb981dface0d02c7e3b9f60b Mon Sep 17 00:00:00 2001 From: Grokzen Date: Tue, 3 Mar 2020 13:32:59 +0100 Subject: [PATCH 169/263] Add in new ACL commands from redis-py 3.4.x branch. 
Currently block them out while waiting for proper implementation and a full pass over that feature and how it fits into a cluster environment. --- rediscluster/client.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/rediscluster/client.py b/rediscluster/client.py index 863cf7d9..e1d01667 100644 --- a/rediscluster/client.py +++ b/rediscluster/client.py @@ -88,6 +88,16 @@ class RedisCluster(Redis): NODES_FLAGS = dict_merge( string_keys_to_dict([ + 'ACL CAT', + 'ACL DELUSER', + 'ACL GENPASS', + 'ACL GETUSER', + 'ACL LIST', + 'ACL LOAD', + 'ACL SAVE', + 'ACL SETUSER', + 'ACL USERS', + 'ACL WHOAMI', 'BITOP', 'CLIENT SETNAME', 'MOVE', From da7352bacde56aff29abfdcbcb6d8f613a9f1771 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Tue, 3 Mar 2020 13:35:52 +0100 Subject: [PATCH 170/263] Add test_scan_type method --- tests/test_commands.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/tests/test_commands.py b/tests/test_commands.py index 91f3f8b6..055fc704 100644 --- a/tests/test_commands.py +++ b/tests/test_commands.py @@ -1135,6 +1135,14 @@ def test_scan(self, r): _, keys = get_main_cluster_node_data(r.scan(match='GQ5KU')) assert set(keys) == {b'GQ5KU'} + @skip_if_server_version_lt('5.9.101') + def test_scan_type(self, r): + r.sadd('a-set', 1) + r.hset('a-hash', 'foo', 2) + r.lpush('a-list', 'aux', 3) + _, keys = r.scan(match='a*', _type='SET') + assert set(keys) == {b'a-set'} + @skip_if_server_version_lt('2.8.0') def test_scan_iter(self, r): r.set('a', 1) From 38ef963b813e24ec2930eaced31b417b26d0abbb Mon Sep 17 00:00:00 2001 From: Grokzen Date: Tue, 3 Mar 2020 14:22:52 +0100 Subject: [PATCH 171/263] Add in a skip decorator for test_spop_multi_value method to avoid error on redis servers 3.0.x --- tests/test_commands.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/test_commands.py b/tests/test_commands.py index 055fc704..ccfc8c5b 100644 --- a/tests/test_commands.py +++ b/tests/test_commands.py @@ -1267,6 +1267,7 @@ def test_spop(self, r): assert value in s assert r.smembers('a') == set(s) - {value} + @skip_if_server_version_lt('3.2.0') def test_spop_multi_value(self, r): s = [b'1', b'2', b'3'] r.sadd('a', *s) From a0fa82e744bdd41b583681fc31c14cbb33281892 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Tue, 3 Mar 2020 14:25:35 +0100 Subject: [PATCH 172/263] Add in new argument _type to scan_iter cluster version --- rediscluster/client.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/rediscluster/client.py b/rediscluster/client.py index e1d01667..ead380da 100644 --- a/rediscluster/client.py +++ b/rediscluster/client.py @@ -819,7 +819,7 @@ def _parse_scan(self, response, **options): cursor, r = response return long(cursor), r - def scan_iter(self, match=None, count=None): + def scan_iter(self, match=None, count=None, _type=None): """ Make an iterator using the SCAN command so that the client doesn't need to remember the cursor position. 
@@ -848,6 +848,8 @@ def scan_iter(self, match=None, count=None): pieces.extend([b'MATCH', match]) if count is not None: pieces.extend([b'COUNT', count]) + if _type is not None: + pieces.extend([b'TYPE', _type]) conn.send_command(*pieces) From 1c5628c7d8aa4a0eb1dc32c153b510f62a904c62 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Tue, 3 Mar 2020 14:29:31 +0100 Subject: [PATCH 173/263] Minor lint --- tests/test_commands.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_commands.py b/tests/test_commands.py index ccfc8c5b..44c42658 100644 --- a/tests/test_commands.py +++ b/tests/test_commands.py @@ -90,7 +90,7 @@ def test_command_on_invalid_key_type(self, r): with pytest.raises(redis.ResponseError): r['a'] - # SERVER INFORMATION + # SERVER INFORMATION @skip_if_server_version_lt('5.9.101') def test_acl_cat_no_category(self, r): categories = r.acl_cat() From b1c6eac7b7070cd2e40d8bf178241f46be2e5f85 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Tue, 3 Mar 2020 14:33:34 +0100 Subject: [PATCH 174/263] Update 2019 -> 2020 where needed --- LICENSE | 2 +- README.md | 2 +- docs/License.txt | 2 +- docs/license.rst | 2 +- docs/release-notes.rst | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/LICENSE b/LICENSE index f2a09d18..130e281a 100644 --- a/LICENSE +++ b/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2014-2019 Johan Andersson +Copyright (c) 2014-2020 Johan Andersson Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation diff --git a/README.md b/README.md index 371b56bd..2e8e4fc0 100644 --- a/README.md +++ b/README.md @@ -56,7 +56,7 @@ True ## License & Authors -Copyright (c) 2013-2019 Johan Andersson +Copyright (c) 2013-2020 Johan Andersson MIT (See docs/License.txt file) diff --git a/docs/License.txt b/docs/License.txt index ceabc499..a690495b 100644 --- a/docs/License.txt +++ b/docs/License.txt @@ -1,4 +1,4 @@ -Copyright (c) 2014-2019 Johan Andersson +Copyright (c) 2014-2020 Johan Andersson Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation diff --git a/docs/license.rst b/docs/license.rst index d023468c..854e0526 100644 --- a/docs/license.rst +++ b/docs/license.rst @@ -1,7 +1,7 @@ Licensing --------- -Copyright (c) 2013-2019 Johan Andersson +Copyright (c) 2013-2020 Johan Andersson MIT (See docs/License.txt file) diff --git a/docs/release-notes.rst b/docs/release-notes.rst index b2c7b2e4..5abb4709 100644 --- a/docs/release-notes.rst +++ b/docs/release-notes.rst @@ -1,7 +1,7 @@ Release Notes ============= -2.1.0 (xxx yy, 2019) +2.1.0 (xxx yy, 2020) * Updated redis-py compatbile version to support any version in the major version 3.0.x, 3.1.x, 3.2.x, 3.3.x. 
(#326) * Fixed bug preventing reinitialization after getting MOVED errors From c902459ac56fb890422cd485e7f3109923903d5f Mon Sep 17 00:00:00 2001 From: n89nanda Date: Mon, 24 Feb 2020 18:12:30 -0500 Subject: [PATCH 175/263] added back python 3.5 support --- .travis.yml | 1 + docs/index.rst | 1 + docs/upgrading.rst | 2 +- tox.ini | 8 +++++++- 4 files changed, 10 insertions(+), 2 deletions(-) diff --git a/.travis.yml b/.travis.yml index 7fd934a2..2fcfcc51 100644 --- a/.travis.yml +++ b/.travis.yml @@ -4,6 +4,7 @@ language: python cache: pip python: - "2.7" + - "3.5" - "3.6" - "3.7" - "3.8" diff --git a/docs/index.rst b/docs/index.rst index 3721c6eb..bf884f6c 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -89,6 +89,7 @@ Python versions should follow the same supported python versions as specificed b If this library supports more then one major version line of `redis-py`, then the supported python versions must include the set of supported python versions by all major version lines. - 2.7 +- 3.5 - 3.6 - 3.7 - 3.8 diff --git a/docs/upgrading.rst b/docs/upgrading.rst index ffae3ef6..f5824568 100644 --- a/docs/upgrading.rst +++ b/docs/upgrading.rst @@ -6,7 +6,7 @@ This document describes what must be done when upgrading between different versi 2.0.0 --> 2.1.0 --------------- -Python3 version must now be one of 3.6, 3.7, 3.8 +Python3 version must now be one of 3.5, 3.6, 3.7, 3.8 1.3.x --> 2.0.0 diff --git a/tox.ini b/tox.ini index 5300e4d6..dffea718 100644 --- a/tox.ini +++ b/tox.ini @@ -4,7 +4,7 @@ # install tox" and then run "tox" from this directory. [tox] -envlist = py27, py36, py37, py38, hi27, hi36, hi37, hi38, flake8-py34, flake8-py27 +envlist = py27, py35, py36, py37, py38, hi27, hi35, hi36, hi37, hi38, flake8-py34, flake8-py27 [testenv] deps = -r{toxinidir}/dev-requirements.txt @@ -16,6 +16,12 @@ deps = -r{toxinidir}/dev-requirements.txt hiredis == 0.2.0 +[testenv:hi35] +basepython = python3.5 +deps = + -r{toxinidir}/dev-requirements.txt + hiredis == 0.2.0 + [testenv:hi36] basepython = python3.6 deps = From 85af0bf6262649378e307fc7d5b032fe431761e0 Mon Sep 17 00:00:00 2001 From: Matt Robenolt Date: Tue, 28 Jan 2020 19:23:58 -0800 Subject: [PATCH 176/263] fix: ClusterConnectionPool.count_all_num_connections is not thread safe in python 3 In python 3, `dict.values()` is an iterator, whereas in python 2 it returns a copy of the values as a list. When running under python 3 and multiple threads, that means the dictionary _created_connections_per_node is now able to be mutated and iterated over at the same time, thus raising a RuntimeError. The race comes from multiple threads calling `ClusterConnectionPool.make_connection` at the same time. Coercing the `.values()` to a `list()` _appears_ to the best of my knowledge to be locked by the GIL and doesn't cause this issue. 
I have stress tested this with: https://gist.github.com/mattrobenolt/e88d940c9afa6a18d6b39328eeb9a8f6 --- rediscluster/connection.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/rediscluster/connection.py b/rediscluster/connection.py index 92122c96..f63a9d53 100644 --- a/rediscluster/connection.py +++ b/rediscluster/connection.py @@ -213,9 +213,10 @@ def make_connection(self, node): """ Create a new connection """ - if self.count_all_num_connections(node) >= self.max_connections: + num_connections = self.count_all_num_connections(node) + if num_connections >= self.max_connections: if self.max_connections_per_node: - raise RedisClusterException("Too many connection ({0}) for node: {1}".format(self.count_all_num_connections(node), node['name'])) + raise RedisClusterException("Too many connection ({0}) for node: {1}".format(num_connections, node['name'])) raise RedisClusterException("Too many connections") @@ -268,7 +269,7 @@ def count_all_num_connections(self, node): if self.max_connections_per_node: return self._created_connections_per_node.get(node['name'], 0) - return sum([i for i in self._created_connections_per_node.values()]) + return sum([i for i in list(self._created_connections_per_node.values())]) def get_random_connection(self): """ From f273b65d887a4d711a6b5a4fed1176d7080a5b43 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Tue, 3 Mar 2020 18:44:38 +0100 Subject: [PATCH 177/263] Updated test method test_cert_reqs_options and added missing methods to DummyConnection class in the same test file --- tests/test_cluster_connection_pool.py | 37 ++++++++++++++++++++------- 1 file changed, 28 insertions(+), 9 deletions(-) diff --git a/tests/test_cluster_connection_pool.py b/tests/test_cluster_connection_pool.py index faf2caa2..4d7d0d80 100644 --- a/tests/test_cluster_connection_pool.py +++ b/tests/test_cluster_connection_pool.py @@ -31,6 +31,12 @@ def __init__(self, host="localhost", port=7000, socket_timeout=None, **kwargs): self.port = port self.socket_timeout = socket_timeout + def connect(self): + pass + + def can_read(self): + return False + def get_pool(connection_kwargs=None, max_connections=None, max_connections_per_node=None, connection_class=DummyConnection, init_slot_cache=True): connection_kwargs = connection_kwargs or {} @@ -768,18 +774,31 @@ def test_defaults(self): @pytest.mark.skipif(not ssl_available, reason="SSL not installed") def test_cert_reqs_options(self): - """ - rediss://[[username]:[password]]@localhost:6379/0 - """ import ssl - pool = get_pool().from_url('rediss://localhost:7000?ssl_cert_reqs=none') - assert pool.get_random_connection().cert_reqs == ssl.CERT_NONE - pool = get_pool().from_url('rediss://localhost:7000?ssl_cert_reqs=optional') - assert pool.get_random_connection().cert_reqs == ssl.CERT_OPTIONAL + class DummyConnectionPool(redis.ConnectionPool): + def get_connection(self, *args, **kwargs): + return self.make_connection() + + pool = DummyConnectionPool.from_url( + 'rediss://?ssl_cert_reqs=none') + assert pool.get_connection('_').cert_reqs == ssl.CERT_NONE + + pool = DummyConnectionPool.from_url( + 'rediss://?ssl_cert_reqs=optional') + assert pool.get_connection('_').cert_reqs == ssl.CERT_OPTIONAL + + pool = DummyConnectionPool.from_url( + 'rediss://?ssl_cert_reqs=required') + assert pool.get_connection('_').cert_reqs == ssl.CERT_REQUIRED + + pool = DummyConnectionPool.from_url( + 'rediss://?ssl_check_hostname=False') + assert pool.get_connection('_').check_hostname is False - pool = 
get_pool().from_url('rediss://localhost:7000?ssl_cert_reqs=required') - assert pool.get_random_connection().cert_reqs == ssl.CERT_REQUIRED + pool = DummyConnectionPool.from_url( + 'rediss://?ssl_check_hostname=True') + assert pool.get_connection('_').check_hostname is True class TestConnection(object): From 1fd2d3c3882799f2da550cf4b1b54a75c6f56c00 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Tue, 3 Mar 2020 19:15:47 +0100 Subject: [PATCH 178/263] Rework the old ClusterDownError handling code to not be a wrapper around the methods it tries to protect against but be the method entry point instead. This allows for the number of attempts to be tried to be controlled through a config option to the RedisCluster and ClusterPipeline classes. Fixes #313 --- docs/release-notes.rst | 2 ++ rediscluster/client.py | 31 ++++++++++++++++++++++++++++--- rediscluster/pipeline.py | 37 +++++++++++++++++++++++++++++++++---- rediscluster/utils.py | 26 -------------------------- tests/test_utils.py | 11 ----------- 5 files changed, 63 insertions(+), 44 deletions(-) diff --git a/docs/release-notes.rst b/docs/release-notes.rst index 5abb4709..b727479e 100644 --- a/docs/release-notes.rst +++ b/docs/release-notes.rst @@ -3,6 +3,8 @@ Release Notes 2.1.0 (xxx yy, 2020) + * Add new config option for Client and Pipeline classes to controll how many attempts will be made before bailing out from a ClusterDownError. + Use "cluster_down_retry_attempts=" when creating the client class to controll this behaviour. * Updated redis-py compatbile version to support any version in the major version 3.0.x, 3.1.x, 3.2.x, 3.3.x. (#326) * Fixed bug preventing reinitialization after getting MOVED errors diff --git a/rediscluster/client.py b/rediscluster/client.py index ead380da..133cb197 100644 --- a/rediscluster/client.py +++ b/rediscluster/client.py @@ -26,7 +26,6 @@ from .utils import ( blocked_command, bool_ok, - clusterdown_wrapper, dict_merge, first_key, merge_result, @@ -290,7 +289,7 @@ class RedisCluster(Redis): def __init__(self, host=None, port=None, startup_nodes=None, max_connections=None, max_connections_per_node=False, init_slot_cache=True, readonly_mode=False, reinitialize_steps=None, skip_full_coverage_check=False, nodemanager_follow_cluster=False, - connection_class=None, read_from_replicas=False, **kwargs): + connection_class=None, read_from_replicas=False, cluster_down_retry_attempts=3, **kwargs): """ :startup_nodes: List of nodes that initial bootstrapping can be done from @@ -359,6 +358,7 @@ def __init__(self, host=None, port=None, startup_nodes=None, max_connections=Non self.response_callbacks = CaseInsensitiveDict(self.__class__.RESPONSE_CALLBACKS) self.response_callbacks = CaseInsensitiveDict(dict_merge(self.response_callbacks, self.CLUSTER_COMMANDS_RESPONSE_CALLBACKS)) self.read_from_replicas = read_from_replicas + self.cluster_down_retry_attempts = cluster_down_retry_attempts @classmethod def from_url(cls, url, db=None, skip_full_coverage_check=False, readonly_mode=False, read_from_replicas=False, **kwargs): @@ -425,6 +425,7 @@ def pipeline(self, transaction=None, shard_hint=None): startup_nodes=self.connection_pool.nodes.startup_nodes, result_callbacks=self.result_callbacks, response_callbacks=self.response_callbacks, + cluster_down_retry_attempts=self.cluster_down_retry_attempts, ) def transaction(self, *args, **kwargs): @@ -496,8 +497,32 @@ def determine_node(self, *args, **kwargs): else: return None - @clusterdown_wrapper def execute_command(self, *args, **kwargs): + """ + Wrapper for CLUSTERDOWN error 
handling. + + If the cluster reports it is down it is assumed that: + - connection_pool was disconnected + - connection_pool was reseted + - refereh_table_asap set to True + + It will try the number of times specified by the config option "self.cluster_down_retry_attempts" + which defaults to 3 unless manually configured. + + If it reaches the number of times, the command will raises ClusterDownException. + """ + for _ in range(0, self.cluster_down_retry_attempts): + try: + return self._execute_command(*args, **kwargs) + except ClusterDownError: + # Try again with the new cluster setup. All other errors + # should be raised. + pass + + # If it fails the configured number of times then raise exception back to caller of this method + raise ClusterDownError("CLUSTERDOWN error. Unable to rebuild the cluster") + + def _execute_command(self, *args, **kwargs): """ Send a command to a node in the cluster """ diff --git a/rediscluster/pipeline.py b/rediscluster/pipeline.py index 6782a3e2..1b7065fc 100644 --- a/rediscluster/pipeline.py +++ b/rediscluster/pipeline.py @@ -6,9 +6,9 @@ # rediscluster imports from .client import RedisCluster from .exceptions import ( - RedisClusterException, AskError, MovedError, TryAgainError, + RedisClusterException, AskError, MovedError, TryAgainError, ClusterDownError, ) -from .utils import clusterdown_wrapper, dict_merge +from .utils import dict_merge # 3rd party imports from redis import Redis @@ -24,7 +24,7 @@ class ClusterPipeline(RedisCluster): """ def __init__(self, connection_pool, result_callbacks=None, - response_callbacks=None, startup_nodes=None, read_from_replicas=False): + response_callbacks=None, startup_nodes=None, read_from_replicas=False, cluster_down_retry_attempts=3): """ """ self.command_stack = [] @@ -36,6 +36,7 @@ def __init__(self, connection_pool, result_callbacks=None, self.nodes_flags = self.__class__.NODES_FLAGS.copy() self.response_callbacks = dict_merge(response_callbacks or self.__class__.RESPONSE_CALLBACKS.copy(), self.CLUSTER_COMMANDS_RESPONSE_CALLBACKS) + self.cluster_down_retry_attempts = cluster_down_retry_attempts def __repr__(self): """ @@ -144,8 +145,36 @@ def reset(self): # self.connection_pool.release(self.connection) # self.connection = None - @clusterdown_wrapper def send_cluster_commands(self, stack, raise_on_error=True, allow_redirections=True): + """ + Wrapper for CLUSTERDOWN error handling. + + If the cluster reports it is down it is assumed that: + - connection_pool was disconnected + - connection_pool was reseted + - refereh_table_asap set to True + + It will try the number of times specified by the config option "self.cluster_down_retry_attempts" + which defaults to 3 unless manually configured. + + If it reaches the number of times, the command will raises ClusterDownException. + """ + for _ in range(0, self.cluster_down_retry_attempts): + try: + return self._send_cluster_commands( + stack, + raise_on_error=raise_on_error, + allow_redirections=allow_redirections, + ) + except ClusterDownError: + # Try again with the new cluster setup. All other errors + # should be raised. + pass + + # If it fails the configured number of times then raise exception back to caller of this method + raise ClusterDownError("CLUSTERDOWN error. Unable to rebuild the cluster") + + def _send_cluster_commands(self, stack, raise_on_error=True, allow_redirections=True): """ Send a bunch of cluster commands to the redis cluster. 
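With this rework the client and the pipeline share the same retry shape: the public entry point loops over the configured number of attempts and only re-raises ClusterDownError once they are exhausted. A minimal sketch of how a caller tunes the new option, assuming a cluster node on 127.0.0.1:7000 (the address and key names are illustrative only):

    from rediscluster import RedisCluster
    from rediscluster.exceptions import ClusterDownError

    rc = RedisCluster(
        startup_nodes=[{'host': '127.0.0.1', 'port': '7000'}],
        cluster_down_retry_attempts=5,  # default is 3
    )
    try:
        rc.set('foo', 'bar')  # retried up to 5 times while the cluster reports CLUSTERDOWN
    except ClusterDownError:
        print('cluster still reports itself as down after all attempts')

    # Pipelines created from this client inherit the same setting
    pipe = rc.pipeline()
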
diff --git a/rediscluster/utils.py b/rediscluster/utils.py index ce038d93..fcd1e7f3 100644 --- a/rediscluster/utils.py +++ b/rediscluster/utils.py @@ -83,32 +83,6 @@ def first_key(command, res): return list(res.values())[0] -def clusterdown_wrapper(func): - """ - Wrapper for CLUSTERDOWN error handling. - - If the cluster reports it is down it is assumed that: - - connection_pool was disconnected - - connection_pool was reseted - - refereh_table_asap set to True - - It will try 3 times to rerun the command and raises ClusterDownException if it continues to fail. - """ - @wraps(func) - def inner(*args, **kwargs): - for _ in range(0, 3): - try: - return func(*args, **kwargs) - except ClusterDownError: - # Try again with the new cluster setup. All other errors - # should be raised. - pass - - # If it fails 3 times then raise exception back to caller - raise ClusterDownError("CLUSTERDOWN error. Unable to rebuild the cluster") - return inner - - def nslookup(node_ip): """ """ diff --git a/tests/test_utils.py b/tests/test_utils.py index 31dccf18..4e0c03db 100644 --- a/tests/test_utils.py +++ b/tests/test_utils.py @@ -13,7 +13,6 @@ blocked_command, merge_result, first_key, - clusterdown_wrapper, parse_cluster_slots, ) @@ -139,13 +138,3 @@ def test_first_key(): def test_first_key_value_error(): with pytest.raises(ValueError): first_key("foobar", None) - - -def test_clusterdown_wrapper(): - @clusterdown_wrapper - def bad_func(): - raise ClusterDownError("CLUSTERDOWN") - - with pytest.raises(ClusterDownError) as cex: - bad_func() - assert unicode(cex.value).startswith("CLUSTERDOWN error. Unable to rebuild the cluster") From 0d6dc5ed10ca77784142c8eda8252f8005058b4d Mon Sep 17 00:00:00 2001 From: Grokzen Date: Tue, 5 May 2020 15:58:47 +0200 Subject: [PATCH 179/263] Update the requirements version spec to allow for any release inside the 3.x.y version space to ease the use when new major versions of redis-py is released --- README.md | 2 +- requirements.txt | 2 +- setup.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 2e8e4fc0..257e4098 100644 --- a/README.md +++ b/README.md @@ -30,7 +30,7 @@ Latest stable release from pypi $ pip install redis-py-cluster ``` -This major version of `redis-py-cluster` supports `redis-py >=3.0.0, <3.5.0`. +This major version of `redis-py-cluster` supports `redis-py >=3.0.0, <4.0.0`. 
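To make the widened specifier concrete, the range can be checked with the third-party packaging library; this is purely illustrative and not a dependency of the project:

    from packaging.specifiers import SpecifierSet

    spec = SpecifierSet('>=3.0.0,<4.0.0')
    print('3.5.3' in spec)  # True  - allowed now, but excluded by the old <3.5.0 pin
    print('3.3.11' in spec) # True  - any redis-py 3.x release satisfies the new pin
    print('4.0.0' in spec)  # False - the next major version is still excluded
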
diff --git a/requirements.txt b/requirements.txt index debda9a9..73054e91 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1 +1 @@ -redis>=3.0.0,<3.5.0 +redis>=3.0.0,<4.0.0 diff --git a/setup.py b/setup.py index 1603c29c..706293b6 100644 --- a/setup.py +++ b/setup.py @@ -32,7 +32,7 @@ url='http://github.com/grokzen/redis-py-cluster', license='MIT', install_requires=[ - 'redis>=3.0.0,<3.5.0' + 'redis>=3.0.0,<4.0.0' ], python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4, !=3.5", extras_require={ From f8d6661cfbcb13e05531497dd3eaaa39e14cb8df Mon Sep 17 00:00:00 2001 From: Grokzen Date: Tue, 5 May 2020 16:00:01 +0200 Subject: [PATCH 180/263] Bump default version of redis inside makefile from 5.0.5 to 5.0.9 --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 563494de..0ffe3672 100644 --- a/Makefile +++ b/Makefile @@ -216,7 +216,7 @@ ifndef REDIS_TRIB_RB endif ifndef REDIS_VERSION - REDIS_VERSION=5.0.5 + REDIS_VERSION=5.0.9 endif export REDIS_CLUSTER_NODE1_CONF From 7b2ea2ed4c988a06212c85d749a32ba94e0affc5 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Tue, 5 May 2020 16:01:12 +0200 Subject: [PATCH 181/263] Update flake8 dependency inside tox to latest stable version --- tox.ini | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tox.ini b/tox.ini index dffea718..148d7280 100644 --- a/tox.ini +++ b/tox.ini @@ -43,11 +43,11 @@ deps = [testenv:flake8-py36] basepython= python3.6 deps = - flake8==2.2.5 + flake8==3.7.9 commands = flake8 --show-source --exclude=.venv,.tox,dist,docs,build,.git --ignore=E501,E731,E402 . [testenv:flake8-py27] basepython= python2.7 deps = - flake8==2.2.5 + flake8==3.7.9 commands = flake8 --show-source --exclude=.venv,.tox,dist,docs,build,.git --ignore=E501,E731,E402 . From 2b8702e9b3658e272bfba53afb7410c1f113c0c8 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Wed, 6 May 2020 15:06:02 +0200 Subject: [PATCH 182/263] Improve the from_url method to fix the problem when SSLconnections was not possible to use. Added tests to show feature works and that it ensures that clusterconnection and sslclusterconnection is set in a correct way. Fixes #281 --- rediscluster/client.py | 12 ++++++++++++ tests/test_cluster_connection_pool.py | 18 ++++++++++++++++++ 2 files changed, 30 insertions(+) diff --git a/rediscluster/client.py b/rediscluster/client.py index 133cb197..c1b89138 100644 --- a/rediscluster/client.py +++ b/rediscluster/client.py @@ -9,6 +9,7 @@ # rediscluster imports from .connection import ( + ClusterConnection, ClusterConnectionPool, ClusterReadOnlyConnectionPool, ClusterWithReadReplicasConnectionPool, @@ -40,6 +41,7 @@ # 3rd party imports from redis import Redis from redis.client import list_or_args, parse_info +from redis.connection import Connection, SSLConnection from redis._compat import iteritems, basestring, izip, nativestr, long from redis.exceptions import ( BusyLoadingError, @@ -381,6 +383,9 @@ def from_url(cls, url, db=None, skip_full_coverage_check=False, readonly_mode=Fa passed along to the ConnectionPool class's initializer. In the case of conflicting arguments, querystring arguments always win. 
""" + if url.lower().startswith('unix://'): + raise RedisClusterException('Unix sockets do not work in a cluster environment') + if readonly_mode: connection_pool_cls = ClusterReadOnlyConnectionPool elif read_from_replicas: @@ -389,6 +394,13 @@ def from_url(cls, url, db=None, skip_full_coverage_check=False, readonly_mode=Fa connection_pool_cls = ClusterConnectionPool connection_pool = connection_pool_cls.from_url(url, db=db, skip_full_coverage_check=skip_full_coverage_check, **kwargs) + + if connection_pool.connection_class == SSLConnection: + connection_pool.connection_class = SSLClusterConnection + + if connection_pool.connection_class == Connection: + connection_pool.connection_class = ClusterConnection + return cls(connection_pool=connection_pool, skip_full_coverage_check=skip_full_coverage_check) def __repr__(self): diff --git a/tests/test_cluster_connection_pool.py b/tests/test_cluster_connection_pool.py index 4d7d0d80..db79f1ec 100644 --- a/tests/test_cluster_connection_pool.py +++ b/tests/test_cluster_connection_pool.py @@ -7,6 +7,7 @@ from threading import Thread # rediscluster imports +from rediscluster.client import RedisCluster from rediscluster.connection import ( ClusterConnectionPool, ClusterBlockingConnectionPool, ClusterReadOnlyConnectionPool, ClusterConnection, UnixDomainSocketConnection) @@ -204,6 +205,23 @@ def test_master_node_by_slot(self): node = pool.get_master_node_by_slot(12182) node['port'] = 7002 + def test_from_url_connection_classes(self): + from rediscluster.client import RedisCluster + from rediscluster.connection import ClusterConnectionPool, ClusterConnection, SSLClusterConnection + + r = RedisCluster.from_url('redis://localhost:7000') + assert isinstance(r.connection_pool, ClusterConnectionPool) + # connection_class is not an object but a ref to the class + assert r.connection_pool.connection_class == ClusterConnection + + r = RedisCluster.from_url('rediss://localhost:7000') + assert isinstance(r.connection_pool, ClusterConnectionPool) + assert r.connection_pool.connection_class == SSLClusterConnection + + # Unix socket connections do not work in cluster environment + with pytest.raises(RedisClusterException) as ex: + r = RedisCluster.from_url('unix://foobar@/tmp/random.sock') + class TestClusterBlockingConnectionPool(object): def get_pool(self, connection_kwargs=None, max_connections=100, max_connections_per_node=None, From c62012c7bfaf3ffe4f199de39085fe355fcc5737 Mon Sep 17 00:00:00 2001 From: Kyle Hersey Date: Thu, 26 Mar 2020 17:26:10 -0700 Subject: [PATCH 183/263] fix(error handling): ConnectionError in Client --- rediscluster/client.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/rediscluster/client.py b/rediscluster/client.py index c1b89138..3d42a620 100644 --- a/rediscluster/client.py +++ b/rediscluster/client.py @@ -594,11 +594,13 @@ def _execute_command(self, *args, **kwargs): return self.parse_response(r, command, **kwargs) except (RedisClusterException, BusyLoadingError): raise - except (ConnectionError, TimeoutError): - try_random_node = True - + except ConnectionError: + r.disconnect() + except TimeoutError: if ttl < self.RedisClusterRequestTTL / 2: - time.sleep(0.1) + time.sleep(0.05) + else: + try_random_node = True except ClusterDownError as e: self.connection_pool.disconnect() self.connection_pool.reset() From aa08a8aabb18b0f122bdfc07ac8c35dc652714b6 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Thu, 7 May 2020 00:27:35 +0200 Subject: [PATCH 184/263] When consuming refresh_table_asap flag inside the while 
loop, reset it so that external use of the flag in the next command does not reset the nodes cache improperly. --- rediscluster/client.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/rediscluster/client.py b/rediscluster/client.py index 3d42a620..b6646b63 100644 --- a/rediscluster/client.py +++ b/rediscluster/client.py @@ -573,6 +573,8 @@ def _execute_command(self, *args, **kwargs): if self.refresh_table_asap: # MOVED node = self.connection_pool.get_master_node_by_slot(slot) + # Reset the flag when it has been consumed to avoid it being + self.refresh_table_asap = False else: node = self.connection_pool.get_node_by_slot(slot, self.read_from_replicas and (command in self.READ_COMMANDS)) is_read_replica = node['server_type'] == 'slave' From d10506acbdedc48efca4af5e7910f475d6fb07fe Mon Sep 17 00:00:00 2001 From: Gnanesh Date: Tue, 28 Apr 2020 13:37:07 +0530 Subject: [PATCH 185/263] Add username field to nodemanager --- rediscluster/nodemanager.py | 1 + 1 file changed, 1 insertion(+) diff --git a/rediscluster/nodemanager.py b/rediscluster/nodemanager.py index 9e2d15aa..9fe19f66 100644 --- a/rediscluster/nodemanager.py +++ b/rediscluster/nodemanager.py @@ -111,6 +111,7 @@ def get_redis_link(self, host, port, decode_responses=False): 'host', 'port', 'db', + 'username', 'password', 'socket_timeout', 'socket_connect_timeout', From 25793a04f729579544a4fbed7c78092b8ccdb211 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Fri, 15 May 2020 02:05:44 +0200 Subject: [PATCH 186/263] First iteration of rebuilding all test files into a new way of having the upstream tests more easily updated in the future to reduce the amount of work needed to update them when a new redis-py version drops. Created a new test file test_commands_cluster.py that will hold all unique tests and overridden changes that we need to implement for the cluster mode. --- tests/conftest.py | 5 + tests/test_commands.py | 701 ++++++++++++++------------- tests/test_commands_cluster.py | 460 +++++++++++++++++++++ 3 files changed, 754 insertions(+), 412 deletions(-) create mode 100644 tests/test_commands_cluster.py diff --git a/tests/conftest.py b/tests/conftest.py index 1093a82a..f2c8f3a7 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -19,6 +19,11 @@ basepath = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) sys.path.insert(1, basepath) +# redis 6 release candidates report a version number of 5.9.x. Use this +#
Use this +# constant for skip_if decorators as a placeholder until 6.0.0 is officially +# released +REDIS_6_VERSION = '5.9.0' + _REDIS_VERSIONS = {} REDIS_INFO = {} diff --git a/tests/test_commands.py b/tests/test_commands.py index 44c42658..6b47f6c1 100644 --- a/tests/test_commands.py +++ b/tests/test_commands.py @@ -1,25 +1,20 @@ -# -*- coding: utf-8 -*- - -# python std lib from __future__ import unicode_literals +import binascii import datetime +import pytest import re +import redis import time -# rediscluster imports -import rediscluster -from rediscluster.exceptions import RedisClusterException, ClusterCrossSlotError -from rediscluster.utils import dict_merge -from tests.conftest import skip_if_server_version_lt, skip_if_redis_py_version_lt, skip_if_server_version_gte, skip_for_no_cluster_impl, skip_unless_arch_bits - -# 3rd party imports -import pytest -import redis -from redis._compat import unichr, ascii_letters, iteritems, iterkeys, itervalues, unicode +from redis._compat import (unichr, ascii_letters, iteritems, iterkeys, + itervalues, long, basestring) from redis.client import parse_info -from redis.exceptions import ResponseError, DataError, RedisError, DataError from redis import exceptions +from .conftest import (skip_if_server_version_lt, skip_if_server_version_gte, + skip_unless_arch_bits, REDIS_6_VERSION, + skip_for_no_cluster_impl) + @pytest.fixture() def slowlog(request, r): @@ -37,6 +32,11 @@ def cleanup(): def redis_server_time(client): + """ + Method adapted from uptream to return the server timestamp from the main + cluster node that we assigned as port 7000 node. + This is not ideal but will be done for now. + """ all_clients_time = client.time() for server_id, server_time_data in all_clients_time.items(): if '7000' in server_id: @@ -53,28 +53,14 @@ def get_stream_message(client, stream, message_id): return response[0] -def get_main_cluster_node_data(command_result): - """ - Tries to find whatever node is running on port :7000 in the cluster resonse - """ - for node_id, node_data in command_result.items(): - if '7000' in node_id: - return node_data - return None - - # RESPONSE CALLBACKS class TestResponseCallbacks(object): "Tests for the response callback system" + @pytest.mark.skip(reason="Cluster specific override") def test_response_callbacks(self, r): - all_response_callbacks = dict_merge( - rediscluster.RedisCluster.RESPONSE_CALLBACKS, - rediscluster.RedisCluster.CLUSTER_COMMANDS_RESPONSE_CALLBACKS, - ) - - assert r.response_callbacks == all_response_callbacks - assert id(r.response_callbacks) != id(all_response_callbacks) + assert r.response_callbacks == redis.Redis.RESPONSE_CALLBACKS + assert id(r.response_callbacks) != id(redis.Redis.RESPONSE_CALLBACKS) r.set_response_callback('GET', lambda x: 'static') r['a'] = 'foo' assert r['a'] == 'static' @@ -84,26 +70,28 @@ def test_case_insensitive_command_names(self, r): class TestRedisCommands(object): - def test_command_on_invalid_key_type(self, r): r.lpush('a', '1') with pytest.raises(redis.ResponseError): r['a'] # SERVER INFORMATION - @skip_if_server_version_lt('5.9.101') + @skip_if_server_version_lt(REDIS_6_VERSION) + @skip_for_no_cluster_impl() def test_acl_cat_no_category(self, r): categories = r.acl_cat() assert isinstance(categories, list) assert 'read' in categories - @skip_if_server_version_lt('5.9.101') + @skip_if_server_version_lt(REDIS_6_VERSION) + @skip_for_no_cluster_impl() def test_acl_cat_with_category(self, r): commands = r.acl_cat('read') assert isinstance(commands, list) assert 'get' in commands 
- @skip_if_server_version_lt('5.9.101') + @skip_if_server_version_lt(REDIS_6_VERSION) + @skip_for_no_cluster_impl() def test_acl_deluser(self, r, request): username = 'redis-py-user' @@ -116,12 +104,14 @@ def teardown(): assert r.acl_setuser(username, enabled=False, reset=True) assert r.acl_deluser(username) == 1 - @skip_if_server_version_lt('5.9.101') + @skip_if_server_version_lt(REDIS_6_VERSION) + @skip_for_no_cluster_impl() def test_acl_genpass(self, r): password = r.acl_genpass() assert isinstance(password, basestring) - @skip_if_server_version_lt('5.9.101') + @skip_if_server_version_lt(REDIS_6_VERSION) + @skip_for_no_cluster_impl() def test_acl_getuser_setuser(self, r, request): username = 'redis-py-user' @@ -209,7 +199,8 @@ def teardown(): hashed_passwords=['-' + hashed_password]) assert len(r.acl_getuser(username)['passwords']) == 1 - @skip_if_server_version_lt('5.9.101') + @skip_if_server_version_lt(REDIS_6_VERSION) + @skip_for_no_cluster_impl() def test_acl_list(self, r, request): username = 'redis-py-user' @@ -221,7 +212,8 @@ def teardown(): users = r.acl_list() assert 'user %s off -@all' % username in users - @skip_if_server_version_lt('5.9.101') + @skip_if_server_version_lt(REDIS_6_VERSION) + @skip_for_no_cluster_impl() def test_acl_setuser_categories_without_prefix_fails(self, r, request): username = 'redis-py-user' @@ -232,7 +224,8 @@ def teardown(): with pytest.raises(exceptions.DataError): r.acl_setuser(username, categories=['list']) - @skip_if_server_version_lt('5.9.101') + @skip_if_server_version_lt(REDIS_6_VERSION) + @skip_for_no_cluster_impl() def test_acl_setuser_commands_without_prefix_fails(self, r, request): username = 'redis-py-user' @@ -243,7 +236,8 @@ def teardown(): with pytest.raises(exceptions.DataError): r.acl_setuser(username, commands=['get']) - @skip_if_server_version_lt('5.9.101') + @skip_if_server_version_lt(REDIS_6_VERSION) + @skip_for_no_cluster_impl() def test_acl_setuser_add_passwords_and_nopass_fails(self, r, request): username = 'redis-py-user' @@ -254,45 +248,51 @@ def teardown(): with pytest.raises(exceptions.DataError): r.acl_setuser(username, passwords='+mypass', nopass=True) - @skip_if_server_version_lt('5.9.101') + @skip_if_server_version_lt(REDIS_6_VERSION) + @skip_for_no_cluster_impl() def test_acl_users(self, r): users = r.acl_users() assert isinstance(users, list) assert len(users) > 0 - @skip_if_server_version_lt('5.9.101') + @skip_if_server_version_lt(REDIS_6_VERSION) + @skip_for_no_cluster_impl() def test_acl_whoami(self, r): username = r.acl_whoami() assert isinstance(username, basestring) + @pytest.mark.skip(reason="Cluster specific override") def test_client_list(self, r): clients = r.client_list() - client_data = get_main_cluster_node_data(clients)[0] - assert isinstance(client_data, dict) - assert 'addr' in client_data + assert isinstance(clients[0], dict) + assert 'addr' in clients[0] @skip_if_server_version_lt('5.0.0') + @pytest.mark.skip(reason="Cluster specific override") def test_client_list_type(self, r): with pytest.raises(exceptions.RedisError): r.client_list(_type='not a client type') for client_type in ['normal', 'master', 'replica', 'pubsub']: - clients = get_main_cluster_node_data(r.client_list(_type=client_type)) + clients = r.client_list(_type=client_type) assert isinstance(clients, list) @skip_if_server_version_lt('5.0.0') + @pytest.mark.skip(reason="Cluster specific override") def test_client_id(self, r): - assert get_main_cluster_node_data(r.client_id()) > 0 + assert r.client_id() > 0 
@skip_if_server_version_lt('5.0.0') + @pytest.mark.skip(reason="Cluster specific override") def test_client_unblock(self, r): - myid = get_main_cluster_node_data(r.client_id()) + myid = r.client_id() assert not r.client_unblock(myid) assert not r.client_unblock(myid, error=True) assert not r.client_unblock(myid, error=False) @skip_if_server_version_lt('2.6.9') + @pytest.mark.skip(reason="Cluster specific override") def test_client_getname(self, r): - assert get_main_cluster_node_data(r.client_getname()) is None + assert r.client_getname() is None @skip_if_server_version_lt('2.6.9') @skip_for_no_cluster_impl() @@ -392,52 +392,52 @@ def test_client_pause(self, r): with pytest.raises(exceptions.RedisError): r.client_pause(timeout='not an integer') + @pytest.mark.skip(reason="Cluster specific override") def test_config_get(self, r): - data = get_main_cluster_node_data(r.config_get()) + data = r.config_get() assert 'maxmemory' in data assert data['maxmemory'].isdigit() + @pytest.mark.skip(reason="Cluster specific override") def test_config_resetstat(self, r): r.ping() - prior_commands_processed = int(get_main_cluster_node_data(r.info())['total_commands_processed']) + prior_commands_processed = int(r.info()['total_commands_processed']) assert prior_commands_processed >= 1 r.config_resetstat() - reset_commands_processed = int(get_main_cluster_node_data(r.info())['total_commands_processed']) + reset_commands_processed = int(r.info()['total_commands_processed']) assert reset_commands_processed < prior_commands_processed + @pytest.mark.skip(reason="Cluster specific override") def test_config_set(self, r): - data = get_main_cluster_node_data(r.config_get()) + data = r.config_get() rdbname = data['dbfilename'] try: assert r.config_set('dbfilename', 'redis_py_test.rdb') - assert get_main_cluster_node_data(r.config_get())['dbfilename'] == 'redis_py_test.rdb' + assert r.config_get()['dbfilename'] == 'redis_py_test.rdb' finally: assert r.config_set('dbfilename', rdbname) + @pytest.mark.skip(reason="Cluster specific override") def test_dbsize(self, r): r['a'] = 'foo' r['b'] = 'bar' - # Count all commands sent to the DB. 
Since we have one slave - # for every master we will look for 4 and not 2 - dbsize_sum = sum([db_size_count for node_id, db_size_count in r.dbsize().items()]) - assert dbsize_sum == 4 + assert r.dbsize() == 2 + @pytest.mark.skip(reason="Cluster specific override") def test_echo(self, r): - assert get_main_cluster_node_data(r.echo('foo bar')) == b'foo bar' + assert r.echo('foo bar') == b'foo bar' + @pytest.mark.skip(reason="Cluster specific override") def test_info(self, r): r['a'] = 'foo' r['b'] = 'bar' - info = get_main_cluster_node_data(r.info()) + info = r.info() assert isinstance(info, dict) - # We only have a "db0" in cluster mode and only one of the commands will bind to node :7000 - assert info['db0']['keys'] == 1 - # Sum all keys in all slots - keys_sum = sum([node_data.get('db0', {}).get('keys', 0) for node_id, node_data in r.info().items()]) - assert keys_sum == 4 + assert info['db9']['keys'] == 2 + @pytest.mark.skip(reason="Cluster specific override") def test_lastsave(self, r): - assert isinstance(get_main_cluster_node_data(r.lastsave()), datetime.datetime) + assert isinstance(r.lastsave(), datetime.datetime) def test_object(self, r): r['a'] = 'foo' @@ -454,7 +454,7 @@ def test_slowlog_get(self, r, slowlog): assert r.slowlog_reset() unicode_string = unichr(3456) + 'abcd' + unichr(3421) r.get(unicode_string) - slowlog = get_main_cluster_node_data(r.slowlog_get()) + slowlog = r.slowlog_get() assert isinstance(slowlog, list) commands = [log['command'] for log in slowlog] @@ -488,8 +488,9 @@ def test_slowlog_length(self, r, slowlog): assert isinstance(r.slowlog_len(), int) @skip_if_server_version_lt('2.6.0') + @pytest.mark.skip(reason="Cluster specific override") def test_time(self, r): - t = get_main_cluster_node_data(r.time()) + t = r.time() assert len(t) == 2 assert isinstance(t[0], int) assert isinstance(t[1], int) @@ -520,15 +521,6 @@ def test_bitcount(self, r): assert r.bitcount('a', -2, -1) == 2 assert r.bitcount('a', 1, 1) == 1 - # TODO: Move this method to a more generic solution/method that tests the blocked nodes flags feature - def test_bitop_not_supported(self, r): - """ - Validate that the command is blocked in cluster mode and throws an Exception - """ - r['a'] = '' - with pytest.raises(RedisClusterException): - r.bitop('not', 'r', 'a') - @skip_if_server_version_lt('2.6.0') @skip_for_no_cluster_impl() def test_bitop_not_empty_string(self, r): @@ -664,12 +656,13 @@ def test_dump_and_restore_and_replace(self, r): r.restore('a', 0, dumped, replace=True) assert r['a'] == b'bar' + @pytest.mark.skip(reason="Cluster specific override") def test_exists(self, r): assert r.exists('a') == 0 - r['G0B96'] = 'foo' - r['TEFX5'] = 'bar' - assert r.exists('G0B96') == 1 - assert r.exists('G0B96', 'TEFX5') == 2 + r['a'] = 'foo' + r['b'] = 'bar' + assert r.exists('a') == 1 + assert r.exists('a', 'b') == 2 def test_exists_contains(self, r): assert 'a' not in r @@ -682,7 +675,6 @@ def test_expire(self, r): assert r.expire('a', 10) assert 0 < r.ttl('a') <= 10 assert r.persist('a') - # the ttl command changes behavior in redis-2.8+ http://redis.io/commands/ttl assert r.ttl('a') == -1 def test_expireat_datetime(self, r): @@ -926,6 +918,15 @@ def test_set_multipleoptions(self, r): assert r.set('a', '1', xx=True, px=10000) assert 0 < r.ttl('a') <= 10 + @skip_if_server_version_lt(REDIS_6_VERSION) + def test_set_keepttl(self, r): + r['a'] = 'val' + assert r.set('a', '1', xx=True, px=10000) + assert 0 < r.ttl('a') <= 10 + r.set('a', '2', keepttl=True) + assert r.get('a') == b'2' + assert 0 < 
r.ttl('a') <= 10 + def test_setex(self, r): assert r.setex('a', 60, '1') assert r['a'] == b'1' @@ -982,33 +983,27 @@ def test_type(self, r): assert r.type('a') == b'zset' # LIST COMMANDS + @pytest.mark.skip(reason="Cluster specific override") def test_blpop(self, r): - """ - Generated keys for slot - 16299: ['0J8KD', '822JO', '8TJPT', 'HD644', 'SKUCM', 'N4N5Z', 'NRSWJ'] - """ - r.rpush('0J8KD', '1', '2') - r.rpush('822JO', '3', '4') - assert r.blpop(['822JO', '0J8KD'], timeout=1) == (b'822JO', b'3') - assert r.blpop(['822JO', '0J8KD'], timeout=1) == (b'822JO', b'4') - assert r.blpop(['822JO', '0J8KD'], timeout=1) == (b'0J8KD', b'1') - assert r.blpop(['822JO', '0J8KD'], timeout=1) == (b'0J8KD', b'2') - assert r.blpop(['822JO', '0J8KD'], timeout=1) is None + r.rpush('a', '1', '2') + r.rpush('b', '3', '4') + assert r.blpop(['b', 'a'], timeout=1) == (b'b', b'3') + assert r.blpop(['b', 'a'], timeout=1) == (b'b', b'4') + assert r.blpop(['b', 'a'], timeout=1) == (b'a', b'1') + assert r.blpop(['b', 'a'], timeout=1) == (b'a', b'2') + assert r.blpop(['b', 'a'], timeout=1) is None r.rpush('c', '1') assert r.blpop('c', timeout=1) == (b'c', b'1') + @pytest.mark.skip(reason="Cluster specific override") def test_brpop(self, r): - """ - Generated keys for slot - 16299: ['0J8KD', '822JO', '8TJPT', 'HD644', 'SKUCM', 'N4N5Z', 'NRSWJ'] - """ - r.rpush('0J8KD', '1', '2') - r.rpush('822JO', '3', '4') - assert r.brpop(['822JO', '0J8KD'], timeout=1) == (b'822JO', b'4') - assert r.brpop(['822JO', '0J8KD'], timeout=1) == (b'822JO', b'3') - assert r.brpop(['822JO', '0J8KD'], timeout=1) == (b'0J8KD', b'2') - assert r.brpop(['822JO', '0J8KD'], timeout=1) == (b'0J8KD', b'1') - assert r.brpop(['822JO', '0J8KD'], timeout=1) is None + r.rpush('a', '1', '2') + r.rpush('b', '3', '4') + assert r.brpop(['b', 'a'], timeout=1) == (b'b', b'4') + assert r.brpop(['b', 'a'], timeout=1) == (b'b', b'3') + assert r.brpop(['b', 'a'], timeout=1) == (b'a', b'2') + assert r.brpop(['b', 'a'], timeout=1) == (b'a', b'1') + assert r.brpop(['b', 'a'], timeout=1) is None r.rpush('c', '1') assert r.brpop('c', timeout=1) == (b'c', b'1') @@ -1121,21 +1116,19 @@ def test_rpushx(self, r): # SCAN COMMANDS @skip_if_server_version_lt('2.8.0') + @pytest.mark.skip(reason="Cluster specific override") def test_scan(self, r): - """ - Generated keys for slot - 0 : ['GQ5KU', 'IFWJL', 'X582D'] - """ - r.set('GQ5KU', 1) - r.set('IFWJL', 2) - r.set('X582D', 3) - cursor, keys = get_main_cluster_node_data(r.scan()) + r.set('a', 1) + r.set('b', 2) + r.set('c', 3) + cursor, keys = r.scan() assert cursor == 0 - assert set(keys) == {b'GQ5KU', b'IFWJL', b'X582D'} - _, keys = get_main_cluster_node_data(r.scan(match='GQ5KU')) - assert set(keys) == {b'GQ5KU'} + assert set(keys) == {b'a', b'b', b'c'} + _, keys = r.scan(match='a') + assert set(keys) == {b'a'} - @skip_if_server_version_lt('5.9.101') + @skip_if_server_version_lt(REDIS_6_VERSION) + @pytest.mark.skip(reason="Cluster specific override") def test_scan_type(self, r): r.sadd('a-set', 1) r.hset('a-hash', 'foo', 2) @@ -1172,7 +1165,7 @@ def test_sscan_iter(self, r): @skip_if_server_version_lt('2.8.0') def test_hscan(self, r): - r.hmset('a', {'a': 1, 'b': 2, 'c': 3}) + r.hset('a', mapping={'a': 1, 'b': 2, 'c': 3}) cursor, dic = r.hscan('a') assert cursor == 0 assert dic == {b'a': b'1', b'b': b'2', b'c': b'3'} @@ -1181,7 +1174,7 @@ def test_hscan(self, r): @skip_if_server_version_lt('2.8.0') def test_hscan_iter(self, r): - r.hmset('a', {'a': 1, 'b': 2, 'c': 3}) + r.hset('a', mapping={'a': 1, 'b': 2, 'c': 3}) dic = 
dict(r.hscan_iter('a')) assert dic == {b'a': b'1', b'b': b'2', b'c': b'3'} dic = dict(r.hscan_iter('a', match='a')) @@ -1350,12 +1343,8 @@ def test_zadd_incr(self, r): assert r.zadd('a', {'a1': 1}) == 1 assert r.zadd('a', {'a1': 4.5}, incr=True) == 5.5 - @skip_for_no_cluster_impl() + @pytest.mark.skip(reason="Cluster specific override") def test_zadd_incr_with_xx(self, r): - """ - Generated keys for slot - 0 : ['60ZE7', '8I2EQ', 'R8H1V', 'NJP6N', '0VI0A', '0CEIC', 'MV75A', 'TMKD9'] - """ # this asks zadd to incr 'a1' only if it exists, but it clearly # doesn't. Redis returns a null value in this case and so should # redis-py @@ -1386,113 +1375,87 @@ def test_zlexcount(self, r): assert r.zlexcount('a', '-', '+') == 7 assert r.zlexcount('a', '[b', '[f') == 5 + @pytest.mark.skip(reason="Cluster specific override") def test_zinterstore_sum(self, r): - """ - Generated keys for slot - 0 : ['60ZE7', '8I2EQ', 'R8H1V', 'NJP6N', '0VI0A', '0CEIC', 'MV75A', 'TMKD9'] - """ - r.zadd('60ZE7', {'a1': 1, 'a2': 1, 'a3': 1}) - r.zadd('8I2EQ', {'a1': 2, 'a2': 2, 'a3': 2}) - r.zadd('R8H1V', {'a1': 6, 'a3': 5, 'a4': 4}) - assert r.zinterstore('NJP6N', ['60ZE7', '8I2EQ', 'R8H1V']) == 2 - assert r.zrange('NJP6N', 0, -1, withscores=True) == \ + r.zadd('a', {'a1': 1, 'a2': 1, 'a3': 1}) + r.zadd('b', {'a1': 2, 'a2': 2, 'a3': 2}) + r.zadd('c', {'a1': 6, 'a3': 5, 'a4': 4}) + assert r.zinterstore('d', ['a', 'b', 'c']) == 2 + assert r.zrange('d', 0, -1, withscores=True) == \ [(b'a3', 8), (b'a1', 9)] + @pytest.mark.skip(reason="Cluster specific override") def test_zinterstore_max(self, r): - """ - Generated keys for slot - 0 : ['60ZE7', '8I2EQ', 'R8H1V', 'NJP6N', '0VI0A', '0CEIC', 'MV75A', 'TMKD9'] - """ - r.zadd('60ZE7', {'a1': 1, 'a2': 1, 'a3': 1}) - r.zadd('8I2EQ', {'a1': 2, 'a2': 2, 'a3': 2}) - r.zadd('R8H1V', {'a1': 6, 'a3': 5, 'a4': 4}) - assert r.zinterstore('NJP6N', ['60ZE7', '8I2EQ', 'R8H1V'], aggregate='MAX') == 2 - assert r.zrange('NJP6N', 0, -1, withscores=True) == \ + r.zadd('a', {'a1': 1, 'a2': 1, 'a3': 1}) + r.zadd('b', {'a1': 2, 'a2': 2, 'a3': 2}) + r.zadd('c', {'a1': 6, 'a3': 5, 'a4': 4}) + assert r.zinterstore('d', ['a', 'b', 'c'], aggregate='MAX') == 2 + assert r.zrange('d', 0, -1, withscores=True) == \ [(b'a3', 5), (b'a1', 6)] + @pytest.mark.skip(reason="Cluster specific override") def test_zinterstore_min(self, r): - """ - Generated keys for slot - 0 : ['60ZE7', '8I2EQ', 'R8H1V', 'NJP6N', '0VI0A', '0CEIC', 'MV75A', 'TMKD9'] - """ - r.zadd('60ZE7', {'a1': 1, 'a2': 2, 'a3': 3}) - r.zadd('8I2EQ', {'a1': 2, 'a2': 3, 'a3': 5}) - r.zadd('R8H1V', {'a1': 6, 'a3': 5, 'a4': 4}) - assert r.zinterstore('NJP6N', ['60ZE7', '8I2EQ', 'R8H1V'], aggregate='MIN') == 2 - assert r.zrange('NJP6N', 0, -1, withscores=True) == \ + r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 3}) + r.zadd('b', {'a1': 2, 'a2': 3, 'a3': 5}) + r.zadd('c', {'a1': 6, 'a3': 5, 'a4': 4}) + assert r.zinterstore('d', ['a', 'b', 'c'], aggregate='MIN') == 2 + assert r.zrange('d', 0, -1, withscores=True) == \ [(b'a1', 1), (b'a3', 3)] + @pytest.mark.skip(reason="Cluster specific override") def test_zinterstore_with_weight(self, r): - """ - Generated keys for slot - 0 : ['60ZE7', '8I2EQ', 'R8H1V', 'NJP6N', '0VI0A', '0CEIC', 'MV75A', 'TMKD9'] - """ - r.zadd('60ZE7', {'a1': 1, 'a2': 1, 'a3': 1}) - r.zadd('8I2EQ', {'a1': 2, 'a2': 2, 'a3': 2}) - r.zadd('R8H1V', {'a1': 6, 'a3': 5, 'a4': 4}) - assert r.zinterstore('NJP6N', {'60ZE7': 1, '8I2EQ': 2, 'R8H1V': 3}) == 2 - assert r.zrange('NJP6N', 0, -1, withscores=True) == \ + r.zadd('a', {'a1': 1, 'a2': 1, 'a3': 1}) + 
r.zadd('b', {'a1': 2, 'a2': 2, 'a3': 2}) + r.zadd('c', {'a1': 6, 'a3': 5, 'a4': 4}) + assert r.zinterstore('d', {'a': 1, 'b': 2, 'c': 3}) == 2 + assert r.zrange('d', 0, -1, withscores=True) == \ [(b'a3', 20), (b'a1', 23)] @skip_if_server_version_lt('4.9.0') + @pytest.mark.skip(reason="Cluster specific override") def test_zpopmax(self, r): - """ - Generated keys for slot - 0 : ['60ZE7', '8I2EQ', 'R8H1V', 'NJP6N', '0VI0A', '0CEIC', 'MV75A', 'TMKD9'] - """ - r.zadd('60ZE7', {'a1': 1, 'a2': 2, 'a3': 3}) - assert r.zpopmax('60ZE7') == [(b'a3', 3)] + r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 3}) + assert r.zpopmax('a') == [(b'a3', 3)] # with count - assert r.zpopmax('60ZE7', count=2) == \ + assert r.zpopmax('a', count=2) == \ [(b'a2', 2), (b'a1', 1)] @skip_if_server_version_lt('4.9.0') + @pytest.mark.skip(reason="Cluster specific override") def test_zpopmin(self, r): - """ - Generated keys for slot - 0 : ['60ZE7', '8I2EQ', 'R8H1V', 'NJP6N', '0VI0A', '0CEIC', 'MV75A', 'TMKD9'] - """ - r.zadd('60ZE7', {'a1': 1, 'a2': 2, 'a3': 3}) - assert r.zpopmin('60ZE7') == [(b'a1', 1)] + r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 3}) + assert r.zpopmin('a') == [(b'a1', 1)] # with count - assert r.zpopmin('60ZE7', count=2) == \ + assert r.zpopmin('a', count=2) == \ [(b'a2', 2), (b'a3', 3)] @skip_if_server_version_lt('4.9.0') @skip_for_no_cluster_impl() def test_bzpopmax(self, r): - """ - Generated keys for slot - 0 : ['60ZE7', '8I2EQ', 'R8H1V', 'NJP6N', '0VI0A', '0CEIC', 'MV75A', 'TMKD9'] - """ - r.zadd('60ZE7', {'a1': 1, 'a2': 2}) - r.zadd('8I2EQ', {'b1': 10, 'b2': 20}) - assert r.bzpopmax(['8I2EQ', '60ZE7'], timeout=1) == (b'b', b'b2', 20) - assert r.bzpopmax(['8I2EQ', '60ZE7'], timeout=1) == (b'b', b'b1', 10) - assert r.bzpopmax(['8I2EQ', '60ZE7'], timeout=1) == (b'a', b'a2', 2) - assert r.bzpopmax(['8I2EQ', '60ZE7'], timeout=1) == (b'a', b'a1', 1) - assert r.bzpopmax(['8I2EQ', '60ZE7'], timeout=1) is None - r.zadd('R8H1V', {'c1': 100}) - assert r.bzpopmax('R8H1V', timeout=1) == (b'c', b'c1', 100) + r.zadd('a', {'a1': 1, 'a2': 2}) + r.zadd('b', {'b1': 10, 'b2': 20}) + assert r.bzpopmax(['b', 'a'], timeout=1) == (b'b', b'b2', 20) + assert r.bzpopmax(['b', 'a'], timeout=1) == (b'b', b'b1', 10) + assert r.bzpopmax(['b', 'a'], timeout=1) == (b'a', b'a2', 2) + assert r.bzpopmax(['b', 'a'], timeout=1) == (b'a', b'a1', 1) + assert r.bzpopmax(['b', 'a'], timeout=1) is None + r.zadd('c', {'c1': 100}) + assert r.bzpopmax('c', timeout=1) == (b'c', b'c1', 100) @skip_if_server_version_lt('4.9.0') @skip_for_no_cluster_impl() def test_bzpopmin(self, r): - """ - Generated keys for slot - 0 : ['60ZE7', '8I2EQ', 'R8H1V', 'NJP6N', '0VI0A', '0CEIC', 'MV75A', 'TMKD9'] - """ - r.zadd('60ZE7', {'a1': 1, 'a2': 2}) - r.zadd('8I2EQ', {'b1': 10, 'b2': 20}) - assert r.bzpopmin(['8I2EQ', '60ZE7'], timeout=1) == (b'b', b'b1', 10) - assert r.bzpopmin(['8I2EQ', '60ZE7'], timeout=1) == (b'b', b'b2', 20) - assert r.bzpopmin(['8I2EQ', '60ZE7'], timeout=1) == (b'a', b'a1', 1) - assert r.bzpopmin(['8I2EQ', '60ZE7'], timeout=1) == (b'a', b'a2', 2) - assert r.bzpopmin(['8I2EQ', '60ZE7'], timeout=1) is None - r.zadd('R8H1V', {'c1': 100}) - assert r.bzpopmin('R8H1V', timeout=1) == (b'c', b'c1', 100) + r.zadd('a', {'a1': 1, 'a2': 2}) + r.zadd('b', {'b1': 10, 'b2': 20}) + assert r.bzpopmin(['b', 'a'], timeout=1) == (b'b', b'b1', 10) + assert r.bzpopmin(['b', 'a'], timeout=1) == (b'b', b'b2', 20) + assert r.bzpopmin(['b', 'a'], timeout=1) == (b'a', b'a1', 1) + assert r.bzpopmin(['b', 'a'], timeout=1) == (b'a', b'a2', 2) + assert r.bzpopmin(['b', 'a'], timeout=1) 
is None + r.zadd('c', {'c1': 100}) + assert r.bzpopmin('c', timeout=1) == (b'c', b'c1', 100) def test_zrange(self, r): r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 3}) @@ -1632,52 +1595,40 @@ def test_zscore(self, r): assert r.zscore('a', 'a2') == 2.0 assert r.zscore('a', 'a4') is None + @pytest.mark.skip(reason="Cluster specific override") def test_zunionstore_sum(self, r): - """ - Generated keys for slot - 0 : ['60ZE7', '8I2EQ', 'R8H1V', 'NJP6N', '0VI0A', '0CEIC', 'MV75A', 'TMKD9'] - """ - r.zadd('60ZE7', {'a1': 1, 'a2': 1, 'a3': 1}) - r.zadd('8I2EQ', {'a1': 2, 'a2': 2, 'a3': 2}) - r.zadd('R8H1V', {'a1': 6, 'a3': 5, 'a4': 4}) - assert r.zunionstore('NJP6N', ['60ZE7', '8I2EQ', 'R8H1V']) == 4 - assert r.zrange('NJP6N', 0, -1, withscores=True) == \ + r.zadd('a', {'a1': 1, 'a2': 1, 'a3': 1}) + r.zadd('b', {'a1': 2, 'a2': 2, 'a3': 2}) + r.zadd('c', {'a1': 6, 'a3': 5, 'a4': 4}) + assert r.zunionstore('d', ['a', 'b', 'c']) == 4 + assert r.zrange('d', 0, -1, withscores=True) == \ [(b'a2', 3), (b'a4', 4), (b'a3', 8), (b'a1', 9)] + @pytest.mark.skip(reason="Cluster specific override") def test_zunionstore_max(self, r): - """ - Generated keys for slot - 0 : ['60ZE7', '8I2EQ', 'R8H1V', 'NJP6N', '0VI0A', '0CEIC', 'MV75A', 'TMKD9'] - """ - r.zadd('60ZE7', {'a1': 1, 'a2': 1, 'a3': 1}) - r.zadd('8I2EQ', {'a1': 2, 'a2': 2, 'a3': 2}) - r.zadd('R8H1V', {'a1': 6, 'a3': 5, 'a4': 4}) - assert r.zunionstore('NJP6N', ['60ZE7', '8I2EQ', 'R8H1V'], aggregate='MAX') == 4 - assert r.zrange('NJP6N', 0, -1, withscores=True) == \ + r.zadd('a', {'a1': 1, 'a2': 1, 'a3': 1}) + r.zadd('b', {'a1': 2, 'a2': 2, 'a3': 2}) + r.zadd('c', {'a1': 6, 'a3': 5, 'a4': 4}) + assert r.zunionstore('d', ['a', 'b', 'c'], aggregate='MAX') == 4 + assert r.zrange('d', 0, -1, withscores=True) == \ [(b'a2', 2), (b'a4', 4), (b'a3', 5), (b'a1', 6)] + @pytest.mark.skip(reason="Cluster specific override") def test_zunionstore_min(self, r): - """ - Generated keys for slot - 0 : ['60ZE7', '8I2EQ', 'R8H1V', 'NJP6N', '0VI0A', '0CEIC', 'MV75A', 'TMKD9'] - """ - r.zadd('60ZE7', {'a1': 1, 'a2': 2, 'a3': 3}) - r.zadd('8I2EQ', {'a1': 2, 'a2': 2, 'a3': 4}) - r.zadd('R8H1V', {'a1': 6, 'a3': 5, 'a4': 4}) - assert r.zunionstore('NJP6N', ['60ZE7', '8I2EQ', 'R8H1V'], aggregate='MIN') == 4 - assert r.zrange('NJP6N', 0, -1, withscores=True) == \ + r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 3}) + r.zadd('b', {'a1': 2, 'a2': 2, 'a3': 4}) + r.zadd('c', {'a1': 6, 'a3': 5, 'a4': 4}) + assert r.zunionstore('d', ['a', 'b', 'c'], aggregate='MIN') == 4 + assert r.zrange('d', 0, -1, withscores=True) == \ [(b'a1', 1), (b'a2', 2), (b'a3', 3), (b'a4', 4)] + @pytest.mark.skip(reason="Cluster specific override") def test_zunionstore_with_weight(self, r): - """ - Generated keys for slot - 0 : ['60ZE7', '8I2EQ', 'R8H1V', 'NJP6N', '0VI0A', '0CEIC', 'MV75A', 'TMKD9'] - """ - r.zadd('60ZE7', {'a1': 1, 'a2': 1, 'a3': 1}) - r.zadd('8I2EQ', {'a1': 2, 'a2': 2, 'a3': 2}) - r.zadd('R8H1V', {'a1': 6, 'a3': 5, 'a4': 4}) - assert r.zunionstore('NJP6N', {'60ZE7': 1, '8I2EQ': 2, 'R8H1V': 3}) == 4 - assert r.zrange('NJP6N', 0, -1, withscores=True) == \ + r.zadd('a', {'a1': 1, 'a2': 1, 'a3': 1}) + r.zadd('b', {'a1': 2, 'a2': 2, 'a3': 2}) + r.zadd('c', {'a1': 6, 'a3': 5, 'a4': 4}) + assert r.zunionstore('d', {'a': 1, 'b': 2, 'c': 3}) == 4 + assert r.zrange('d', 0, -1, withscores=True) == \ [(b'a2', 5), (b'a4', 12), (b'a3', 20), (b'a1', 23)] # HYPERLOGLOG TESTS @@ -1714,7 +1665,7 @@ def test_pfmerge(self, r): # HASH COMMANDS def test_hget_and_hset(self, r): - r.hmset('a', {'1': 1, '2': 2, '3': 3}) + r.hset('a', 
mapping={'1': 1, '2': 2, '3': 3}) assert r.hget('a', '1') == b'1' assert r.hget('a', '2') == b'2' assert r.hget('a', '3') == b'3' @@ -1730,21 +1681,40 @@ def test_hget_and_hset(self, r): # key inside of hash that doesn't exist returns null value assert r.hget('a', 'b') is None + # keys with bool(key) == False + assert r.hset('a', 0, 10) == 1 + assert r.hset('a', '', 10) == 1 + + def test_hset_with_multi_key_values(self, r): + r.hset('a', mapping={'1': 1, '2': 2, '3': 3}) + assert r.hget('a', '1') == b'1' + assert r.hget('a', '2') == b'2' + assert r.hget('a', '3') == b'3' + + r.hset('b', "foo", "bar", mapping={'1': 1, '2': 2}) + assert r.hget('b', '1') == b'1' + assert r.hget('b', '2') == b'2' + assert r.hget('b', 'foo') == b'bar' + + def test_hset_without_data(self, r): + with pytest.raises(exceptions.DataError): + r.hset("x") + def test_hdel(self, r): - r.hmset('a', {'1': 1, '2': 2, '3': 3}) + r.hset('a', mapping={'1': 1, '2': 2, '3': 3}) assert r.hdel('a', '2') == 1 assert r.hget('a', '2') is None assert r.hdel('a', '1', '3') == 2 assert r.hlen('a') == 0 def test_hexists(self, r): - r.hmset('a', {'1': 1, '2': 2, '3': 3}) + r.hset('a', mapping={'1': 1, '2': 2, '3': 3}) assert r.hexists('a', '1') assert not r.hexists('a', '4') def test_hgetall(self, r): h = {b'a1': b'1', b'a2': b'2', b'a3': b'3'} - r.hmset('a', h) + r.hset('a', mapping=h) assert r.hgetall('a') == h def test_hincrby(self, r): @@ -1760,22 +1730,26 @@ def test_hincrbyfloat(self, r): def test_hkeys(self, r): h = {b'a1': b'1', b'a2': b'2', b'a3': b'3'} - r.hmset('a', h) + r.hset('a', mapping=h) local_keys = list(iterkeys(h)) remote_keys = r.hkeys('a') assert (sorted(local_keys) == sorted(remote_keys)) def test_hlen(self, r): - r.hmset('a', {'1': 1, '2': 2, '3': 3}) + r.hset('a', mapping={'1': 1, '2': 2, '3': 3}) assert r.hlen('a') == 3 def test_hmget(self, r): - assert r.hmset('a', {'a': 1, 'b': 2, 'c': 3}) + assert r.hset('a', mapping={'a': 1, 'b': 2, 'c': 3}) assert r.hmget('a', 'a', 'b', 'c') == [b'1', b'2', b'3'] + @pytest.mark.skip(reason="Cluster specific override") def test_hmset(self, r): + warning_message = (r'^Redis\.hmset\(\) is deprecated\. 
' + r'Use Redis\.hset\(\) instead\.$') h = {b'a': b'1', b'b': b'2', b'c': b'3'} - assert r.hmset('a', h) + with pytest.warns(DeprecationWarning, match=warning_message): + assert r.hmset('a', h) assert r.hgetall('a') == h def test_hsetnx(self, r): @@ -1787,14 +1761,14 @@ def test_hsetnx(self, r): def test_hvals(self, r): h = {b'a1': b'1', b'a2': b'2', b'a3': b'3'} - r.hmset('a', h) + r.hset('a', mapping=h) local_vals = list(itervalues(h)) remote_vals = r.hvals('a') assert sorted(local_vals) == sorted(remote_vals) @skip_if_server_version_lt('3.2.0') def test_hstrlen(self, r): - r.hmset('a', {'1': '22', '2': '333'}) + r.hset('a', mapping={'1': '22', '2': '333'}) assert r.hstrlen('a', '1') == 2 assert r.hstrlen('a', '2') == 3 @@ -1890,21 +1864,14 @@ def test_sort_alpha(self, r): assert r.sort('a', alpha=True) == \ [b'a', b'b', b'c', b'd', b'e'] + @pytest.mark.skip(reason="Sort works if done against keys in same slot") def test_sort_store(self, r): - """ - Generated keys for slot - 0 : ['60ZE7', '8I2EQ', 'R8H1V', 'NJP6N', '0VI0A', '0CEIC', 'MV75A', 'TMKD9'] - """ - r.rpush('60ZE7', '2', '3', '1') - assert r.sort('60ZE7', store='8I2EQ') == 3 - assert r.lrange('8I2EQ', 0, -1) == [b'1', b'2', b'3'] + r.rpush('a', '2', '3', '1') + assert r.sort('a', store='sorted_values') == 3 + assert r.lrange('sorted_values', 0, -1) == [b'1', b'2', b'3'] @skip_for_no_cluster_impl() def test_sort_all_options(self, r): - """ - Generated keys for slot - 0 : ['60ZE7', '8I2EQ', 'R8H1V', 'NJP6N', '0VI0A', '0CEIC', 'MV75A', 'TMKD9'] - """ r['user:1:username'] = 'zeus' r['user:2:username'] = 'titan' r['user:3:username'] = 'hermes' @@ -2001,6 +1968,31 @@ def test_cluster_slaves(self, mock_cluster_resp_slaves): assert isinstance(mock_cluster_resp_slaves.cluster( 'slaves', 'nodeid'), dict) + @skip_if_server_version_lt('3.0.0') + @skip_for_no_cluster_impl() + def test_readwrite(self, r): + """ + FIXME: Needs cluster impelmentation + """ + assert r.readwrite() + + @skip_if_server_version_lt('3.0.0') + @skip_for_no_cluster_impl() + def test_readonly_invalid_cluster_state(self, r): + """ + FIXME: Needs cluster impelmentation + """ + with pytest.raises(exceptions.RedisError): + r.readonly() + + @skip_if_server_version_lt('3.0.0') + @skip_for_no_cluster_impl() + def test_readonly(self, mock_cluster_resp_ok): + """ + FIXME: Needs cluster impelmentation + """ + assert mock_cluster_resp_ok.readonly() is True + # GEO COMMANDS @skip_if_server_version_lt('3.2.0') def test_geoadd(self, r): @@ -2048,9 +2040,10 @@ def test_geohash(self, r): (2.1873744593677, 41.406342043777, 'place2') r.geoadd('barcelona', *values) - assert r.geohash('barcelona', 'place1', 'place2') ==\ - ['sp3e9yg3kd0', 'sp3e9cbc3t0'] + assert r.geohash('barcelona', 'place1', 'place2', 'place3') ==\ + ['sp3e9yg3kd0', 'sp3e9cbc3t0', None] + @skip_unless_arch_bits(64) @skip_if_server_version_lt('3.2.0') def test_geopos(self, r): values = (2.1909389952632, 41.433791470673, 'place1') +\ @@ -2094,7 +2087,7 @@ def test_georadius_units(self, r): (2.1873744593677, 41.406342043777, 'place2') r.geoadd('barcelona', *values) - assert r.georadius(b'barcelona', 2.191, 41.433, 1, unit='km') ==\ + assert r.georadius('barcelona', 2.191, 41.433, 1, unit='km') ==\ [b'place1'] @skip_unless_arch_bits(64) @@ -2132,7 +2125,7 @@ def test_georadius_count(self, r): (2.1873744593677, 41.406342043777, 'place2') r.geoadd('barcelona', *values) - assert r.georadius(b'barcelona', 2.191, 41.433, 3000, count=1) ==\ + assert r.georadius('barcelona', 2.191, 41.433, 3000, count=1) ==\ [b'place1'] 
@skip_if_server_version_lt('3.2.0') @@ -2147,33 +2140,29 @@ def test_georadius_sort(self, r): [b'place2', b'place1'] @skip_if_server_version_lt('3.2.0') + @pytest.mark.skip(reason="Cluster specific override") def test_georadius_store(self, r): - """ - Generated keys for slot - 0 : ['60ZE7', '8I2EQ'] - """ values = (2.1909389952632, 41.433791470673, 'place1') +\ (2.1873744593677, 41.406342043777, 'place2') - r.geoadd('60ZE7', *values) - r.georadius('60ZE7', 2.191, 41.433, 1000, store='8I2EQ') - assert r.zrange('8I2EQ', 0, -1) == [b'place1'] + r.geoadd('barcelona', *values) + r.georadius('barcelona', 2.191, 41.433, 1000, store='places_barcelona') + assert r.zrange('places_barcelona', 0, -1) == [b'place1'] + @skip_unless_arch_bits(64) @skip_if_server_version_lt('3.2.0') + @pytest.mark.skip(reason="Cluster specific override") def test_georadius_store_dist(self, r): - """ - Generated keys for slot - 0 : ['60ZE7', '8I2EQ'] - """ values = (2.1909389952632, 41.433791470673, 'place1') +\ (2.1873744593677, 41.406342043777, 'place2') - r.geoadd('60ZE7', *values) - r.georadius('60ZE7', 2.191, 41.433, 1000, - store_dist='8I2EQ') + r.geoadd('barcelona', *values) + r.georadius('barcelona', 2.191, 41.433, 1000, + store_dist='places_barcelona') # instead of save the geo score, the distance is saved. - assert r.zscore('8I2EQ', 'place1') == 88.05060698409301 + assert r.zscore('places_barcelona', 'place1') == 88.05060698409301 + @skip_unless_arch_bits(64) @skip_if_server_version_lt('3.2.0') def test_georadiusmember(self, r): values = (2.1909389952632, 41.433791470673, 'place1') +\ @@ -2209,7 +2198,7 @@ def test_xack(self, r): assert r.xack(stream, group, m1) == 0 r.xgroup_create(stream, group, 0) - r.xreadgroup(group, consumer, streams={stream: 0}) + r.xreadgroup(group, consumer, streams={stream: '>'}) # xack returns the number of ack'd elements assert r.xack(stream, group, m1) == 1 assert r.xack(stream, group, m2, m3) == 2 @@ -2537,7 +2526,7 @@ def test_xread(self, r): expected = [ [ - stream, + stream.encode(), [ get_stream_message(r, stream, m1), get_stream_message(r, stream, m2), @@ -2549,7 +2538,7 @@ def test_xread(self, r): expected = [ [ - stream, + stream.encode(), [ get_stream_message(r, stream, m1), ] @@ -2560,7 +2549,7 @@ def test_xread(self, r): expected = [ [ - stream, + stream.encode(), [ get_stream_message(r, stream, m2), ] @@ -2688,7 +2677,6 @@ def test_xtrim(self, r): # 1 message is trimmed assert r.xtrim(stream, 3, approximate=False) == 1 - @skip_if_server_version_lt('3.2.0') def test_bitfield_operations(self, r): # comments show affected bits bf = r.bitfield('a') @@ -2751,140 +2739,29 @@ def test_bitfield_operations(self, r): .execute()) assert resp == [0, None, 255] + @skip_if_server_version_lt('4.0.0') + @skip_for_no_cluster_impl() + def test_memory_stats(self, r): + """ + FIXME: Needs cluster implementation + """ + # put a key into the current db to make sure that "db." 
+ # has data + r.set('foo', 'bar') + stats = r.memory_stats() + assert isinstance(stats, dict) + for key, value in iteritems(stats): + if key.startswith('db.'): + assert isinstance(value, dict) + @skip_if_server_version_lt('4.0.0') def test_memory_usage(self, r): r.set('foo', 'bar') assert isinstance(r.memory_usage('foo'), int) -class TestRedisCommandsSort(object): - # SORT - def test_sort_basic(self, r): - r.rpush('a', '3', '2', '1', '4') - assert r.sort('a') == [b'1', b'2', b'3', b'4'] - - def test_sort_limited(self, r): - r.rpush('a', '3', '2', '1', '4') - assert r.sort('a', start=1, num=2) == [b'2', b'3'] - - @pytest.mark.skip(reason="Sort works if done against keys in same slot") - def test_sort_by(self, r): - r['score:1'] = 8 - r['score:2'] = 3 - r['score:3'] = 5 - r.rpush('a', '3', '2', '1') - assert r.sort('a', by='score:*') == [b'2', b'3', b'1'] - - @pytest.mark.skip(reason="Sort works if done against keys in same slot") - def test_sort_get(self, r): - r['user:1'] = 'u1' - r['user:2'] = 'u2' - r['user:3'] = 'u3' - r.rpush('a', '2', '3', '1') - assert r.sort('a', get='user:*') == [b'u1', b'u2', b'u3'] - - @pytest.mark.skip(reason="Sort works if done against keys in same slot") - def test_sort_get_multi(self, r): - r['user:1'] = 'u1' - r['user:2'] = 'u2' - r['user:3'] = 'u3' - r.rpush('a', '2', '3', '1') - assert r.sort('a', get=('user:*', '#')) == \ - [b'u1', b'1', b'u2', b'2', b'u3', b'3'] - - @pytest.mark.skip(reason="Sort works if done against keys in same slot") - def test_sort_get_groups_two(self, r): - r['user:1'] = 'u1' - r['user:2'] = 'u2' - r['user:3'] = 'u3' - r.rpush('a', '2', '3', '1') - assert r.sort('a', get=('user:*', '#'), groups=True) == \ - [(b'u1', b'1'), (b'u2', b'2'), (b'u3', b'3')] - - def test_sort_groups_string_get(self, r): - r['user:1'] = 'u1' - r['user:2'] = 'u2' - r['user:3'] = 'u3' - r.rpush('a', '2', '3', '1') - with pytest.raises(DataError): - r.sort('a', get='user:*', groups=True) - - def test_sort_groups_just_one_get(self, r): - r['user:1'] = 'u1' - r['user:2'] = 'u2' - r['user:3'] = 'u3' - r.rpush('a', '2', '3', '1') - with pytest.raises(DataError): - r.sort('a', get=['user:*'], groups=True) - - def test_sort_groups_no_get(self, r): - r['user:1'] = 'u1' - r['user:2'] = 'u2' - r['user:3'] = 'u3' - r.rpush('a', '2', '3', '1') - with pytest.raises(DataError): - r.sort('a', groups=True) - - @pytest.mark.skip(reason="Sort works if done against keys in same slot") - def test_sort_groups_three_gets(self, r): - r['user:1'] = 'u1' - r['user:2'] = 'u2' - r['user:3'] = 'u3' - r['door:1'] = 'd1' - r['door:2'] = 'd2' - r['door:3'] = 'd3' - r.rpush('a', '2', '3', '1') - assert r.sort('a', get=('user:*', 'door:*', '#'), groups=True) == [ - (b'u1', b'd1', b'1'), - (b'u2', b'd2', b'2'), - (b'u3', b'd3', b'3') - ] - - def test_sort_desc(self, r): - r.rpush('a', '2', '3', '1') - assert r.sort('a', desc=True) == [b'3', b'2', b'1'] - - def test_sort_alpha(self, r): - r.rpush('a', 'e', 'c', 'b', 'd', 'a') - assert r.sort('a', alpha=True) == \ - [b'a', b'b', b'c', b'd', b'e'] - - @pytest.mark.skip(reason="Sort works if done against keys in same slot") - def test_sort_store(self, r): - r.rpush('a', '2', '3', '1') - assert r.sort('a', store='sorted_values') == 3 - assert r.lrange('sorted_values', 0, -1) == [b'1', b'2', b'3'] - - @pytest.mark.skip(reason="Sort works if done against keys in same slot") - def test_sort_all_options(self, r): - r['user:1:username'] = 'zeus' - r['user:2:username'] = 'titan' - r['user:3:username'] = 'hermes' - r['user:4:username'] = 'hercules' - 
r['user:5:username'] = 'apollo' - r['user:6:username'] = 'athena' - r['user:7:username'] = 'hades' - r['user:8:username'] = 'dionysus' - - r['user:1:favorite_drink'] = 'yuengling' - r['user:2:favorite_drink'] = 'rum' - r['user:3:favorite_drink'] = 'vodka' - r['user:4:favorite_drink'] = 'milk' - r['user:5:favorite_drink'] = 'pinot noir' - r['user:6:favorite_drink'] = 'water' - r['user:7:favorite_drink'] = 'gin' - r['user:8:favorite_drink'] = 'apple juice' - - r.rpush('gods', '5', '8', '3', '1', '2', '7', '6', '4') - num = r.sort('gods', start=2, num=4, by='user:*:username', - get='user:*:favorite_drink', desc=True, alpha=True, - store='sorted') - assert num == 4 - assert r.lrange('sorted', 0, 10) == \ - [b'vodka', b'milk', b'gin', b'apple juice'] - - class TestBinarySave(object): + def test_binary_get_set(self, r): assert r.set(' foo bar ', '123') assert r.get(' foo bar ') == b'123' @@ -2913,13 +2790,13 @@ def test_binary_lists(self, r): r.rpush(key, *value) # check that KEYS returns all the keys as they are - assert sorted(r.keys('*')) == sorted(list(iterkeys(mapping))) + assert sorted(r.keys('*')) == sorted(iterkeys(mapping)) # check that it is possible to get list content by key name for key, value in iteritems(mapping): assert r.lrange(key, 0, -1) == value - def test_22_info(self): + def test_22_info(self, r): """ Older Redis versions contained 'allocation_stats' in INFO that was the cause of a number of bugs when parsing. @@ -2955,8 +2832,8 @@ def test_22_info(self): def test_large_responses(self, r): "The PythonParser has some special cases for return values > 1MB" - # load up 100K of data into a key - data = ''.join([ascii_letters] * (100000 // len(ascii_letters))) + # load up 5MB of data into a key + data = ''.join([ascii_letters] * (5000000 // len(ascii_letters))) r['a'] = data assert r['a'] == data.encode() diff --git a/tests/test_commands_cluster.py b/tests/test_commands_cluster.py new file mode 100644 index 00000000..db32c1a6 --- /dev/null +++ b/tests/test_commands_cluster.py @@ -0,0 +1,460 @@ +# -*- coding: utf-8 -*- + +# python std lib +from __future__ import unicode_literals +import datetime + +# rediscluster imports +import rediscluster +from rediscluster.exceptions import RedisClusterException +from rediscluster.utils import dict_merge +from .conftest import ( + skip_if_server_version_lt, + skip_if_redis_py_version_lt, + skip_if_server_version_gte, + skip_for_no_cluster_impl, + skip_unless_arch_bits, + REDIS_6_VERSION, +) + +# 3rd party imports +import pytest +from redis.exceptions import RedisError +from redis import exceptions + + +def redis_server_time(client): + all_clients_time = client.time() + for server_id, server_time_data in all_clients_time.items(): + if '7000' in server_id: + seconds, milliseconds = server_time_data + + timestamp = float('%s.%s' % (seconds, milliseconds)) + return datetime.datetime.fromtimestamp(timestamp) + + +def get_main_cluster_node_data(command_result): + """ + Tries to find whatever node is running on port :7000 in the cluster resonse + """ + for node_id, node_data in command_result.items(): + if '7000' in node_id: + return node_data + return None + + +# RESPONSE CALLBACKS +class TestResponseCallbacksCluster(object): + "Tests for the response callback system" + + def test_response_callbacks(self, r): + all_response_callbacks = dict_merge( + rediscluster.RedisCluster.RESPONSE_CALLBACKS, + rediscluster.RedisCluster.CLUSTER_COMMANDS_RESPONSE_CALLBACKS, + ) + + assert r.response_callbacks == all_response_callbacks + assert 
id(r.response_callbacks) != id(all_response_callbacks) + r.set_response_callback('GET', lambda x: 'static') + r['a'] = 'foo' + assert r['a'] == 'static' + + +class TestRedisCommandsCluster(object): + + # SERVER INFORMATION + def test_client_list(self, r): + clients = r.client_list() + client_data = get_main_cluster_node_data(clients)[0] + assert isinstance(client_data, dict) + assert 'addr' in client_data + + @skip_if_server_version_lt('5.0.0') + def test_client_list_type(self, r): + with pytest.raises(exceptions.RedisError): + r.client_list(_type='not a client type') + for client_type in ['normal', 'master', 'replica', 'pubsub']: + clients = get_main_cluster_node_data(r.client_list(_type=client_type)) + assert isinstance(clients, list) + + @skip_if_server_version_lt('5.0.0') + def test_client_id(self, r): + assert get_main_cluster_node_data(r.client_id()) > 0 + + @skip_if_server_version_lt('5.0.0') + def test_client_unblock(self, r): + myid = get_main_cluster_node_data(r.client_id()) + assert not r.client_unblock(myid) + assert not r.client_unblock(myid, error=True) + assert not r.client_unblock(myid, error=False) + + @skip_if_server_version_lt('2.6.9') + def test_client_getname(self, r): + assert get_main_cluster_node_data(r.client_getname()) is None + + def test_config_get(self, r): + data = get_main_cluster_node_data(r.config_get()) + assert 'maxmemory' in data + assert data['maxmemory'].isdigit() + + def test_config_resetstat(self, r): + r.ping() + prior_commands_processed = int(get_main_cluster_node_data(r.info())['total_commands_processed']) + assert prior_commands_processed >= 1 + r.config_resetstat() + reset_commands_processed = int(get_main_cluster_node_data(r.info())['total_commands_processed']) + assert reset_commands_processed < prior_commands_processed + + def test_config_set(self, r): + data = get_main_cluster_node_data(r.config_get()) + rdbname = data['dbfilename'] + try: + assert r.config_set('dbfilename', 'redis_py_test.rdb') + assert get_main_cluster_node_data(r.config_get())['dbfilename'] == 'redis_py_test.rdb' + finally: + assert r.config_set('dbfilename', rdbname) + + def test_dbsize(self, r): + r['a'] = 'foo' + r['b'] = 'bar' + # Count all commands sent to the DB. 
Since we have one slave + # for every master we will look for 4 and not 2 + dbsize_sum = sum([db_size_count for node_id, db_size_count in r.dbsize().items()]) + assert dbsize_sum == 4 + + def test_echo(self, r): + assert get_main_cluster_node_data(r.echo('foo bar')) == b'foo bar' + + def test_info(self, r): + r['a'] = 'foo' + r['b'] = 'bar' + info = get_main_cluster_node_data(r.info()) + assert isinstance(info, dict) + # We only have a "db0" in cluster mode and only one of the commands will bind to node :7000 + assert info['db0']['keys'] == 1 + # Sum all keys in all slots + keys_sum = sum([node_data.get('db0', {}).get('keys', 0) for node_id, node_data in r.info().items()]) + assert keys_sum == 4 + + def test_lastsave(self, r): + assert isinstance(get_main_cluster_node_data(r.lastsave()), datetime.datetime) + + @skip_if_server_version_lt('2.6.0') + def test_time(self, r): + t = get_main_cluster_node_data(r.time()) + assert len(t) == 2 + assert isinstance(t[0], int) + assert isinstance(t[1], int) + + # FIXME: Move this method to a more generic solution/method that tests the blocked nodes flags feature + def test_bitop_not_supported(self, r): + """ + Validate that the command is blocked in cluster mode and throws an Exception + """ + r['a'] = '' + with pytest.raises(RedisClusterException): + r.bitop('not', 'r', 'a') + + def test_exists(self, r): + """ + Keys need to be in specific slots to work out + """ + assert r.exists('a') == 0 + r['G0B96'] = 'foo' + r['TEFX5'] = 'bar' + assert r.exists('G0B96') == 1 + assert r.exists('G0B96', 'TEFX5') == 2 + + def test_blpop(self, r): + """ + Generated keys for slot + 16299: ['0J8KD', '822JO', '8TJPT', 'HD644', 'SKUCM', 'N4N5Z', 'NRSWJ'] + """ + r.rpush('0J8KD', '1', '2') + r.rpush('822JO', '3', '4') + assert r.blpop(['822JO', '0J8KD'], timeout=1) == (b'822JO', b'3') + assert r.blpop(['822JO', '0J8KD'], timeout=1) == (b'822JO', b'4') + assert r.blpop(['822JO', '0J8KD'], timeout=1) == (b'0J8KD', b'1') + assert r.blpop(['822JO', '0J8KD'], timeout=1) == (b'0J8KD', b'2') + assert r.blpop(['822JO', '0J8KD'], timeout=1) is None + r.rpush('c', '1') + assert r.blpop('c', timeout=1) == (b'c', b'1') + + def test_brpop(self, r): + """ + Generated keys for slot + 16299: ['0J8KD', '822JO', '8TJPT', 'HD644', 'SKUCM', 'N4N5Z', 'NRSWJ'] + """ + r.rpush('0J8KD', '1', '2') + r.rpush('822JO', '3', '4') + assert r.brpop(['822JO', '0J8KD'], timeout=1) == (b'822JO', b'4') + assert r.brpop(['822JO', '0J8KD'], timeout=1) == (b'822JO', b'3') + assert r.brpop(['822JO', '0J8KD'], timeout=1) == (b'0J8KD', b'2') + assert r.brpop(['822JO', '0J8KD'], timeout=1) == (b'0J8KD', b'1') + assert r.brpop(['822JO', '0J8KD'], timeout=1) is None + r.rpush('c', '1') + assert r.brpop('c', timeout=1) == (b'c', b'1') + + @skip_if_server_version_lt('2.8.0') + def test_scan(self, r): + """ + Test is adapted for a same slot scenario in a clustered environment. + + FIXME: Add test for cross slot functionality test + + Generated keys for slot + 0 : ['GQ5KU', 'IFWJL', 'X582D'] + """ + r.set('GQ5KU', 1) + r.set('IFWJL', 2) + r.set('X582D', 3) + cursor, keys = get_main_cluster_node_data(r.scan()) + assert cursor == 0 + assert set(keys) == {b'GQ5KU', b'IFWJL', b'X582D'} + _, keys = get_main_cluster_node_data(r.scan(match='GQ5KU')) + assert set(keys) == {b'GQ5KU'} + + @skip_if_server_version_lt(REDIS_6_VERSION) + def test_scan_type(self, r): + """ + Test is adapted for a same slot scenario in a clustered environment. 
+ + FIXME: Add test for cross slot functionality test + + Generated keys for slot + 0 : ['GQ5KU', 'IFWJL', 'X582D'] + """ + r.sadd('GQ5KU', 1) + r.hset('IFWJL', 'foo', 2) + r.lpush('X582D', 'aux', 3) + _, keys = get_main_cluster_node_data(r.scan(match='G*', _type='SET')) + assert set(keys) == {b'GQ5KU'} + + def test_zadd_incr_with_xx(self, r): + """ + Generated keys for slot + 0 : ['60ZE7', '8I2EQ', 'R8H1V', 'NJP6N', '0VI0A', '0CEIC', 'MV75A', 'TMKD9'] + """ + # this asks zadd to incr 'a1' only if it exists, but it clearly + # doesn't. Redis returns a null value in this case and so should + # redis-py + assert r.zadd('a', {'a1': 1}, xx=True, incr=True) is None + + def test_zinterstore_sum(self, r): + """ + Generated keys for slot + 0 : ['60ZE7', '8I2EQ', 'R8H1V', 'NJP6N', '0VI0A', '0CEIC', 'MV75A', 'TMKD9'] + """ + r.zadd('60ZE7', {'a1': 1, 'a2': 1, 'a3': 1}) + r.zadd('8I2EQ', {'a1': 2, 'a2': 2, 'a3': 2}) + r.zadd('R8H1V', {'a1': 6, 'a3': 5, 'a4': 4}) + assert r.zinterstore('NJP6N', ['60ZE7', '8I2EQ', 'R8H1V']) == 2 + assert r.zrange('NJP6N', 0, -1, withscores=True) == \ + [(b'a3', 8), (b'a1', 9)] + + def test_zinterstore_max(self, r): + """ + Generated keys for slot + 0 : ['60ZE7', '8I2EQ', 'R8H1V', 'NJP6N', '0VI0A', '0CEIC', 'MV75A', 'TMKD9'] + """ + r.zadd('60ZE7', {'a1': 1, 'a2': 1, 'a3': 1}) + r.zadd('8I2EQ', {'a1': 2, 'a2': 2, 'a3': 2}) + r.zadd('R8H1V', {'a1': 6, 'a3': 5, 'a4': 4}) + assert r.zinterstore('NJP6N', ['60ZE7', '8I2EQ', 'R8H1V'], aggregate='MAX') == 2 + assert r.zrange('NJP6N', 0, -1, withscores=True) == \ + [(b'a3', 5), (b'a1', 6)] + + def test_zinterstore_min(self, r): + """ + Generated keys for slot + 0 : ['60ZE7', '8I2EQ', 'R8H1V', 'NJP6N', '0VI0A', '0CEIC', 'MV75A', 'TMKD9'] + """ + r.zadd('60ZE7', {'a1': 1, 'a2': 2, 'a3': 3}) + r.zadd('8I2EQ', {'a1': 2, 'a2': 3, 'a3': 5}) + r.zadd('R8H1V', {'a1': 6, 'a3': 5, 'a4': 4}) + assert r.zinterstore('NJP6N', ['60ZE7', '8I2EQ', 'R8H1V'], aggregate='MIN') == 2 + assert r.zrange('NJP6N', 0, -1, withscores=True) == \ + [(b'a1', 1), (b'a3', 3)] + + def test_zinterstore_with_weight(self, r): + """ + Generated keys for slot + 0 : ['60ZE7', '8I2EQ', 'R8H1V', 'NJP6N', '0VI0A', '0CEIC', 'MV75A', 'TMKD9'] + """ + r.zadd('60ZE7', {'a1': 1, 'a2': 1, 'a3': 1}) + r.zadd('8I2EQ', {'a1': 2, 'a2': 2, 'a3': 2}) + r.zadd('R8H1V', {'a1': 6, 'a3': 5, 'a4': 4}) + assert r.zinterstore('NJP6N', {'60ZE7': 1, '8I2EQ': 2, 'R8H1V': 3}) == 2 + assert r.zrange('NJP6N', 0, -1, withscores=True) == \ + [(b'a3', 20), (b'a1', 23)] + + @skip_if_server_version_lt('4.9.0') + def test_zpopmax(self, r): + """ + Generated keys for slot + 0 : ['60ZE7', '8I2EQ', 'R8H1V', 'NJP6N', '0VI0A', '0CEIC', 'MV75A', 'TMKD9'] + """ + r.zadd('60ZE7', {'a1': 1, 'a2': 2, 'a3': 3}) + assert r.zpopmax('60ZE7') == [(b'a3', 3)] + + # with count + assert r.zpopmax('60ZE7', count=2) == \ + [(b'a2', 2), (b'a1', 1)] + + @skip_if_server_version_lt('4.9.0') + def test_zpopmin(self, r): + """ + Generated keys for slot + 0 : ['60ZE7', '8I2EQ', 'R8H1V', 'NJP6N', '0VI0A', '0CEIC', 'MV75A', 'TMKD9'] + """ + r.zadd('60ZE7', {'a1': 1, 'a2': 2, 'a3': 3}) + assert r.zpopmin('60ZE7') == [(b'a1', 1)] + + # with count + assert r.zpopmin('60ZE7', count=2) == \ + [(b'a2', 2), (b'a3', 3)] + + def test_zunionstore_sum(self, r): + """ + Generated keys for slot + 0 : ['60ZE7', '8I2EQ', 'R8H1V', 'NJP6N', '0VI0A', '0CEIC', 'MV75A', 'TMKD9'] + """ + r.zadd('60ZE7', {'a1': 1, 'a2': 1, 'a3': 1}) + r.zadd('8I2EQ', {'a1': 2, 'a2': 2, 'a3': 2}) + r.zadd('R8H1V', {'a1': 6, 'a3': 5, 'a4': 4}) + assert 
r.zunionstore('NJP6N', ['60ZE7', '8I2EQ', 'R8H1V']) == 4 + assert r.zrange('NJP6N', 0, -1, withscores=True) == \ + [(b'a2', 3), (b'a4', 4), (b'a3', 8), (b'a1', 9)] + + def test_zunionstore_max(self, r): + """ + Generated keys for slot + 0 : ['60ZE7', '8I2EQ', 'R8H1V', 'NJP6N', '0VI0A', '0CEIC', 'MV75A', 'TMKD9'] + """ + r.zadd('60ZE7', {'a1': 1, 'a2': 1, 'a3': 1}) + r.zadd('8I2EQ', {'a1': 2, 'a2': 2, 'a3': 2}) + r.zadd('R8H1V', {'a1': 6, 'a3': 5, 'a4': 4}) + assert r.zunionstore('NJP6N', ['60ZE7', '8I2EQ', 'R8H1V'], aggregate='MAX') == 4 + assert r.zrange('NJP6N', 0, -1, withscores=True) == \ + [(b'a2', 2), (b'a4', 4), (b'a3', 5), (b'a1', 6)] + + def test_zunionstore_min(self, r): + """ + Generated keys for slot + 0 : ['60ZE7', '8I2EQ', 'R8H1V', 'NJP6N', '0VI0A', '0CEIC', 'MV75A', 'TMKD9'] + """ + r.zadd('60ZE7', {'a1': 1, 'a2': 2, 'a3': 3}) + r.zadd('8I2EQ', {'a1': 2, 'a2': 2, 'a3': 4}) + r.zadd('R8H1V', {'a1': 6, 'a3': 5, 'a4': 4}) + assert r.zunionstore('NJP6N', ['60ZE7', '8I2EQ', 'R8H1V'], aggregate='MIN') == 4 + assert r.zrange('NJP6N', 0, -1, withscores=True) == \ + [(b'a1', 1), (b'a2', 2), (b'a3', 3), (b'a4', 4)] + + def test_zunionstore_with_weight(self, r): + """ + Generated keys for slot + 0 : ['60ZE7', '8I2EQ', 'R8H1V', 'NJP6N', '0VI0A', '0CEIC', 'MV75A', 'TMKD9'] + """ + r.zadd('60ZE7', {'a1': 1, 'a2': 1, 'a3': 1}) + r.zadd('8I2EQ', {'a1': 2, 'a2': 2, 'a3': 2}) + r.zadd('R8H1V', {'a1': 6, 'a3': 5, 'a4': 4}) + assert r.zunionstore('NJP6N', {'60ZE7': 1, '8I2EQ': 2, 'R8H1V': 3}) == 4 + assert r.zrange('NJP6N', 0, -1, withscores=True) == \ + [(b'a2', 5), (b'a4', 12), (b'a3', 20), (b'a1', 23)] + + def test_hmset(self, r): + """ + Warning message is different in a RedisCluster instance + """ + warning_message = (r'^RedisCluster\.hmset\(\) is deprecated\. ' + r'Use RedisCluster\.hset\(\) instead\.$') + h = {b'a': b'1', b'b': b'2', b'c': b'3'} + with pytest.warns(DeprecationWarning, match=warning_message): + assert r.hmset('a', h) + assert r.hgetall('a') == h + + def test_sort_store(self, r): + """ + Generated keys for slot + 0 : ['60ZE7', '8I2EQ', 'R8H1V', 'NJP6N', '0VI0A', '0CEIC', 'MV75A', 'TMKD9'] + """ + r.rpush('60ZE7', '2', '3', '1') + assert r.sort('60ZE7', store='8I2EQ') == 3 + assert r.lrange('8I2EQ', 0, -1) == [b'1', b'2', b'3'] + + @skip_if_server_version_lt('3.2.0') + def test_georadius_store(self, r): + """ + Generated keys for slot + 0 : ['60ZE7', '8I2EQ'] + """ + values = (2.1909389952632, 41.433791470673, 'place1') +\ + (2.1873744593677, 41.406342043777, 'place2') + + r.geoadd('60ZE7', *values) + r.georadius('60ZE7', 2.191, 41.433, 1000, store='8I2EQ') + assert r.zrange('8I2EQ', 0, -1) == [b'place1'] + + @skip_if_server_version_lt('3.2.0') + def test_georadius_store_dist(self, r): + """ + Generated keys for slot + 0 : ['60ZE7', '8I2EQ'] + """ + values = (2.1909389952632, 41.433791470673, 'place1') +\ + (2.1873744593677, 41.406342043777, 'place2') + + r.geoadd('60ZE7', *values) + r.georadius('60ZE7', 2.191, 41.433, 1000, + store_dist='8I2EQ') + # instead of save the geo score, the distance is saved. 
+ assert r.zscore('8I2EQ', 'place1') == 88.05060698409301 + + @pytest.mark.skip(reason="Sort works if done against keys in same slot") + def test_sort_by(self, r): + r['score:1'] = 8 + r['score:2'] = 3 + r['score:3'] = 5 + r.rpush('a', '3', '2', '1') + assert r.sort('a', by='score:*') == [b'2', b'3', b'1'] + + @pytest.mark.skip(reason="Sort works if done against keys in same slot") + def test_sort_get(self, r): + r['user:1'] = 'u1' + r['user:2'] = 'u2' + r['user:3'] = 'u3' + r.rpush('a', '2', '3', '1') + assert r.sort('a', get='user:*') == [b'u1', b'u2', b'u3'] + + @pytest.mark.skip(reason="Sort works if done against keys in same slot") + def test_sort_get_multi(self, r): + r['user:1'] = 'u1' + r['user:2'] = 'u2' + r['user:3'] = 'u3' + r.rpush('a', '2', '3', '1') + assert r.sort('a', get=('user:*', '#')) == \ + [b'u1', b'1', b'u2', b'2', b'u3', b'3'] + + @pytest.mark.skip(reason="Sort works if done against keys in same slot") + def test_sort_get_groups_two(self, r): + r['user:1'] = 'u1' + r['user:2'] = 'u2' + r['user:3'] = 'u3' + r.rpush('a', '2', '3', '1') + assert r.sort('a', get=('user:*', '#'), groups=True) == \ + [(b'u1', b'1'), (b'u2', b'2'), (b'u3', b'3')] + + @pytest.mark.skip(reason="Sort works if done against keys in same slot") + def test_sort_groups_three_gets(self, r): + r['user:1'] = 'u1' + r['user:2'] = 'u2' + r['user:3'] = 'u3' + r['door:1'] = 'd1' + r['door:2'] = 'd2' + r['door:3'] = 'd3' + r.rpush('a', '2', '3', '1') + assert r.sort('a', get=('user:*', 'door:*', '#'), groups=True) == [ + (b'u1', b'd1', b'1'), + (b'u2', b'd2', b'2'), + (b'u3', b'd3', b'3') + ] From d32849a10c21a976d2d5b4c31f761c72fbaed810 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Fri, 15 May 2020 02:18:44 +0200 Subject: [PATCH 187/263] Update test_encoding.py to latest code. Also create test_encoding_cluster.py to overwrite all the tests that needs to be updated and altered. A better solution would want to be there but i dont have the solution for overwriting the other class right now. 
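The duplication is needed mainly because the fixtures differ between the two classes; a minimal sketch of the fixture override that the new cluster test class relies on (taken from the added tests/test_encoding_cluster.py file):

    @pytest.fixture()
    def r(self, request):
        return _get_client(RedisCluster, request=request, decode_responses=True)
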
--- tests/test_encoding.py | 67 ++++++++++++++++++++++++---- tests/test_encoding_cluster.py | 79 ++++++++++++++++++++++++++++++++++ 2 files changed, 138 insertions(+), 8 deletions(-) create mode 100644 tests/test_encoding_cluster.py diff --git a/tests/test_encoding.py b/tests/test_encoding.py index 05766d14..6a8e23dc 100644 --- a/tests/test_encoding.py +++ b/tests/test_encoding.py @@ -2,10 +2,9 @@ import pytest import redis -from rediscluster import RedisCluster - from redis._compat import unichr, unicode -from .conftest import _get_client, _init_client +from redis.connection import Connection +from .conftest import _get_client class TestEncoding(object): @@ -13,15 +12,51 @@ class TestEncoding(object): def r(self, request): return _get_client(redis.Redis, request=request, decode_responses=True) - def test_simple_encoding(self, r): + @pytest.fixture() + def r_no_decode(self, request): + return _get_client( + redis.Redis, + request=request, + decode_responses=False, + ) + + @pytest.mark.skip(reason="Cluster specific override") + def test_simple_encoding(self, r_no_decode): + unicode_string = unichr(3456) + 'abcd' + unichr(3421) + r_no_decode['unicode-string'] = unicode_string.encode('utf-8') + cached_val = r_no_decode['unicode-string'] + assert isinstance(cached_val, bytes) + assert unicode_string == cached_val.decode('utf-8') + + @pytest.mark.skip(reason="Cluster specific override") + def test_simple_encoding_and_decoding(self, r): unicode_string = unichr(3456) + 'abcd' + unichr(3421) r['unicode-string'] = unicode_string cached_val = r['unicode-string'] assert isinstance(cached_val, unicode) assert unicode_string == cached_val - def test_list_encoding(self, request): - r = _init_client(request, cls=RedisCluster, decode_responses=True) + @pytest.mark.skip(reason="Cluster specific override") + def test_memoryview_encoding(self, r_no_decode): + unicode_string = unichr(3456) + 'abcd' + unichr(3421) + unicode_string_view = memoryview(unicode_string.encode('utf-8')) + r_no_decode['unicode-string-memoryview'] = unicode_string_view + cached_val = r_no_decode['unicode-string-memoryview'] + # The cached value won't be a memoryview because it's a copy from Redis + assert isinstance(cached_val, bytes) + assert unicode_string == cached_val.decode('utf-8') + + @pytest.mark.skip(reason="Cluster specific override") + def test_memoryview_encoding_and_decoding(self, r): + unicode_string = unichr(3456) + 'abcd' + unichr(3421) + unicode_string_view = memoryview(unicode_string.encode('utf-8')) + r['unicode-string-memoryview'] = unicode_string_view + cached_val = r['unicode-string-memoryview'] + assert isinstance(cached_val, unicode) + assert unicode_string == cached_val + + @pytest.mark.skip(reason="Cluster specific override") + def test_list_encoding(self, r): unicode_string = unichr(3456) + 'abcd' + unichr(3421) result = [unicode_string, unicode_string, unicode_string] r.rpush('a', *result) @@ -29,17 +64,33 @@ def test_list_encoding(self, request): class TestEncodingErrors(object): + @pytest.mark.skip(reason="Cluster specific override") def test_ignore(self, request): - r = _init_client(request, cls=RedisCluster, decode_responses=True, encoding_errors='ignore') + r = _get_client(redis.Redis, request=request, decode_responses=True, + encoding_errors='ignore') r.set('a', b'foo\xff') assert r.get('a') == 'foo' + @pytest.mark.skip(reason="Cluster specific override") def test_replace(self, request): - r = _init_client(request, cls=RedisCluster, decode_responses=True, encoding_errors='replace') + r = 
_get_client(redis.Redis, request=request, decode_responses=True, + encoding_errors='replace') r.set('a', b'foo\xff') assert r.get('a') == 'foo\ufffd' +class TestMemoryviewsAreNotPacked(object): + def test_memoryviews_are_not_packed(self): + c = Connection() + arg = memoryview(b'some_arg') + arg_list = ['SOME_COMMAND', arg] + cmd = c.pack_command(*arg_list) + assert cmd[1] is arg + cmds = c.pack_commands([arg_list, arg_list]) + assert cmds[1] is arg + assert cmds[3] is arg + + class TestCommandsAreNotEncoded(object): @pytest.fixture() def r(self, request): diff --git a/tests/test_encoding_cluster.py b/tests/test_encoding_cluster.py new file mode 100644 index 00000000..089cb75b --- /dev/null +++ b/tests/test_encoding_cluster.py @@ -0,0 +1,79 @@ +from __future__ import unicode_literals +import pytest +import redis + +from rediscluster import RedisCluster + +from redis._compat import unichr, unicode +from .conftest import _get_client, _init_client + + +class TestEncodingCluster(object): + """ + We must import the entire class due to the seperate fixture that uses RedisCluster as client + class instead of the normal Redis instance. + + FIXME: If possible, monkeypatching TestEncoding class would be preffered but kinda impossible in reality + """ + @pytest.fixture() + def r(self, request): + return _get_client(RedisCluster, request=request, decode_responses=True) + + @pytest.fixture() + def r_no_decode(self, request): + return _get_client( + RedisCluster, + request=request, + decode_responses=False, + ) + + def test_simple_encoding(self, r_no_decode): + unicode_string = unichr(3456) + 'abcd' + unichr(3421) + r_no_decode['unicode-string'] = unicode_string.encode('utf-8') + cached_val = r_no_decode['unicode-string'] + assert isinstance(cached_val, bytes) + assert unicode_string == cached_val.decode('utf-8') + + def test_simple_encoding_and_decoding(self, r): + unicode_string = unichr(3456) + 'abcd' + unichr(3421) + r['unicode-string'] = unicode_string + cached_val = r['unicode-string'] + assert isinstance(cached_val, unicode) + assert unicode_string == cached_val + + def test_memoryview_encoding(self, r_no_decode): + unicode_string = unichr(3456) + 'abcd' + unichr(3421) + unicode_string_view = memoryview(unicode_string.encode('utf-8')) + r_no_decode['unicode-string-memoryview'] = unicode_string_view + cached_val = r_no_decode['unicode-string-memoryview'] + # The cached value won't be a memoryview because it's a copy from Redis + assert isinstance(cached_val, bytes) + assert unicode_string == cached_val.decode('utf-8') + + def test_memoryview_encoding_and_decoding(self, r): + unicode_string = unichr(3456) + 'abcd' + unichr(3421) + unicode_string_view = memoryview(unicode_string.encode('utf-8')) + r['unicode-string-memoryview'] = unicode_string_view + cached_val = r['unicode-string-memoryview'] + assert isinstance(cached_val, unicode) + assert unicode_string == cached_val + + def test_list_encoding(self, r): + unicode_string = unichr(3456) + 'abcd' + unichr(3421) + result = [unicode_string, unicode_string, unicode_string] + r.rpush('a', *result) + assert r.lrange('a', 0, -1) == result + + +class TestEncodingErrors(object): + def test_ignore(self, request): + r = _get_client(RedisCluster, request=request, decode_responses=True, + encoding_errors='ignore') + r.set('a', b'foo\xff') + assert r.get('a') == 'foo' + + def test_replace(self, request): + r = _get_client(RedisCluster, request=request, decode_responses=True, + encoding_errors='replace') + r.set('a', b'foo\xff') + assert r.get('a') == 
'foo\ufffd' From 2b8df1d024c6a42049e9241432f54bc0ede7743e Mon Sep 17 00:00:00 2001 From: Grokzen Date: Fri, 15 May 2020 22:01:08 +0200 Subject: [PATCH 188/263] Update test_lock.py to latest testing code from redis-py --- tests/test_lock.py | 51 +++++++++++++++++++++++++++++++++++----------- 1 file changed, 39 insertions(+), 12 deletions(-) diff --git a/tests/test_lock.py b/tests/test_lock.py index 6a7f794b..82ec43a2 100644 --- a/tests/test_lock.py +++ b/tests/test_lock.py @@ -6,13 +6,16 @@ from redis.exceptions import LockError, LockNotOwnedError from redis.client import Redis from redis.lock import Lock -from .conftest import _get_client, _init_client +from .conftest import _get_client class TestLock(object): @pytest.fixture() def r_decoded(self, request): - return _get_client(Redis, request=request, decode_responses=True) + """ + Helper function modified for RedisCluster usage to make tests work + """ + return _get_client(RedisCluster, request=request, decode_responses=True) def get_lock(self, redis, *args, **kwargs): kwargs['lock_class'] = Lock @@ -28,6 +31,13 @@ def test_lock(self, r): def test_lock_token(self, r): lock = self.get_lock(r, 'foo') + self._test_lock_token(r, lock) + + def test_lock_token_thread_local_false(self, r): + lock = self.get_lock(r, 'foo', thread_local=False) + self._test_lock_token(r, lock) + + def _test_lock_token(self, r, lock): assert lock.acquire(blocking=False, token='test') assert r.get('foo') == b'test' assert lock.local.token == b'test' @@ -62,12 +72,10 @@ def _test_owned(self, client): assert lock.owned() is False assert lock2.owned() is False - def test_owned(self, request): - r = _init_client(request, cls=RedisCluster, decode_responses=False) + def test_owned(self, r): self._test_owned(r) - def test_owned_with_decoded_responses(self, request): - r_decoded = _init_client(request, cls=RedisCluster, decode_responses=True) + def test_owned_with_decoded_responses(self, r_decoded): self._test_owned(r_decoded) def test_competing_locks(self, r): @@ -95,10 +103,13 @@ def test_float_timeout(self, r): def test_blocking_timeout(self, r): lock1 = self.get_lock(r, 'foo') assert lock1.acquire(blocking=False) - lock2 = self.get_lock(r, 'foo', blocking_timeout=0.2) + bt = 0.2 + sleep = 0.05 + lock2 = self.get_lock(r, 'foo', sleep=sleep, blocking_timeout=bt) start = time.time() assert not lock2.acquire() - assert (time.time() - start) > 0.2 + # The elapsed duration should be less than the total blocking_timeout + assert bt > (time.time() - start) > bt - sleep lock1.release() def test_context_manager(self, r): @@ -114,10 +125,18 @@ def test_context_manager_raises_when_locked_not_acquired(self, r): with self.get_lock(r, 'foo', blocking_timeout=0.1): pass - def test_high_sleep_raises_error(self, r): - "If sleep is higher than timeout, it should raise an error" - with pytest.raises(LockError): - self.get_lock(r, 'foo', timeout=1, sleep=2) + def test_high_sleep_small_blocking_timeout(self, r): + lock1 = self.get_lock(r, 'foo') + assert lock1.acquire(blocking=False) + sleep = 60 + bt = 1 + lock2 = self.get_lock(r, 'foo', sleep=sleep, blocking_timeout=bt) + start = time.time() + assert not lock2.acquire() + # the elapsed timed is less than the blocking_timeout as the lock is + # unattainable given the sleep/blocking_timeout configuration + assert bt > (time.time() - start) + lock1.release() def test_releasing_unlocked_lock_raises_error(self, r): lock = self.get_lock(r, 'foo') @@ -142,6 +161,14 @@ def test_extend_lock(self, r): assert 16000 < r.pttl('foo') <= 20000 
lock.release() + def test_extend_lock_replace_ttl(self, r): + lock = self.get_lock(r, 'foo', timeout=10) + assert lock.acquire(blocking=False) + assert 8000 < r.pttl('foo') <= 10000 + assert lock.extend(10, replace_ttl=True) + assert 8000 < r.pttl('foo') <= 10000 + lock.release() + def test_extend_lock_float(self, r): lock = self.get_lock(r, 'foo', timeout=10.0) assert lock.acquire(blocking=False) From 920f849d773c1313526e04b8650a729cba07cac6 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Fri, 15 May 2020 22:03:21 +0200 Subject: [PATCH 189/263] ADd redis server version 6.0 to the travis test suite --- .travis.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.travis.yml b/.travis.yml index 2fcfcc51..d8c15210 100644 --- a/.travis.yml +++ b/.travis.yml @@ -16,6 +16,7 @@ install: - "if [[ $REDIS_VERSION == '3.2' ]]; then REDIS_VERSION=3.2 make redis-install; fi" - "if [[ $REDIS_VERSION == '4.0' ]]; then REDIS_VERSION=4.0 make redis-install; fi" - "if [[ $REDIS_VERSION == '5.0' ]]; then REDIS_VERSION=5.0 make redis-install; fi" + - "if [[ $REDIS_VERSION == '6.0' ]]; then REDIS_VERSION=6.0 make redis-install; fi" - "if [[ $TEST_PYCODESTYLE == '1' ]]; then pip install pycodestyle; fi" - pip install -r dev-requirements.txt - pip install -e . @@ -35,6 +36,9 @@ env: # Redis 5.0 & HIREDIS - HIREDIS=0 REDIS_VERSION=5.0 - HIREDIS=1 REDIS_VERSION=5.0 + # Redis 6.0 & HIREDIS + - HIREDIS=0 REDIS_VERSION=6.0 + - HIREDIS=1 REDIS_VERSION=6.0 script: - make start - coverage erase From d35bfca8ab6cfe0d6fdc97af4814a5157c4aff19 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Fri, 15 May 2020 23:00:53 +0200 Subject: [PATCH 190/263] Fix the version limiting for hash and bitfield commands for commands that is broken in redis 3.2 or earlier versions --- tests/test_commands.py | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/tests/test_commands.py b/tests/test_commands.py index 6b47f6c1..e22d0f64 100644 --- a/tests/test_commands.py +++ b/tests/test_commands.py @@ -1163,7 +1163,7 @@ def test_sscan_iter(self, r): members = list(r.sscan_iter('a', match=b'1')) assert set(members) == {b'1'} - @skip_if_server_version_lt('2.8.0') + @skip_if_server_version_lt('4.0.0') def test_hscan(self, r): r.hset('a', mapping={'a': 1, 'b': 2, 'c': 3}) cursor, dic = r.hscan('a') @@ -1172,7 +1172,7 @@ def test_hscan(self, r): _, dic = r.hscan('a', match='a') assert dic == {b'a': b'1'} - @skip_if_server_version_lt('2.8.0') + @skip_if_server_version_lt('4.0.0') def test_hscan_iter(self, r): r.hset('a', mapping={'a': 1, 'b': 2, 'c': 3}) dic = dict(r.hscan_iter('a')) @@ -1664,6 +1664,7 @@ def test_pfmerge(self, r): assert r.pfcount('d') == 7 # HASH COMMANDS + @skip_if_server_version_lt('4.0.0') def test_hget_and_hset(self, r): r.hset('a', mapping={'1': 1, '2': 2, '3': 3}) assert r.hget('a', '1') == b'1' @@ -1685,6 +1686,7 @@ def test_hget_and_hset(self, r): assert r.hset('a', 0, 10) == 1 assert r.hset('a', '', 10) == 1 + @skip_if_server_version_lt('4.0.0') def test_hset_with_multi_key_values(self, r): r.hset('a', mapping={'1': 1, '2': 2, '3': 3}) assert r.hget('a', '1') == b'1' @@ -1700,6 +1702,7 @@ def test_hset_without_data(self, r): with pytest.raises(exceptions.DataError): r.hset("x") + @skip_if_server_version_lt('4.0.0') def test_hdel(self, r): r.hset('a', mapping={'1': 1, '2': 2, '3': 3}) assert r.hdel('a', '2') == 1 @@ -1707,11 +1710,13 @@ def test_hdel(self, r): assert r.hdel('a', '1', '3') == 2 assert r.hlen('a') == 0 + @skip_if_server_version_lt('4.0.0') def test_hexists(self, r): r.hset('a', 
mapping={'1': 1, '2': 2, '3': 3}) assert r.hexists('a', '1') assert not r.hexists('a', '4') + @skip_if_server_version_lt('4.0.0') def test_hgetall(self, r): h = {b'a1': b'1', b'a2': b'2', b'a3': b'3'} r.hset('a', mapping=h) @@ -1728,6 +1733,7 @@ def test_hincrbyfloat(self, r): assert r.hincrbyfloat('a', '1') == 2.0 assert r.hincrbyfloat('a', '1', 1.2) == 3.2 + @skip_if_server_version_lt('4.0.0') def test_hkeys(self, r): h = {b'a1': b'1', b'a2': b'2', b'a3': b'3'} r.hset('a', mapping=h) @@ -1735,10 +1741,12 @@ def test_hkeys(self, r): remote_keys = r.hkeys('a') assert (sorted(local_keys) == sorted(remote_keys)) + @skip_if_server_version_lt('4.0.0') def test_hlen(self, r): r.hset('a', mapping={'1': 1, '2': 2, '3': 3}) assert r.hlen('a') == 3 + @skip_if_server_version_lt('4.0.0') def test_hmget(self, r): assert r.hset('a', mapping={'a': 1, 'b': 2, 'c': 3}) assert r.hmget('a', 'a', 'b', 'c') == [b'1', b'2', b'3'] @@ -1759,6 +1767,7 @@ def test_hsetnx(self, r): assert not r.hsetnx('a', '1', 2) assert r.hget('a', '1') == b'1' + @skip_if_server_version_lt('4.0.0') def test_hvals(self, r): h = {b'a1': b'1', b'a2': b'2', b'a3': b'3'} r.hset('a', mapping=h) @@ -2677,6 +2686,7 @@ def test_xtrim(self, r): # 1 message is trimmed assert r.xtrim(stream, 3, approximate=False) == 1 + @skip_if_server_version_lt('3.2.0') def test_bitfield_operations(self, r): # comments show affected bits bf = r.bitfield('a') From 17d93a69b535111aea47df76de40fd39a69aab9f Mon Sep 17 00:00:00 2001 From: Grokzen Date: Fri, 15 May 2020 23:13:22 +0200 Subject: [PATCH 191/263] Fix test_hstrlen to work on correct version --- tests/test_commands.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_commands.py b/tests/test_commands.py index e22d0f64..b9c2b934 100644 --- a/tests/test_commands.py +++ b/tests/test_commands.py @@ -1775,7 +1775,7 @@ def test_hvals(self, r): remote_vals = r.hvals('a') assert sorted(local_vals) == sorted(remote_vals) - @skip_if_server_version_lt('3.2.0') + @skip_if_server_version_lt('4.0.0') def test_hstrlen(self, r): r.hset('a', mapping={'1': '22', '2': '333'}) assert r.hstrlen('a', '1') == 2 From 831ba974c6a63145789f877c7d8aa8c329a15b91 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Fri, 15 May 2020 23:30:43 +0200 Subject: [PATCH 192/263] Hardcoded block for python 3.4.0 is no longer needed in the code as the setup.py script will deal with that for us now when py3.4 is no longer supported to be installed on the system --- rediscluster/__init__.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/rediscluster/__init__.py b/rediscluster/__init__.py index 72798f6e..d43c2f2a 100644 --- a/rediscluster/__init__.py +++ b/rediscluster/__init__.py @@ -26,6 +26,3 @@ def int_or_str(value): __version__ = '2.1.0' VERSION = tuple(map(int_or_str, __version__.split('.'))) - -if sys.version_info[0:3] == (3, 4, 0): - raise RuntimeError("CRITICAL: rediscluster do not work with python 3.4.0. 
Please use 3.4.1 or higher.") From d5f82d37fc99b39a9f47e399e7155ebe1877e893 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Fri, 15 May 2020 23:38:42 +0200 Subject: [PATCH 193/263] Parsing redis url from a unix socket redis url is not supported in a cluster env as it throws error due to not having a startup nodes solution in place --- tests/test_cluster_connection_pool.py | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/tests/test_cluster_connection_pool.py b/tests/test_cluster_connection_pool.py index db79f1ec..1840dbee 100644 --- a/tests/test_cluster_connection_pool.py +++ b/tests/test_cluster_connection_pool.py @@ -12,7 +12,7 @@ ClusterConnectionPool, ClusterBlockingConnectionPool, ClusterReadOnlyConnectionPool, ClusterConnection, UnixDomainSocketConnection) from rediscluster.exceptions import RedisClusterException -from tests.conftest import skip_if_server_version_lt +from .conftest import (skip_if_server_version_lt, skip_for_no_cluster_impl) # 3rd party imports import pytest @@ -724,6 +724,12 @@ def test_client_creates_connection_pool(self): class TestConnectionPoolUnixSocketURLParsing(object): + """ + Unix sockets do not work with redis-cluster as it do not really provide a startup nodes + that can be used by the client for cluster discovery. + """ + + @skip_for_no_cluster_impl() def test_defaults(self): pool = redis.ConnectionPool.from_url('unix:///socket') assert pool.connection_class == redis.UnixDomainSocketConnection @@ -734,6 +740,7 @@ def test_defaults(self): 'password': None, } + @skip_for_no_cluster_impl() def test_password(self): pool = redis.ConnectionPool.from_url('unix://:mypassword@/socket') assert pool.connection_class == redis.UnixDomainSocketConnection @@ -744,6 +751,7 @@ def test_password(self): 'password': 'mypassword', } + @skip_for_no_cluster_impl() def test_db_as_argument(self): pool = redis.ConnectionPool.from_url('unix:///socket', db=1) assert pool.connection_class == redis.UnixDomainSocketConnection @@ -754,6 +762,7 @@ def test_db_as_argument(self): 'password': None, } + @skip_for_no_cluster_impl() def test_db_in_querystring(self): pool = redis.ConnectionPool.from_url('unix:///socket?db=2', db=1) assert pool.connection_class == redis.UnixDomainSocketConnection @@ -764,6 +773,7 @@ def test_db_in_querystring(self): 'password': None, } + @skip_for_no_cluster_impl() def test_extra_querystring_options(self): pool = redis.ConnectionPool.from_url('unix:///socket?a=1&b=2') assert pool.connection_class == redis.UnixDomainSocketConnection From 5fc0cc68247fd6745d6d745ee47ef858d4db605e Mon Sep 17 00:00:00 2001 From: Grokzen Date: Fri, 15 May 2020 23:44:02 +0200 Subject: [PATCH 194/263] Restructure the __init__.py file to be more similar to redis-py where it enables easier importing of commonly used resources by removing the need to import subfiles and enable easier import like "from rediscluster import *" to get the most commonly used features. Removed the old monkeypatching where rediscluster objects was patches into the redis import name. This will be something breaking if someone is using this feature in their own code. 
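A minimal usage sketch of the import style this change enables (the startup node address is only an example, matching the 127.0.0.1:7000 node assumed by the test setup):

    from rediscluster import RedisCluster, RedisClusterException

    startup_nodes = [{"host": "127.0.0.1", "port": "7000"}]
    rc = RedisCluster(startup_nodes=startup_nodes, decode_responses=True)

    try:
        rc.set("foo", "bar")
        assert rc.get("foo") == "bar"
    except RedisClusterException as exc:
        print("cluster not reachable: %s" % exc)
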
--- rediscluster/__init__.py | 49 ++++++++++++++++++++++++++++++---------- 1 file changed, 37 insertions(+), 12 deletions(-) diff --git a/rediscluster/__init__.py b/rediscluster/__init__.py index d43c2f2a..2f25c02b 100644 --- a/rediscluster/__init__.py +++ b/rediscluster/__init__.py @@ -3,19 +3,26 @@ # python std lib import sys -# Import shortcut -from .client import RedisCluster -from .pipeline import ClusterPipeline -from .pubsub import ClusterPubSub +# rediscluster imports +from rediscluster.client import RedisCluster +from rediscluster.connection import ( + ClusterBlockingConnectionPool, + ClusterConnection, + ClusterConnectionPool, +) +from rediscluster.exceptions import ( + RedisClusterException, + RedisClusterError, + ClusterDownException, + ClusterError, + ClusterCrossSlotError, + ClusterDownError, + AskError, + TryAgainError, + MovedError, + MasterDownError, +) -# Monkey patch RedisCluster class into redis for easy access -import redis -setattr(redis, "RedisCluster", RedisCluster) -setattr(redis, "ClusterPubSub", ClusterPubSub) -setattr(redis, "ClusterPipeline", ClusterPipeline) - -# Major, Minor, Fix version -__version__ = (2, 1, 0) def int_or_str(value): try: @@ -24,5 +31,23 @@ def int_or_str(value): return value +# Major, Minor, Fix version __version__ = '2.1.0' VERSION = tuple(map(int_or_str, __version__.split('.'))) + +__all__ = [ + AskError, + ClusterBlockingConnectionPool, + ClusterConnection, + ClusterConnectionPool, + ClusterCrossSlotError, + ClusterDownError, + ClusterDownException, + ClusterError, + MasterDownError, + MovedError, + RedisCluster, + RedisClusterError, + RedisClusterException, + TryAgainError, +] From 1499b620c28af94e618af8e1d0695380906e36ca Mon Sep 17 00:00:00 2001 From: Grokzen Date: Tue, 10 Nov 2020 11:45:21 +0100 Subject: [PATCH 195/263] Add new text block about testingstrategy and how to write and sync tests from upstream redis-py package going forward. --- CONTRIBUTING.md | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index dc4b6488..99360c2c 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -60,3 +60,14 @@ All new features must implement tests to show that it works as intended. All implemented tests must pass on all supported python versions. List of supported versions can be found in `README.md`. All tests should be assumed to work against the test environment that is implemented when running in `travis-ci`. Currently that means 6 nodes in the cluster, 3 masters, 3 slaves, using port `7000-7005` and the node on port `7000` must be accessible on `127.0.0.1` + + +## Testing strategy and how to implement cluster specific tests + +A new way of having the old upstream tests from redis-py combined with the cluster specific and unique tests that is needed to validate cluster functionality. This has been designed to improve the speed of which tests is updated from uptream as new redis-py releases is made and to make it easier to port them into the cluster variant. + +How do you implement a test for this code? + +The simplest case, this is a new cluster only/specific test that has nothing to do with the upstream redis-py package. If the test is related or could be classified to be added to one of the already existing test files that is mirrored from redis-py, then you should put this new test in the `..._cluster.py` version of the same file. + +If you need to make some kind of cluster unique adjustment to a test mirrorer from redis-py upstream, then do the following. 
In the mirrored file, for example `test_commands.py` you add the following decorator `@skip_for_no_cluster_impl()` to the method you want to modify. Then you copy the entire method and add it to the same class/method structure but inside the cluster specific version of the test file. In this example you would put it in `test_commands_cluster.py`. Copy the entire test method and keep it as similar as possible to make it easier to update in the future in-case there is changes in upstream redis-py tests. From 45548ec745e175fb6332e5a8d40d8c7ef06164dd Mon Sep 17 00:00:00 2001 From: Grokzen Date: Tue, 10 Nov 2020 11:50:11 +0100 Subject: [PATCH 196/263] Port in an updated wait for command helper method --- tests/conftest.py | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/tests/conftest.py b/tests/conftest.py index f2c8f3a7..9eaa3db4 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -269,3 +269,22 @@ def skip_unless_arch_bits(arch_bits): REDIS_INFO["arch_bits"] != arch_bits, reason="server is not {}-bit".format(arch_bits), ) + + +def wait_for_command(client, monitor, command): + # issue a command with a key name that's local to this process. + # if we find a command with our key before the command we're waiting + # for, something went wrong + redis_version = REDIS_INFO["version"] + if StrictVersion(redis_version) >= StrictVersion('5.0.0'): + id_str = str(client.client_id()) + else: + id_str = '%08x' % random.randrange(2**32) + key = '__REDIS-PY-%s__' % id_str + client.get(key) + while True: + monitor_response = monitor.next_command() + if command in monitor_response['command']: + return monitor_response + if key in monitor_response['command']: + return None From c7842009fc0c6e0716074bbfb6a0adc81af8a7d3 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Tue, 10 Nov 2020 11:52:38 +0100 Subject: [PATCH 197/263] Minor cleanup to test_monitor.py to sync with upstream properly --- tests/test_monitor.py | 18 ++---------------- 1 file changed, 2 insertions(+), 16 deletions(-) diff --git a/tests/test_monitor.py b/tests/test_monitor.py index 42ed702b..4fc11c0c 100644 --- a/tests/test_monitor.py +++ b/tests/test_monitor.py @@ -1,26 +1,12 @@ from __future__ import unicode_literals from redis._compat import unicode -from .conftest import skip_if_server_version_lt +from .conftest import skip_if_server_version_lt, wait_for_command # 3rd party imports import pytest -def wait_for_command(client, monitor, command): - # issue a command with a key name that's local to this process. - # if we find a command with our key before the command we're waiting - # for, something went wrong - key = '__REDIS-PY-%s__' % str(client.client_id()) - client.get(key) - while True: - monitor_response = monitor.next_command() - if command in monitor_response['command']: - return monitor_response - if key in monitor_response['command']: - return None - - -class TestPipeline(object): +class TestMonitor(object): @skip_if_server_version_lt('5.0.0') @pytest.mark.xfail(reason="Monitor feature not yet implemented") def test_wait_command_not_found(self, r): From f66356fa6a44a469751e4eaea525d30e4c1128fc Mon Sep 17 00:00:00 2001 From: Grokzen Date: Tue, 10 Nov 2020 11:57:48 +0100 Subject: [PATCH 198/263] Update test_multiprocessing.py to be correct from upstream. Fork off all cluster unique tests into test_multiprocessing_cluster.py instead. 
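This is an example of the testing strategy described in the CONTRIBUTING.md update: the upstream test is skipped in the mirrored file and a cluster adjusted copy is kept in the matching `..._cluster.py` file. A trimmed sketch of the pattern, taken from the tests touched below:

    # tests/test_multiprocessing.py (mirrored from redis-py upstream)
    @pytest.mark.skip(reason="Cluster specific override")
    def test_close_connection_in_child(self):
        conn = Connection()
        conn.send_command('ping')
        assert conn.read_response() == b'PONG'

    # tests/test_multiprocessing_cluster.py (cluster specific copy)
    def test_close_connection_in_child(self):
        conn = ClusterConnection(port=7000)
        conn.send_command('ping')
        assert conn.read_response() == b'PONG'
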
--- tests/test_multiprocessing.py | 26 +++-- tests/test_multiprocessing_cluster.py | 155 ++++++++++++++++++++++++++ 2 files changed, 170 insertions(+), 11 deletions(-) create mode 100644 tests/test_multiprocessing_cluster.py diff --git a/tests/test_multiprocessing.py b/tests/test_multiprocessing.py index 6faa7eb6..0d83f69d 100644 --- a/tests/test_multiprocessing.py +++ b/tests/test_multiprocessing.py @@ -3,7 +3,7 @@ import contextlib import redis -from rediscluster.connection import ClusterConnection, ClusterConnectionPool +from redis.connection import Connection, ConnectionPool from redis.exceptions import ConnectionError from .conftest import _get_client @@ -22,7 +22,7 @@ class TestMultiprocessing(object): # See issue #1085 for details. # use a multi-connection client as that's the only type that is - # actuall fork/process-safe + # actually fork/process-safe @pytest.fixture() def r(self, request): return _get_client( @@ -30,12 +30,13 @@ def r(self, request): request=request, single_connection_client=False) + @pytest.mark.skip(reason="Cluster specific override") def test_close_connection_in_child(self): """ A connection owned by a parent and closed by a child doesn't destroy the file descriptors so a parent can still use it. """ - conn = ClusterConnection(port=7000) + conn = Connection() conn.send_command('ping') assert conn.read_response() == b'PONG' @@ -56,12 +57,13 @@ def target(conn): conn.send_command('ping') assert conn.read_response() == b'PONG' + @pytest.mark.skip(reason="Cluster specific override") def test_close_connection_in_parent(self): """ A connection owned by a parent is unusable by a child if the parent (the owning process) closes the connection. """ - conn = ClusterConnection(port=7000) + conn = Connection() conn.send_command('ping') assert conn.read_response() == b'PONG' @@ -84,15 +86,16 @@ def target(conn, ev): assert proc.exitcode == 0 @pytest.mark.parametrize('max_connections', [1, 2, None]) + @pytest.mark.skip(reason="Cluster specific override") def test_pool(self, max_connections): """ A child will create its own connections when using a pool created by a parent. """ - pool = ClusterConnectionPool.from_url('redis://localhost:7000', + pool = ConnectionPool.from_url('redis://localhost', max_connections=max_connections) - conn = pool.get_random_connection() + conn = pool.get_connection('ping') main_conn_pid = conn.pid with exit_callback(pool.release, conn): conn.send_command('ping') @@ -100,7 +103,7 @@ def test_pool(self, max_connections): def target(pool): with exit_callback(pool.disconnect): - conn = pool.get_random_connection() + conn = pool.get_connection('ping') assert conn.pid != main_conn_pid with exit_callback(pool.release, conn): assert conn.send_command('ping') is None @@ -113,26 +116,27 @@ def target(pool): # Check that connection is still alive after fork process has exited # and disconnected the connections in its pool - conn = pool.get_random_connection() + conn = pool.get_connection('ping') with exit_callback(pool.release, conn): assert conn.send_command('ping') is None assert conn.read_response() == b'PONG' @pytest.mark.parametrize('max_connections', [1, 2, None]) + @pytest.mark.skip(reason="Cluster specific override") def test_close_pool_in_main(self, max_connections): """ A child process that uses the same pool as its parent isn't affected when the parent disconnects all connections within the pool. 
""" - pool = ClusterConnectionPool.from_url('redis://localhost:7000', + pool = ConnectionPool.from_url('redis://localhost', max_connections=max_connections) - conn = pool.get_random_connection() + conn = pool.get_connection('ping') assert conn.send_command('ping') is None assert conn.read_response() == b'PONG' def target(pool, disconnect_event): - conn = pool.get_random_connection() + conn = pool.get_connection('ping') with exit_callback(pool.release, conn): assert conn.send_command('ping') is None assert conn.read_response() == b'PONG' diff --git a/tests/test_multiprocessing_cluster.py b/tests/test_multiprocessing_cluster.py new file mode 100644 index 00000000..3354a3b6 --- /dev/null +++ b/tests/test_multiprocessing_cluster.py @@ -0,0 +1,155 @@ +import pytest +import multiprocessing +import contextlib + +import rediscluster +from rediscluster.connection import ClusterConnection, ClusterConnectionPool +from redis.exceptions import ConnectionError + +from .conftest import _get_client + + +@contextlib.contextmanager +def exit_callback(callback, *args): + try: + yield + finally: + callback(*args) + + +class TestMultiprocessing(object): + """ + Cluster: tests must use the cluster specific connection class and client class + to make tests valid for a cluster case. + """ + # Test connection sharing between forks. + # See issue #1085 for details. + + # use a multi-connection client as that's the only type that is + # actuall fork/process-safe + @pytest.fixture() + def r(self, request): + return _get_client( + rediscluster.RedisCluster, + request=request, + single_connection_client=False) + + def test_close_connection_in_child(self): + """ + A connection owned by a parent and closed by a child doesn't + destroy the file descriptors so a parent can still use it. + """ + conn = ClusterConnection(port=7000) + conn.send_command('ping') + assert conn.read_response() == b'PONG' + + def target(conn): + conn.send_command('ping') + assert conn.read_response() == b'PONG' + conn.disconnect() + + proc = multiprocessing.Process(target=target, args=(conn,)) + proc.start() + proc.join(3) + assert proc.exitcode == 0 + + # The connection was created in the parent but disconnected in the + # child. The child called socket.close() but did not call + # socket.shutdown() because it wasn't the "owning" process. + # Therefore the connection still works in the parent. + conn.send_command('ping') + assert conn.read_response() == b'PONG' + + def test_close_connection_in_parent(self): + """ + A connection owned by a parent is unusable by a child if the parent + (the owning process) closes the connection. + """ + conn = ClusterConnection(port=7000) + conn.send_command('ping') + assert conn.read_response() == b'PONG' + + def target(conn, ev): + ev.wait() + # the parent closed the connection. because it also created the + # connection, the connection is shutdown and the child + # cannot use it. + with pytest.raises(ConnectionError): + conn.send_command('ping') + + ev = multiprocessing.Event() + proc = multiprocessing.Process(target=target, args=(conn, ev)) + proc.start() + + conn.disconnect() + ev.set() + + proc.join(3) + assert proc.exitcode == 0 + + @pytest.mark.parametrize('max_connections', [1, 2, None]) + def test_pool(self, max_connections): + """ + A child will create its own connections when using a pool created + by a parent. 
+ """ + pool = ClusterConnectionPool.from_url('redis://localhost:7000', + max_connections=max_connections) + + conn = pool.get_random_connection() + main_conn_pid = conn.pid + with exit_callback(pool.release, conn): + conn.send_command('ping') + assert conn.read_response() == b'PONG' + + def target(pool): + with exit_callback(pool.disconnect): + conn = pool.get_random_connection() + assert conn.pid != main_conn_pid + with exit_callback(pool.release, conn): + assert conn.send_command('ping') is None + assert conn.read_response() == b'PONG' + + proc = multiprocessing.Process(target=target, args=(pool,)) + proc.start() + proc.join(3) + assert proc.exitcode == 0 + + # Check that connection is still alive after fork process has exited + # and disconnected the connections in its pool + conn = pool.get_random_connection() + with exit_callback(pool.release, conn): + assert conn.send_command('ping') is None + assert conn.read_response() == b'PONG' + + @pytest.mark.parametrize('max_connections', [1, 2, None]) + def test_close_pool_in_main(self, max_connections): + """ + A child process that uses the same pool as its parent isn't affected + when the parent disconnects all connections within the pool. + """ + pool = ClusterConnectionPool.from_url('redis://localhost:7000', + max_connections=max_connections) + + conn = pool.get_random_connection() + assert conn.send_command('ping') is None + assert conn.read_response() == b'PONG' + + def target(pool, disconnect_event): + conn = pool.get_random_connection() + with exit_callback(pool.release, conn): + assert conn.send_command('ping') is None + assert conn.read_response() == b'PONG' + disconnect_event.wait() + assert conn.send_command('ping') is None + assert conn.read_response() == b'PONG' + + ev = multiprocessing.Event() + + proc = multiprocessing.Process(target=target, args=(pool, ev)) + proc.start() + + pool.disconnect() + ev.set() + proc.join(3) + assert proc.exitcode == 0 From b0b39affdcecb7c12b0b6f7b9d9343307a1de4f8 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Tue, 10 Nov 2020 12:01:31 +0100 Subject: [PATCH 199/263] Add block about what to do when a unsupported feature should be blocked off in tests --- CONTRIBUTING.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 99360c2c..7ed77e11 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -71,3 +71,5 @@ How do you implement a test for this code? The simplest case, this is a new cluster only/specific test that has nothing to do with the upstream redis-py package. If the test is related or could be classified to be added to one of the already existing test files that is mirrored from redis-py, then you should put this new test in the `..._cluster.py` version of the same file. If you need to make some kind of cluster unique adjustment to a test mirrorer from redis-py upstream, then do the following. In the mirrored file, for example `test_commands.py` you add the following decorator `@skip_for_no_cluster_impl()` to the method you want to modify. Then you copy the entire method and add it to the same class/method structure but inside the cluster specific version of the test file. In this example you would put it in `test_commands_cluster.py`. Copy the entire test method and keep it as similar as possible to make it easier to update in the future in-case there is changes in upstream redis-py tests. 
+ +In the case where some command or feature is not supported natively or is decided not to be supported by this library, you should block any tests from upstream redis-py package that deals with that feature with the following decorator `@skip_for_no_cluster_impl()`. This will mark it to not be run during tests. This is also a good indicator for users of this library what features is not supported or there is not really a good cluster implementation for. From 7eacb85b5c416231acb04ec3554527a1927ce8b3 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Tue, 10 Nov 2020 12:16:48 +0100 Subject: [PATCH 200/263] Update test_pipeline.py to upstream redis-py version. Moved all cluster unique tests and overwrites into test_pipeline_cluster.py --- tests/test_pipeline.py | 469 +++++++-------------------------- tests/test_pipeline_cluster.py | 346 ++++++++++++++++++++++++ 2 files changed, 445 insertions(+), 370 deletions(-) create mode 100644 tests/test_pipeline_cluster.py diff --git a/tests/test_pipeline.py b/tests/test_pipeline.py index a85ee459..7d7e27a2 100644 --- a/tests/test_pipeline.py +++ b/tests/test_pipeline.py @@ -1,25 +1,12 @@ -# -*- coding: utf-8 -*- - -# python std lib from __future__ import unicode_literals -import re - -# rediscluster imports -from rediscluster.client import RedisCluster -from rediscluster.connection import ClusterConnectionPool, ClusterReadOnlyConnectionPool -from rediscluster.exceptions import RedisClusterException -from tests.conftest import _get_client, skip_if_server_version_lt - -# 3rd party imports import pytest -from mock import patch + +import redis from redis._compat import unichr, unicode -from redis.exceptions import WatchError, ResponseError, ConnectionError +from .conftest import wait_for_command, skip_if_server_version_lt, skip_for_no_cluster_impl class TestPipeline(object): - """ - """ def test_pipeline_is_true(self, r): "Ensure pipeline instances are not false-y" with r.pipeline() as pipe: @@ -33,14 +20,25 @@ def test_pipeline(self, r): .zadd('z', {'z2': 4}) .zincrby('z', 1, 'z1') .zrange('z', 0, 5, withscores=True)) - assert pipe.execute() == [ - True, - b'a1', - True, - True, - 2.0, - [(b'z1', 2.0), (b'z2', 4)], - ] + assert pipe.execute() == \ + [ + True, + b'a1', + True, + True, + 2.0, + [(b'z1', 2.0), (b'z2', 4)], + ] + + def test_pipeline_memoryview(self, r): + with r.pipeline() as pipe: + (pipe.set('a', memoryview(b'a1')) + .get('a')) + assert pipe.execute() == \ + [ + True, + b'a1', + ] def test_pipeline_length(self, r): with r.pipeline() as pipe: @@ -63,16 +61,7 @@ def test_pipeline_no_transaction(self, r): assert r['b'] == b'b1' assert r['c'] == b'c1' - def test_pipeline_eval(self, r): - with r.pipeline(transaction=False) as pipe: - pipe.eval("return {KEYS[1],KEYS[2],ARGV[1],ARGV[2]}", 2, "A{foo}", "B{foo}", "first", "second") - res = pipe.execute()[0] - assert res[0] == b'A{foo}' - assert res[1] == b'B{foo}' - assert res[2] == b'first' - assert res[3] == b'second' - - @pytest.mark.xfail(reason="unsupported command: watch") + @skip_for_no_cluster_impl() def test_pipeline_no_transaction_watch(self, r): r['a'] = 0 @@ -84,7 +73,7 @@ def test_pipeline_no_transaction_watch(self, r): pipe.set('a', int(a) + 1) assert pipe.execute() == [True] - @pytest.mark.xfail(reason="unsupported command: watch") + @skip_for_no_cluster_impl() def test_pipeline_no_transaction_watch_failure(self, r): r['a'] = 0 @@ -97,7 +86,7 @@ def test_pipeline_no_transaction_watch_failure(self, r): pipe.multi() pipe.set('a', int(a) + 1) - with pytest.raises(WatchError): + with 
pytest.raises(redis.WatchError): pipe.execute() assert r['a'] == b'bad' @@ -119,7 +108,7 @@ def test_exec_error_in_response(self, r): # we can't lpush to a key that's a string value, so this should # be a ResponseError exception - assert isinstance(result[2], ResponseError) + assert isinstance(result[2], redis.ResponseError) assert r['c'] == b'a' # since this isn't a transaction, the other commands after the @@ -135,7 +124,7 @@ def test_exec_error_raised(self, r): r['c'] = 'a' with r.pipeline() as pipe: pipe.set('a', 1).set('b', 2).lpush('c', 3).set('d', 4) - with pytest.raises(ResponseError) as ex: + with pytest.raises(redis.ResponseError) as ex: pipe.execute() assert unicode(ex.value).startswith('Command # 3 (LPUSH c 3) of ' 'pipeline caused error: ') @@ -144,6 +133,7 @@ def test_exec_error_raised(self, r): assert pipe.set('z', 'zzz').execute() == [True] assert r['z'] == b'zzz' + @skip_for_no_cluster_impl() def test_transaction_with_empty_error_command(self, r): """ Commands with custom EMPTY_ERROR functionality return their default @@ -151,13 +141,14 @@ def test_transaction_with_empty_error_command(self, r): """ for error_switch in (True, False): with r.pipeline() as pipe: - pipe.set('a', 1).get("").set('c', 3) + pipe.set('a', 1).mget([]).set('c', 3) result = pipe.execute(raise_on_error=error_switch) assert result[0] - assert result[1] == None + assert result[1] == [] assert result[2] + @skip_for_no_cluster_impl() def test_pipeline_with_empty_error_command(self, r): """ Commands with custom EMPTY_ERROR functionality return their default @@ -165,18 +156,18 @@ def test_pipeline_with_empty_error_command(self, r): """ for error_switch in (True, False): with r.pipeline(transaction=False) as pipe: - pipe.set('a', 1).get("").set('c', 3) + pipe.set('a', 1).mget([]).set('c', 3) result = pipe.execute(raise_on_error=error_switch) assert result[0] - assert result[1] == None + assert result[1] == [] assert result[2] def test_parse_error_raised(self, r): with r.pipeline() as pipe: # the zrem is invalid because we don't pass any keys to it pipe.set('a', 1).zrem('b').set('b', 2) - with pytest.raises(ResponseError) as ex: + with pytest.raises(redis.ResponseError) as ex: pipe.execute() assert unicode(ex.value).startswith('Command # 2 (ZREM b) of ' @@ -186,7 +177,23 @@ def test_parse_error_raised(self, r): assert pipe.set('z', 'zzz').execute() == [True] assert r['z'] == b'zzz' - @pytest.mark.xfail(reason="unsupported command: watch") + @skip_for_no_cluster_impl() + def test_parse_error_raised_transaction(self, r): + with r.pipeline() as pipe: + pipe.multi() + # the zrem is invalid because we don't pass any keys to it + pipe.set('a', 1).zrem('b').set('b', 2) + with pytest.raises(redis.ResponseError) as ex: + pipe.execute() + + assert unicode(ex.value).startswith('Command # 2 (ZREM b) of ' + 'pipeline caused error: ') + + # make sure the pipe was restored to a working state + assert pipe.set('z', 'zzz').execute() == [True] + assert r['z'] == b'zzz' + + @skip_for_no_cluster_impl() def test_watch_succeed(self, r): r['a'] = 1 r['b'] = 2 @@ -204,22 +211,22 @@ def test_watch_succeed(self, r): assert pipe.execute() == [True] assert not pipe.watching - @pytest.mark.xfail(reason="unsupported command: watch") + @skip_for_no_cluster_impl() def test_watch_failure(self, r): r['a'] = 1 r['b'] = 2 with r.pipeline() as pipe: - pipe.watch('a', 'b') + pipe.watch('a', 'b')§ r['b'] = 3 pipe.multi() pipe.get('a') - with pytest.raises(WatchError): + with pytest.raises(redis.WatchError): pipe.execute() assert not pipe.watching - 
@pytest.mark.xfail(reason="unsupported command: watch") + @skip_for_no_cluster_impl() def test_watch_failure_in_empty_transaction(self, r): r['a'] = 1 r['b'] = 2 @@ -233,7 +240,7 @@ def test_watch_failure_in_empty_transaction(self, r): assert not pipe.watching - @pytest.mark.xfail(reason="unsupported command: watch") + @skip_for_no_cluster_impl() def test_unwatch(self, r): r['a'] = 1 r['b'] = 2 @@ -246,7 +253,43 @@ def test_unwatch(self, r): pipe.get('a') assert pipe.execute() == [b'1'] - @pytest.mark.xfail(reason="unsupported command: watch") + @skip_for_no_cluster_impl() + def test_watch_exec_no_unwatch(self, r): + r['a'] = 1 + r['b'] = 2 + + with r.monitor() as m: + with r.pipeline() as pipe: + pipe.watch('a', 'b') + assert pipe.watching + a_value = pipe.get('a') + b_value = pipe.get('b') + assert a_value == b'1' + assert b_value == b'2' + pipe.multi() + pipe.set('c', 3) + assert pipe.execute() == [True] + assert not pipe.watching + + unwatch_command = wait_for_command(r, m, 'UNWATCH') + assert unwatch_command is None, "should not send UNWATCH" + + @skip_for_no_cluster_impl() + def test_watch_reset_unwatch(self, r): + r['a'] = 1 + + with r.monitor() as m: + with r.pipeline() as pipe: + pipe.watch('a') + assert pipe.watching + pipe.reset() + assert not pipe.watching + + unwatch_command = wait_for_command(r, m, 'UNWATCH') + assert unwatch_command is not None + assert unwatch_command['command'] == 'UNWATCH' + + @skip_for_no_cluster_impl() def test_transaction_callable(self, r): r['a'] = 1 r['b'] = 2 @@ -271,7 +314,7 @@ def my_transaction(pipe): assert result == [True] assert r['c'] == b'4' - @pytest.mark.xfail(reason="unsupported command: watch") + @skip_for_no_cluster_impl() def test_transaction_callable_returns_value_from_callable(self, r): def callback(pipe): # No need to do anything here since we only want the return value @@ -286,7 +329,7 @@ def test_exec_error_in_no_transaction_pipeline(self, r): pipe.llen('a') pipe.expire('a', 100) - with pytest.raises(ResponseError) as ex: + with pytest.raises(redis.ResponseError) as ex: pipe.execute() assert unicode(ex.value).startswith('Command # 1 (LLEN a) of ' @@ -295,16 +338,17 @@ def test_exec_error_in_no_transaction_pipeline(self, r): assert r['a'] == b'1' def test_exec_error_in_no_transaction_pipeline_unicode_command(self, r): - key = unichr(3456) + u'abcd' + unichr(3421) + key = unichr(3456) + 'abcd' + unichr(3421) r[key] = 1 with r.pipeline(transaction=False) as pipe: pipe.llen(key) pipe.expire(key, 100) - with pytest.raises(ResponseError) as ex: + with pytest.raises(redis.ResponseError) as ex: pipe.execute() - expected = unicode('Command # 1 (LLEN {0}) of pipeline caused error: ').format(key) + expected = unicode('Command # 1 (LLEN %s) of pipeline caused ' + 'error: ') % key assert unicode(ex.value).startswith(expected) assert r[key] == b'1' @@ -326,318 +370,3 @@ def test_pipeline_with_bitfield(self, r): assert pipe == pipe2 assert response == [True, [0, 0, 15, 15, 14], b'1'] - - def test_blocked_methods(self, r): - """ - Currently some method calls on a Cluster pipeline - is blocked when using in cluster mode. - They maybe implemented in the future. 
- """ - pipe = r.pipeline(transaction=False) - with pytest.raises(RedisClusterException): - pipe.multi() - - with pytest.raises(RedisClusterException): - pipe.immediate_execute_command() - - with pytest.raises(RedisClusterException): - pipe._execute_transaction(None, None, None) - - with pytest.raises(RedisClusterException): - pipe.load_scripts() - - with pytest.raises(RedisClusterException): - pipe.watch() - - with pytest.raises(RedisClusterException): - pipe.unwatch() - - with pytest.raises(RedisClusterException): - pipe.script_load_for_pipeline(None) - - with pytest.raises(RedisClusterException): - pipe.transaction(None) - - def test_blocked_arguments(self, r): - """ - Currently some arguments is blocked when using in cluster mode. - They maybe implemented in the future. - """ - with pytest.raises(RedisClusterException) as ex: - r.pipeline(transaction=True) - - assert unicode(ex.value).startswith("transaction is deprecated in cluster mode"), True - - with pytest.raises(RedisClusterException) as ex: - r.pipeline(shard_hint=True) - - assert unicode(ex.value).startswith("shard_hint is deprecated in cluster mode"), True - - def test_redis_cluster_pipeline(self): - """ - Test that we can use a pipeline with the RedisCluster class - """ - r = _get_client(RedisCluster) - with r.pipeline(transaction=False) as pipe: - pipe.get("foobar") - - def test_mget_disabled(self, r): - with r.pipeline(transaction=False) as pipe: - with pytest.raises(RedisClusterException): - pipe.mget(['a']) - - def test_mset_disabled(self, r): - with r.pipeline(transaction=False) as pipe: - with pytest.raises(RedisClusterException): - pipe.mset({'a': 1, 'b': 2}) - - def test_rename_disabled(self, r): - with r.pipeline(transaction=False) as pipe: - with pytest.raises(RedisClusterException): - pipe.rename('a', 'b') - - def test_renamenx_disabled(self, r): - with r.pipeline(transaction=False) as pipe: - with pytest.raises(RedisClusterException): - pipe.renamenx('a', 'b') - - def test_delete_single(self, r): - r['a'] = 1 - with r.pipeline(transaction=False) as pipe: - pipe.delete('a') - assert pipe.execute(), True - - def test_multi_delete_unsupported(self, r): - with r.pipeline(transaction=False) as pipe: - r['a'] = 1 - r['b'] = 2 - with pytest.raises(RedisClusterException): - pipe.delete('a', 'b') - - def test_brpoplpush_disabled(self, r): - with r.pipeline(transaction=False) as pipe: - with pytest.raises(RedisClusterException): - pipe.brpoplpush() - - def test_rpoplpush_disabled(self, r): - with r.pipeline(transaction=False) as pipe: - with pytest.raises(RedisClusterException): - pipe.rpoplpush() - - def test_sort_disabled(self, r): - with r.pipeline(transaction=False) as pipe: - with pytest.raises(RedisClusterException): - pipe.sort() - - def test_sdiff_disabled(self, r): - with r.pipeline(transaction=False) as pipe: - with pytest.raises(RedisClusterException): - pipe.sdiff() - - def test_sdiffstore_disabled(self, r): - with r.pipeline(transaction=False) as pipe: - with pytest.raises(RedisClusterException): - pipe.sdiffstore() - - def test_sinter_disabled(self, r): - with r.pipeline(transaction=False) as pipe: - with pytest.raises(RedisClusterException): - pipe.sinter() - - def test_sinterstore_disabled(self, r): - with r.pipeline(transaction=False) as pipe: - with pytest.raises(RedisClusterException): - pipe.sinterstore() - - def test_smove_disabled(self, r): - with r.pipeline(transaction=False) as pipe: - with pytest.raises(RedisClusterException): - pipe.smove() - - def test_sunion_disabled(self, r): - with 
r.pipeline(transaction=False) as pipe: - with pytest.raises(RedisClusterException): - pipe.sunion() - - def test_sunionstore_disabled(self, r): - with r.pipeline(transaction=False) as pipe: - with pytest.raises(RedisClusterException): - pipe.sunionstore() - - def test_spfmerge_disabled(self, r): - with r.pipeline(transaction=False) as pipe: - with pytest.raises(RedisClusterException): - pipe.pfmerge() - - def test_multi_key_operation_with_shared_shards(self, r): - pipe = r.pipeline(transaction=False) - pipe.set('a{foo}', 1) - pipe.set('b{foo}', 2) - pipe.set('c{foo}', 3) - pipe.set('bar', 4) - pipe.set('bazz', 5) - pipe.get('a{foo}') - pipe.get('b{foo}') - pipe.get('c{foo}') - pipe.get('bar') - pipe.get('bazz') - res = pipe.execute() - assert res == [True, True, True, True, True, b'1', b'2', b'3', b'4', b'5'] - - @pytest.mark.xfail(reson="perform_execute_pipeline is not used any longer") - def test_connection_error(self, r): - test = self - test._calls = [] - - def perform_execute_pipeline(pipe): - if not test._calls: - e = ConnectionError('test') - test._calls.append({'exception': e}) - return [e] - result = pipe.execute(raise_on_error=False) - test._calls.append({'result': result}) - return result - - pipe = r.pipeline(transaction=False) - orig_perform_execute_pipeline = pipe.perform_execute_pipeline - pipe.perform_execute_pipeline = perform_execute_pipeline - - try: - pipe.set('foo', 1) - res = pipe.execute() - assert res, [True] - assert isinstance(test._calls[0]['exception'], ConnectionError) - if len(test._calls) == 2: - assert test._calls[1] == {'result': [True]} - else: - assert isinstance(test._calls[1]['result'][0], ResponseError) - assert test._calls[2] == {'result': [True]} - finally: - pipe.perform_execute_pipeline = orig_perform_execute_pipeline - del test._calls - - @pytest.mark.xfail(reson="perform_execute_pipeline is not used any longer") - def test_asking_error(self, r): - test = self - test._calls = [] - - def perform_execute_pipeline(pipe): - if not test._calls: - - e = ResponseError("ASK {0} 127.0.0.1:7003".format(r.keyslot('foo'))) - test._calls.append({'exception': e}) - return [e, e] - result = pipe.execute(raise_on_error=False) - test._calls.append({'result': result}) - return result - - pipe = r.pipeline(transaction=False) - orig_perform_execute_pipeline = pipe.perform_execute_pipeline - pipe.perform_execute_pipeline = perform_execute_pipeline - - try: - pipe.set('foo', 1) - pipe.get('foo') - res = pipe.execute() - assert res == [True, b'1'] - assert isinstance(test._calls[0]['exception'], ResponseError) - assert re.match("ASK", str(test._calls[0]['exception'])) - assert isinstance(test._calls[1]['result'][0], ResponseError) - assert re.match("MOVED", str(test._calls[1]['result'][0])) - assert test._calls[2] == {'result': [True, b'1']} - finally: - pipe.perform_execute_pipeline = orig_perform_execute_pipeline - del test._calls - - def test_empty_stack(self, r): - """ - If pipeline is executed with no commands it should - return a empty list. - """ - p = r.pipeline() - result = p.execute() - assert result == [] - - -class TestReadOnlyPipeline(object): - - def test_pipeline_readonly(self, r, ro): - """ - On readonly mode, we supports get related stuff only. 
- """ - r.set('foo71', 'a1') # we assume this key is set on 127.0.0.1:7001 - r.zadd('foo88', {'z1': 1}) # we assume this key is set on 127.0.0.1:7002 - r.zadd('foo88', {'z2': 4}) - - with ro.pipeline() as readonly_pipe: - readonly_pipe.get('foo71').zrange('foo88', 0, 5, withscores=True) - assert readonly_pipe.execute() == [ - b'a1', - [(b'z1', 1.0), (b'z2', 4)], - ] - - def assert_moved_redirection_on_slave(self, connection_pool_cls, cluster_obj): - with patch.object(connection_pool_cls, 'get_node_by_slot') as return_slave_mock: - with patch.object(ClusterConnectionPool, 'get_master_node_by_slot') as return_master_mock: - def get_mock_node(role, port): - return { - 'name': '127.0.0.1:{0}'.format(port), - 'host': '127.0.0.1', - 'port': port, - 'server_type': role, - } - - return_slave_mock.return_value = get_mock_node('slave', 7005) - return_master_mock.return_value = get_mock_node('slave', 7001) - - with cluster_obj.pipeline() as pipe: - # we assume this key is set on 127.0.0.1:7001(7004) - pipe.get('foo87').get('foo88').execute() == [None, None] - - def test_moved_redirection_on_slave_with_default(self): - """ - On Pipeline, we redirected once and finally get from master with - readonly client when data is completely moved. - """ - self.assert_moved_redirection_on_slave( - ClusterConnectionPool, - RedisCluster(host="127.0.0.1", port=7000, reinitialize_steps=1) - ) - - def test_moved_redirection_on_slave_with_readonly_mode_client(self): - """ - Ditto with READONLY mode. - """ - self.assert_moved_redirection_on_slave( - ClusterReadOnlyConnectionPool, - RedisCluster(host="127.0.0.1", port=7000, readonly_mode=True, reinitialize_steps=1) - ) - - def test_access_correct_slave_with_readonly_mode_client(self, sr): - """ - Test that the client can get value normally with readonly mode - when we connect to correct slave. 
- """ - - # we assume this key is set on 127.0.0.1:7001 - sr.set('foo87', 'foo') - sr.set('foo88', 'bar') - import time - time.sleep(1) - - with patch.object(ClusterReadOnlyConnectionPool, 'get_node_by_slot') as return_slave_mock: - return_slave_mock.return_value = { - 'name': '127.0.0.1:7004', - 'host': '127.0.0.1', - 'port': 7004, - 'server_type': 'slave', - } - - master_value = {'host': '127.0.0.1', 'name': '127.0.0.1:7001', 'port': 7001, 'server_type': 'master'} - with patch.object( - ClusterConnectionPool, - 'get_master_node_by_slot', - return_value=master_value) as return_master_mock: - readonly_client = RedisCluster(host="127.0.0.1", port=7000, readonly_mode=True) - with readonly_client.pipeline() as readonly_pipe: - assert readonly_pipe.get('foo88').get('foo87').execute() == [b'bar', b'foo'] diff --git a/tests/test_pipeline_cluster.py b/tests/test_pipeline_cluster.py new file mode 100644 index 00000000..e4a7f320 --- /dev/null +++ b/tests/test_pipeline_cluster.py @@ -0,0 +1,346 @@ +# -*- coding: utf-8 -*- + +# python std lib +from __future__ import unicode_literals +import re + +# rediscluster imports +from rediscluster.client import RedisCluster +from rediscluster.connection import ClusterConnectionPool, ClusterReadOnlyConnectionPool +from rediscluster.exceptions import RedisClusterException +from tests.conftest import _get_client, skip_if_server_version_lt + +# 3rd party imports +import pytest +from mock import patch +from redis._compat import unichr, unicode +from redis.exceptions import WatchError, ResponseError, ConnectionError + + +class TestPipeline(object): + """ + """ + + def test_pipeline_eval(self, r): + with r.pipeline(transaction=False) as pipe: + pipe.eval("return {KEYS[1],KEYS[2],ARGV[1],ARGV[2]}", 2, "A{foo}", "B{foo}", "first", "second") + res = pipe.execute()[0] + assert res[0] == b'A{foo}' + assert res[1] == b'B{foo}' + assert res[2] == b'first' + assert res[3] == b'second' + + def test_blocked_methods(self, r): + """ + Currently some method calls on a Cluster pipeline + is blocked when using in cluster mode. + They maybe implemented in the future. + """ + pipe = r.pipeline(transaction=False) + with pytest.raises(RedisClusterException): + pipe.multi() + + with pytest.raises(RedisClusterException): + pipe.immediate_execute_command() + + with pytest.raises(RedisClusterException): + pipe._execute_transaction(None, None, None) + + with pytest.raises(RedisClusterException): + pipe.load_scripts() + + with pytest.raises(RedisClusterException): + pipe.watch() + + with pytest.raises(RedisClusterException): + pipe.unwatch() + + with pytest.raises(RedisClusterException): + pipe.script_load_for_pipeline(None) + + with pytest.raises(RedisClusterException): + pipe.transaction(None) + + def test_blocked_arguments(self, r): + """ + Currently some arguments is blocked when using in cluster mode. + They maybe implemented in the future. 
+ """ + with pytest.raises(RedisClusterException) as ex: + r.pipeline(transaction=True) + + assert unicode(ex.value).startswith("transaction is deprecated in cluster mode"), True + + with pytest.raises(RedisClusterException) as ex: + r.pipeline(shard_hint=True) + + assert unicode(ex.value).startswith("shard_hint is deprecated in cluster mode"), True + + def test_redis_cluster_pipeline(self): + """ + Test that we can use a pipeline with the RedisCluster class + """ + r = _get_client(RedisCluster) + with r.pipeline(transaction=False) as pipe: + pipe.get("foobar") + + def test_mget_disabled(self, r): + with r.pipeline(transaction=False) as pipe: + with pytest.raises(RedisClusterException): + pipe.mget(['a']) + + def test_mset_disabled(self, r): + with r.pipeline(transaction=False) as pipe: + with pytest.raises(RedisClusterException): + pipe.mset({'a': 1, 'b': 2}) + + def test_rename_disabled(self, r): + with r.pipeline(transaction=False) as pipe: + with pytest.raises(RedisClusterException): + pipe.rename('a', 'b') + + def test_renamenx_disabled(self, r): + with r.pipeline(transaction=False) as pipe: + with pytest.raises(RedisClusterException): + pipe.renamenx('a', 'b') + + def test_delete_single(self, r): + r['a'] = 1 + with r.pipeline(transaction=False) as pipe: + pipe.delete('a') + assert pipe.execute(), True + + def test_multi_delete_unsupported(self, r): + with r.pipeline(transaction=False) as pipe: + r['a'] = 1 + r['b'] = 2 + with pytest.raises(RedisClusterException): + pipe.delete('a', 'b') + + def test_brpoplpush_disabled(self, r): + with r.pipeline(transaction=False) as pipe: + with pytest.raises(RedisClusterException): + pipe.brpoplpush() + + def test_rpoplpush_disabled(self, r): + with r.pipeline(transaction=False) as pipe: + with pytest.raises(RedisClusterException): + pipe.rpoplpush() + + def test_sort_disabled(self, r): + with r.pipeline(transaction=False) as pipe: + with pytest.raises(RedisClusterException): + pipe.sort() + + def test_sdiff_disabled(self, r): + with r.pipeline(transaction=False) as pipe: + with pytest.raises(RedisClusterException): + pipe.sdiff() + + def test_sdiffstore_disabled(self, r): + with r.pipeline(transaction=False) as pipe: + with pytest.raises(RedisClusterException): + pipe.sdiffstore() + + def test_sinter_disabled(self, r): + with r.pipeline(transaction=False) as pipe: + with pytest.raises(RedisClusterException): + pipe.sinter() + + def test_sinterstore_disabled(self, r): + with r.pipeline(transaction=False) as pipe: + with pytest.raises(RedisClusterException): + pipe.sinterstore() + + def test_smove_disabled(self, r): + with r.pipeline(transaction=False) as pipe: + with pytest.raises(RedisClusterException): + pipe.smove() + + def test_sunion_disabled(self, r): + with r.pipeline(transaction=False) as pipe: + with pytest.raises(RedisClusterException): + pipe.sunion() + + def test_sunionstore_disabled(self, r): + with r.pipeline(transaction=False) as pipe: + with pytest.raises(RedisClusterException): + pipe.sunionstore() + + def test_spfmerge_disabled(self, r): + with r.pipeline(transaction=False) as pipe: + with pytest.raises(RedisClusterException): + pipe.pfmerge() + + def test_multi_key_operation_with_shared_shards(self, r): + pipe = r.pipeline(transaction=False) + pipe.set('a{foo}', 1) + pipe.set('b{foo}', 2) + pipe.set('c{foo}', 3) + pipe.set('bar', 4) + pipe.set('bazz', 5) + pipe.get('a{foo}') + pipe.get('b{foo}') + pipe.get('c{foo}') + pipe.get('bar') + pipe.get('bazz') + res = pipe.execute() + assert res == [True, True, True, True, True, b'1', 
b'2', b'3', b'4', b'5'] + + @pytest.mark.xfail(reson="perform_execute_pipeline is not used any longer") + def test_connection_error(self, r): + test = self + test._calls = [] + + def perform_execute_pipeline(pipe): + if not test._calls: + e = ConnectionError('test') + test._calls.append({'exception': e}) + return [e] + result = pipe.execute(raise_on_error=False) + test._calls.append({'result': result}) + return result + + pipe = r.pipeline(transaction=False) + orig_perform_execute_pipeline = pipe.perform_execute_pipeline + pipe.perform_execute_pipeline = perform_execute_pipeline + + try: + pipe.set('foo', 1) + res = pipe.execute() + assert res, [True] + assert isinstance(test._calls[0]['exception'], ConnectionError) + if len(test._calls) == 2: + assert test._calls[1] == {'result': [True]} + else: + assert isinstance(test._calls[1]['result'][0], ResponseError) + assert test._calls[2] == {'result': [True]} + finally: + pipe.perform_execute_pipeline = orig_perform_execute_pipeline + del test._calls + + @pytest.mark.xfail(reson="perform_execute_pipeline is not used any longer") + def test_asking_error(self, r): + test = self + test._calls = [] + + def perform_execute_pipeline(pipe): + if not test._calls: + + e = ResponseError("ASK {0} 127.0.0.1:7003".format(r.keyslot('foo'))) + test._calls.append({'exception': e}) + return [e, e] + result = pipe.execute(raise_on_error=False) + test._calls.append({'result': result}) + return result + + pipe = r.pipeline(transaction=False) + orig_perform_execute_pipeline = pipe.perform_execute_pipeline + pipe.perform_execute_pipeline = perform_execute_pipeline + + try: + pipe.set('foo', 1) + pipe.get('foo') + res = pipe.execute() + assert res == [True, b'1'] + assert isinstance(test._calls[0]['exception'], ResponseError) + assert re.match("ASK", str(test._calls[0]['exception'])) + assert isinstance(test._calls[1]['result'][0], ResponseError) + assert re.match("MOVED", str(test._calls[1]['result'][0])) + assert test._calls[2] == {'result': [True, b'1']} + finally: + pipe.perform_execute_pipeline = orig_perform_execute_pipeline + del test._calls + + def test_empty_stack(self, r): + """ + If pipeline is executed with no commands it should + return a empty list. + """ + p = r.pipeline() + result = p.execute() + assert result == [] + + +class TestReadOnlyPipeline(object): + + def test_pipeline_readonly(self, r, ro): + """ + On readonly mode, we supports get related stuff only. 
+ """ + r.set('foo71', 'a1') # we assume this key is set on 127.0.0.1:7001 + r.zadd('foo88', {'z1': 1}) # we assume this key is set on 127.0.0.1:7002 + r.zadd('foo88', {'z2': 4}) + + with ro.pipeline() as readonly_pipe: + readonly_pipe.get('foo71').zrange('foo88', 0, 5, withscores=True) + assert readonly_pipe.execute() == [ + b'a1', + [(b'z1', 1.0), (b'z2', 4)], + ] + + def assert_moved_redirection_on_slave(self, connection_pool_cls, cluster_obj): + with patch.object(connection_pool_cls, 'get_node_by_slot') as return_slave_mock: + with patch.object(ClusterConnectionPool, 'get_master_node_by_slot') as return_master_mock: + def get_mock_node(role, port): + return { + 'name': '127.0.0.1:{0}'.format(port), + 'host': '127.0.0.1', + 'port': port, + 'server_type': role, + } + + return_slave_mock.return_value = get_mock_node('slave', 7005) + return_master_mock.return_value = get_mock_node('slave', 7001) + + with cluster_obj.pipeline() as pipe: + # we assume this key is set on 127.0.0.1:7001(7004) + pipe.get('foo87').get('foo88').execute() == [None, None] + + def test_moved_redirection_on_slave_with_default(self): + """ + On Pipeline, we redirected once and finally get from master with + readonly client when data is completely moved. + """ + self.assert_moved_redirection_on_slave( + ClusterConnectionPool, + RedisCluster(host="127.0.0.1", port=7000, reinitialize_steps=1) + ) + + def test_moved_redirection_on_slave_with_readonly_mode_client(self): + """ + Ditto with READONLY mode. + """ + self.assert_moved_redirection_on_slave( + ClusterReadOnlyConnectionPool, + RedisCluster(host="127.0.0.1", port=7000, readonly_mode=True, reinitialize_steps=1) + ) + + def test_access_correct_slave_with_readonly_mode_client(self, sr): + """ + Test that the client can get value normally with readonly mode + when we connect to correct slave. + """ + + # we assume this key is set on 127.0.0.1:7001 + sr.set('foo87', 'foo') + sr.set('foo88', 'bar') + import time + time.sleep(1) + + with patch.object(ClusterReadOnlyConnectionPool, 'get_node_by_slot') as return_slave_mock: + return_slave_mock.return_value = { + 'name': '127.0.0.1:7004', + 'host': '127.0.0.1', + 'port': 7004, + 'server_type': 'slave', + } + + master_value = {'host': '127.0.0.1', 'name': '127.0.0.1:7001', 'port': 7001, 'server_type': 'master'} + with patch.object( + ClusterConnectionPool, + 'get_master_node_by_slot', + return_value=master_value) as return_master_mock: + readonly_client = RedisCluster(host="127.0.0.1", port=7000, readonly_mode=True) + with readonly_client.pipeline() as readonly_pipe: + assert readonly_pipe.get('foo88').get('foo87').execute() == [b'bar', b'foo'] From c7c0d87747ee19079bbe68e67c33ee68f2aab4e6 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Tue, 10 Nov 2020 12:20:23 +0100 Subject: [PATCH 201/263] Fix typo --- tests/test_pipeline.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_pipeline.py b/tests/test_pipeline.py index 7d7e27a2..e1b3eb28 100644 --- a/tests/test_pipeline.py +++ b/tests/test_pipeline.py @@ -217,7 +217,7 @@ def test_watch_failure(self, r): r['b'] = 2 with r.pipeline() as pipe: - pipe.watch('a', 'b')§ + pipe.watch('a', 'b') r['b'] = 3 pipe.multi() pipe.get('a') From e64294c6c4a41221dbfd58415bb95d22c8e3750a Mon Sep 17 00:00:00 2001 From: Grokzen Date: Tue, 10 Nov 2020 12:26:33 +0100 Subject: [PATCH 202/263] Update test_pubsub.py to upstream version. Block out all tests on the class level instead of having a few open and a few closed inside each class. 
PubSub is not viable or should be used really. --- tests/test_pubsub.py | 122 ++++++++--------------------------- tests/test_pubsub_cluster.py | 1 + 2 files changed, 28 insertions(+), 95 deletions(-) create mode 100644 tests/test_pubsub_cluster.py diff --git a/tests/test_pubsub.py b/tests/test_pubsub.py index 4b254b6d..dcbf85a2 100644 --- a/tests/test_pubsub.py +++ b/tests/test_pubsub.py @@ -63,9 +63,11 @@ def make_subscribe_test_data(pubsub, type): assert False, 'invalid subscribe type: %s' % type +@pytest.mark.xfail(reason="Pubsub is not supported/working ina redis cluster") class TestPubSubSubscribeUnsubscribe(object): - def _test_subscribe_unsubscribe(self, p, sub_type, unsub_type, sub_func, unsub_func, keys): + def _test_subscribe_unsubscribe(self, p, sub_type, unsub_type, sub_func, + unsub_func, keys): for key in keys: assert sub_func(key) is None @@ -82,17 +84,17 @@ def _test_subscribe_unsubscribe(self, p, sub_type, unsub_type, sub_func, unsub_f i = len(keys) - 1 - i assert wait_for_message(p) == make_message(unsub_type, key, i) - @pytest.mark.xfail(reason="Pubsub is not fully supported in cluster mode") def test_channel_subscribe_unsubscribe(self, r): kwargs = make_subscribe_test_data(r.pubsub(), 'channel') self._test_subscribe_unsubscribe(**kwargs) - @pytest.mark.xfail(reason="Pubsub is not fully supported in cluster mode") def test_pattern_subscribe_unsubscribe(self, r): kwargs = make_subscribe_test_data(r.pubsub(), 'pattern') self._test_subscribe_unsubscribe(**kwargs) - def _test_resubscribe_on_reconnection(self, p, sub_type, unsub_type, sub_func, unsub_func, keys): + def _test_resubscribe_on_reconnection(self, p, sub_type, unsub_type, + sub_func, unsub_func, keys): + for key in keys: assert sub_func(key) is None @@ -123,17 +125,17 @@ def _test_resubscribe_on_reconnection(self, p, sub_type, unsub_type, sub_func, u for channel in unique_channels: assert channel in keys - @pytest.mark.xfail(reason="Pubsub is not fully supported in cluster mode") def test_resubscribe_to_channels_on_reconnection(self, r): kwargs = make_subscribe_test_data(r.pubsub(), 'channel') self._test_resubscribe_on_reconnection(**kwargs) - @pytest.mark.xfail(reason="Pubsub is not fully supported in cluster mode") def test_resubscribe_to_patterns_on_reconnection(self, r): kwargs = make_subscribe_test_data(r.pubsub(), 'pattern') self._test_resubscribe_on_reconnection(**kwargs) - def _test_subscribed_property(self, p, sub_type, unsub_type, sub_func, unsub_func, keys): + def _test_subscribed_property(self, p, sub_type, unsub_type, sub_func, + unsub_func, keys): + assert p.subscribed is False sub_func(keys[0]) # we're now subscribed even though we haven't processed the @@ -177,43 +179,22 @@ def _test_subscribed_property(self, p, sub_type, unsub_type, sub_func, unsub_fun # now we're finally unsubscribed assert p.subscribed is False - @pytest.mark.xfail(reason="Pubsub is not fully supported in cluster mode") def test_subscribe_property_with_channels(self, r): kwargs = make_subscribe_test_data(r.pubsub(), 'channel') self._test_subscribed_property(**kwargs) - @pytest.mark.xfail(reason="Pubsub is not fully supported in cluster mode") def test_subscribe_property_with_patterns(self, r): kwargs = make_subscribe_test_data(r.pubsub(), 'pattern') self._test_subscribed_property(**kwargs) - @pytest.mark.xfail(reason="Pubsub is not fully supported in cluster mode") def test_ignore_all_subscribe_messages(self, r): p = r.pubsub(ignore_subscribe_messages=True) checks = ( (p.subscribe, 'foo'), (p.unsubscribe, 'foo'), - # 
(p.psubscribe, 'f*'), - # (p.punsubscribe, 'f*'), - ) - - assert p.subscribed is False - for func, channel in checks: - assert func(channel) is None - assert p.subscribed is True - assert wait_for_message(p) is None - assert p.subscribed is False - - @pytest.mark.xfail(reason="Pubsub is not fully supported in cluster mode") - def test_ignore_individual_subscribe_messages(self, r): - p = r.pubsub(ignore_subscribe_messages=True) - - checks = ( - (p.subscribe, 'foo'), - (p.unsubscribe, 'foo'), - # (p.psubscribe, 'f*'), - # (p.punsubscribe, 'f*'), + (p.psubscribe, 'f*'), + (p.punsubscribe, 'f*'), ) assert p.subscribed is False @@ -223,15 +204,14 @@ def test_ignore_individual_subscribe_messages(self, r): assert wait_for_message(p) is None assert p.subscribed is False - @pytest.mark.xfail(reason="Pubsub is not fully supported in cluster mode") def test_ignore_individual_subscribe_messages(self, r): p = r.pubsub() checks = ( (p.subscribe, 'foo'), (p.unsubscribe, 'foo'), - # (p.psubscribe, 'f*'), - # (p.punsubscribe, 'f*'), + (p.psubscribe, 'f*'), + (p.punsubscribe, 'f*'), ) assert p.subscribed is False @@ -242,18 +222,16 @@ def test_ignore_individual_subscribe_messages(self, r): assert message is None assert p.subscribed is False - @pytest.mark.xfail(reason="Pubsub is not fully supported in cluster mode") def test_sub_unsub_resub_channels(self, r): kwargs = make_subscribe_test_data(r.pubsub(), 'channel') self._test_sub_unsub_resub(**kwargs) - @pytest.mark.xfail(reason="Pubsub is not fully supported in cluster mode") def test_sub_unsub_resub_patterns(self, r): kwargs = make_subscribe_test_data(r.pubsub(), 'pattern') self._test_sub_unsub_resub(**kwargs) - @pytest.mark.xfail(reason="Pubsub is not fully supported in cluster mode") - def _test_sub_unsub_resub(self, p, sub_type, unsub_type, sub_func, unsub_func, keys): + def _test_sub_unsub_resub(self, p, sub_type, unsub_type, sub_func, + unsub_func, keys): # https://github.com/andymccurdy/redis-py/issues/764 key = keys[0] sub_func(key) @@ -265,18 +243,16 @@ def _test_sub_unsub_resub(self, p, sub_type, unsub_type, sub_func, unsub_func, k assert wait_for_message(p) == make_message(sub_type, key, 1) assert p.subscribed is True - @pytest.mark.xfail(reason="Pubsub is not fully supported in cluster mode") def test_sub_unsub_all_resub_channels(self, r): kwargs = make_subscribe_test_data(r.pubsub(), 'channel') self._test_sub_unsub_all_resub(**kwargs) - @pytest.mark.xfail(reason="Pubsub is not fully supported in cluster mode") def test_sub_unsub_all_resub_patterns(self, r): kwargs = make_subscribe_test_data(r.pubsub(), 'pattern') self._test_sub_unsub_all_resub(**kwargs) - @pytest.mark.xfail(reason="Pubsub is not fully supported in cluster mode") - def _test_sub_unsub_all_resub(self, p, sub_type, unsub_type, sub_func, unsub_func, keys): + def _test_sub_unsub_all_resub(self, p, sub_type, unsub_type, sub_func, + unsub_func, keys): # https://github.com/andymccurdy/redis-py/issues/764 key = keys[0] sub_func(key) @@ -289,25 +265,14 @@ def _test_sub_unsub_all_resub(self, p, sub_type, unsub_type, sub_func, unsub_fun assert p.subscribed is True +@pytest.mark.xfail(reason="Pubsub is not supported/working ina redis cluster") class TestPubSubMessages(object): - """ - Bug: Currently in cluster mode publish command will behave different then in - standard/non cluster mode. See (docs/Pubsub.md) for details. - - Currently Redis instances will be used to test pubsub because they - are easier to work with. 
- """ - - def get_strict_redis_node(self, port, host="127.0.0.1"): - return Redis(port=port, host=host) - - def setup_method(self, *args): + def setup_method(self, method): self.message = None def message_handler(self, message): self.message = message - @pytest.mark.xfail(reason="Pubsub is not fully supported in cluster mode") def test_published_message_to_channel(self, r): p = r.pubsub() p.subscribe('foo') @@ -318,27 +283,6 @@ def test_published_message_to_channel(self, r): assert isinstance(message, dict) assert message == make_message('message', 'foo', 'test message') - @pytest.mark.xfail(reason="Pubsub is not fully supported in cluster mode") - def test_publish_message_to_channel_other_server(self): - """ - Test that pubsub still works across the cluster on different nodes - """ - node_subscriber = self.get_strict_redis_node(7000) - p = node_subscriber.pubsub(ignore_subscribe_messages=True) - p.subscribe('foo') - - node_sender = self.get_strict_redis_node(7001) - # This should return 0 because of no connected clients to this server. - assert node_sender.publish('foo', 'test message') == 0 - - message = wait_for_message(p) - assert isinstance(message, dict) - assert message == make_message('message', 'foo', 'test message') - - # Cleanup pubsub connections - p.close() - - @pytest.mark.xfail(reason="Pubsub is not fully supported in cluster mode") def test_published_message_to_pattern(self, r): p = r.pubsub() p.subscribe('foo') @@ -362,7 +306,6 @@ def test_published_message_to_pattern(self, r): assert message2 in expected assert message1 != message2 - @pytest.mark.xfail(reason="Pubsub is not fully supported in cluster mode") def test_channel_message_handler(self, r): p = r.pubsub(ignore_subscribe_messages=True) p.subscribe(foo=self.message_handler) @@ -371,7 +314,6 @@ def test_channel_message_handler(self, r): assert wait_for_message(p) is None assert self.message == make_message('message', 'foo', 'test message') - @pytest.mark.xfail(reason="Pubsub is not fully supported in cluster mode") def test_pattern_message_handler(self, r): p = r.pubsub(ignore_subscribe_messages=True) p.psubscribe(**{'f*': self.message_handler}) @@ -381,7 +323,6 @@ def test_pattern_message_handler(self, r): assert self.message == make_message('pmessage', 'foo', 'test message', pattern='f*') - @pytest.mark.xfail(reason="Pubsub is not fully supported in cluster mode") def test_unicode_channel_message_handler(self, r): p = r.pubsub(ignore_subscribe_messages=True) channel = 'uni' + unichr(4456) + 'code' @@ -392,7 +333,6 @@ def test_unicode_channel_message_handler(self, r): assert wait_for_message(p) is None assert self.message == make_message('message', channel, 'test message') - @pytest.mark.xfail(reason="Pubsub is not fully supported in cluster mode") def test_unicode_pattern_message_handler(self, r): p = r.pubsub(ignore_subscribe_messages=True) pattern = 'uni' + unichr(4456) + '*' @@ -404,7 +344,6 @@ def test_unicode_pattern_message_handler(self, r): assert self.message == make_message('pmessage', channel, 'test message', pattern=pattern) - @pytest.mark.xfail(reason="Pubsub is not fully supported in cluster mode") def test_get_message_without_subscribe(self, r): p = r.pubsub() with pytest.raises(RuntimeError) as info: @@ -414,6 +353,7 @@ def test_get_message_without_subscribe(self, r): assert expect in info.exconly() +@pytest.mark.xfail(reason="Pubsub is not supported/working ina redis cluster") class TestPubSubAutoDecoding(object): "These tests only validate that we get unicode values back" @@ -429,7 +369,7 @@ def 
make_message(self, type, channel, data, pattern=None): 'data': data } - def setup_method(self, *args): + def setup_method(self, method): self.message = None def message_handler(self, message): @@ -439,7 +379,6 @@ def message_handler(self, message): def r(self, request): return _get_client(redis.Redis, request=request, decode_responses=True) - @pytest.mark.xfail(reason="Pubsub is not fully supported in cluster mode") def test_channel_subscribe_unsubscribe(self, r): p = r.pubsub() p.subscribe(self.channel) @@ -450,7 +389,6 @@ def test_channel_subscribe_unsubscribe(self, r): assert wait_for_message(p) == self.make_message('unsubscribe', self.channel, 0) - @pytest.mark.xfail(reason="Pubsub is not fully supported in cluster mode") def test_pattern_subscribe_unsubscribe(self, r): p = r.pubsub() p.psubscribe(self.pattern) @@ -461,7 +399,6 @@ def test_pattern_subscribe_unsubscribe(self, r): assert wait_for_message(p) == self.make_message('punsubscribe', self.pattern, 0) - @pytest.mark.xfail(reason="Pubsub is not fully supported in cluster mode") def test_channel_publish(self, r): p = r.pubsub() p.subscribe(self.channel) @@ -472,7 +409,6 @@ def test_channel_publish(self, r): self.channel, self.data) - @pytest.mark.xfail(reason="Pubsub is not fully supported in cluster mode") def test_pattern_publish(self, r): p = r.pubsub() p.psubscribe(self.pattern) @@ -484,7 +420,6 @@ def test_pattern_publish(self, r): self.data, pattern=self.pattern) - @pytest.mark.xfail(reason="Pubsub is not fully supported in cluster mode") def test_channel_message_handler(self, r): p = r.pubsub(ignore_subscribe_messages=True) p.subscribe(**{self.channel: self.message_handler}) @@ -504,7 +439,6 @@ def test_channel_message_handler(self, r): assert self.message == self.make_message('message', self.channel, new_data) - @pytest.mark.xfail(reason="Pubsub is not fully supported in cluster mode") def test_pattern_message_handler(self, r): p = r.pubsub(ignore_subscribe_messages=True) p.psubscribe(**{self.pattern: self.message_handler}) @@ -526,7 +460,6 @@ def test_pattern_message_handler(self, r): new_data, pattern=self.pattern) - @pytest.mark.xfail(reason="Pubsub is not fully supported in cluster mode") def test_context_manager(self, r): with r.pubsub() as pubsub: pubsub.subscribe('foo') @@ -537,6 +470,7 @@ def test_context_manager(self, r): assert pubsub.patterns == {} +@pytest.mark.xfail(reason="Pubsub is not supported/working ina redis cluster") class TestPubSubRedisDown(object): def test_channel_subscribe(self, r): @@ -546,6 +480,7 @@ def test_channel_subscribe(self, r): p.subscribe('foo') +@pytest.mark.xfail(reason="Pubsub is not supported/working ina redis cluster") def test_pubsub_thread_publish(): """ This test will never fail but it will still show and be viable to use @@ -587,10 +522,10 @@ def t_run(rc): print("Error: unable to start thread") +@pytest.mark.xfail(reason="Pubsub is not supported/working ina redis cluster") class TestPubSubSubcommands(object): @skip_if_server_version_lt('2.8.0') - @pytest.mark.xfail(reason="Pubsub is not fully supported in cluster mode") def test_pubsub_channels(self, r): p = r.pubsub() p.subscribe('foo', 'bar', 'baz', 'quux') @@ -603,7 +538,6 @@ def test_pubsub_channels(self, r): pdb.set_trace() @skip_if_server_version_lt('2.8.0') - @pytest.mark.xfail(reason="Pubsub is not fully supported in cluster mode") def test_pubsub_numsub(self, r): p1 = r.pubsub() p1.subscribe('foo', 'bar', 'baz') @@ -621,7 +555,6 @@ def test_pubsub_numsub(self, r): assert channels == r.pubsub_numsub('foo', 'bar', 
'baz') @skip_if_server_version_lt('2.8.0') - @pytest.mark.xfail(reason="Pubsub is not fully supported in cluster mode") def test_pubsub_numpat(self, r): p = r.pubsub() p.psubscribe('*oo', '*ar', 'b*z') @@ -630,10 +563,10 @@ def test_pubsub_numpat(self, r): assert r.pubsub_numpat() == 3 +@pytest.mark.xfail(reason="Pubsub is not supported/working ina redis cluster") class TestPubSubPings(object): @skip_if_server_version_lt('3.0.0') - @pytest.mark.xfail(reason="Pubsub is not fully supported in cluster mode") def test_send_pubsub_ping(self, r): p = r.pubsub(ignore_subscribe_messages=True) p.subscribe('foo') @@ -643,7 +576,6 @@ def test_send_pubsub_ping(self, r): pattern=None) @skip_if_server_version_lt('3.0.0') - @pytest.mark.xfail(reason="Pattern pubsub is not fully supported in cluster mode") def test_send_pubsub_ping_message(self, r): p = r.pubsub(ignore_subscribe_messages=True) p.subscribe('foo') @@ -653,10 +585,10 @@ def test_send_pubsub_ping_message(self, r): pattern=None) +@pytest.mark.xfail(reason="Pubsub is not supported/working ina redis cluster") class TestPubSubConnectionKilled(object): @skip_if_server_version_lt('3.0.0') - @pytest.mark.xfail(reason="Pubsub is not fully supported in cluster mode") def test_connection_error_raised_when_connection_dies(self, r): p = r.pubsub() p.subscribe('foo') @@ -668,8 +600,8 @@ def test_connection_error_raised_when_connection_dies(self, r): wait_for_message(p) +@pytest.mark.xfail(reason="Pubsub is not supported/working ina redis cluster") class TestPubSubTimeouts(object): - @pytest.mark.xfail(reason="Pubsub is not fully supported in cluster mode") def test_get_message_with_timeout_returns_none(self, r): p = r.pubsub() p.subscribe('foo') diff --git a/tests/test_pubsub_cluster.py b/tests/test_pubsub_cluster.py new file mode 100644 index 00000000..0c1bbac1 --- /dev/null +++ b/tests/test_pubsub_cluster.py @@ -0,0 +1 @@ +# All pubsub tests is blocked out as pubsub do not work properly in a clustered environment From 99b917544f3828c051b74fa30586e1dedf2b3476 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Tue, 10 Nov 2020 12:28:11 +0100 Subject: [PATCH 203/263] Rename test file --- tests/{test_node_manager.py => test_cluster_node_manager.py} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename tests/{test_node_manager.py => test_cluster_node_manager.py} (100%) diff --git a/tests/test_node_manager.py b/tests/test_cluster_node_manager.py similarity index 100% rename from tests/test_node_manager.py rename to tests/test_cluster_node_manager.py From 5bd23d86f02abbeac1b599059111ffe48412b8ec Mon Sep 17 00:00:00 2001 From: Grokzen Date: Tue, 10 Nov 2020 12:28:54 +0100 Subject: [PATCH 204/263] Add empty placeholder files for cluster versions of testfiles that might be populated at later time --- tests/test_lock_cluster.py | 1 + tests/test_monitor_cluster.py | 1 + 2 files changed, 2 insertions(+) create mode 100644 tests/test_lock_cluster.py create mode 100644 tests/test_monitor_cluster.py diff --git a/tests/test_lock_cluster.py b/tests/test_lock_cluster.py new file mode 100644 index 00000000..0c1bbac1 --- /dev/null +++ b/tests/test_lock_cluster.py @@ -0,0 +1 @@ +# All pubsub tests is blocked out as pubsub do not work properly in a clustered environment diff --git a/tests/test_monitor_cluster.py b/tests/test_monitor_cluster.py new file mode 100644 index 00000000..0c1bbac1 --- /dev/null +++ b/tests/test_monitor_cluster.py @@ -0,0 +1 @@ +# All pubsub tests is blocked out as pubsub do not work properly in a clustered environment From 
d983b053a6610fe7591029f65e753939676a45bc Mon Sep 17 00:00:00 2001 From: Grokzen Date: Tue, 10 Nov 2020 12:35:20 +0100 Subject: [PATCH 205/263] Add empty placeholder files for cluster version of testfiles that might be populated at later time --- tests/test_scripting_cluster.py | 1 + tests/test_utils_cluster.py | 1 + 2 files changed, 2 insertions(+) create mode 100644 tests/test_scripting_cluster.py create mode 100644 tests/test_utils_cluster.py diff --git a/tests/test_scripting_cluster.py b/tests/test_scripting_cluster.py new file mode 100644 index 00000000..0c1bbac1 --- /dev/null +++ b/tests/test_scripting_cluster.py @@ -0,0 +1 @@ +# All pubsub tests is blocked out as pubsub do not work properly in a clustered environment diff --git a/tests/test_utils_cluster.py b/tests/test_utils_cluster.py new file mode 100644 index 00000000..0c1bbac1 --- /dev/null +++ b/tests/test_utils_cluster.py @@ -0,0 +1 @@ +# All pubsub tests is blocked out as pubsub do not work properly in a clustered environment From 25b7e7dc86ed7d6402f8222ec9f265df8cc6d08e Mon Sep 17 00:00:00 2001 From: Grokzen Date: Sat, 16 May 2020 12:43:13 +0200 Subject: [PATCH 206/263] WIP debugging exception in travis tests --- tests/test_cluster_node_manager.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/test_cluster_node_manager.py b/tests/test_cluster_node_manager.py index c8dbcb90..f6852b26 100644 --- a/tests/test_cluster_node_manager.py +++ b/tests/test_cluster_node_manager.py @@ -300,8 +300,8 @@ def test_cluster_slots_error_expected_responseerror(): n = NodeManager(startup_nodes=[{"host": "127.0.0.1", "port": 7000}]) - with pytest.raises(RedisClusterException) as e: - n.initialize() + # with pytest.raises(RedisClusterException) as e: + n.initialize() assert 'Redis Cluster cannot be connected' in unicode(e) From 3d56bcbddc2aa22a1397fc51fa46974e1ac97f0c Mon Sep 17 00:00:00 2001 From: Grokzen Date: Sat, 16 May 2020 12:49:16 +0200 Subject: [PATCH 207/263] Fix lint and validation warnings/issues in travis.yml file --- .travis.yml | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/.travis.yml b/.travis.yml index d8c15210..99277850 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,4 +1,4 @@ -sudo: false +os: linux dist: xenial language: python cache: pip @@ -10,7 +10,7 @@ python: - "3.8" - "nightly" services: - - redis-server + - redis install: - "if [[ $REDIS_VERSION == '3.0' ]]; then REDIS_VERSION=3.0 make redis-install; fi" - "if [[ $REDIS_VERSION == '3.2' ]]; then REDIS_VERSION=3.2 make redis-install; fi" @@ -49,7 +49,7 @@ after_success: - coverage combine - coveralls - "if [[ $TEST_PYCODESTYLE == '1' ]]; then pycodestyle --repeat --show-source --exclude=.venv,.tox,dist,docs,build,*.egg,redis_install .; fi" -matrix: +jobs: allow_failures: - python: "nightly" - python: 2.7 @@ -60,9 +60,7 @@ matrix: # https://github.com/travis-ci/travis-ci/issues/9815 - python: 3.7 dist: xenial - sudo: true env: TEST_HIREDIS=0 - python: 3.7 dist: xenial - sudo: true env: TEST_HIREDIS=1 From fdcc57e50d9aee3e59962ea7a9528341a0e1ca1e Mon Sep 17 00:00:00 2001 From: Grokzen Date: Sat, 16 May 2020 13:01:24 +0200 Subject: [PATCH 208/263] No need to run pytest twice and to kill off the servers as travis will kill it for us at the end of the run anyway --- .travis.yml | 2 -- 1 file changed, 2 deletions(-) diff --git a/.travis.yml b/.travis.yml index 99277850..d6e4ab74 100644 --- a/.travis.yml +++ b/.travis.yml @@ -43,8 +43,6 @@ script: - make start - coverage erase - coverage run --source rediscluster -p -m 
py.test - - py.test - - make stop after_success: - coverage combine - coveralls From dd8eb9b3040bd92919fedf35284dca9e3e2f36b4 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Sat, 16 May 2020 13:55:57 +0200 Subject: [PATCH 209/263] possible fix for the two broken tests that breaks in python 3.8 for unknown reasons --- tests/test_cluster_node_manager.py | 23 +++++++++++++++-------- 1 file changed, 15 insertions(+), 8 deletions(-) diff --git a/tests/test_cluster_node_manager.py b/tests/test_cluster_node_manager.py index f6852b26..bdc9cc09 100644 --- a/tests/test_cluster_node_manager.py +++ b/tests/test_cluster_node_manager.py @@ -282,12 +282,16 @@ def test_cluster_slots_error(): with patch.object(Redis, 'execute_command') as execute_command_mock: execute_command_mock.side_effect = Exception("foobar") - n = NodeManager(startup_nodes=[{"host": "127.0.0.1", "port": 7000}]) + with pytest.raises(RedisClusterException): + n = NodeManager(startup_nodes=[{"host": "127.0.0.1", "port": 7000}]) + n.initialize() - with pytest.raises(RedisClusterException) as e: + try: + n = NodeManager(startup_nodes=[{"host": "127.0.0.1", "port": 7000}]) n.initialize() + except RedisClusterException as e: + assert "ERROR sending 'cluster slots' command" in e.args[0] - assert "ERROR sending 'cluster slots' command" in unicode(e) def test_cluster_slots_error_expected_responseerror(): @@ -298,12 +302,15 @@ def test_cluster_slots_error_expected_responseerror(): with patch.object(Redis, 'execute_command') as execute_command_mock: execute_command_mock.side_effect = ResponseError("MASTERDOWN") - n = NodeManager(startup_nodes=[{"host": "127.0.0.1", "port": 7000}]) - - # with pytest.raises(RedisClusterException) as e: - n.initialize() + with pytest.raises(RedisClusterException): + n = NodeManager(startup_nodes=[{"host": "127.0.0.1", "port": 7000}]) + n.initialize() - assert 'Redis Cluster cannot be connected' in unicode(e) + try: + n = NodeManager(startup_nodes=[{"host": "127.0.0.1", "port": 7000}]) + n.initialize() + except RedisClusterException as e: + assert "Redis Cluster cannot be connected" in e.args[0] def test_set_node(): From a5de9431d733979cbc04075564d5eee929d1d66c Mon Sep 17 00:00:00 2001 From: Grokzen Date: Mon, 18 May 2020 17:29:40 +0200 Subject: [PATCH 210/263] Add python2 compatibility note to README --- README.md | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/README.md b/README.md index 257e4098..d12b5b52 100644 --- a/README.md +++ b/README.md @@ -9,6 +9,16 @@ This project is a port of `redis-rb-cluster` by antirez, with a lot of added fun The branch `master` will always contain the latest unstable/development code that has been merged from Pull Requests. Use the latest commit from master branch on your own risk, there is no guarantees of compatibility or stability of non tagged commits on the master branch. Only tagged releases on the `master` branch is considered stable for use. +# Python 2 Compatibility Note + +This library follows the announced change from our upstream package redis-py. Due to this, +we will follow the same python 2.7 deprecation timeline as stated in there. + +redis-py-cluster 2.1.x will be the last major version release that supports Python 2.7. +The 2.1.x line will continue to get bug fixes and security patches that +support Python 2 until August 1, 2020. redis-py-cluster 3.0.x will be the next major +version and will require Python 3.5+. 
+ # Documentation From 42a9c1a908a456c3a6f989a2ea47f3f77bfc5814 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Mon, 18 May 2020 17:44:51 +0200 Subject: [PATCH 211/263] Make python 3.5 working again when building and installing this package --- setup.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 706293b6..f58c7f17 100644 --- a/setup.py +++ b/setup.py @@ -34,7 +34,7 @@ install_requires=[ 'redis>=3.0.0,<4.0.0' ], - python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4, !=3.5", + python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4", extras_require={ 'hiredis': [ "hiredis>=0.1.3", @@ -57,6 +57,7 @@ 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', + 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', From 52d5834cc8ac5c6496032575748736a49f1cbbb0 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Mon, 18 May 2020 17:50:09 +0200 Subject: [PATCH 212/263] Update release notes with all the changes that has currently been done and is in the code working --- docs/release-notes.rst | 25 +++++++++++++++++++++++-- 1 file changed, 23 insertions(+), 2 deletions(-) diff --git a/docs/release-notes.rst b/docs/release-notes.rst index b727479e..c9a7b657 100644 --- a/docs/release-notes.rst +++ b/docs/release-notes.rst @@ -1,14 +1,35 @@ Release Notes ============= -2.1.0 (xxx yy, 2020) +2.1.0 (May **, 2020) +-------------------- * Add new config option for Client and Pipeline classes to controll how many attempts will be made before bailing out from a ClusterDownError. Use "cluster_down_retry_attempts=" when creating the client class to controll this behaviour. - * Updated redis-py compatbile version to support any version in the major version 3.0.x, 3.1.x, 3.2.x, 3.3.x. (#326) + * Updated redis-py compatbile version to support any version in the major version 3.0.x, 3.1.x, 3.2.x, 3.3.x., 3.4.x, 3.5.x (#326) + It is always recommended to use the latest version of redis-py to avoid issues and compatiblity problems. * Fixed bug preventing reinitialization after getting MOVED errors + * Add testing of redis-esrver 6.0 versions to travis and unit tests + * Add python 2.7 compatiblity note about deprecation and upcomming changes in python 2.7 support for this lib + * Updated tests and cluster tests versions of the same methods to latest tests from upstream redis-py package + * Reorganized tests and how cluster specific tests is written and run over the upstream version of the same test to make it easier + and much faster to update and keep them in sync over time going into the future (#368) + * Python 3.5.x or higher is now required if running on a python 3 version + * Removed the monkeypatching of RedisCluster, ClusterPubSub & ClusterPipeline class names into the "redis" python package namespace during runtime. 
+ They are now exposed in the "rediscluster" namespace to mimic the same feature from redis-py + * cluster_down_retry_attempts can now be configured to any value when creating RedisCluster instance + * Creating RedisCluster from unix socket url:s has been disabled + * Patch the from_url method to use the corret cluster version of the same Connection class + * ConnectionError and TimeoutError is now handled seperately in the main execute loop to better handle each case (#363) + * Update scan_iter custom cluster implementation + * Improve description_format handling for connection classes to simplify how they work + * Implement new connection pool ClusterBlockingConnectionPool (#347) + * Nodemanager initiailize should now handle usernames properly (#365) + * PubSub tests has been all been disabled + 2.0.0 (Aug 12, 2019) +-------------------- Specific changes to redis-py-cluster is mentioned below here. From 19f53714de157547d7eedeae4ec2981047170f99 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Thu, 21 May 2020 00:18:10 +0200 Subject: [PATCH 213/263] Add section about documentation into the contributing document that is presented and used by github --- CONTRIBUTING.md | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 7ed77e11..e7c454b8 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -49,6 +49,30 @@ print("foobar {barfoo} {qwert}".format( +# Documentation + +This project currently uses RST files and sphinx to build the documentation and to allow for it to be hosted on ReadTheDocs. + +To test your documentation changes you must first install sphinx and sphinx-reload to render and view the docs files on your local machine before commiting them to this repo. + +Install the dependencies inside a python virtualenv + +``` +pip install sphinx sphinx-reload +``` + +To start the local webbserver and render the docs folder, run from the root of this project + +``` +sphinx-reload docs/ +``` + +It will open up the rendered website in your browser automatically. + +At some point in the future the docs format will change from RST to MkDocs. + + + # Tests I (Johan/Grokzen) have been allowed (by andymccurdy) explicitly to use all test code that already exists inside `redis-py` lib. If possible you should reuse code that exists in there. From 9321d49660fe96696892660436aeeb2f7a2d5cb1 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Thu, 21 May 2020 00:48:27 +0200 Subject: [PATCH 214/263] Implement new feature where remapping the response from "cluster nodes" is now possible to allow for a user to change what ip and port the client thinks the cluster is reachable at. This solves a few use-cases where clients can't connect or figure out how to connect to a cluster after it has reached the initial startup nodes set. Use feature with caution. 
--- docs/client.rst | 56 ++++++++++++++++++++ docs/index.rst | 1 + docs/release-notes.rst | 1 + rediscluster/client.py | 5 +- rediscluster/connection.py | 4 +- rediscluster/exceptions.py | 6 +++ rediscluster/nodemanager.py | 52 +++++++++++++++++- tests/test_cluster_node_manager.py | 84 +++++++++++++++++++++++++++++- 8 files changed, 203 insertions(+), 6 deletions(-) create mode 100644 docs/client.rst diff --git a/docs/client.rst b/docs/client.rst new file mode 100644 index 00000000..b9817de8 --- /dev/null +++ b/docs/client.rst @@ -0,0 +1,56 @@ +RedisCluster client configuration options +========================================= + +This chapter is supposed to describe all the configuration options and flags that can be sent into the RedisCluster class instance. + +Each option will be described in a seperate topic to describe how it works and what it does. This will only describe any options that does anything else when compared to redis-py, or new options that is cluster specific. + + + +host_port_remap +--------------- + +This option exists to enable the client to fix a problem where the redis-server internally tracks a different ip:port compared to what your clients would like to connect to. + +The simples example to describe this problem is if you start a redis cluster through docker on your local machine. If we assume that you start the docker image grokzen/redis-cluster, when the redis cluster is initialized it will track the docker network IP for each node in the cluster. + +For example this could be 172.18.0.2. The problem is that a client that runs outside on your local machine will recieve from the redis cluster that each node is reachable on the ip 172.18.0.2. But in some cases this IP is not available on your host system and to solve this we need a remapping table where we can tell this client that if you get back from your cluster 172.18.0.2 then your should remap it to localhost instead. When the client does this it can now connect and reach all nodes in your cluster. + +It is also possible to remap the port for each node as well. + +Example script + +.. code-block:: python + + from rediscluster import RedisCluster + + startup_nodes = [{"host": "127.0.0.1", "port": "7000"}] + + rc = RedisCluster( + startup_nodes=startup_nodes, + decode_responses=True, + host_port_remap=[ + { + 'from_host': '172.18.0.2', + 'from_port': 7000, + 'to_host': 'localhost', + 'to_port': 7000, + }, + { + 'from_host': '172.22.0.1', + 'from_port': 7000, + 'to_host': 'localhost', + 'to_port': 7000, + }, + ] + ) + + ## Debug output to show the client config/setup after client has been initialized. + ## It should point to localhost:7000 for those nodes. + print(rc.connection_pool.nodes.nodes) + + ## Test the client that it can still send and recieve data from the nodes after the remap has been done + print(rc.set('foo', 'bar')) + + +Pleaes note that this host_port_remap feature will not work on the startup_nodes so you still need to put in a valid and reachable set of startup nodes. 
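As a complementary sketch, assuming a hypothetical setup where every node is announced on its own internal address in the 172.18.0.0/16 range but is reachable from the host machine as localhost on the same port, the remap list can also be built programmatically. Rules that only translate the host part validate fine, because from_port/to_port are only required as a pair:

.. code-block:: python

    from rediscluster import RedisCluster

    # Hypothetical layout: six nodes announced as 172.18.0.2 .. 172.18.0.7,
    # all reachable from the host as localhost on their original ports.
    host_port_remap = [
        {'from_host': '172.18.0.{0}'.format(i), 'to_host': 'localhost'}
        for i in range(2, 8)
    ]

    rc = RedisCluster(
        startup_nodes=[{"host": "localhost", "port": "7000"}],
        host_port_remap=host_port_remap,
        decode_responses=True,
    )

    # The node table should now only contain localhost entries
    print(rc.connection_pool.nodes.nodes)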
diff --git a/docs/index.rst b/docs/index.rst index bf884f6c..8933cbc2 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -127,6 +127,7 @@ The Usage Guide :maxdepth: 2 :glob: + client commands limitations-and-differences pipelines diff --git a/docs/release-notes.rst b/docs/release-notes.rst index c9a7b657..68323e14 100644 --- a/docs/release-notes.rst +++ b/docs/release-notes.rst @@ -26,6 +26,7 @@ Release Notes * Implement new connection pool ClusterBlockingConnectionPool (#347) * Nodemanager initiailize should now handle usernames properly (#365) * PubSub tests has been all been disabled + * New feature, host_port_remap. Send in a remapping configuration to RedisCluster instance where the nodes configuration recieved from the redis cluster can be altered to allow for connection in certain circumstances. See new section in clients.rst in docs/ for usage example. 2.0.0 (Aug 12, 2019) diff --git a/rediscluster/client.py b/rediscluster/client.py index b6646b63..847a47e5 100644 --- a/rediscluster/client.py +++ b/rediscluster/client.py @@ -291,7 +291,7 @@ class RedisCluster(Redis): def __init__(self, host=None, port=None, startup_nodes=None, max_connections=None, max_connections_per_node=False, init_slot_cache=True, readonly_mode=False, reinitialize_steps=None, skip_full_coverage_check=False, nodemanager_follow_cluster=False, - connection_class=None, read_from_replicas=False, cluster_down_retry_attempts=3, **kwargs): + connection_class=None, read_from_replicas=False, cluster_down_retry_attempts=3, host_port_remap=None, **kwargs): """ :startup_nodes: List of nodes that initial bootstrapping can be done from @@ -349,6 +349,7 @@ def __init__(self, host=None, port=None, startup_nodes=None, max_connections=Non skip_full_coverage_check=skip_full_coverage_check, nodemanager_follow_cluster=nodemanager_follow_cluster, connection_class=connection_class, + host_port_remap=host_port_remap, **kwargs ) @@ -724,7 +725,7 @@ def cluster_failover(self, node_id, option): Sends to specefied node """ assert option.upper() in ('FORCE', 'TAKEOVER') # TODO: change this option handling - return self.execute_command('CLUSTER FAILOVER', option) + return self.execute_command('CLUSTER FAILOVER', option, node_id=node_id) def cluster_info(self): """ diff --git a/rediscluster/connection.py b/rediscluster/connection.py index f63a9d53..1a91bb79 100644 --- a/rediscluster/connection.py +++ b/rediscluster/connection.py @@ -95,7 +95,8 @@ class ClusterConnectionPool(ConnectionPool): def __init__(self, startup_nodes=None, init_slot_cache=True, connection_class=None, max_connections=None, max_connections_per_node=False, reinitialize_steps=None, - skip_full_coverage_check=False, nodemanager_follow_cluster=False, **connection_kwargs): + skip_full_coverage_check=False, nodemanager_follow_cluster=False, host_port_remap=None, + **connection_kwargs): """ :skip_full_coverage_check: Skips the check of cluster-require-full-coverage config, useful for clusters @@ -131,6 +132,7 @@ def __init__(self, startup_nodes=None, init_slot_cache=True, connection_class=No skip_full_coverage_check=skip_full_coverage_check, max_connections=self.max_connections, nodemanager_follow_cluster=nodemanager_follow_cluster, + host_port_remap=host_port_remap, **connection_kwargs ) diff --git a/rediscluster/exceptions.py b/rediscluster/exceptions.py index ea4a60a0..0637d143 100644 --- a/rediscluster/exceptions.py +++ b/rediscluster/exceptions.py @@ -5,6 +5,12 @@ ) +class RedisClusterConfigError(Exception): + """ + """ + pass + + class RedisClusterException(Exception): 
""" """ diff --git a/rediscluster/nodemanager.py b/rediscluster/nodemanager.py index 9fe19f66..e587a837 100644 --- a/rediscluster/nodemanager.py +++ b/rediscluster/nodemanager.py @@ -5,7 +5,7 @@ # rediscluster imports from .crc import crc16 -from .exceptions import RedisClusterException +from .exceptions import RedisClusterException, RedisClusterConfigError # 3rd party imports from redis import Redis @@ -19,7 +19,8 @@ class NodeManager(object): """ RedisClusterHashSlots = 16384 - def __init__(self, startup_nodes=None, reinitialize_steps=None, skip_full_coverage_check=False, nodemanager_follow_cluster=False, **connection_kwargs): + def __init__(self, startup_nodes=None, reinitialize_steps=None, skip_full_coverage_check=False, nodemanager_follow_cluster=False, + host_port_remap=None, **connection_kwargs): """ :skip_full_coverage_check: Skips the check of cluster-require-full-coverage config, useful for clusters @@ -43,10 +44,34 @@ def __init__(self, startup_nodes=None, reinitialize_steps=None, skip_full_covera connection_kwargs.get('encoding_errors', 'strict'), connection_kwargs.get('decode_responses', False) ) + self._validate_host_port_remap(host_port_remap) + self.host_port_remap = host_port_remap if not self.startup_nodes: raise RedisClusterException("No startup nodes provided") + def _validate_host_port_remap(self, host_port_remap): + """ + Helper method that validates all entries in the host_port_remap config. + """ + if host_port_remap is None: + # Nothing to validate if config not set + return + + if not isinstance(host_port_remap, list): + raise RedisClusterConfigError("host_port_remap must be a list") + + for item in host_port_remap: + if not isinstance(item, dict): + raise RedisClusterConfigError("items inside host_port_remap list must be of dict type") + + # If we have from_host, we must have a to_host option to allow for translation to work + if ('from_host' in item and 'to_host' not in item) or ('from_host' not in item and 'to_host' in item): + raise RedisClusterConfigError("Both from_host and to_host must be present in remap item if either is defined") + + if ('from_port' in item and 'to_port' not in item) or ('from_port' not in item and 'to_port' in item): + raise RedisClusterConfigError("Both from_port and to_port must be present in remap item") + def keyslot(self, key): """ Calculate keyslot for a given key. @@ -183,12 +208,15 @@ def initialize(self): # No need to decode response because Redis should handle that for us... 
for slot in cluster_slots: + # import pdb; pdb.set_trace() master_node = slot[2] if master_node[0] == '': master_node[0] = node['host'] master_node[1] = int(master_node[1]) + master_node = self.remap_internal_node_object(master_node) + node, node_name = self.make_node_obj(master_node[0], master_node[1], 'master') nodes_cache[node_name] = node @@ -198,6 +226,7 @@ def initialize(self): slave_nodes = [slot[j] for j in range(3, len(slot))] for slave_node in slave_nodes: + slave_node = self.remap_internal_node_object(slave_node) target_slave_node, slave_node_name = self.make_node_obj(slave_node[0], slave_node[1], 'slave') nodes_cache[slave_node_name] = target_slave_node tmp_slots[i].append(target_slave_node) @@ -240,6 +269,25 @@ def initialize(self): self.nodes = nodes_cache self.reinitialize_counter = 0 + def remap_internal_node_object(self, node_obj): + if not self.host_port_remap: + # No remapping rule set, return object unmodified + return node_obj + + for remap_rule in self.host_port_remap: + if 'from_host' in remap_rule and 'to_host' in remap_rule: + if remap_rule['from_host'] in node_obj[0]: + # print('remapping host', node_obj[0], remap_rule['to_host']) + node_obj[0] = remap_rule['to_host'] + + ## The port value is always an integer + if 'from_port' in remap_rule and 'to_port' in remap_rule: + if remap_rule['from_port'] == node_obj[1]: + # print('remapping port', node_obj[1], remap_rule['to_port']) + node_obj[1] = remap_rule['to_port'] + + return node_obj + def increment_reinitialize_counter(self, ct=1): for i in range(min(ct, self.reinitialize_steps)): self.reinitialize_counter += 1 diff --git a/tests/test_cluster_node_manager.py b/tests/test_cluster_node_manager.py index bdc9cc09..87fc2f4d 100644 --- a/tests/test_cluster_node_manager.py +++ b/tests/test_cluster_node_manager.py @@ -6,7 +6,7 @@ # rediscluster imports from tests.conftest import skip_if_server_version_lt from rediscluster import RedisCluster -from rediscluster.exceptions import RedisClusterException +from rediscluster.exceptions import RedisClusterException, RedisClusterConfigError from rediscluster.nodemanager import NodeManager # 3rd party imports @@ -399,3 +399,85 @@ def get_redis_link(host, port, decode_responses=False): with pytest.raises(RedisClusterException) as e: n.initialize() assert 'Redis Cluster cannot be connected' in unicode(e.value) + + +def test_host_port_remap(): + """ + """ + # Test that providing nothing to NodeManager will not cause error + n = NodeManager( + startup_nodes=[{"host": "127.0.0.1", "port": 7000}], + host_port_remap=None, + ) + # Test that providing wrong root level object type will raise config exception. 
List is expected + with pytest.raises(RedisClusterConfigError) as excp: + n = NodeManager( + startup_nodes=[{"host": "127.0.0.1", "port": 7000}], + host_port_remap={}, + ) + # An empty host_port_remap list should not raise an error + n = NodeManager( + startup_nodes=[{"host": "127.0.0.1", "port": 7000}], + host_port_remap=[], + ) + # A wrong object type inside host_port_remap list shold raise error + with pytest.raises(RedisClusterConfigError) as excp: + n = NodeManager( + startup_nodes=[{"host": "127.0.0.1", "port": 7000}], + host_port_remap=[None], + ) + # The correct object typ inside list but empty should not give an error + n = NodeManager( + startup_nodes=[{"host": "127.0.0.1", "port": 7000}], + host_port_remap=[{}, {}], + ) + # If we only have either or from_host or to_host set we should get an error + with pytest.raises(RedisClusterConfigError) as excp: + n = NodeManager( + startup_nodes=[{"host": "127.0.0.1", "port": 7000}], + host_port_remap=[{'from_host': ''}], + ) + with pytest.raises(RedisClusterConfigError) as excp: + n = NodeManager( + startup_nodes=[{"host": "127.0.0.1", "port": 7000}], + host_port_remap=[{'to_host': ''}], + ) + # If we only have either or from_port or to_port set we should get an error + with pytest.raises(RedisClusterConfigError) as excp: + n = NodeManager( + startup_nodes=[{"host": "127.0.0.1", "port": 7000}], + host_port_remap=[{'from_port': ''}], + ) + with pytest.raises(RedisClusterConfigError) as excp: + n = NodeManager( + startup_nodes=[{"host": "127.0.0.1", "port": 7000}], + host_port_remap=[{'to_port': ''}], + ) + + # Creating a valid config with multiple entries + n = NodeManager( + startup_nodes=[{"host": "127.0.0.1", "port": 7000}], + host_port_remap=[ + {'from_host': '127.0.0.1', 'to_host': 'localhost', 'from_port': 7000, 'to_port': 70001}, + {'from_host': '172.1.0.1', 'to_host': 'localhost', 'from_port': 7000, 'to_port': 70001}, + ], + ) + + # If no host_port_remap is set then a node obj should not be modified in any way when remapping it + n = NodeManager( + host_port_remap=None, + startup_nodes=[{"host": "127.0.0.1", "port": 7000}] + ) + initial_node_obj = ['127.0.0.1', 7000, 'xyz'] + unmodified_remapped_obj = n.remap_internal_node_object(initial_node_obj) + assert unmodified_remapped_obj == initial_node_obj + + # Test that modifying both host and port works + n = NodeManager( + host_port_remap=[{'from_host': '127.0.0.1', 'to_host': 'localhost', 'from_port': 7000, 'to_port': 7001}], + startup_nodes=[{"host": "127.0.0.1", "port": 7000}] + ) + initial_node_obj = ['127.0.0.1', 7000, 'xyz'] + remapped_obj = n.remap_internal_node_object(initial_node_obj) + assert remapped_obj[0] == 'localhost' + assert remapped_obj[1] == 7001 From 2b484890c224a6ca369a42b3df5d517b9a230184 Mon Sep 17 00:00:00 2001 From: Teerayut Hiruntaraporn Date: Thu, 21 May 2020 21:13:14 +0700 Subject: [PATCH 215/263] Remove Token class checking when use XREADGROUP/XREAD --- rediscluster/client.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/rediscluster/client.py b/rediscluster/client.py index 847a47e5..32b16b0b 100644 --- a/rediscluster/client.py +++ b/rediscluster/client.py @@ -464,8 +464,8 @@ def _determine_slot(self, *args): return slots.pop() if command in ['XREADGROUP', 'XREAD']: - tokens = {args[i].value: i for i in range(len(args)) if type(args[i]) == Token} - keys_ids = list(args[tokens['STREAMS'] + 1: ]) + stream_idx = args.index(b'STREAMS') + keys_ids = list(args[stream_idx + 1: ]) idx_split = len(keys_ids) // 2 keys = keys_ids[: idx_split] 
slots = {self.connection_pool.nodes.keyslot(key) for key in keys} From f08c205ae518a96bc96df8c2aec1a75d8167df57 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Thu, 21 May 2020 13:06:27 +0200 Subject: [PATCH 216/263] Add the python2 compatibility note to the index.rst in docs/ to not only have it in the README file. Minor cleanup and fixes to index file. --- docs/index.rst | 21 +++++++++++++++------ 1 file changed, 15 insertions(+), 6 deletions(-) diff --git a/docs/index.rst b/docs/index.rst index 8933cbc2..5ee4e40a 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -1,8 +1,3 @@ -.. redis-py-cluster documentation master file, created by - sphinx-quickstart on Tue Mar 29 23:29:46 2016. - You can adapt this file completely to your liking, but it should at least - contain the root `toctree` directive. - Welcome to redis-py-cluster's documentation! ============================================ @@ -73,9 +68,11 @@ Additional code examples of more advance functionality can be found in the `exam Library Dependencies -------------------- +Even if the goal is to support all major versions of redis-py in the 3.x.x track, this is not a guarantee that all versions will work. + It is always recommended to use the latest version of the dependencies of this project. -- Redis-py: 'redis>=3.0.0,<3.1.0' is required in this major version of this cluster lib. +- Redis-py: 'redis>=3.0.0,<4.0.0' is required in this major version of this cluster lib. - Optional Python: hiredis >= `0.2.0`. Older versions might work but is not tested. - A working Redis cluster based on version `>=3.0.0` is required. @@ -95,6 +92,18 @@ If this library supports more then one major version line of `redis-py`, then th - 3.8 +Python 2 Compatibility Note +########################### + +This library follows the announced change from our upstream package redis-py. Due to this, +we will follow the same python 2.7 deprecation timeline as stated in there. + +redis-py-cluster 2.1.x will be the last major version release that supports Python 2.7. +The 2.1.x line will continue to get bug fixes and security patches that +support Python 2 until August 1, 2020. redis-py-cluster 3.0.x will be the next major +version and will require Python 3.5+. 
+ + Regarding duplicate package name on pypi ---------------------------------------- From 7fe66534913d58efd9fc66e03fb3d6f81c15df47 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Thu, 21 May 2020 14:07:04 +0200 Subject: [PATCH 217/263] Rename variable inside execute_command to a better name --- rediscluster/client.py | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/rediscluster/client.py b/rediscluster/client.py index 32b16b0b..321055e9 100644 --- a/rediscluster/client.py +++ b/rediscluster/client.py @@ -566,9 +566,9 @@ def _execute_command(self, *args, **kwargs): if asking: node = self.connection_pool.nodes.nodes[redirect_addr] - r = self.connection_pool.get_connection_by_node(node) + connection = self.connection_pool.get_connection_by_node(node) elif try_random_node: - r = self.connection_pool.get_random_connection() + connection = self.connection_pool.get_random_connection() try_random_node = False else: if self.refresh_table_asap: @@ -579,26 +579,26 @@ def _execute_command(self, *args, **kwargs): else: node = self.connection_pool.get_node_by_slot(slot, self.read_from_replicas and (command in self.READ_COMMANDS)) is_read_replica = node['server_type'] == 'slave' - r = self.connection_pool.get_connection_by_node(node) + connection = self.connection_pool.get_connection_by_node(node) try: if asking: - r.send_command('ASKING') - self.parse_response(r, "ASKING", **kwargs) + connection.send_command('ASKING') + self.parse_response(connection, "ASKING", **kwargs) asking = False if is_read_replica: # Ask read replica to accept reads (see https://redis.io/commands/readonly) # TODO: do we need to handle errors from this response? - r.send_command('READONLY') - self.parse_response(r, 'READONLY', **kwargs) + connection.send_command('READONLY') + self.parse_response(connection, 'READONLY', **kwargs) is_read_replica = False - r.send_command(*args) - return self.parse_response(r, command, **kwargs) + connection.send_command(*args) + return self.parse_response(connection, command, **kwargs) except (RedisClusterException, BusyLoadingError): raise except ConnectionError: - r.disconnect() + connection.disconnect() except TimeoutError: if ttl < self.RedisClusterRequestTTL / 2: time.sleep(0.05) @@ -626,7 +626,7 @@ def _execute_command(self, *args, **kwargs): except AskError as e: redirect_addr, asking = "{0}:{1}".format(e.host, e.port), True finally: - self.connection_pool.release(r) + self.connection_pool.release(connection) raise ClusterError('TTL exhausted.') From 5274365000ae9b69ce697374f0f8e5879c42a217 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Fri, 22 May 2020 13:03:09 +0200 Subject: [PATCH 218/263] Unblock all stream tests as they all pass and can be run in a clustered env --- tests/test_commands.py | 20 -------------------- 1 file changed, 20 deletions(-) diff --git a/tests/test_commands.py b/tests/test_commands.py index b9c2b934..62e11578 100644 --- a/tests/test_commands.py +++ b/tests/test_commands.py @@ -2191,7 +2191,6 @@ def test_georadiusmember(self, r): (2.1909382939338684, 41.433790281840835)]] @skip_if_server_version_lt('5.0.0') - @skip_for_no_cluster_impl() def test_xack(self, r): stream = 'stream' group = 'group' @@ -2213,7 +2212,6 @@ def test_xack(self, r): assert r.xack(stream, group, m2, m3) == 2 @skip_if_server_version_lt('5.0.0') - @skip_for_no_cluster_impl() def test_xadd(self, r): stream = 'stream' message_id = r.xadd(stream, {'foo': 'bar'}) @@ -2228,7 +2226,6 @@ def test_xadd(self, r): assert r.xlen(stream) == 2 
@skip_if_server_version_lt('5.0.0') - @skip_for_no_cluster_impl() def test_xclaim(self, r): stream = 'stream' group = 'group' @@ -2260,7 +2257,6 @@ def test_xclaim(self, r): justid=True) == [message_id] @skip_if_server_version_lt('5.0.0') - @skip_for_no_cluster_impl() def test_xclaim_trimmed(self, r): # xclaim should not raise an exception if the item is not there stream = 'stream' @@ -2286,7 +2282,6 @@ def test_xclaim_trimmed(self, r): assert item[1][0] == sid2 @skip_if_server_version_lt('5.0.0') - @skip_for_no_cluster_impl() def test_xdel(self, r): stream = 'stream' @@ -2302,7 +2297,6 @@ def test_xdel(self, r): assert r.xdel(stream, m2, m3) == 2 @skip_if_server_version_lt('5.0.0') - @skip_for_no_cluster_impl() def test_xgroup_create(self, r): # tests xgroup_create and xinfo_groups stream = 'stream' @@ -2322,7 +2316,6 @@ def test_xgroup_create(self, r): assert r.xinfo_groups(stream) == expected @skip_if_server_version_lt('5.0.0') - @skip_for_no_cluster_impl() def test_xgroup_create_mkstream(self, r): # tests xgroup_create and xinfo_groups stream = 'stream' @@ -2345,7 +2338,6 @@ def test_xgroup_create_mkstream(self, r): assert r.xinfo_groups(stream) == expected @skip_if_server_version_lt('5.0.0') - @skip_for_no_cluster_impl() def test_xgroup_delconsumer(self, r): stream = 'stream' group = 'group' @@ -2364,7 +2356,6 @@ def test_xgroup_delconsumer(self, r): assert r.xgroup_delconsumer(stream, group, consumer) == 2 @skip_if_server_version_lt('5.0.0') - @skip_for_no_cluster_impl() def test_xgroup_destroy(self, r): stream = 'stream' group = 'group' @@ -2377,7 +2368,6 @@ def test_xgroup_destroy(self, r): assert r.xgroup_destroy(stream, group) @skip_if_server_version_lt('5.0.0') - @skip_for_no_cluster_impl() def test_xgroup_setid(self, r): stream = 'stream' group = 'group' @@ -2395,7 +2385,6 @@ def test_xgroup_setid(self, r): assert r.xinfo_groups(stream) == expected @skip_if_server_version_lt('5.0.0') - @skip_for_no_cluster_impl() def test_xinfo_consumers(self, r): stream = 'stream' group = 'group' @@ -2421,7 +2410,6 @@ def test_xinfo_consumers(self, r): assert info == expected @skip_if_server_version_lt('5.0.0') - @skip_for_no_cluster_impl() def test_xinfo_stream(self, r): stream = 'stream' m1 = r.xadd(stream, {'foo': 'bar'}) @@ -2433,7 +2421,6 @@ def test_xinfo_stream(self, r): assert info['last-entry'] == get_stream_message(r, stream, m2) @skip_if_server_version_lt('5.0.0') - @skip_for_no_cluster_impl() def test_xlen(self, r): stream = 'stream' assert r.xlen(stream) == 0 @@ -2442,7 +2429,6 @@ def test_xlen(self, r): assert r.xlen(stream) == 2 @skip_if_server_version_lt('5.0.0') - @skip_for_no_cluster_impl() def test_xpending(self, r): stream = 'stream' group = 'group' @@ -2477,7 +2463,6 @@ def test_xpending(self, r): assert r.xpending(stream, group) == expected @skip_if_server_version_lt('5.0.0') - @skip_for_no_cluster_impl() def test_xpending_range(self, r): stream = 'stream' group = 'group' @@ -2503,7 +2488,6 @@ def test_xpending_range(self, r): assert response[1]['consumer'] == consumer2.encode() @skip_if_server_version_lt('5.0.0') - @skip_for_no_cluster_impl() def test_xrange(self, r): stream = 'stream' m1 = r.xadd(stream, {'foo': 'bar'}) @@ -2527,7 +2511,6 @@ def get_ids(results): assert get_ids(results) == [m1] @skip_if_server_version_lt('5.0.0') - @skip_for_no_cluster_impl() def test_xread(self, r): stream = 'stream' m1 = r.xadd(stream, {'foo': 'bar'}) @@ -2571,7 +2554,6 @@ def test_xread(self, r): assert r.xread(streams={stream: m2}) == [] @skip_if_server_version_lt('5.0.0') - 
@skip_for_no_cluster_impl() def test_xreadgroup(self, r): stream = 'stream' group = 'group' @@ -2643,7 +2625,6 @@ def test_xreadgroup(self, r): assert r.xreadgroup(group, consumer, streams={stream: '0'}) == expected @skip_if_server_version_lt('5.0.0') - @skip_for_no_cluster_impl() def test_xrevrange(self, r): stream = 'stream' m1 = r.xadd(stream, {'foo': 'bar'}) @@ -2667,7 +2648,6 @@ def get_ids(results): assert get_ids(results) == [m4] @skip_if_server_version_lt('5.0.0') - @skip_for_no_cluster_impl() def test_xtrim(self, r): stream = 'stream' From bf011040b8fceec7d63371b1229d71255e6cb3b5 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Fri, 22 May 2020 13:36:07 +0200 Subject: [PATCH 219/263] Remove accidental pdb in the code --- rediscluster/nodemanager.py | 1 - 1 file changed, 1 deletion(-) diff --git a/rediscluster/nodemanager.py b/rediscluster/nodemanager.py index e587a837..1a2ef438 100644 --- a/rediscluster/nodemanager.py +++ b/rediscluster/nodemanager.py @@ -208,7 +208,6 @@ def initialize(self): # No need to decode response because Redis should handle that for us... for slot in cluster_slots: - # import pdb; pdb.set_trace() master_node = slot[2] if master_node[0] == '': From 76aa25ffe4a9d009c0008ff83428d0e7e572f2e7 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Fri, 22 May 2020 13:56:09 +0200 Subject: [PATCH 220/263] Create a new more specific error for the cases where a slot is not covered by the cluster. If this happens it will attempt to rebuild the cluster until TTL is expired and then raise that exception back to the user. Fixes #350 --- docs/release-notes.rst | 1 + docs/upgrading.rst | 5 +++++ rediscluster/client.py | 11 +++++++++++ rediscluster/connection.py | 4 ++-- rediscluster/exceptions.py | 11 +++++++++++ tests/test_cluster_connection_pool.py | 7 ++++++- 6 files changed, 36 insertions(+), 3 deletions(-) diff --git a/docs/release-notes.rst b/docs/release-notes.rst index 68323e14..0020c946 100644 --- a/docs/release-notes.rst +++ b/docs/release-notes.rst @@ -27,6 +27,7 @@ Release Notes * Nodemanager initiailize should now handle usernames properly (#365) * PubSub tests has been all been disabled * New feature, host_port_remap. Send in a remapping configuration to RedisCluster instance where the nodes configuration recieved from the redis cluster can be altered to allow for connection in certain circumstances. See new section in clients.rst in docs/ for usage example. + * When a slot is not covered by the cluster, it will not raise SlotNotCoveredError instead of the old generic RedisClusterException. The client will not attempt to rebuild the cluster layout a few times before giving up and raising that exception to the user. (#350) 2.0.0 (Aug 12, 2019) diff --git a/docs/upgrading.rst b/docs/upgrading.rst index f5824568..e4b6c0c8 100644 --- a/docs/upgrading.rst +++ b/docs/upgrading.rst @@ -8,6 +8,11 @@ This document describes what must be done when upgrading between different versi Python3 version must now be one of 3.5, 3.6, 3.7, 3.8 +The following exception example has now a new more specific exception class that will be attempted to be caught and the client to resolve the cluster layout. If enough attempts has been made then SlotNotCoveredError will be raised with the same message as before. If you have catch for RedisClusterException you either remove it and let the client try to resolve the cluster layout itself, or start to catch SlotNotCoveredError. 
This error usually happens during failover if you run skip_full_coverage_check=True when running on AWS ElasticCache for example. + + ## Example exception + rediscluster.exceptions.RedisClusterException: Slot "6986" not covered by the cluster. "skip_full_coverage_check=True" + 1.3.x --> 2.0.0 --------------- diff --git a/rediscluster/client.py b/rediscluster/client.py index 321055e9..bd88e408 100644 --- a/rediscluster/client.py +++ b/rediscluster/client.py @@ -21,6 +21,7 @@ ClusterError, MovedError, RedisClusterException, + SlotNotCoveredError, TryAgainError, ) from .pubsub import ClusterPubSub @@ -595,6 +596,16 @@ def _execute_command(self, *args, **kwargs): connection.send_command(*args) return self.parse_response(connection, command, **kwargs) + except SlotNotCoveredError as e: + # In some cases during failover to a replica is happening + # a slot sometimes is not covered by the cluster layout and + # we need to attempt to refresh the cluster layout and try again + self.refresh_table_asap = True + time.sleep(0.05) + + # This is the last attempt before we run out of TTL, raise the exception + if ttl == 1: + raise e except (RedisClusterException, BusyLoadingError): raise except ConnectionError: diff --git a/rediscluster/connection.py b/rediscluster/connection.py index 1a91bb79..5b1962e6 100644 --- a/rediscluster/connection.py +++ b/rediscluster/connection.py @@ -14,7 +14,7 @@ from .exceptions import ( RedisClusterException, AskError, MovedError, TryAgainError, ClusterDownError, ClusterCrossSlotError, - MasterDownError, + MasterDownError, SlotNotCoveredError, ) # 3rd party imports @@ -329,7 +329,7 @@ def get_master_node_by_slot(self, slot): try: return self.nodes.slots[slot][0] except KeyError as ke: - raise RedisClusterException('Slot "{slot}" not covered by the cluster. "skip_full_coverage_check={skip_full_coverage_check}"'.format( + raise SlotNotCoveredError('Slot "{slot}" not covered by the cluster. "skip_full_coverage_check={skip_full_coverage_check}"'.format( slot=slot, skip_full_coverage_check=self.nodes._skip_full_coverage_check, )) diff --git a/rediscluster/exceptions.py b/rediscluster/exceptions.py index 0637d143..af204d79 100644 --- a/rediscluster/exceptions.py +++ b/rediscluster/exceptions.py @@ -85,3 +85,14 @@ class MasterDownError(ClusterDownError): """ """ pass + + +class SlotNotCoveredError(RedisClusterException): + """ + This error only happens in the case where the connection pool will try to + fetch what node that is covered by a given slot. 
+ + If this error is raised the client should drop the current node layout and + attempt to reconnect and refresh the node layout again + """ + pass diff --git a/tests/test_cluster_connection_pool.py b/tests/test_cluster_connection_pool.py index 1840dbee..654eb6fe 100644 --- a/tests/test_cluster_connection_pool.py +++ b/tests/test_cluster_connection_pool.py @@ -11,7 +11,7 @@ from rediscluster.connection import ( ClusterConnectionPool, ClusterBlockingConnectionPool, ClusterReadOnlyConnectionPool, ClusterConnection, UnixDomainSocketConnection) -from rediscluster.exceptions import RedisClusterException +from rediscluster.exceptions import RedisClusterException, SlotNotCoveredError from .conftest import (skip_if_server_version_lt, skip_for_no_cluster_impl) # 3rd party imports @@ -205,6 +205,11 @@ def test_master_node_by_slot(self): node = pool.get_master_node_by_slot(12182) node['port'] = 7002 + pool = self.get_pool(connection_kwargs={}) + pool.nodes.slots = {} + with pytest.raises(SlotNotCoveredError): + pool.get_master_node_by_slot(12182) + def test_from_url_connection_classes(self): from rediscluster.client import RedisCluster from rediscluster.connection import ClusterConnectionPool, ClusterConnection, SSLClusterConnection From 2605ed0adf58c4540dda0d9121a9767238be76e8 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Fri, 22 May 2020 14:52:56 +0200 Subject: [PATCH 221/263] Fix implementation of CLIENT SETNAME so that it is no longer blocked. Also added note about how to set the connection name by default on all connections newly created. See issue 802 in redis-py. Fixes #327 --- docs/release-notes.rst | 1 + rediscluster/client.py | 3 ++- tests/test_commands.py | 2 +- tests/test_commands_cluster.py | 5 +++++ 4 files changed, 9 insertions(+), 2 deletions(-) diff --git a/docs/release-notes.rst b/docs/release-notes.rst index 0020c946..1104370b 100644 --- a/docs/release-notes.rst +++ b/docs/release-notes.rst @@ -28,6 +28,7 @@ Release Notes * PubSub tests has been all been disabled * New feature, host_port_remap. Send in a remapping configuration to RedisCluster instance where the nodes configuration recieved from the redis cluster can be altered to allow for connection in certain circumstances. See new section in clients.rst in docs/ for usage example. * When a slot is not covered by the cluster, it will not raise SlotNotCoveredError instead of the old generic RedisClusterException. The client will not attempt to rebuild the cluster layout a few times before giving up and raising that exception to the user. (#350) + * CLIENT SETNAME is now possible to use from the client instance. For setting the name for all connections from the client by default, see issue #802 in redis-py repo for the change that was implemented in redis-py 3.4.0. 
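A short usage sketch of the now unblocked command follows; the second variant assumes the client_name keyword introduced in redis-py 3.4.0 is forwarded to every cluster connection, which is not shown in this patch:

.. code-block:: python

    from rediscluster import RedisCluster

    startup_nodes = [{"host": "127.0.0.1", "port": "7000"}]
    rc = RedisCluster(startup_nodes=startup_nodes, decode_responses=True)

    # Broadcasts CLIENT SETNAME to all nodes; client_getname then returns
    # one entry per node in the cluster.
    rc.client_setname('redis_py_cluster_example')
    print(rc.client_getname())

    # Alternative: have every new connection name itself on connect
    # (assumes the client_name kwarg reaches the connection objects).
    named = RedisCluster(
        startup_nodes=startup_nodes,
        client_name='redis_py_cluster_example',
        decode_responses=True,
    )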
2.0.0 (Aug 12, 2019) diff --git a/rediscluster/client.py b/rediscluster/client.py index bd88e408..7266e1ae 100644 --- a/rediscluster/client.py +++ b/rediscluster/client.py @@ -101,7 +101,6 @@ class RedisCluster(Redis): 'ACL USERS', 'ACL WHOAMI', 'BITOP', - 'CLIENT SETNAME', 'MOVE', 'SCRIPT KILL', 'SENTINEL GET-MASTER-ADDR-BY-NAME', @@ -122,6 +121,7 @@ class RedisCluster(Redis): "CLIENT ID", "CLIENT KILL", "CLIENT LIST", + "CLIENT SETNAME", "CLUSTER INFO", "CONFIG GET", "CONFIG RESETSTAT", @@ -213,6 +213,7 @@ class RedisCluster(Redis): "CLIENT ID", "CLIENT KILL", "CLIENT LIST", + "CLIENT SETNAME", "CLUSTER INFO", "CONFIG GET", "CONFIG RESETSTAT", diff --git a/tests/test_commands.py b/tests/test_commands.py index 62e11578..63a7f0e4 100644 --- a/tests/test_commands.py +++ b/tests/test_commands.py @@ -295,7 +295,7 @@ def test_client_getname(self, r): assert r.client_getname() is None @skip_if_server_version_lt('2.6.9') - @skip_for_no_cluster_impl() + @pytest.mark.skip(reason="Cluster specific override") def test_client_setname(self, r): assert r.client_setname('redis_py_test') assert r.client_getname() == 'redis_py_test' diff --git a/tests/test_commands_cluster.py b/tests/test_commands_cluster.py index db32c1a6..089bc3ea 100644 --- a/tests/test_commands_cluster.py +++ b/tests/test_commands_cluster.py @@ -92,6 +92,11 @@ def test_client_unblock(self, r): def test_client_getname(self, r): assert get_main_cluster_node_data(r.client_getname()) is None + @skip_if_server_version_lt('2.6.9') + def test_client_setname(self, r): + assert r.client_setname('redis_py_test') + assert get_main_cluster_node_data(r.client_getname()) == 'redis_py_test' + def test_config_get(self, r): data = get_main_cluster_node_data(r.config_get()) assert 'maxmemory' in data From a154a6f42b9d99ba81ab7775f470e1b085464a7c Mon Sep 17 00:00:00 2001 From: Sebastien Williams-Wynn Date: Thu, 4 Jun 2020 17:23:13 +0100 Subject: [PATCH 222/263] DOCS: Fix spelling and grammar in pipelining documentation --- docs/pipelines.rst | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/docs/pipelines.rst b/docs/pipelines.rst index 5ab9bdd8..2ab57898 100644 --- a/docs/pipelines.rst +++ b/docs/pipelines.rst @@ -11,7 +11,7 @@ Just like in `redis-py`, `redis-py-cluster` queues up all the commands inside th Ideally all the commands should be sent to each node in the cluster in parallel so that all the commands can be processed as fast as possible. We do this by first writing all of the commands to the sockets sequentially before reading any of the responses. This allows us to parallelize the network i/o without the overhead of managing python threads. -In previous versions of the library there were some bugs associated with pipelining operations. In an effort to simplify the logic and lessen the likelihood of bugs, if we get back connection errors, MOVED errors, ASK errors or any other error that can safely be retried, we fall back to sending these remaining commands sequentially to each individual node just as we would in a normal redis call. We still buffer the results inside the pipeline response so there will be no change in client behavior. During normal cluster operations, pipelined commands should work nearly efficiently as pipelined commands to a single instance redis. When there is a disruption to the cluster topography, like when keys are being resharded, or when a slave takes over for a master, there will be a slight loss of network efficiency. 
Commands that are rejected by the server are tried one at a time as we rebuild the slot mappings. Once the slots table is rebuilt correctly (usally in a second or so), the client resumes efficient networking behavior. We felt it was more important to prioritize correctness of behavior and reliable error handling over networking efficiency for the rare cases where the cluster topography is in flux. +In previous versions of the library there were some bugs associated with pipelining operations. In an effort to simplify the logic and lessen the likelihood of bugs, if we get back connection errors, MOVED errors, ASK errors or any other error that can safely be retried, we fall back to sending these remaining commands sequentially to each individual node just as we would in a normal redis call. We still buffer the results inside the pipeline response so there will be no change in client behavior. During normal cluster operations, pipelined commands should work nearly efficiently as pipelined commands to a single instance redis. When there is a disruption to the cluster topography, like when keys are being resharded, or when a slave takes over for a master, there will be a slight loss of network efficiency. Commands that are rejected by the server are tried one at a time as we rebuild the slot mappings. Once the slots table is rebuilt correctly (usually in a second or so), the client resumes efficient networking behavior. We felt it was more important to prioritize correctness of behavior and reliable error handling over networking efficiency for the rare cases where the cluster topography is in flux. @@ -56,7 +56,7 @@ In the solution for this lib multikey commands are blocked hard and will probabl Examples on commands that do not work is `MGET`, `MSET`, `MOVE`. -One good thing that comes out of blocking multi key commands is that correct execution order is less of a problem and as long as it applies to each slot in the cluster we shold be fine. +One good thing that comes out of blocking multi key commands is that correct execution order is less of a problem and as long as it applies to each slot in the cluster we should be fine. Consider the following example. Create a pipeline and issue 6 commands `A`, `B`, `C`, `D`, `E`, `F` and then execute it. The pipeline is calculated and 2 sub pipelines is created with `A`, `C`, `D`, `F` in the first and `B`, `E` in the second. Both pipelines are then sent to each node in the cluster and a response is sent back. For the first node `[True, MovedException(12345), MovedException(12345), True]` and from the second node [`True`, `True`]. After this response is parsed we see that 2 commands in the first pipeline did not work and must be sent to another node. This case happens if the client slots cache is wrong because a slot was migrated to another node in the cluster. After parsing the response we then build a third pipeline object with commands [`C`, `D`] to the second node. The third object is executed and passes and from the client perspective the entire pipeline was executed. @@ -76,9 +76,9 @@ The client is responsible for figuring out which commands map to which nodes. Le Parallel execution of pipeline ------------------------------ -In older version of `redis-py-cluster`, there was a thread implementation that helped to increaes the performance of running pipelines by running the connections and execution of all commands to all nodes in the pipeline in paralell. This implementation was later removed in favor of a much simpler and faster implementation. 
+In older version of `redis-py-cluster`, there was a thread implementation that helped to increase the performance of running pipelines by running the connections and execution of all commands to all nodes in the pipeline in parallel. This implementation was later removed in favor of a much simpler and faster implementation. -In this new implementation we execute everything in the same thread, but we do all the writing to all sockets in order to each different server and then start to wait for them in sequence until all of them is complete. There is no real need to run them in parralell since we still have to wait for a thread join of all parralell executions before the code can continue, so we can wait in sequence for all of them to complete. This is not the absolute fastest implementation, but it much simpler to implement and maintain and cause less issues becuase there is no threads or other parallel ipmlementation that will use some overhead and add complexity to the method. +In this new implementation we execute everything in the same thread, but we do all the writing to all sockets in order to each different server and then start to wait for them in sequence until all of them is complete. There is no real need to run them in parallel since we still have to wait for a thread join of all parallel executions before the code can continue, so we can wait in sequence for all of them to complete. This is not the absolute fastest implementation, but it much simpler to implement and maintain and cause less issues because there is no threads or other parallel implementation that will use some overhead and add complexity to the method. This feature is implemented by default and will be used in all pipeline requests. @@ -87,9 +87,9 @@ This feature is implemented by default and will be used in all pipeline requests Transactions and WATCH ---------------------- -Support for transactions and WATCH:es in pipelines. If we look on the entire pipeline across all nodes in the cluster there is no possible way to have a complete transaction across all nodes because if we need to issue commands to 3 servers, each server is handled by its own and there is no way to tell other nodes to abort a transaction if only one of the nodes fail but not the others. A possible solution for that could be to implement a 2 step commit process. The 2 steps would consist of building 2 batches of commands for each node where the first batch would consist of validating the state of each slot that the pipeline wants to operate on. If any of the slots is migrating or moved then the client can correct its slots cache and issue a more correct pipeline batch. The second step would be to issue the actual commands and the data would be commited to redis. The big problem with this is that 99% of the time this would work really well if you have a very stable cluster with no migrations/resharding/servers down. But there can be times where a slot has begun migration in between the 2 steps of the pipeline and that would cause a race condition where the client thinks it has corrected the pipeline and wants to commit the data but when it does it will still fail. +Support for transactions and WATCH:es in pipelines. 
If we look on the entire pipeline across all nodes in the cluster there is no possible way to have a complete transaction across all nodes because if we need to issue commands to 3 servers, each server is handled by its own and there is no way to tell other nodes to abort a transaction if only one of the nodes fail but not the others. A possible solution for that could be to implement a 2 step commit process. The 2 steps would consist of building 2 batches of commands for each node where the first batch would consist of validating the state of each slot that the pipeline wants to operate on. If any of the slots is migrating or moved then the client can correct its slots cache and issue a more correct pipeline batch. The second step would be to issue the actual commands and the data would be committed to redis. The big problem with this is that 99% of the time this would work really well if you have a very stable cluster with no migrations/resharding/servers down. But there can be times where a slot has begun migration in between the 2 steps of the pipeline and that would cause a race condition where the client thinks it has corrected the pipeline and wants to commit the data but when it does it will still fail. -Why `MULTI/EXEC` support won't work in a cluster environment. There is some test code in the second `MULTI/EXEC cluster test code` of this document that tests if `MULTI/EXEC` is possible to use in a cluster pipeline. The test shows a huge problem when errors occur. If we wrap `MULTI/EXEC` in a packed set of commands then if a slot is migrating we will not get a good error we can parse and use. Currently it will only report `True` or `False` so we can narrow down what command failed but not why it failed. This might work really well if used on a non clustered node becuase it does not have to take care of `ASK` or `MOVED` errors. But for a cluster we need to know what cluster error occured so the correct action to fix the problem can be taken. Since there is more then 1 error to take care of it is not possible to take action based on just `True` or `False`. +Why `MULTI/EXEC` support won't work in a cluster environment. There is some test code in the second `MULTI/EXEC cluster test code` of this document that tests if `MULTI/EXEC` is possible to use in a cluster pipeline. The test shows a huge problem when errors occur. If we wrap `MULTI/EXEC` in a packed set of commands then if a slot is migrating we will not get a good error we can parse and use. Currently it will only report `True` or `False` so we can narrow down what command failed but not why it failed. This might work really well if used on a non clustered node because it does not have to take care of `ASK` or `MOVED` errors. But for a cluster we need to know what cluster error occurred so the correct action to fix the problem can be taken. Since there is more then 1 error to take care of it is not possible to take action based on just `True` or `False`. Because of this problem with error handling `MULTI/EXEC` is blocked hard in the code from being used in a pipeline because the current implementation can't handle the errors. @@ -153,7 +153,7 @@ This section will describe different types of pipeline solutions. It will list t Suggestion one ************** -Simple but yet sequential pipeline. This solution acts more like an interface for the already existing pipeline implementation and only provides a simple backwards compatible interface to ensure that code that sexists still will work withouth any major modifications. 
The good this with this implementation is that because all commands is runned in sequence it will handle `MOVED` or `ASK` redirections very good and withouth any problems. The major downside to this solution is that no command is ever batched and ran in parallel and thus you do not get any major performance boost from this approach. Other plus is that execution order is preserved across the entire cluster but a major downside is that thte commands is no longer atomic on the cluster scale because they are sent in multiple commands to different nodes. +Simple but yet sequential pipeline. This solution acts more like an interface for the already existing pipeline implementation and only provides a simple backwards compatible interface to ensure that code that exists still will work without any major modifications. This is good because, with this implementation, all commands are run in sequence and it will handle `MOVED` or `ASK` redirections very well and without any problems. The major downside to this solution is that no command is ever batched and run in parallel and thus you do not get any major performance boost from this approach. Another plus is that execution order is preserved across the entire cluster but a major downside is that the commands are no longer atomic on the cluster scale because they are sent in multiple commands to different nodes. **Good** @@ -170,7 +170,7 @@ Suggestion two ************** Current pipeline implementation. This implementation is rather good and works well because it combines the existing pipeline interface and functionality and it also provides a basic handling of `ASK` or `MOVED` errors inside the client. One major downside to this is that execution order is not preserved across the cluster. Although the execution order is somewhat broken if you look at the entire cluster level because commands can be split so that cmd1, cmd3, cmd5 get sent to one server and cmd2, cmd4 gets sent to another server. The order is then broken globally but locally for each server it is preserved and maintained correctly. On the other hand I guess that there can't be any commands that can affect different hashslots within the same command so maybe it really doesn't matter if the execution order is not correct because for each slot/key the order is valid. -There might be some issues with rebuilding the correct response ordering from the scattered data because each command might be in different sub pipelines. But I think that our current code still handles this correctly. I think I have to figure out some weird case where the execution order actually matters. There might be some issues with the nonsupported mget/mset commands that acctually performs different sub commands then it currently supports. +There might be some issues with rebuilding the correct response ordering from the scattered data because each command might be in different sub pipelines. But I think that our current code still handles this correctly. I think I have to figure out some weird case where the execution order actually matters. There might be some issues with the nonsupported mget/mset commands that actually performs different sub commands then it currently supports. **Good** @@ -202,12 +202,12 @@ There is a even simpler form of pipelines that can be made where all commands is Suggestion four ************** -One other solution is the 2 step commit solution where you send for each server 2 batches of commands. 
The first command should somehow establish that each keyslot is in the correct state and able to handle the data. After the client have recieved OK from all nodes that all data slots is good to use then it will acctually send the real pipeline with all data and commands. The big problem with this approach is that ther eis a gap between the checking of the slots and the acctual sending of the data where things can happen to the already established slots setup. But at the same time there is no possibility of merging these 2 steps because if step 2 is automatically runned if step 1 is Ok then the pipeline for the first node that will fail will fail but for the other nodes it will suceed but when it should not because if one command gets `ASK` or `MOVED` redirection then all pipeline objects must be rebuilt to match the new specs/setup and then reissued by the client. The major advantage of this solution is that if you have total controll of the redis server and do controlled upgrades when no clients is talking to the server then it can actually work really well because there is no possibility that `ASK` or `MOVED` will triggered by migrations in between the 2 batches. +One other solution is the 2 step commit solution where you send for each server 2 batches of commands. The first command should somehow establish that each keyslot is in the correct state and able to handle the data. After the client have received OK from all nodes that all data slots is good to use then it will actually send the real pipeline with all data and commands. The big problem with this approach is that there is a gap between the checking of the slots and the actual sending of the data where things can happen to the already established slots setup. But at the same time there is no possibility of merging these 2 steps because if step 2 is automatically run if step 1 is Ok then the pipeline for the first node that will fail will fail but for the other nodes it will succeed but when it should not because if one command gets `ASK` or `MOVED` redirection then all pipeline objects must be rebuilt to match the new specs/setup and then reissued by the client. The major advantage of this solution is that if you have total control of the redis server and do controlled upgrades when no clients is talking to the server then it can actually work really well because there is no possibility that `ASK` or `MOVED` will triggered by migrations in between the 2 batches. **Good** - Still rather safe because of the 2 step commit solution - - Handles `ASK` or `MOVED` before commiting the data + - Handles `ASK` or `MOVED` before committing the data **Bad** From 707ae23d32f98c172b2c39c4b4ad0f0cbf8ebb57 Mon Sep 17 00:00:00 2001 From: Jake Hilton Date: Wed, 10 Jun 2020 09:28:52 -0600 Subject: [PATCH 223/263] Fix to the redis cluster ssl connection. 
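A hedged connection sketch for this fix; the host name and certificate path below are placeholders, and it assumes that passing ssl=True is what selects SSLClusterConnection (which is why the redundant kwarg has to be popped before it reaches redis-py's SSLConnection):

.. code-block:: python

    from rediscluster import RedisCluster

    rc = RedisCluster(
        startup_nodes=[{"host": "cluster.example.com", "port": "7000"}],
        ssl=True,                        # routes through SSLClusterConnection
        ssl_cert_reqs='required',        # regular redis-py SSL kwargs still apply
        ssl_ca_certs='/path/to/ca.pem',  # placeholder path
        decode_responses=True,
    )

    print(rc.ping())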
--- rediscluster/connection.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/rediscluster/connection.py b/rediscluster/connection.py index 5b1962e6..eb4cbee3 100644 --- a/rediscluster/connection.py +++ b/rediscluster/connection.py @@ -70,6 +70,9 @@ class SSLClusterConnection(SSLConnection): def __init__(self, **kwargs): self.readonly = kwargs.pop('readonly', False) + # need to pop this off as the redis/connection.py SSLConnection init doesn't work with ssl passed in + if 'ssl' in kwargs: + kwargs.pop('ssl') kwargs['parser_class'] = ClusterParser super(SSLClusterConnection, self).__init__(**kwargs) From dcb53583869810dc9093b86293a85240ef2e8190 Mon Sep 17 00:00:00 2001 From: Will Giddens Date: Wed, 22 Jul 2020 17:24:39 -0400 Subject: [PATCH 224/263] Update cluster-setup.rst fix typo in readme --- docs/cluster-setup.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/cluster-setup.rst b/docs/cluster-setup.rst index 987bbd58..300be95e 100644 --- a/docs/cluster-setup.rst +++ b/docs/cluster-setup.rst @@ -24,7 +24,7 @@ See repo `README` for detailed instructions how to setup and run. Vagrant ------- -A fully functional vagrant box can be found at https://github.com/72squared/vagrant-redis-cluste +A fully functional vagrant box can be found at https://github.com/72squared/vagrant-redis-cluster See repo `README` for detailed instructions how to setup and run. From 83bacbdcce5a640eae88b1664a98b9fa3f076a6e Mon Sep 17 00:00:00 2001 From: Grokzen Date: Sun, 6 Sep 2020 15:26:42 +0200 Subject: [PATCH 225/263] Fixed broken pipeline examples. Fixes #384 --- docs/pipelines.rst | 35 +++++++++++++++++++++++++---------- 1 file changed, 25 insertions(+), 10 deletions(-) diff --git a/docs/pipelines.rst b/docs/pipelines.rst index 2ab57898..324e934f 100644 --- a/docs/pipelines.rst +++ b/docs/pipelines.rst @@ -107,14 +107,21 @@ This code does NOT wrap `MULTI/EXEC` around the commands when packed .. code-block:: python >>> from rediscluster import RedisCluster as s + >>> r = s(startup_nodes=[{"host": "127.0.0.1", "port": "7002"}]) + >>> # Simulate that a slot is migrating to another node - >>> r.connection_pool.nodes.slots[14226] = {'host': '127.0.0.1', 'server_type': 'master', 'port': 7001, 'name': '127.0.0.1:7001'} + >>> r.connection_pool.nodes.slots[14226] = [{ + >>> 'host': '127.0.0.1', + >>> 'server_type': 'master', + >>> 'port': 7001, + >>> 'name': '127.0.0.1:7001', + >>> }] + >>> p = r.pipeline() - >>> p.command_stack = [] - >>> p.command_stack.append((["SET", "ert", "tre"], {})) - >>> p.command_stack.append((["SET", "wer", "rew"], {})) - >>> p.execute() + >>> p.set('ert', 'tre') + >>> p.set('wer', 'rew') + >>> print(p.execute()) ClusterConnection [True, ResponseError('MOVED 14226 127.0.0.1:7002',)] @@ -126,14 +133,22 @@ This code DO wrap MULTI/EXEC around the commands when packed .. 
code-block:: python >>> from rediscluster import RedisCluster as s + >>> r = s(startup_nodes=[{"host": "127.0.0.1", "port": "7002"}]) + >>> # Simulate that a slot is migrating to another node - >>> r.connection_pool.nodes.slots[14226] = {'host': '127.0.0.1', 'server_type': 'master', 'port': 7001, 'name': '127.0.0.1:7001'} + >>> r.connection_pool.nodes.slots[14226] = [{ + >>> 'host': '127.0.0.1', + >>> 'server_type': 'master', + >>> 'port': 7001, + >>> 'name': '127.0.0.1:7001', + >>> }] + >>> p = r.pipeline() - >>> p.command_stack = [] - >>> p.command_stack.append((["SET", "ert", "tre"], {})) - >>> p.command_stack.append((["SET", "wer", "rew"], {})) - >>> p.execute() + >>> p.set('ert', 'tre') + >>> p.set('wer', 'rew') + >>> print(p.execute()) + ClusterConnection [True, False] From ed381f53c2b8f2d7ccac4ba8801f8d7409bd1ad0 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Sun, 6 Sep 2020 17:23:29 +0200 Subject: [PATCH 226/263] Setup NullLogging, code lint, Fixed ConnectionError retry code logic --- docs/logging.rst | 17 +++++ rediscluster/__init__.py | 4 ++ rediscluster/client.py | 101 +++++++++++++++++++++++------ rediscluster/connection.py | 26 +++++++- rediscluster/nodemanager.py | 39 +++++++++-- tests/test_cluster_node_manager.py | 82 +++++++++++++++++------ tests/test_cluster_obj.py | 16 ++++- 7 files changed, 232 insertions(+), 53 deletions(-) create mode 100644 docs/logging.rst diff --git a/docs/logging.rst b/docs/logging.rst new file mode 100644 index 00000000..2c0b0da9 --- /dev/null +++ b/docs/logging.rst @@ -0,0 +1,17 @@ +Setup client logging +#################### + +To setup logging for debugging inside the client during development you can add this as an example to your own code to enable `DEBUG` logging when using the library. + +.. code-block:: python + + import logging + + from rediscluster import RedisCluster + + logging.basicConfig() + logger = logging.getLogger('rediscluster') + logger.setLevel(logging.DEBUG) + logger.propergate = True + +Note that this logging is not reccommended to be used inside production as it can cause a performance drain and a slowdown of your client. diff --git a/rediscluster/__init__.py b/rediscluster/__init__.py index 2f25c02b..a1704c3a 100644 --- a/rediscluster/__init__.py +++ b/rediscluster/__init__.py @@ -1,6 +1,7 @@ # -*- coding: utf-8 -*- # python std lib +import logging import sys # rediscluster imports @@ -51,3 +52,6 @@ def int_or_str(value): RedisClusterException, TryAgainError, ] + +# Set default logging handler to avoid "No handler found" warnings. +logging.getLogger(__name__).addHandler(logging.NullHandler()) diff --git a/rediscluster/client.py b/rediscluster/client.py index 7266e1ae..d08bcb09 100644 --- a/rediscluster/client.py +++ b/rediscluster/client.py @@ -3,6 +3,8 @@ # python std lib import datetime +import json +import logging import random import string import time @@ -54,6 +56,9 @@ ) +log = logging.getLogger(__name__) + + class CaseInsensitiveDict(dict): "Case insensitive dict implementation. Assumes string keys only." 
@@ -320,13 +325,19 @@ def __init__(self, host=None, port=None, startup_nodes=None, max_connections=Non - db (Redis do not support database SELECT in cluster mode) """ # Tweaks to Redis client arguments when running in cluster mode + log.info("Created new instance of RedisCluster client instance") + log.debug("startup_nodes : " + json.dumps(startup_nodes, indent=2)) + if "db" in kwargs: raise RedisClusterException("Argument 'db' is not possible to use in cluster mode") - if kwargs.pop('ssl', False): # Needs to be removed to avoid exception in redis Connection init + # Needs to be removed to avoid exception in redis Connection init + if kwargs.pop('ssl', False): + log.info("Patching connection_class to SSLClusterConnection") connection_class = SSLClusterConnection if "connection_pool" in kwargs: + log.info("Using custom created connection pool") pool = kwargs.pop('connection_pool') else: startup_nodes = [] if startup_nodes is None else startup_nodes @@ -337,10 +348,15 @@ def __init__(self, host=None, port=None, startup_nodes=None, max_connections=Non if readonly_mode: connection_pool_cls = ClusterReadOnlyConnectionPool + log.info("Using ClusterReadOnlyConnectionPool") elif read_from_replicas: connection_pool_cls = ClusterWithReadReplicasConnectionPool + log.info("Using ClusterWithReadReplicasConnectionPool") else: connection_pool_cls = ClusterConnectionPool + log.info("Using ClusterConnectionPool") + + log.debug("Connection pool class " + str(connection_pool_cls)) pool = connection_pool_cls( startup_nodes=startup_nodes, @@ -545,6 +561,7 @@ def _execute_command(self, *args, **kwargs): raise RedisClusterException("Unable to determine command to use") command = args[0] + log.debug("Command to execute : " + str(command) + " : " + str(args) + " : " + str(kwargs)) # If set externally we must update it before calling any commands if self.refresh_table_asap: @@ -562,28 +579,34 @@ def _execute_command(self, *args, **kwargs): try_random_node = False slot = self._determine_slot(*args) ttl = int(self.RedisClusterRequestTTL) + connection_error_retry_counter = 0 while ttl > 0: ttl -= 1 - if asking: - node = self.connection_pool.nodes.nodes[redirect_addr] - connection = self.connection_pool.get_connection_by_node(node) - elif try_random_node: - connection = self.connection_pool.get_random_connection() - try_random_node = False - else: - if self.refresh_table_asap: - # MOVED - node = self.connection_pool.get_master_node_by_slot(slot) - # Reset the flag when it has been consumed to avoid it being - self.refresh_table_asap = False + try: + if asking: + node = self.connection_pool.nodes.nodes[redirect_addr] + connection = self.connection_pool.get_connection_by_node(node) + elif try_random_node: + connection = self.connection_pool.get_random_connection() + try_random_node = False else: - node = self.connection_pool.get_node_by_slot(slot, self.read_from_replicas and (command in self.READ_COMMANDS)) - is_read_replica = node['server_type'] == 'slave' - connection = self.connection_pool.get_connection_by_node(node) + if self.refresh_table_asap: + # MOVED + node = self.connection_pool.get_master_node_by_slot(slot) + self.refresh_table_asap = False + else: + node = self.connection_pool.get_node_by_slot( + slot, + self.read_from_replicas and (command in self.READ_COMMANDS) + ) + is_read_replica = node['server_type'] == 'slave' + + connection = self.connection_pool.get_connection_by_node(node) + + log.debug("Determined node to execute : " + str(node)) - try: if asking: connection.send_command('ASKING') 
self.parse_response(connection, "ASKING", **kwargs) @@ -598,25 +621,53 @@ def _execute_command(self, *args, **kwargs): connection.send_command(*args) return self.parse_response(connection, command, **kwargs) except SlotNotCoveredError as e: + log.exception("SlotNotCoveredError") + # In some cases during failover to a replica is happening # a slot sometimes is not covered by the cluster layout and # we need to attempt to refresh the cluster layout and try again self.refresh_table_asap = True - time.sleep(0.05) + time.sleep(0.1) # This is the last attempt before we run out of TTL, raise the exception if ttl == 1: raise e - except (RedisClusterException, BusyLoadingError): + except (RedisClusterException, BusyLoadingError) as e: + log.exception("RedisClusterException || BusyLoadingError") raise - except ConnectionError: + except ConnectionError as e: + log.exception("ConnectionError") + connection.disconnect() - except TimeoutError: + connection_error_retry_counter += 1 + + # Give the node 0.1 seconds to get back up and retry again with same + # node and configuration. After 5 attempts then try to reinitialize + # the cluster and see if the nodes configuration has changed or not + if connection_error_retry_counter < 5: + time.sleep(0.25) + else: + # Reset the counter back to 0 as it should have 5 new attempts + # after the client tries to reinitailize the cluster setup to the + # new configuration. + connection_error_retry_counter = 0 + self.refresh_table_asap = True + + # Hard force of reinitialize of the node/slots setup + self.connection_pool.nodes.increment_reinitialize_counter( + count=self.connection_pool.nodes.reinitialize_steps, + ) + + except TimeoutError as e: + log.exception("TimeoutError") + if ttl < self.RedisClusterRequestTTL / 2: time.sleep(0.05) else: try_random_node = True except ClusterDownError as e: + log.exception("ClusterDownError") + self.connection_pool.disconnect() self.connection_pool.reset() self.refresh_table_asap = True @@ -627,19 +678,27 @@ def _execute_command(self, *args, **kwargs): # This counter will increase faster when the same client object # is shared between multiple threads. To reduce the frequency you # can set the variable 'reinitialize_steps' in the constructor. 
+ log.exception("MovedError") + self.refresh_table_asap = True self.connection_pool.nodes.increment_reinitialize_counter() node = self.connection_pool.nodes.set_node(e.host, e.port, server_type='master') self.connection_pool.nodes.slots[e.slot_id][0] = node except TryAgainError as e: + log.exception("TryAgainError") + if ttl < self.RedisClusterRequestTTL / 2: time.sleep(0.05) except AskError as e: + log.exception("AskError") + redirect_addr, asking = "{0}:{1}".format(e.host, e.port), True finally: self.connection_pool.release(connection) + log.debug("TTL loop : " + str(ttl)) + raise ClusterError('TTL exhausted.') def _execute_command_on_nodes(self, nodes, *args, **kwargs): diff --git a/rediscluster/connection.py b/rediscluster/connection.py index eb4cbee3..561e7657 100644 --- a/rediscluster/connection.py +++ b/rediscluster/connection.py @@ -2,6 +2,7 @@ # python std lib from __future__ import unicode_literals +import logging import os import random import threading @@ -23,6 +24,8 @@ from redis.connection import ConnectionPool, Connection, DefaultParser, SSLConnection, UnixDomainSocketConnection from redis.exceptions import ConnectionError +log = logging.getLogger(__name__) + class ClusterParser(DefaultParser): """ @@ -42,6 +45,9 @@ class ClusterConnection(Connection): "Manages TCP communication to and from a Redis server" def __init__(self, *args, **kwargs): + log.info("Createing new ClusterConnection instance") + log.debug(str(args) + " : " + str(kwargs)) + self.readonly = kwargs.pop('readonly', False) kwargs['parser_class'] = ClusterParser super(ClusterConnection, self).__init__(*args, **kwargs) @@ -54,6 +60,9 @@ def on_connect(self): super(ClusterConnection, self).on_connect() if self.readonly: + log.debug("Sending READONLY command to server to configure connection as readonly") + log.debug(str(self)) + self.send_command('READONLY') if nativestr(self.read_response()) != 'OK': @@ -68,7 +77,10 @@ class SSLClusterConnection(SSLConnection): client = RedisCluster(connection_pool=pool) """ - def __init__(self, **kwargs): + def __init__(self, *args, **kwargs): + log.info("Createing new SSLClusterConnection instance") + log.debug(str(args) + " : " + str(kwargs)) + self.readonly = kwargs.pop('readonly', False) # need to pop this off as the redis/connection.py SSLConnection init doesn't work with ssl passed in if 'ssl' in kwargs: @@ -84,6 +96,8 @@ def on_connect(self): super(SSLClusterConnection, self).on_connect() if self.readonly: + log.debug("Sending READONLY command to server to configure connection as readonly") + self.send_command('READONLY') if nativestr(self.read_response()) != 'OK': @@ -109,8 +123,11 @@ def __init__(self, startup_nodes=None, init_slot_cache=True, connection_class=No it was operating on. This will allow the client to drift along side the cluster if the cluster nodes move around a lot. """ + log.info("Creating new ClusterConnectionPool instance") + if connection_class is None: connection_class = ClusterConnection + super(ClusterConnectionPool, self).__init__(connection_class=connection_class, max_connections=max_connections) # Special case to make from_url method compliant with cluster setting. @@ -153,7 +170,10 @@ def __repr__(self): """ Return a string with all unique ip:port combinations that this pool is connected to. 
""" - nodes = [{'host': i['host'], 'port': i['port']} for i in self.nodes.startup_nodes] + nodes = [ + {'host': i['host'], 'port': i['port']} + for i in self.nodes.startup_nodes + ] return "{0}<{1}>".format( type(self).__name__, @@ -164,6 +184,8 @@ def reset(self): """ Resets the connection pool back to a clean state. """ + log.debug("Resetting ConnectionPool") + self.pid = os.getpid() self._created_connections = 0 self._created_connections_per_node = {} # Dict(Node, Int) diff --git a/rediscluster/nodemanager.py b/rediscluster/nodemanager.py index 1a2ef438..b435b617 100644 --- a/rediscluster/nodemanager.py +++ b/rediscluster/nodemanager.py @@ -1,6 +1,8 @@ # -*- coding: utf-8 -*- # python std lib +import json +import logging import random # rediscluster imports @@ -13,6 +15,8 @@ from redis.connection import Encoder from redis import ConnectionError, TimeoutError, ResponseError +log = logging.getLogger(__name__) + class NodeManager(object): """ @@ -30,6 +34,8 @@ def __init__(self, startup_nodes=None, reinitialize_steps=None, skip_full_covera it was operating on. This will allow the client to drift along side the cluster if the cluster nodes move around alot. """ + log.debug("Creating new NodeManager instance") + self.connection_kwargs = connection_kwargs self.nodes = {} self.slots = {} @@ -162,13 +168,27 @@ def get_redis_link(self, host, port, decode_responses=False): 'port', 'decode_responses', ) - connection_kwargs = {k: v for k, v in self.connection_kwargs.items() if k in set(allowed_keys) - set(disabled_keys)} - return Redis(host=host, port=port, decode_responses=decode_responses, **connection_kwargs) + connection_kwargs = { + k: v + for k, v in self.connection_kwargs.items() + if k in set(allowed_keys) - set(disabled_keys) + } + + return Redis( + host=host, + port=port, + decode_responses=decode_responses, + **connection_kwargs, + ) def initialize(self): """ Init the slots cache by asking all startup nodes what the current cluster configuration is """ + log.debug("Running initialize on NodeManager") + log.debug("Original startup nodes configuration") + log.debug(json.dumps(self.orig_startup_nodes, indent=2)) + nodes_cache = {} tmp_slots = {} @@ -268,6 +288,9 @@ def initialize(self): self.nodes = nodes_cache self.reinitialize_counter = 0 + log.debug("NodeManager initialize done : Nodes") + log.debug(json.dumps(self.nodes, indent=2)) + def remap_internal_node_object(self, node_obj): if not self.host_port_remap: # No remapping rule set, return object unmodified @@ -276,20 +299,18 @@ def remap_internal_node_object(self, node_obj): for remap_rule in self.host_port_remap: if 'from_host' in remap_rule and 'to_host' in remap_rule: if remap_rule['from_host'] in node_obj[0]: - # print('remapping host', node_obj[0], remap_rule['to_host']) node_obj[0] = remap_rule['to_host'] ## The port value is always an integer if 'from_port' in remap_rule and 'to_port' in remap_rule: if remap_rule['from_port'] == node_obj[1]: - # print('remapping port', node_obj[1], remap_rule['to_port']) node_obj[1] = remap_rule['to_port'] return node_obj - def increment_reinitialize_counter(self, ct=1): + def increment_reinitialize_counter(self, ct=1, count=1): for i in range(min(ct, self.reinitialize_steps)): - self.reinitialize_counter += 1 + self.reinitialize_counter += count if self.reinitialize_counter % self.reinitialize_steps == 0: self.initialize() @@ -303,7 +324,11 @@ def cluster_require_full_coverage(self, nodes_cache): def node_require_full_coverage(node): try: - r_node = self.get_redis_link(host=node["host"], 
port=node["port"], decode_responses=True) + r_node = self.get_redis_link( + host=node["host"], + port=node["port"], + decode_responses=True, + ) return "yes" in r_node.config_get("cluster-require-full-coverage").values() except ConnectionError: return False diff --git a/tests/test_cluster_node_manager.py b/tests/test_cluster_node_manager.py index 87fc2f4d..700e1c43 100644 --- a/tests/test_cluster_node_manager.py +++ b/tests/test_cluster_node_manager.py @@ -99,9 +99,9 @@ def patch_execute_command(*args, **kwargs): if args == ('cluster', 'slots'): # Missing slot 5460 return [ - [0, 5459, [b'127.0.0.1', 7000], [b'127.0.0.1', 7003]], - [5461, 10922, [b'127.0.0.1', 7001], [b'127.0.0.1', 7004]], - [10923, 16383, [b'127.0.0.1', 7002], [b'127.0.0.1', 7005]], + [0, 5459, ['127.0.0.1', 7000], ['127.0.0.1', 7003]], + [5461, 10922, ['127.0.0.1', 7001], ['127.0.0.1', 7004]], + [10923, 16383, ['127.0.0.1', 7002], ['127.0.0.1', 7005]], ] elif args == ('CONFIG GET', 'cluster-require-full-coverage'): return {'cluster-require-full-coverage': 'no'} @@ -125,9 +125,9 @@ def test_init_slots_cache(s): Test that slots cache can in initialized and all slots are covered """ good_slots_resp = [ - [0, 5460, [b'127.0.0.1', 7000], [b'127.0.0.2', 7003]], - [5461, 10922, [b'127.0.0.1', 7001], [b'127.0.0.2', 7004]], - [10923, 16383, [b'127.0.0.1', 7002], [b'127.0.0.2', 7005]], + [0, 5460, ['127.0.0.1', 7000], ['127.0.0.2', 7003]], + [5461, 10922, ['127.0.0.1', 7001], ['127.0.0.2', 7004]], + [10923, 16383, ['127.0.0.1', 7002], ['127.0.0.2', 7005]], ] with patch.object(Redis, 'execute_command') as execute_command_mock: @@ -142,7 +142,7 @@ def patch_execute_command(*args, **kwargs): s.connection_pool.nodes.initialize() assert len(s.connection_pool.nodes.slots) == NodeManager.RedisClusterHashSlots for slot_info in good_slots_resp: - all_hosts = [b'127.0.0.1', b'127.0.0.2'] + all_hosts = ['127.0.0.1', '127.0.0.2'] all_ports = [7000, 7001, 7002, 7003, 7004, 7005] slot_start = slot_info[0] slot_end = slot_info[1] @@ -181,7 +181,6 @@ def test_init_slots_cache_slots_collision(): In this test both nodes will say that the first slots block should be bound to different servers. """ - n = NodeManager(startup_nodes=[ {"host": "127.0.0.1", "port": 7000}, {"host": "127.0.0.1", "port": 7001}, @@ -192,17 +191,46 @@ def monkey_link(host=None, port=None, *args, **kwargs): Helper function to return custom slots cache data from different redis nodes """ if port == 7000: - result = [[0, 5460, [b'127.0.0.1', 7000], [b'127.0.0.1', 7003]], - [5461, 10922, [b'127.0.0.1', 7001], [b'127.0.0.1', 7004]]] + result = [ + [ + 0, + 5460, + ['127.0.0.1', 7000], + ['127.0.0.1', 7003], + ], + [ + 5461, + 10922, + ['127.0.0.1', 7001], + ['127.0.0.1', 7004], + ], + ] elif port == 7001: - result = [[0, 5460, [b'127.0.0.1', 7001], [b'127.0.0.1', 7003]], - [5461, 10922, [b'127.0.0.1', 7000], [b'127.0.0.1', 7004]]] - + result = [ + [ + 0, + 5460, + ['127.0.0.1', 7001], + ['127.0.0.1', 7003], + ], + [ + 5461, + 10922, + ['127.0.0.1', 7000], + ['127.0.0.1', 7004], + ], + ] else: result = [] - r = RedisCluster(host=host, port=port, decode_responses=True) + r = RedisCluster( + host=host, + port=port, + decode_responses=True, + skip_full_coverage_check=False, + ) + orig_execute_command = r.execute_command def execute_command(*args, **kwargs): @@ -240,10 +268,18 @@ def test_all_nodes_masters(): Set a list of nodes with random masters/slaves config and it shold be possible to itterate over all of them. 
""" - n = NodeManager(startup_nodes=[{"host": "127.0.0.1", "port": 7000}, {"host": "127.0.0.1", "port": 7001}]) + n = NodeManager( + startup_nodes=[ + {"host": "127.0.0.1", "port": 7000}, + {"host": "127.0.0.1", "port": 7001} + ] + ) n.initialize() - nodes = [node for node in n.nodes.values() if node['server_type'] == 'master'] + nodes = [ + node for node in n.nodes.values() + if node['server_type'] == 'master' + ] for node in n.all_masters(): assert node in nodes @@ -293,7 +329,6 @@ def test_cluster_slots_error(): assert "ERROR sending 'cluster slots' command" in e.args[0] - def test_cluster_slots_error_expected_responseerror(): """ Check that exception is not raised if initialize can't execute @@ -303,11 +338,15 @@ def test_cluster_slots_error_expected_responseerror(): execute_command_mock.side_effect = ResponseError("MASTERDOWN") with pytest.raises(RedisClusterException): - n = NodeManager(startup_nodes=[{"host": "127.0.0.1", "port": 7000}]) + n = NodeManager(startup_nodes=[ + {"host": "127.0.0.1", "port": 7000}, + ]) n.initialize() try: - n = NodeManager(startup_nodes=[{"host": "127.0.0.1", "port": 7000}]) + n = NodeManager(startup_nodes=[ + {"host": "127.0.0.1", "port": 7000}, + ]) n.initialize() except RedisClusterException as e: assert "Redis Cluster cannot be connected" in e.args[0] @@ -379,7 +418,10 @@ def patch_execute_command(*args, **kwargs): def test_initialize_follow_cluster(): - n = NodeManager(nodemanager_follow_cluster=True, startup_nodes=[{'host': '127.0.0.1', 'port': 7000}]) + n = NodeManager( + nodemanager_follow_cluster=True, + startup_nodes=[{'host': '127.0.0.1', 'port': 7000}] + ) n.orig_startup_nodes = None n.initialize() diff --git a/tests/test_cluster_obj.py b/tests/test_cluster_obj.py index fc1b6a1e..979ab37c 100644 --- a/tests/test_cluster_obj.py +++ b/tests/test_cluster_obj.py @@ -163,9 +163,19 @@ def test_blocked_commands(r): These commands should be blocked and raise RedisClusterException """ blocked_commands = [ - "CLIENT SETNAME", "SENTINEL GET-MASTER-ADDR-BY-NAME", 'SENTINEL MASTER', 'SENTINEL MASTERS', - 'SENTINEL MONITOR', 'SENTINEL REMOVE', 'SENTINEL SENTINELS', 'SENTINEL SET', - 'SENTINEL SLAVES', 'SHUTDOWN', 'SLAVEOF', 'SCRIPT KILL', 'MOVE', 'BITOP', + 'BITOP', + 'MOVE', + 'SCRIPT KILL', + 'SENTINEL GET-MASTER-ADDR-BY-NAME', + 'SENTINEL MASTER', + 'SENTINEL MASTERS', + 'SENTINEL MONITOR', + 'SENTINEL REMOVE', + 'SENTINEL SENTINELS', + 'SENTINEL SET', + 'SENTINEL SLAVES', + 'SHUTDOWN', + 'SLAVEOF', ] for command in blocked_commands: From 6da568ed43fdecf55d458edf39a53b0cf48b70cf Mon Sep 17 00:00:00 2001 From: Grokzen Date: Sun, 6 Sep 2020 18:33:21 +0200 Subject: [PATCH 227/263] Add logging document to index and change heading --- docs/index.rst | 1 + docs/logging.rst | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/docs/index.rst b/docs/index.rst index 5ee4e40a..17587bb6 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -142,6 +142,7 @@ The Usage Guide pipelines pubsub readonly-mode + logging .. _setup_and_performance: diff --git a/docs/logging.rst b/docs/logging.rst index 2c0b0da9..e2ce5951 100644 --- a/docs/logging.rst +++ b/docs/logging.rst @@ -1,5 +1,5 @@ Setup client logging -#################### +==================== To setup logging for debugging inside the client during development you can add this as an example to your own code to enable `DEBUG` logging when using the library. 
From d69d290fb87cd23feabb134892f20f3caec25c2a Mon Sep 17 00:00:00 2001 From: Appurv Jain Date: Fri, 12 Jun 2020 16:22:01 -0700 Subject: [PATCH 228/263] [Bug Fix] Ensure port mapping is not done in isolation of host mapping The existing remapping logic looks at host remapping and port remapping in isolation, which leads to failure in some cases where all from_ports are same like the following example. ``` startup_nodes = [ {"host": "127.0.0.1", "port": "17000"}, {"host": "127.0.0.1", "port": "17001"}, {"host": "127.0.0.1", "port": "17002"}, {"host": "127.0.0.1", "port": "17003"}, {"host": "127.0.0.1", "port": "17004"}, {"host": "127.0.0.1", "port": "17005"} ] host_port_remap=[ {'from_host': '41.1.3.1', 'from_port': 6379, 'to_host': '127.0.0.1', 'to_port': 17000}, {'from_host': '41.1.3.5', 'from_port': 6379, 'to_host': '127.0.0.1', 'to_port': 17001}, {'from_host': '41.1.4.2', 'from_port': 6379, 'to_host': '127.0.0.1', 'to_port': 17002}, {'from_host': '50.0.1.7', 'from_port': 6379, 'to_host': '127.0.0.1', 'to_port': 17003}, {'from_host': '50.0.7.3', 'from_port': 6379, 'to_host': '127.0.0.1', 'to_port': 17004}, {'from_host': '32.0.1.1', 'from_port': 6379, 'to_host': '127.0.0.1', 'to_port': 17005} ] ``` This is becasue when just ports get remapped, it ends up producing overlapping node names and ends up corrupting both the `nodes_cache` and the `slots` dictionaries in nodemanager Changes: - This change modifies the remapping logic to use both host and port info(if avaiable) to decide if remapping should happen - Adds more cases in the host_port_remap list validation logic so that no unknown keys are allowed and only valid ips are allowed - Added a more explicit ip comparison logic(comparing octets indiviidually) - Also fixed an unrelated failing testcase `test_blocked_commands` in `test_cluster_obj.py` - Updated the docs/clients.rst with details about the new mapping logic and added an example Test Plan: - Updated Unit tests to catch this is issue - Tested the changes a real AWS Elasticache Redis cluster fronted by stunnel and tested both get, mget and set command with differrent keys --- .gitignore | 1 + docs/authors.rst | 1 + docs/client.rst | 80 ++++++++++++++++++++++++++++-- docs/release-notes.rst | 2 +- rediscluster/nodemanager.py | 47 ++++++++++++++++-- tests/test_cluster_node_manager.py | 73 +++++++++++++++++++++++++-- 6 files changed, 190 insertions(+), 14 deletions(-) diff --git a/.gitignore b/.gitignore index 7c1a18bf..0996c579 100644 --- a/.gitignore +++ b/.gitignore @@ -13,3 +13,4 @@ build .cache docs/_build docs/_build_html +.idea diff --git a/docs/authors.rst b/docs/authors.rst index e2464a2a..eb041cdb 100644 --- a/docs/authors.rst +++ b/docs/authors.rst @@ -27,3 +27,4 @@ Authors who contributed code or testing: - astrohsy - https://github.com/astrohsy - Artur Stawiarski - https://github.com/astawiarski - Matthew Anderson - https://github.com/mc3ander + - Appurv Jain - https://github.com/appurvj diff --git a/docs/client.rst b/docs/client.rst index b9817de8..5533a2ff 100644 --- a/docs/client.rst +++ b/docs/client.rst @@ -12,13 +12,45 @@ host_port_remap This option exists to enable the client to fix a problem where the redis-server internally tracks a different ip:port compared to what your clients would like to connect to. -The simples example to describe this problem is if you start a redis cluster through docker on your local machine. 
If we assume that you start the docker image grokzen/redis-cluster, when the redis cluster is initialized it will track the docker network IP for each node in the cluster. +A simple example to describe this problem is if you start a redis cluster through docker on your local machine. If we assume that you start the docker image grokzen/redis-cluster, +when the redis cluster is initialized it will track the docker network IP for each node in the cluster. -For example this could be 172.18.0.2. The problem is that a client that runs outside on your local machine will recieve from the redis cluster that each node is reachable on the ip 172.18.0.2. But in some cases this IP is not available on your host system and to solve this we need a remapping table where we can tell this client that if you get back from your cluster 172.18.0.2 then your should remap it to localhost instead. When the client does this it can now connect and reach all nodes in your cluster. +For example this could be 172.18.0.2. The problem is that a client that runs outside on your local machine will receive from the redis cluster that each node is reachable on the ip 172.18.0.2. +But in some cases this IP is not available on your host system.To solve this we need a remapping table where we can tell this client that if you get back from your cluster 172.18.0.2 then your should remap it to localhost instead. +When the client does this it can now connect and reach all nodes in your cluster. -It is also possible to remap the port for each node as well. -Example script +Remapping works off a rules list. Each rule is a dictionary of the form shown below + +.. code-block:: + + { + 'from_host': , # String + 'from_port': , # Integer + 'to_host': , # String + 'to_port': # Integer + } + + +Remapping properties: + +- This host_port_remap feature will not work on the startup_nodes so you still need to put in a valid and reachable set of startup nodes. +- The remapping logic treats host_port_remap list as a "rules list" and only the first matching remapping entry will be applied +- A remapping rule may contain just host or just port mapping, but both sides of the maping( i.e. from_host and to_host or from_port and to_port) are required for either +- If both from_host and from_port are specified, then both will be used to decide if a remapping rule applies + +Examples of valid rules: + +.. code-block:: python + + {'from_host': "1.2.3.4", 'from_port': 1000, 'to_host': "2.2.2.2", 'to_port': 2000} + + {'from_host': "1.1.1.1", 'to_host': "127.0.0.1"} + + {'from_port': 1000, 'to_port': 2000} + + +Example scripts: .. code-block:: python @@ -52,5 +84,43 @@ Example script ## Test the client that it can still send and recieve data from the nodes after the remap has been done print(rc.set('foo', 'bar')) +This feature is also useful in cases such as when one is trying to access AWS ElastiCache cluster secured by Stunnel (https://www.stunnel.org/) + +.. 
code-block:: python + + from rediscluster import RedisCluster + + startup_nodes = [ + {"host": "127.0.0.1", "port": "17000"}, + {"host": "127.0.0.1", "port": "17001"}, + {"host": "127.0.0.1", "port": "17002"}, + {"host": "127.0.0.1", "port": "17003"}, + {"host": "127.0.0.1", "port": "17004"}, + {"host": "127.0.0.1", "port": "17005"} + ] + + host_port_remap=[ + {'from_host': '41.1.3.1', 'from_port': 6379, 'to_host': '127.0.0.1', 'to_port': 17000}, + {'from_host': '41.1.3.5', 'from_port': 6379, 'to_host': '127.0.0.1', 'to_port': 17001}, + {'from_host': '41.1.4.2', 'from_port': 6379, 'to_host': '127.0.0.1', 'to_port': 17002}, + {'from_host': '50.0.1.7', 'from_port': 6379, 'to_host': '127.0.0.1', 'to_port': 17003}, + {'from_host': '50.0.7.3', 'from_port': 6379, 'to_host': '127.0.0.1', 'to_port': 17004}, + {'from_host': '32.0.1.1', 'from_port': 6379, 'to_host': '127.0.0.1', 'to_port': 17005} + ] + + + # Note: decode_responses must be set to True when used with python3 + rc = RedisCluster( + startup_nodes=startup_nodes, + host_port_remap=host_port_remap, + decode_responses=True, + ssl=True, + ssl_cert_reqs=None, + # Needed for Elasticache Clusters + skip_full_coverage_check=True) -Pleaes note that this host_port_remap feature will not work on the startup_nodes so you still need to put in a valid and reachable set of startup nodes. + + print(rc.connection_pool.nodes.nodes) + print(rc.ping()) + print(rc.set('foo', 'bar')) + print(rc.get('foo')) diff --git a/docs/release-notes.rst b/docs/release-notes.rst index 1104370b..7d6b6c84 100644 --- a/docs/release-notes.rst +++ b/docs/release-notes.rst @@ -26,7 +26,7 @@ Release Notes * Implement new connection pool ClusterBlockingConnectionPool (#347) * Nodemanager initiailize should now handle usernames properly (#365) * PubSub tests has been all been disabled - * New feature, host_port_remap. Send in a remapping configuration to RedisCluster instance where the nodes configuration recieved from the redis cluster can be altered to allow for connection in certain circumstances. See new section in clients.rst in docs/ for usage example. + * New feature, host_port_remap. Send in a remapping configuration to RedisCluster instance where the nodes configuration recieved from the redis cluster can be altered to allow for connection in certain circumstances. See new section in client.rst in docs/ for usage example. * When a slot is not covered by the cluster, it will not raise SlotNotCoveredError instead of the old generic RedisClusterException. The client will not attempt to rebuild the cluster layout a few times before giving up and raising that exception to the user. (#350) * CLIENT SETNAME is now possible to use from the client instance. For setting the name for all connections from the client by default, see issue #802 in redis-py repo for the change that was implemented in redis-py 3.4.0. 
diff --git a/rediscluster/nodemanager.py b/rediscluster/nodemanager.py index b435b617..515a5e15 100644 --- a/rediscluster/nodemanager.py +++ b/rediscluster/nodemanager.py @@ -4,6 +4,7 @@ import json import logging import random +import socket # rediscluster imports from .crc import crc16 @@ -11,7 +12,6 @@ # 3rd party imports from redis import Redis -from redis._compat import unicode, long, basestring from redis.connection import Encoder from redis import ConnectionError, TimeoutError, ResponseError @@ -71,12 +71,30 @@ def _validate_host_port_remap(self, host_port_remap): if not isinstance(item, dict): raise RedisClusterConfigError("items inside host_port_remap list must be of dict type") + if len(set(item.keys()) - {'from_host', 'from_port', 'to_host', 'to_port'}) != 0: + raise RedisClusterConfigError("Invalid keys provided in host_port_remap rule") + # If we have from_host, we must have a to_host option to allow for translation to work if ('from_host' in item and 'to_host' not in item) or ('from_host' not in item and 'to_host' in item): - raise RedisClusterConfigError("Both from_host and to_host must be present in remap item if either is defined") + raise RedisClusterConfigError("Both from_host and to_host must be present in host_port_remap rule if either is defined") + + if ('from_port' in item and 'to_port' not in item) or ('from_port' not in item and 'to_port' in item): + raise RedisClusterConfigError("Both from_port and to_port must be present in host_port_remap rule if either is defined") + + try: + socket.inet_aton(item.get('from_host', '0.0.0.0').strip()) + socket.inet_aton(item.get('to_host', '0.0.0.0').strip()) + except socket.error: + raise RedisClusterConfigError("Both from_host and to_host in host_port_remap rule must be a valid ip address") + if len(item.get('from_host', '0.0.0.0').split('.')) < 4 or len(item.get('to_host', '0.0.0.0').split('.')) < 4 : + raise RedisClusterConfigError( + "Both from_host and to_host in host_port_remap rule must must have all octets specified") - if ('from_port' in item and 'to_port' not in item) or ('from_port' not in item and 'to_port' in item): - raise RedisClusterConfigError("Both from_port and to_port must be present in remap item") + try: + int(item.get('from_port', 0)) + int(item.get('to_port', 0)) + except ValueError: + raise RedisClusterConfigError("Both from_port and to_port in host_port_remap rule must be integers") def keyslot(self, key): """ @@ -305,9 +323,30 @@ def remap_internal_node_object(self, node_obj): if 'from_port' in remap_rule and 'to_port' in remap_rule: if remap_rule['from_port'] == node_obj[1]: node_obj[1] = remap_rule['to_port'] + # At this point remapping has occurred, so no further rules should be processed + break return node_obj + def _remap_rule_applies(self, remap_rule, node_obj): + # Double check to make sure that the relevant host and/or port fields are present + if not (('from_host' in remap_rule and 'to_host' in remap_rule) or ('from_port' in remap_rule and 'to_port' in remap_rule)): + return False + if 'from_host' in remap_rule and not self._ips_equal(remap_rule['from_host'], node_obj[0]): + return False + if 'from_port' in remap_rule and remap_rule['from_port'] != node_obj[1]: + return False + # If the previous conditions are not met then this is a valid match. 
+ return True + + def _ips_equal(self, ip1, ip2): + split_ip1 = ip1.strip().split(".") + split_ip2 = ip2.strip().split(".") + for i, octet in enumerate(split_ip1): + if int(octet) != int(split_ip2[i]): + return False + return True + def increment_reinitialize_counter(self, ct=1, count=1): for i in range(min(ct, self.reinitialize_steps)): self.reinitialize_counter += count diff --git a/tests/test_cluster_node_manager.py b/tests/test_cluster_node_manager.py index 700e1c43..f126f101 100644 --- a/tests/test_cluster_node_manager.py +++ b/tests/test_cluster_node_manager.py @@ -495,13 +495,32 @@ def test_host_port_remap(): startup_nodes=[{"host": "127.0.0.1", "port": 7000}], host_port_remap=[{'to_port': ''}], ) + # Invalid keys in the rules should also raise exception + with pytest.raises(RedisClusterConfigError) as excp: + n = NodeManager( + startup_nodes=[{"host": "127.0.0.1", "port": 7000}], + host_port_remap=[{'invalid_key': ''}], + ) + + # Invalid ips in the rules should raise exception + with pytest.raises(RedisClusterConfigError) as excp: + n = NodeManager( + startup_nodes=[{"host": "127.0.0.1", "port": 7000}], + host_port_remap=[{'from_host': '127.2.x.w', 'to_host': '127.0.0.1'}], + ) + # Incomplete ips in the rules should raise exception + with pytest.raises(RedisClusterConfigError) as excp: + n = NodeManager( + startup_nodes=[{"host": "127.0.0.1", "port": 7000}], + host_port_remap=[{'from_host': '127.2', 'to_host': '127.0.0.1'}], + ) # Creating a valid config with multiple entries n = NodeManager( startup_nodes=[{"host": "127.0.0.1", "port": 7000}], host_port_remap=[ - {'from_host': '127.0.0.1', 'to_host': 'localhost', 'from_port': 7000, 'to_port': 70001}, - {'from_host': '172.1.0.1', 'to_host': 'localhost', 'from_port': 7000, 'to_port': 70001}, + {'from_host': '127.0.0.1', 'to_host': '127.0.0.1', 'from_port': 7000, 'to_port': 70001}, + {'from_host': '172.1.0.1', 'to_host': '127.0.0.1', 'from_port': 7000, 'to_port': 70001}, ], ) @@ -516,10 +535,56 @@ def test_host_port_remap(): # Test that modifying both host and port works n = NodeManager( - host_port_remap=[{'from_host': '127.0.0.1', 'to_host': 'localhost', 'from_port': 7000, 'to_port': 7001}], + host_port_remap=[{'from_host': '127.1.1.1', 'to_host': '128.0.0.1', 'from_port': 7000, 'to_port': 7001}, + {'from_host': '127.2.2.2', 'to_host': '128.0.0.1', 'from_port': 7000, 'to_port': 7005}], + startup_nodes=[{"host": "128.0.0.1", "port": 7000}] + ) + initial_node_obj = ['127.1.1.1', 7000, 'xyz'] + remapped_obj = n.remap_internal_node_object(initial_node_obj) + assert remapped_obj[0] == '128.0.0.1' + assert remapped_obj[1] == 7001 + + # Validate that ports are NOT remapped in isolation if hosts are also present + n = NodeManager( + host_port_remap=[{'from_host': '127.2.2.2', 'to_host': '127.0.0.1', 'from_port': 7000, 'to_port': 7001}, + {'from_host': '127.3.3.3', 'to_host': '127.0.0.1', 'from_port': 7000, 'to_port': 7005}], startup_nodes=[{"host": "127.0.0.1", "port": 7000}] ) initial_node_obj = ['127.0.0.1', 7000, 'xyz'] remapped_obj = n.remap_internal_node_object(initial_node_obj) - assert remapped_obj[0] == 'localhost' + assert remapped_obj[0] == '127.0.0.1' + assert remapped_obj[1] == 7000 + + # Validate that first applicable rule is applied + n = NodeManager( + host_port_remap=[{'from_host': '127.2.2.2', 'to_host': '127.0.0.1', 'from_port': 7000, 'to_port': 7001}, + {'from_host': '127.3.3.3', 'to_host': '127.0.0.1', 'from_port': 7000, 'to_port': 7005}, + {'from_host': '127.2.2.2', 'to_host': '127.0.0.1', 'from_port': 7000, 'to_port': 
7006}], + startup_nodes=[{"host": "127.0.0.1", "port": 7000}] + ) + initial_node_obj = ['127.2.2.2', 7000, 'xyz'] + remapped_obj = n.remap_internal_node_object(initial_node_obj) + assert remapped_obj[0] == '127.0.0.1' assert remapped_obj[1] == 7001 + + # Validate just port mapping works + n = NodeManager( + host_port_remap=[{'from_port': 7000, 'to_port': 7001}, + {'from_port': 7002, 'to_port': 7005}], + startup_nodes=[{"host": "127.0.0.1", "port": 7000}] + ) + initial_node_obj = ['127.0.0.1', 7000, 'xyz'] + remapped_obj = n.remap_internal_node_object(initial_node_obj) + assert remapped_obj[0] == '127.0.0.1' + assert remapped_obj[1] == 7001 + + # Validate just host mapping works + n = NodeManager( + host_port_remap=[{'from_host': '127.2.2.2', 'to_host': '127.0.0.1'}, + {'from_host': '127.3.3.3', 'to_host': '127.0.0.2'}], + startup_nodes=[{"host": "127.0.0.1", "port": 7000}] + ) + initial_node_obj = ['127.3.3.3', 7000, 'xyz'] + remapped_obj = n.remap_internal_node_object(initial_node_obj) + assert remapped_obj[0] == '127.0.0.2' + assert remapped_obj[1] == 7000 From d3ef787e033e0bbf97d0b3d17fc46cb5ae36d660 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Sun, 6 Sep 2020 18:06:19 +0200 Subject: [PATCH 229/263] Fix syntax error on py27 environments --- rediscluster/nodemanager.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rediscluster/nodemanager.py b/rediscluster/nodemanager.py index 515a5e15..f4243310 100644 --- a/rediscluster/nodemanager.py +++ b/rediscluster/nodemanager.py @@ -196,7 +196,7 @@ def get_redis_link(self, host, port, decode_responses=False): host=host, port=port, decode_responses=decode_responses, - **connection_kwargs, + **connection_kwargs ) def initialize(self): From 1e7fc4bb04c1f16b790849e7f73f4b1bd09fc352 Mon Sep 17 00:00:00 2001 From: Dan Blanchard Date: Mon, 14 Sep 2020 10:40:37 -0400 Subject: [PATCH 230/263] "propergate" -> "propagate" typo fix --- docs/logging.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/logging.rst b/docs/logging.rst index e2ce5951..8b897ad7 100644 --- a/docs/logging.rst +++ b/docs/logging.rst @@ -12,6 +12,6 @@ To setup logging for debugging inside the client during development you can add logging.basicConfig() logger = logging.getLogger('rediscluster') logger.setLevel(logging.DEBUG) - logger.propergate = True + logger.propagate = True Note that this logging is not reccommended to be used inside production as it can cause a performance drain and a slowdown of your client. 
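To make the remapping rules above concrete, here is a small sketch in the spirit of the new unit tests: only the first matching rule in host_port_remap is applied, and when a rule carries both `from_host` and `from_port` both of them have to match before the node object is rewritten. The internal addresses used here are made up for illustration:

.. code-block:: python

    from rediscluster.nodemanager import NodeManager

    n = NodeManager(
        startup_nodes=[{'host': '127.0.0.1', 'port': 7000}],
        host_port_remap=[
            # Both rules match the same internal address; only the first applies
            {'from_host': '10.0.0.1', 'from_port': 6379, 'to_host': '127.0.0.1', 'to_port': 7000},
            {'from_host': '10.0.0.1', 'from_port': 6379, 'to_host': '127.0.0.1', 'to_port': 7001},
        ],
    )

    # The node object reported by the cluster is rewritten in place
    print(n.remap_internal_node_object(['10.0.0.1', 6379, 'abc']))
    # expected output: ['127.0.0.1', 7000, 'abc']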
From eb2b738efeb97d0f54fa07149ea430b259c174d4 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Wed, 16 Sep 2020 23:41:24 +0200 Subject: [PATCH 231/263] Fix broken merge that caused the old code master code before the PR to be present after the PR was merged #378 --- rediscluster/nodemanager.py | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/rediscluster/nodemanager.py b/rediscluster/nodemanager.py index f4243310..0a53e309 100644 --- a/rediscluster/nodemanager.py +++ b/rediscluster/nodemanager.py @@ -315,13 +315,11 @@ def remap_internal_node_object(self, node_obj): return node_obj for remap_rule in self.host_port_remap: - if 'from_host' in remap_rule and 'to_host' in remap_rule: - if remap_rule['from_host'] in node_obj[0]: + if self._remap_rule_applies(remap_rule, node_obj): + # We have found a valid match and can proceed with the remapping + if 'to_host' in remap_rule: node_obj[0] = remap_rule['to_host'] - - ## The port value is always an integer - if 'from_port' in remap_rule and 'to_port' in remap_rule: - if remap_rule['from_port'] == node_obj[1]: + if 'to_port' in remap_rule: node_obj[1] = remap_rule['to_port'] # At this point remapping has occurred, so no further rules should be processed break From 71001ade21440fdf34912e9097b8ae11a9283651 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Thu, 17 Sep 2020 17:10:05 +0200 Subject: [PATCH 232/263] A major pass on implemented commands documentation to use a new format that will make browing commands much easier and logical over the old documentation. This is only the first pass on it and still requires more work to be complete. --- docs/client.rst | 8 +- docs/commands.rst | 550 +++++++++++++++++++++++++++++++++++-------- docs/conf.py | 15 +- docs/development.rst | 22 ++ docs/index.rst | 10 +- docs/testing.rst | 14 +- docs/tox.rst | 11 - 7 files changed, 503 insertions(+), 127 deletions(-) create mode 100644 docs/development.rst delete mode 100644 docs/tox.rst diff --git a/docs/client.rst b/docs/client.rst index 5533a2ff..83b1e7db 100644 --- a/docs/client.rst +++ b/docs/client.rst @@ -1,14 +1,14 @@ RedisCluster client configuration options ========================================= -This chapter is supposed to describe all the configuration options and flags that can be sent into the RedisCluster class instance. +This chapter will describe all the configuration options and flags that can be sent into the RedisCluster class instance. -Each option will be described in a seperate topic to describe how it works and what it does. This will only describe any options that does anything else when compared to redis-py, or new options that is cluster specific. +Each option will be described in a seperate topic to describe how it works and what it does. This will only describe any options that does anything else when compared to the options that redis-py already provides, or new options that is cluster specific. To find out what options redis-py provides please consult the documentation and/or git repo for that project. -host_port_remap ---------------- +Host port remapping +------------------- This option exists to enable the client to fix a problem where the redis-server internally tracks a different ip:port compared to what your clients would like to connect to. 
diff --git a/docs/commands.rst b/docs/commands.rst index b618a423..8e703b12 100644 --- a/docs/commands.rst +++ b/docs/commands.rst @@ -1,132 +1,478 @@ -Implemented commands -==================== +Implemented redis commands in RedisCluster +========================================== -This will describe all changes made in RedisCluster enable a command for a clustered environment. +This document will enumerate and describe all implemented redis commands and if there is any cluster specific customization/changes done to the command to make them work for a cluster workload. -If a command is not listed here then the default implementation from `Redis` in the `redis-py` library is used. +If a command is specified here but there is no comments on it, then you can assume it will work and behave the same way as when using it from `redis-py`. +If a new command has been added to redis-server and it is not documented here then please open a issue on github telling that it is missing and needs to be added to this documentation. +.. danger:: -Fanout Commands ---------------- + If a command below begins with `[NYV] / Not Yet Verified` it means that the command is documented here but it is not yet verified that it works or is properly implemented or decided what implementation to use in a clustered environment. -The following commands will send the same request to all nodes in the cluster. Results is returned as a dict with k,v pair (NodeID, Result). - - bgrewriteaof - - bgsave - - client_getname - - client_kill - - client_list - - client_setname - - config_get - - config_resetstat - - config_rewrite - - config_set - - dbsize - - echo - - info - - lastsave - - ping - - save - - slowlog_get - - slowlog_len - - slowlog_reset - - time +Cluster +------- -The pubsub commands are sent to all nodes, and the resulting replies are merged together. They have an optional keyword argument `aggregate` which when set to `False` will return a dict with k,v pair (NodeID, Result) instead of the merged result. +https://redis.io/commands#cluster - - pubsub_channels - - pubsub_numsub - - pubsub_numpat +- **[NYV]** - CLUSTER ADDSLOTS slot [slot ...] +- **[NYV]** - CLUSTER BUMPEPOCH +- **[NYV]** - CLUSTER COUNT_FAILURE-REPORTS node-id +- CLUSTER COUNTKEYSINSLOT slot -This command will send the same request to all nodes in the cluster in sequence. Results is appended to a unified list. + .. note:: - - keys + Client will route command to node that owns the slot -The following commands will only be send to the master nodes in the cluster. Results is returned as a dict with k,v pair (NodeID, Command-Result). +- **[NYV]** - CLUSTER DELSLOTS slot [slot ...] +- **[NYV]** - CLUSTER FAILOVER [FORCE|TAKEOVER] +- **[NYV]** - CLUSTER FLUSHSLOTS +- **[NYV]** - CLUSTER FORGET node-id +- CLUSTER GETKEYSINSLOT slot count - - flushall - - flushdb - - scan + .. note:: -This command will sent to a random node in the cluster. + Client will route command to node that owns the slot - - publish +- CLUSTER INFO -The following commands will be sent to the server that matches the first key. + .. note:: + + Command is sent to all nodees in the cluster and result is merged into a single dict with node as key. - - eval - - evalsha +- **[NYV]** - CLUSTER KEYSLOT key +- **[NYV]** - CLUSTER MEET ip port +- **[NYV]** - CLUSTER MYID +- CLUSTER NODES -This following commands will be sent to the master nodes in the cluster. + .. 
note:: -- script load - the result is the hash of loaded script -- script flush - the result is `True` if the command succeeds on all master nodes, else `False` -- script exists - the result is an array of booleans. An entry is `True` only if the script exists on all the master nodes. + Command will be sent to random node in the cluster as the data should be the same on all nodes in a stable/working cluster -The following commands will be sent to the sever that matches the specified key. +- **[NYV]** - CLUSTER REPLICATE node-id +- **[NYV]** - CLUSTER RESET [HARD|SOFT] +- **[NYV]** - CLUSTER SAVECONFIG +- **[NYV]** - CLUSTER SET-CONFIG-EPOCH config-epoch +- **[NYV]** - CLUSTER SETSLOT slot IMPORTING|MIGRATING|STABLE|NODE [node-id] +- **[NYV]** - CLUSTER SLAVES node-id +- **[NYV]** - CLUSTER REPLICAS node-id +- CLUSTER SLOTS - - hscan - - hscan_iter - - scan_iter - - sscan - - sscan_iter - - zscan - - zscan_iter + .. note:: + Command will be sent to random node in the cluster as the data should be the same on all nodes in a stable/working cluster +- **[NYV]** - READONLY +- **[NYV]** - READWRITE -Blocked commands ----------------- -The following commands is blocked from use. +Connection +---------- -Either because they do not work, there is no working implementation or it is not good to use them within a cluster. +https://redis.io/commands#connection + +- **[NYV]** - AUTH [username] password +- **[NYV]** - CLIENT CACHING YES|NO +- **[NYV]** - CLIENT ID +- **[NYV]** - CLIENT KILL [ip:port] [ID client-id] [TYPE normal|master|slave|pubsub] [USER username] [ADDR ip:port] [SKIPME yes/no] +- **[NYV]** - CLIENT LIST [TYPE normal|master|replica|pubsub] +- **[NYV]** - CLIENT GETNAME +- **[NYV]** - CLIENT GETREDIR +- **[NYV]** - CLIENT PAUSE timeout +- **[NYV]** - CLIENT REPLY ON|OFF|SKIP +- **[NYV]** - CLIENT SETNAME connection-name +- **[NYV]** - CLIENT TRACKING ON|OFF [REDIRECT client-id] [PREFIX prefix [PREFIX prefix ...]] [BCAST] [OPTIN] [OPTOUT] [NOLOOP] +- **[NYV]** - CLIENT UNBLOCK client-id [TIMEOUT|ERROR] +- **[NYV]** - ECHO message +- **[NYV]** - HELLO protover [AUTH username password] [SETNAME clientname] +- **[NYV]** - PING [message] +- **[NYV]** - QUIT +- **[NYV]** - SELECT index - - bitop - Currently to hard to implement a solution in python space - - client_setname - Not yet implemented - - move - It is not possible to move a key from one db to another in cluster mode - - restore - - script_kill - Not yet implemented - - sentinel - - sentinel_get_master_addr_by_name - - sentinel_master - - sentinel_masters - - sentinel_monitor - - sentinel_remove - - sentinel_sentinels - - sentinel_set - - sentinel_slaves - - shutdown - - slaveof - Cluster management should be done via redis-trib.rb manually - - unwatch - Not yet implemented - - watch - Not yet implemented +Geo +--- +https://redis.io/commands#geo -Overridden methods ------------------- - -The following methods is overridden from Redis with a custom implementation. - -They can operate on keys that exists in different hashslots and require a client side implementation to work. - - - brpoplpus - - mget - - mset - - msetnx - - pfmerge - - randomkey - - rename - - renamenx - - rpoplpush - - sdiff - - sdiffstore - - sinter - - sinterstore - - smove - - sort - - sunion - - sunionstore - - zinterstore - - zunionstore +- **[NYV]** - GEOADD key longitude latitude member [longitude latitude member ...] +- **[NYV]** - GEOHASH key member [member ...] +- **[NYV]** - GEOPOS key member [member ...] 
+- **[NYV]** - GEODIST key member1 member2 [m|km|ft|mi] +- **[NYV]** - GEORADIUS key longitude latitude radius m|km|ft|mi [WITHCOORD] [WITHDIST] [WITHHASH] [COUNT count] [ASC|DESC] [STORE key] [STOREDIST key] +- **[NYV]** - GEORADIUSBYMEMBER key member radius m|km|ft|mi [WITHCOORD] [WITHDIST] [WITHHASH] [COUNT count] [ASC|DESC] [STORE key] [STOREDIST key] + + +Hashes +------ + +https://redis.io/commands#hash + +- **[NYV]** - HDEL key field [field ...] +- **[NYV]** - HEXISTS key field +- **[NYV]** - HGET key field +- **[NYV]** - HGETALL key +- **[NYV]** - HINCRBY key field increment +- **[NYV]** - HINCRBYFLOAT key field increment +- **[NYV]** - HKEYS key +- **[NYV]** - HLEN key +- **[NYV]** - HMGET key field [field ...] +- **[NYV]** - HMSET key field value [field value ...] +- **[NYV]** - HSET key field value [field value ...] +- **[NYV]** - HSETNX key field value +- **[NYV]** - HSTRLEN key field +- **[NYV]** - HVALS key +- **[NYV]** - HSCAN key cursor [MATCH pattern] [COUNT count] + + +Hyperloglog +----------- + +https://redis.io/commands#hyperloglog + +- **[NYV]** - PFADD key element [element ...] +- **[NYV]** - PFCOUNT key [key ...] +- **[NYV]** - PFMERGE destkey sourcekey [sourcekey ...] + + +Keys/Generic +------------ + +https://redis.io/commands#generic + +- **[NYV]** - DEL key [key ...] +- **[NYV]** - DUMP key +- **[NYV]** - EXISTS key [key ...] +- **[NYV]** - EXPIRE key seconds +- **[NYV]** - EXPIREAT key timestamp +- **[NYV]** - KEYS pattern +- **[NYV]** - MIGRATE host port key|"" destination-db timeout [COPY] [REPLACE] [AUTH password] [AUTH2 username password] [KEYS key [key ...]] +- **[NYV]** - MOVE key db +- **[NYV]** - OBJECT subcommand [arguments [arguments ...]] +- **[NYV]** - PERSIST key +- **[NYV]** - PEXPIRE key milliseconds +- **[NYV]** - PEXPIREAT key milliseconds-timestamp +- **[NYV]** - PTTL key +- **[NYV]** - RANDOMKEY +- **[NYV]** - RENAME key newkey +- **[NYV]** - RENAMENX key newkey +- **[NYV]** - RESTORE key ttl serialized-value [REPLACE] [ABSTTL] [IDLETIME seconds] [FREQ frequency] +- **[NYV]** - SORT key [BY pattern] [LIMIT offset count] [GET pattern [GET pattern ...]] [ASC|DESC] [ALPHA] [STORE destination] +- **[NYV]** - TOUCH key [key ...] +- **[NYV]** - TTL key +- **[NYV]** - TYPE key +- **[NYV]** - UNLINK key [key ...] +- **[NYV]** - WAIT numreplicas timeout +- **[NYV]** - SCAN cursor [MATCH pattern] [COUNT count] [TYPE type] + + +Lists +----- + +https://redis.io/commands#list + +- **[NYV]** - BLPOP key [key ...] timeout +- **[NYV]** - BRPOP key [key ...] timeout +- **[NYV]** - BRPOPLPUSH source destination timeout +- **[NYV]** - LINDEX key index +- **[NYV]** - LINSERT key BEFORE|AFTER pivot element +- **[NYV]** - LLEN key +- **[NYV]** - LPOP key +- **[NYV]** - LPOS key element [RANK rank] [COUNT num-matches] [MAXLEN len] +- **[NYV]** - LPUSH key element [element ...] +- **[NYV]** - LPUSHX key element [element ...] +- **[NYV]** - LRANGE key start stop +- **[NYV]** - LREM key count element +- **[NYV]** - LSET key index element +- **[NYV]** - LTRIM key start stop +- **[NYV]** - RPOP key +- **[NYV]** - RPOPLPUSH source destination +- **[NYV]** - RPUSH key element [element ...] +- **[NYV]** - RPUSHX key element [element ...] + + + +PubSub +------ + +https://redis.io/commands#pubsub + +- **[NYV]** - PSUBSCRIBE pattern [pattern ...] +- **[NYV]** - PUBSUB subcommand [argument [argument ...]] +- **[NYV]** - PUBLISH channel message +- **[NYV]** - PUNSUBSCRIBE [pattern [pattern ...]] +- **[NYV]** - SUBSCRIBE channel [channel ...] 
+- **[NYV]** - UNSUBSCRIBE [channel [channel ...]] + + +Scripting +--------- + +https://redis.io/commands#scripting + +-- **[NYV]** - EVAL script numkeys key [key ...] arg [arg ...] +-- **[NYV]** - SCRIPT DEBUG YES|SYNC|NO +-- **[NYV]** - SCRIPT EXISTS sha1 [sha1 ...] +-- **[NYV]** - SCRIPT FLUSH +-- **[NYV]** - SCRIPT KILL +-- **[NYV]** - SCRIPT LOAD script + + +Server +------ + +https://redis.io/commands#server + +- ACL LOAD + + .. warning:: + + Command has been blocked from executing in a cluster environment + +- ACL SAVE + + .. warning:: + + Command has been blocked from executing in a cluster environment + +- ACL LIST + + .. warning:: + + Command has been blocked from executing in a cluster environment + +- ACL USERS + + .. warning:: + + Command has been blocked from executing in a cluster environment + +- ACL GETUSER username + + .. warning:: + + Command has been blocked from executing in a cluster environment + +- ACL SETUSER username [rule [rule ...]] + + .. warning:: + + Command has been blocked from executing in a cluster environment + +- ACL DELUSER username [username ...] + + .. warning:: + + Command has been blocked from executing in a cluster environment + +- ACL CAT [categoryname] + + .. warning:: + + Command has been blocked from executing in a cluster environment + +- ACL GENPASS [bits] + + .. warning:: + + Command has been blocked from executing in a cluster environment + +- ACL WHOAMI + + .. warning:: + + Command has been blocked from executing in a cluster environment + +- ACL LOG [count or RESET] + + .. warning:: + + Command has been blocked from executing in a cluster environment + +- ACL HELP + + .. warning:: + + Command has been blocked from executing in a cluster environment + +- **[NYV]** - BGREWRITEAOF +- **[NYV]** - BGSAVE [SCHEDULE] +- **[NYV]** - COMMAND +- **[NYV]** - COMMAND COUNT +- **[NYV]** - COMMAND GETKEYS +- **[NYV]** - COMMAND INFO command-name [command-name ...] +- **[NYV]** - CONFIG GET parameter +- **[NYV]** - CONFIG REWRITE +- **[NYV]** - CONFIG SET parameter value +- **[NYV]** - CONFIG RESETSTAT +- **[NYV]** - DBSIZE +- **[NYV]** - DEBUG OBJECT key +- **[NYV]** - DEBUG SEGFAULT +- **[NYV]** - FLUSHALL [ASYNC] +- **[NYV]** - FLUSHDB [ASYNC] +- **[NYV]** - INFO [section] +- **[NYV]** - LOLWUT [VERSION version] +- **[NYV]** - LASTSAVE +- **[NYV]** - MEMORY DOCTOR +- **[NYV]** - MEMORY HELP +- **[NYV]** - MEMORY MALLOC-STATS +- **[NYV]** - MEMORY PURGE +- **[NYV]** - MEMORY STATS +- **[NYV]** - MEMORY USAGE key [SAMPLES count] +- **[NYV]** - MODULE LIST +- **[NYV]** - MODULE LOAD path [ arg [arg ...]] +- **[NYV]** - MODULE UNLOAD name +- **[NYV]** - MONITOR +- **[NYV]** - ROLE +- **[NYV]** - SAVE +- **[NYV]** - SHUTDOWN [NOSAVE|SAVE] +- **[NYV]** - SLAVEOF host port +- **[NYV]** - REPLICAOF host port +- **[NYV]** - SLOWLOG subcommand [argument] +- **[NYV]** - SWAPDB index1 index2 +- **[NYV]** - SYNC +- **[NYV]** - PSYNC replicationid offset +- **[NYV]** - TIME +- **[NYV]** - LATENCY DOCTOR +- **[NYV]** - LATENCY GRAPH event +- **[NYV]** - LATENCY HISTORY event +- **[NYV]** - LATENCY LATEST +- **[NYV]** - LATENCY RESET [event [event ...]] +- **[NYV]** - LATENCY HELP + + +Sets +---- + +https://redis.io/commands#set + +- **[NYV]** - SADD key member [member ...] +- **[NYV]** - SCARD key +- **[NYV]** - SDIFF key [key ...] +- **[NYV]** - SDIFFSTORE destination key [key ...] +- **[NYV]** - SINTER key [key ...] +- **[NYV]** - SINTERSTORE destination key [key ...] 
+- **[NYV]** - SISMEMBER key member +- **[NYV]** - SMEMBERS key +- **[NYV]** - SMOVE source destination member +- **[NYV]** - SPOP key [count] +- **[NYV]** - SRANDMEMBER key [count] +- **[NYV]** - SREM key member [member ...] +- **[NYV]** - SUNION key [key ...] +- **[NYV]** - SUNIONSTORE destination key [key ...] +- **[NYV]** - SSCAN key cursor [MATCH pattern] [COUNT count] + + +Sorted Sets +----------- + +https://redis.io/commands#sorted_set + +- **[NYV]** - BZPOPMIN key [key ...] timeout +- **[NYV]** - BZPOPMAX key [key ...] timeout +- **[NYV]** - ZADD key [NX|XX] [CH] [INCR] score member [score member ...] +- **[NYV]** - ZCARD key +- **[NYV]** - ZCOUNT key min max +- **[NYV]** - ZINCRBY key increment member +- **[NYV]** - ZINTERSTORE destination numkeys key [key ...] [WEIGHTS weight [weight ...]] [AGGREGATE SUM|MIN|MAX] +- **[NYV]** - ZLEXCOUNT key min max +- **[NYV]** - ZPOPMAX key [count] +- **[NYV]** - ZPOPMIN key [count] +- **[NYV]** - ZRANGE key start stop [WITHSCORES] +- **[NYV]** - ZRANGEBYLEX key min max [LIMIT offset count] +- **[NYV]** - ZREVRANGEBYLEX key max min [LIMIT offset count] +- **[NYV]** - ZRANGEBYSCORE key min max [WITHSCORES] [LIMIT offset count] +- **[NYV]** - ZRANK key member +- **[NYV]** - ZREM key member [member ...] +- **[NYV]** - ZREMRANGEBYLEX key min max +- **[NYV]** - ZREMRANGEBYRANK key start stop +- **[NYV]** - ZREMRANGEBYSCORE key min max +- **[NYV]** - ZREVRANGE key start stop [WITHSCORES] +- **[NYV]** - ZREVRANGEBYSCORE key max min [WITHSCORES] [LIMIT offset count] +- **[NYV]** - ZREVRANK key member +- **[NYV]** - ZSCORE key member +- **[NYV]** - ZUNIONSTORE destination numkeys key [key ...] [WEIGHTS weight [weight ...]] [AGGREGATE SUM|MIN|MAX] +- **[NYV]** - ZSCAN key cursor [MATCH pattern] [COUNT count] + + +Streams +------- + +https://redis.io/commands#stream + +- **[NYV]** - XINFO [CONSUMERS key groupname] [GROUPS key] [STREAM key] [HELP] +- **[NYV]** - XADD key ID field value [field value ...] +- **[NYV]** - XTRIM key MAXLEN [~] count +- **[NYV]** - XDEL key ID [ID ...] +- **[NYV]** - XRANGE key start end [COUNT count] +- **[NYV]** - XREVRANGE key end start [COUNT count] +- **[NYV]** - XLEN key +- **[NYV]** - XREAD [COUNT count] [BLOCK milliseconds] STREAMS key [key ...] id [id ...] +- **[NYV]** - XGROUP [CREATE key groupname id-or-$] [SETID key groupname id-or-$] [DESTROY key groupname] [DELCONSUMER key groupname consumername] +- **[NYV]** - XREADGROUP GROUP group consumer [COUNT count] [BLOCK milliseconds] [NOACK] STREAMS key [key ...] ID [ID ...] +- **[NYV]** - XACK key group ID [ID ...] +- **[NYV]** - XCLAIM key group consumer min-idle-time ID [ID ...] [IDLE ms] [TIME ms-unix-time] [RETRYCOUNT count] [FORCE] [JUSTID] +- **[NYV]** - XPENDING key group [start end count] [consumer] + + +Strings +------- + +https://redis.io/commands#string + +- **[NYV]** - APPEND key value +- **[NYV]** - BITCOUNT key [start end] +- **[NYV]** - BITFIELD key [GET type offset] [SET type offset value] [INCRBY type offset increment] [OVERFLOW WRAP|SAT|FAIL] +- **[NYV]** - BITOP operation destkey key [key ...] +- **[NYV]** - BITPOS key bit [start] [end] +- **[NYV]** - DECR key +- **[NYV]** - DECRBY key decrement +- **[NYV]** - GET key +- **[NYV]** - GETBIT key offset +- **[NYV]** - GETRANGE key start end +- **[NYV]** - GETSET key value +- **[NYV]** - INCR key +- **[NYV]** - INCRBY key increment +- **[NYV]** - INCRBYFLOAT key increment +- **[NYV]** - MGET key [key ...] +- **[NYV]** - MSET key value [key value ...] +- **[NYV]** - MSETNX key value [key value ...] 
+- **[NYV]** - PSETEX key milliseconds value +- **[NYV]** - SET key value [EX seconds|PX milliseconds|KEEPTTL] [NX|XX] +- **[NYV]** - SETBIT key offset value +- **[NYV]** - SETEX key seconds value +- **[NYV]** - SETNX key value +- **[NYV]** - SETRANGE key offset value +- **[NYV]** - STRALGO LCS algo-specific-argument [algo-specific-argument ...] +- **[NYV]** - STRLEN key + + +Transactions +------------ + +https://redis.io/commands#transactions + +- **[NYV]** - DISCARD +- **[NYV]** - EXEC +- **[NYV]** - MULTI +- **[NYV]** - UNWATCH +- **[NYV]** - WATCH key [key ...] + + +Sentinel +-------- + +https://redis.io/topics/sentinel + +Sentinel commands is no longer needed or really supported by redis now when cluster solution is in place. All `SENTINEL` commands have been blocked by this client to be executed on any node in the cluster. + +- SENTINEL GET-MASTER-ADDR-BY-NAME +- SENTINEL MASTER +- SENTINEL MASTERS +- SENTINEL MONITOR +- SENTINEL REMOVE +- SENTINEL SENTINELS +- SENTINEL SET +- SENTINEL SLAVES diff --git a/docs/conf.py b/docs/conf.py index 392b8078..d64b91fe 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -15,6 +15,9 @@ import sys import os +# Custom RTD sphinx theme +import sphinx_rtd_theme + # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. @@ -28,7 +31,9 @@ # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. -extensions = [] +extensions = [ + "sphinx_rtd_theme", +] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] @@ -46,7 +51,7 @@ # General information about the project. project = u'redis-py-cluster' -copyright = u'2016, Johan Andersson' +copyright = u'2013-2020, Johan Andersson' author = u'Johan Andersson' # The version info for the project you're documenting, acts as replacement for @@ -54,9 +59,9 @@ # built documents. # # The short X.Y version. -version = u'1.2.0' +version = u'2.1.0' # The full version, including alpha/beta/rc tags. -release = u'1.2.0' +release = u'2.1.0' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. @@ -108,7 +113,7 @@ # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. -html_theme = 'default' +html_theme = 'sphinx_rtd_theme' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the diff --git a/docs/development.rst b/docs/development.rst new file mode 100644 index 00000000..6f85f1d8 --- /dev/null +++ b/docs/development.rst @@ -0,0 +1,22 @@ +Development +=========== + + +Documentation +------------- + +To build and test/view documentation you need to install sphinx and addons to be able to run the local dev server to render the documentation. + +Install sphinx plus addons + +.. code-block:: + + pip install sphinx sphinx-autobuild sphinx-rtd-theme + +To start the local development server run from the root folder of this git repo + +.. 
code-block:: + + sphinx-autobuild docs docs/_build/html + +Open up `localhost:8000` in your web-browser to view the online documentaion diff --git a/docs/index.rst b/docs/index.rst index 17587bb6..f4ae3300 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -29,7 +29,7 @@ or from source code Basic usage example -------------- +------------------- Small sample script that shows how to get started with RedisCluster. It can also be found in the file `exmaples/basic.py`. @@ -135,6 +135,7 @@ The Usage Guide .. toctree:: :maxdepth: 2 :glob: + :caption: Usage guide client commands @@ -150,6 +151,7 @@ The Usage Guide .. toctree:: :maxdepth: 2 :glob: + :caption: Setup and performance cluster-setup benchmarks @@ -157,16 +159,18 @@ The Usage Guide The Community Guide --------------------- +------------------- -.. _community-guide: +.. _community_guide: .. toctree:: :maxdepth: 2 :glob: + :caption: Community Guide project-status testing + development upgrading release-notes authors diff --git a/docs/testing.rst b/docs/testing.rst index a71a780a..879d9e31 100644 --- a/docs/testing.rst +++ b/docs/testing.rst @@ -7,7 +7,17 @@ The easiest way to setup a cluster is to use either a Docker or Vagrant. They ar -Tox ---- +Tox - Multi environment testing +------------------------------- To run all tests in all supported environments with `tox` read this [Tox multienv testing](docs/Tox.md) + +Tox is the easiest way to run all tests because it will manage all dependencies and run the correct test command for you. + +TravisCI will use tox to run tests on all supported python & hiredis versions. + +Install tox with `pip install tox` + +To run all environments you need all supported python versions installed on your machine. (See supported python versions list) and you also need the python-dev package for all python versions to build hiredis. + +To run a specific python version use either `tox -e py27` or `tox -e py34` diff --git a/docs/tox.rst b/docs/tox.rst deleted file mode 100644 index 629e744b..00000000 --- a/docs/tox.rst +++ /dev/null @@ -1,11 +0,0 @@ -# Tox - Multi environment testing - -Tox is the easiest way to run all tests because it will manage all dependencies and run the correct test command for you. - -TravisCI will use tox to run tests on all supported python & hiredis versions. - -Install tox with `pip install tox` - -To run all environments you need all supported python versions installed on your machine. (See supported python versions list) and you also need the python-dev package for all python versions to build hiredis. - -To run a specific python version use either `tox -e py27` or `tox -e py34` From 8146155e62b1ff08cbdf9c437860e35720133ba1 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Sun, 20 Sep 2020 11:50:19 +0200 Subject: [PATCH 233/263] Set tag to 2.0.99rc2 for RC2 pypi upload --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index f58c7f17..bb9a9921 100644 --- a/setup.py +++ b/setup.py @@ -20,7 +20,7 @@ setup( name="redis-py-cluster", - version="2.1.0", + version="2.0.99rc2", description="Library for communicating with Redis Clusters. 
Built on top of redis-py lib", long_description=readme + '\n\n' + history, long_description_content_type="text/markdown", From 667910ed1d0bce084338a73758206d72ce6f36bc Mon Sep 17 00:00:00 2001 From: Grokzen Date: Sun, 20 Sep 2020 11:50:58 +0200 Subject: [PATCH 234/263] Update documentation for implemented and verified commands with a bunch of new commands --- docs/commands.rst | 387 ++++++++++++++++++++++++++++++++--------- rediscluster/client.py | 6 +- 2 files changed, 312 insertions(+), 81 deletions(-) diff --git a/docs/commands.rst b/docs/commands.rst index 8e703b12..aae86549 100644 --- a/docs/commands.rst +++ b/docs/commands.rst @@ -17,17 +17,37 @@ Cluster https://redis.io/commands#cluster -- **[NYV]** - CLUSTER ADDSLOTS slot [slot ...] +- CLUSTER ADDSLOTS slot [slot ...] + + .. note:: + + Client has custom implementation where the user has to route the command to the correct node manually. + - **[NYV]** - CLUSTER BUMPEPOCH -- **[NYV]** - CLUSTER COUNT_FAILURE-REPORTS node-id +- CLUSTER COUNT_FAILURE-REPORTS node-id + + .. note:: + + Client has custom implementation where the user has to route the command to the correct node manually. + - CLUSTER COUNTKEYSINSLOT slot .. note:: Client will route command to node that owns the slot -- **[NYV]** - CLUSTER DELSLOTS slot [slot ...] -- **[NYV]** - CLUSTER FAILOVER [FORCE|TAKEOVER] +- CLUSTER DELSLOTS slot [slot ...] + + .. note:: + + Client has custom implementation where the user has to route the command to the correct node manually. + +- CLUSTER FAILOVER [FORCE|TAKEOVER] + + .. note:: + + Client has custom implementation where the user has to route the command to the correct node manually. + - **[NYV]** - CLUSTER FLUSHSLOTS - **[NYV]** - CLUSTER FORGET node-id - CLUSTER GETKEYSINSLOT slot count @@ -40,10 +60,22 @@ https://redis.io/commands#cluster .. note:: - Command is sent to all nodees in the cluster and result is merged into a single dict with node as key. + Command is sent to all nodes in the cluster. + + Result is merged into a single dict with node as key. + +- CLUSTER KEYSLOT key + + .. note:: + + Client has custom implementation where the user has to route the command to the correct node manually. + +- CLUSTER MEET ip port + + .. note:: + + Client has custom implementation where the user has to route the command to the correct node manually. -- **[NYV]** - CLUSTER KEYSLOT key -- **[NYV]** - CLUSTER MEET ip port - **[NYV]** - CLUSTER MYID - CLUSTER NODES @@ -51,12 +83,42 @@ https://redis.io/commands#cluster Command will be sent to random node in the cluster as the data should be the same on all nodes in a stable/working cluster -- **[NYV]** - CLUSTER REPLICATE node-id -- **[NYV]** - CLUSTER RESET [HARD|SOFT] -- **[NYV]** - CLUSTER SAVECONFIG -- **[NYV]** - CLUSTER SET-CONFIG-EPOCH config-epoch -- **[NYV]** - CLUSTER SETSLOT slot IMPORTING|MIGRATING|STABLE|NODE [node-id] -- **[NYV]** - CLUSTER SLAVES node-id +- CLUSTER REPLICATE node-id + + .. note:: + + Client has custom implementation where the user has to route the command to the correct node manually. + +- CLUSTER RESET [HARD|SOFT] + + .. note:: + + Client has custom implementation where the user has to route the command to the correct node manually. + +- CLUSTER SAVECONFIG + + .. note:: + + Client has custom implementation where the user has to route the command to the correct node manually. + +- CLUSTER SET-CONFIG-EPOCH config-epoch + + .. note:: + + Client has custom implementation where the user has to route the command to the correct node manually. 
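
To make the fan-out behaviour described above (for example ``CLUSTER INFO``) concrete, a small usage sketch follows. It is not part of the patch: the startup address is a placeholder for a local test cluster, and it assumes the ``cluster_info()`` helper returns the documented per-node dict keyed by node name.

.. code-block:: python

    from rediscluster import RedisCluster

    # Placeholder startup node for a local test cluster.
    rc = RedisCluster(host="127.0.0.1", port=7000, decode_responses=True)

    # Commands documented as "sent to all nodes" are expected to come back
    # as one entry per node, keyed by the node name (e.g. "127.0.0.1:7000").
    for node_name, info in rc.cluster_info().items():
        print(node_name, info.get("cluster_state"))
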
+ +- CLUSTER SETSLOT slot IMPORTING|MIGRATING|STABLE|NODE [node-id] + + .. note:: + + Client has custom implementation where the user has to route the command to the correct node manually. + +- CLUSTER SLAVES node-id + + .. note:: + + Client has custom implementation where the user has to route the command to the correct node manually. + - **[NYV]** - CLUSTER REPLICAS node-id - CLUSTER SLOTS @@ -75,19 +137,61 @@ https://redis.io/commands#connection - **[NYV]** - AUTH [username] password - **[NYV]** - CLIENT CACHING YES|NO -- **[NYV]** - CLIENT ID -- **[NYV]** - CLIENT KILL [ip:port] [ID client-id] [TYPE normal|master|slave|pubsub] [USER username] [ADDR ip:port] [SKIPME yes/no] -- **[NYV]** - CLIENT LIST [TYPE normal|master|replica|pubsub] -- **[NYV]** - CLIENT GETNAME +- CLIENT ID + + .. warning:: + + Command is sent to all nodes in the cluster. + + Result from each node will be aggregated into a dict where the key will be the internal node name. + +- CLIENT KILL [ip:port] [ID client-id] [TYPE normal|master|slave|pubsub] [USER username] [ADDR ip:port] [SKIPME yes/no] + + .. warning:: + + Command is sent to all nodes in the cluster. + + Result from each node will be aggregated into a dict where the key will be the internal node name. + +- CLIENT LIST [TYPE normal|master|replica|pubsub] + + .. warning:: + + Command is sent to all nodes in the cluster. + + Result from each node will be aggregated into a dict where the key will be the internal node name. + +- CLIENT GETNAME + + .. warning:: + + Command is sent to all nodes in the cluster. + + Result from each node will be aggregated into a dict where the key will be the internal node name. + - **[NYV]** - CLIENT GETREDIR - **[NYV]** - CLIENT PAUSE timeout - **[NYV]** - CLIENT REPLY ON|OFF|SKIP - **[NYV]** - CLIENT SETNAME connection-name - **[NYV]** - CLIENT TRACKING ON|OFF [REDIRECT client-id] [PREFIX prefix [PREFIX prefix ...]] [BCAST] [OPTIN] [OPTOUT] [NOLOOP] - **[NYV]** - CLIENT UNBLOCK client-id [TIMEOUT|ERROR] -- **[NYV]** - ECHO message +- ECHO message + + .. warning:: + + Command is sent to all nodes in the cluster. + + Result from each node will be aggregated into a dict where the key will be the internal node name. + - **[NYV]** - HELLO protover [AUTH username password] [SETNAME clientname] -- **[NYV]** - PING [message] +- PING [message] + + .. warning:: + + Command is sent to all nodes in the cluster. + + Result from each node will be aggregated into a dict where the key will be the internal node name. + - **[NYV]** - QUIT - **[NYV]** - SELECT index @@ -110,21 +214,27 @@ Hashes https://redis.io/commands#hash -- **[NYV]** - HDEL key field [field ...] -- **[NYV]** - HEXISTS key field -- **[NYV]** - HGET key field -- **[NYV]** - HGETALL key -- **[NYV]** - HINCRBY key field increment -- **[NYV]** - HINCRBYFLOAT key field increment -- **[NYV]** - HKEYS key -- **[NYV]** - HLEN key -- **[NYV]** - HMGET key field [field ...] -- **[NYV]** - HMSET key field value [field value ...] -- **[NYV]** - HSET key field value [field value ...] -- **[NYV]** - HSETNX key field value -- **[NYV]** - HSTRLEN key field -- **[NYV]** - HVALS key -- **[NYV]** - HSCAN key cursor [MATCH pattern] [COUNT count] +- HDEL key field [field ...] +- HEXISTS key field +- HGET key field +- HGETALL key +- HINCRBY key field increment +- HINCRBYFLOAT key field increment +- HKEYS key +- HLEN key +- HMGET key field [field ...] +- HMSET key field value [field value ...] +- HSET key field value [field value ...] 
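
As an aside to the same-slot notes above (SORT, RENAME, BITOP and friends), the sketch below shows how hash tags keep related keys in one slot so such commands can run without cross-slot errors. It is illustrative only; the startup address and key names are made up, and the slot check relies on the ``keyslot()`` helper exposed by the node manager used by this client.

.. code-block:: python

    from rediscluster import RedisCluster

    # Placeholder startup node for a local test cluster.
    rc = RedisCluster(host="127.0.0.1", port=7000, decode_responses=True)

    # Keys sharing a hash tag ("{...}") hash to the same slot.
    nodes = rc.connection_pool.nodes
    assert nodes.keyslot("{user:1}:ids") == nodes.keyslot("{user:1}:ids_sorted")

    # Same-slot keys let commands like SORT ... STORE work in cluster mode.
    rc.delete("{user:1}:ids", "{user:1}:ids_sorted")
    rc.rpush("{user:1}:ids", 3, 1, 2)
    print(rc.sort("{user:1}:ids", store="{user:1}:ids_sorted"))
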
+- HSETNX key field value +- HSTRLEN key field +- HVALS key +- HSCAN key cursor [MATCH pattern] [COUNT count] + + .. note:: + + HSCAN command has currently a buggy client side implementation. + + It is not recommended to use any *SCAN methods. Hyperloglog @@ -142,31 +252,84 @@ Keys/Generic https://redis.io/commands#generic -- **[NYV]** - DEL key [key ...] -- **[NYV]** - DUMP key +- DEL key [key ...] + + .. note:: + + Method has a custom client side implementation. + + Command is no longer atomic. + + DEL command is sent for each individual key to redis-server. + +- DUMP key - **[NYV]** - EXISTS key [key ...] -- **[NYV]** - EXPIRE key seconds -- **[NYV]** - EXPIREAT key timestamp +- EXPIRE key seconds +- EXPIREAT key timestamp - **[NYV]** - KEYS pattern - **[NYV]** - MIGRATE host port key|"" destination-db timeout [COPY] [REPLACE] [AUTH password] [AUTH2 username password] [KEYS key [key ...]] -- **[NYV]** - MOVE key db -- **[NYV]** - OBJECT subcommand [arguments [arguments ...]] -- **[NYV]** - PERSIST key -- **[NYV]** - PEXPIRE key milliseconds -- **[NYV]** - PEXPIREAT key milliseconds-timestamp -- **[NYV]** - PTTL key -- **[NYV]** - RANDOMKEY -- **[NYV]** - RENAME key newkey -- **[NYV]** - RENAMENX key newkey +- MOVE key db + + .. note:: + + Concept of databases do not exists in a cluter + +- OBJECT subcommand [arguments [arguments ...]] + + .. note:: + + Command is blocked from executing in the client. + +- PERSIST key +- PEXPIRE key milliseconds +- PEXPIREAT key milliseconds-timestamp +- PTTL key +- RANDOMKEY +- RENAME key newkey + + .. note:: + + Method has a custom client side implementation. + + Command is no longer atomic. + + If the slots is the same RENAME will be sent to that shard. + If the source and destination keys have different slots then a dump (old key/slot) -> restore (new key/slot) -> delete (old key) will be performed. + +- RENAMENX key newkey + + .. note:: + + Method has a custom client side implementation. + + Command is no longer atomic. + + Method will check if key exists and if it does it uses the custom RENAME implementation mentioned above. + - **[NYV]** - RESTORE key ttl serialized-value [REPLACE] [ABSTTL] [IDLETIME seconds] [FREQ frequency] -- **[NYV]** - SORT key [BY pattern] [LIMIT offset count] [GET pattern [GET pattern ...]] [ASC|DESC] [ALPHA] [STORE destination] +- SORT key [BY pattern] [LIMIT offset count] [GET pattern [GET pattern ...]] [ASC|DESC] [ALPHA] [STORE destination] + + .. note:: + + SORT command will only work on the most basic sorting of lists. + + Any additional arguments or more complex sorts can't get guaranteed to work if working with cross slots. + + Command works if all used keys is in same slot. + - **[NYV]** - TOUCH key [key ...] -- **[NYV]** - TTL key -- **[NYV]** - TYPE key +- TTL key +- TYPE key - **[NYV]** - UNLINK key [key ...] - **[NYV]** - WAIT numreplicas timeout - **[NYV]** - SCAN cursor [MATCH pattern] [COUNT count] [TYPE type] + .. note:: + + SCAN command has currently a buggy client side implementation. + + It is not recommended to use any *SCAN methods. + Lists ----- @@ -193,18 +356,25 @@ https://redis.io/commands#list - **[NYV]** - RPUSHX key element [element ...] - PubSub ------ https://redis.io/commands#pubsub -- **[NYV]** - PSUBSCRIBE pattern [pattern ...] -- **[NYV]** - PUBSUB subcommand [argument [argument ...]] -- **[NYV]** - PUBLISH channel message -- **[NYV]** - PUNSUBSCRIBE [pattern [pattern ...]] -- **[NYV]** - SUBSCRIBE channel [channel ...] -- **[NYV]** - UNSUBSCRIBE [channel [channel ...]] + .. 
warning:: + + All pubsub commands is possible to execute and be routed to correct node when used. + + But in general pubsub solution should NOT be used inside a clustered environment unless you really know what you are doing. + + Please read the documentation section about pubsub to get more information about why. + +- PSUBSCRIBE pattern [pattern ...] +- PUBSUB subcommand [argument [argument ...]] +- PUBLISH channel message +- PUNSUBSCRIBE [pattern [pattern ...]] +- SUBSCRIBE channel [channel ...] +- UNSUBSCRIBE [channel [channel ...]] Scripting @@ -212,12 +382,43 @@ Scripting https://redis.io/commands#scripting --- **[NYV]** - EVAL script numkeys key [key ...] arg [arg ...] --- **[NYV]** - SCRIPT DEBUG YES|SYNC|NO --- **[NYV]** - SCRIPT EXISTS sha1 [sha1 ...] --- **[NYV]** - SCRIPT FLUSH --- **[NYV]** - SCRIPT KILL --- **[NYV]** - SCRIPT LOAD script +- EVAL script numkeys key [key ...] arg [arg ...] + + .. warning:: + + Method has a custom client side implementation. + + Command will only work if all keys point to the same slot. Otherwise a CROSSSLOT error will be raised. + +- SCRIPT DEBUG YES|SYNC|NO + + .. warning:: + + Command will only be sent to all master nodes in the cluster and result will be aggregated into a dict where the key will be the internal node name. + +- SCRIPT EXISTS sha1 [sha1 ...] + + .. warning:: + + Command will only be sent to all master nodes in the cluster and result will be aggregated into a dict where the key will be the internal node name. + +- SCRIPT FLUSH + + .. warning:: + + Command will only be sent to all master nodes in the cluster and result will be aggregated into a dict where the key will be the internal node name. + +- SCRIPT KILL + + .. warning:: + + Command has been blocked from executing in a cluster environment + +- SCRIPT LOAD script + + .. warning:: + + Command will only be sent to all master nodes in the cluster and result will be aggregated into a dict where the key will be the internal node name. Server @@ -297,8 +498,22 @@ https://redis.io/commands#server Command has been blocked from executing in a cluster environment -- **[NYV]** - BGREWRITEAOF -- **[NYV]** - BGSAVE [SCHEDULE] +- BGREWRITEAOF + + .. warning:: + + Command is sent to all nodes in the cluster. + + Result from each node will be aggregated into a dict where the key will be the internal node name. + +- BGSAVE [SCHEDULE] + + .. warning:: + + Command is sent to all nodes in the cluster. + + Result from each node will be aggregated into a dict where the key will be the internal node name. + - **[NYV]** - COMMAND - **[NYV]** - COMMAND COUNT - **[NYV]** - COMMAND GETKEYS @@ -335,6 +550,13 @@ https://redis.io/commands#server - **[NYV]** - SYNC - **[NYV]** - PSYNC replicationid offset - **[NYV]** - TIME + + .. note:: + + Command is sent to all nodes in the cluster. + + Result is merged into a single dict with node as key. + - **[NYV]** - LATENCY DOCTOR - **[NYV]** - LATENCY GRAPH event - **[NYV]** - LATENCY HISTORY event @@ -425,25 +647,30 @@ https://redis.io/commands#string - **[NYV]** - APPEND key value - **[NYV]** - BITCOUNT key [start end] - **[NYV]** - BITFIELD key [GET type offset] [SET type offset value] [INCRBY type offset increment] [OVERFLOW WRAP|SAT|FAIL] -- **[NYV]** - BITOP operation destkey key [key ...] 
-- **[NYV]** - BITPOS key bit [start] [end] -- **[NYV]** - DECR key -- **[NYV]** - DECRBY key decrement -- **[NYV]** - GET key -- **[NYV]** - GETBIT key offset -- **[NYV]** - GETRANGE key start end -- **[NYV]** - GETSET key value -- **[NYV]** - INCR key -- **[NYV]** - INCRBY key increment -- **[NYV]** - INCRBYFLOAT key increment +- BITOP operation destkey key [key ...] + + .. note:: + + Command only works if keys is in same slot. No custom client implementation exists. + +- BITPOS key bit [start] [end] +- DECR key +- DECRBY key decrement +- GET key +- GETBIT key offset +- GETRANGE key start end +- GETSET key value +- INCR key +- INCRBY key increment +- INCRBYFLOAT key increment - **[NYV]** - MGET key [key ...] - **[NYV]** - MSET key value [key value ...] - **[NYV]** - MSETNX key value [key value ...] - **[NYV]** - PSETEX key milliseconds value -- **[NYV]** - SET key value [EX seconds|PX milliseconds|KEEPTTL] [NX|XX] -- **[NYV]** - SETBIT key offset value -- **[NYV]** - SETEX key seconds value -- **[NYV]** - SETNX key value +- SET key value [EX seconds|PX milliseconds|KEEPTTL] [NX|XX] +- SETBIT key offset value +- SETEX key seconds value +- SETNX key value - **[NYV]** - SETRANGE key offset value - **[NYV]** - STRALGO LCS algo-specific-argument [algo-specific-argument ...] - **[NYV]** - STRLEN key diff --git a/rediscluster/client.py b/rediscluster/client.py index d08bcb09..4b2a4c0b 100644 --- a/rediscluster/client.py +++ b/rediscluster/client.py @@ -99,13 +99,15 @@ class RedisCluster(Redis): 'ACL DELUSER', 'ACL GENPASS', 'ACL GETUSER', + 'ACL HELP', 'ACL LIST', 'ACL LOAD', + 'ACL LOG', 'ACL SAVE', 'ACL SETUSER', 'ACL USERS', 'ACL WHOAMI', - 'BITOP', + 'OBJECT', 'MOVE', 'SCRIPT KILL', 'SENTINEL GET-MASTER-ADDR-BY-NAME', @@ -118,6 +120,7 @@ class RedisCluster(Redis): 'SENTINEL SLAVES', 'SHUTDOWN', 'SLAVEOF', + 'WAIT', ], 'blocked'), string_keys_to_dict([ "BGREWRITEAOF", @@ -154,6 +157,7 @@ class RedisCluster(Redis): "SCRIPT EXISTS", "SCRIPT FLUSH", "SCRIPT LOAD", + "SCRIPT DEBUG", ], 'all-masters'), string_keys_to_dict([ "CLUSTER NODES", From e260aa682e52344efc051b352fe8de856420f437 Mon Sep 17 00:00:00 2001 From: Tim Van Laer Date: Mon, 21 Sep 2020 13:40:46 +0200 Subject: [PATCH 235/263] Elasticache example --- examples/basic_elasticache_password_protected.py | 15 +++++++++++++++ 1 file changed, 15 insertions(+) create mode 100644 examples/basic_elasticache_password_protected.py diff --git a/examples/basic_elasticache_password_protected.py b/examples/basic_elasticache_password_protected.py new file mode 100644 index 00000000..af92ace0 --- /dev/null +++ b/examples/basic_elasticache_password_protected.py @@ -0,0 +1,15 @@ +from rediscluster import RedisCluster + +rc = RedisCluster( + host='clustercfg.cfg-endpoint-name.aq25ta.euw1.cache.amazonaws.com', + port=6379, + password='password_is_protected', + skip_full_coverage_check=True, # Bypass Redis CONFIG call to elasticache + decode_responses=True, # decode_responses must be set to True when used with python3 + ssl=True, # in-transit encryption, https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/in-transit-encryption.html + ssl_cert_reqs=None # see https://github.com/andymccurdy/redis-py#ssl-connections +) + +rc.set("foo", "bar") + +print(rc.get("foo")) From 1cb65cb9336ad47fa2577b088820a46ae781123a Mon Sep 17 00:00:00 2001 From: Grokzen Date: Sun, 20 Sep 2020 17:05:29 +0200 Subject: [PATCH 236/263] Updated tests. Fixed typos in what redis commands that shold be blocked and who should not be. 
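
Before the test changes below, a minimal standalone sketch of the behaviour they assert: commands flagged as blocked are refused by the client itself with ``RedisClusterException``. The startup address is a placeholder for a local test cluster, and the exact error message is not assumed.

.. code-block:: python

    from rediscluster import RedisCluster
    from rediscluster.exceptions import RedisClusterException

    # Placeholder startup node for a local test cluster.
    rc = RedisCluster(host="127.0.0.1", port=7000, decode_responses=True)

    try:
        # MOVE is one of the commands listed as blocked in cluster mode.
        rc.execute_command("MOVE", "somekey", 1)
    except RedisClusterException as exc:
        print("blocked by the client:", exc)
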
--- rediscluster/client.py | 2 +- tests/test_cluster_obj.py | 15 ++++++++++++++- 2 files changed, 15 insertions(+), 2 deletions(-) diff --git a/rediscluster/client.py b/rediscluster/client.py index 4b2a4c0b..cb8f59de 100644 --- a/rediscluster/client.py +++ b/rediscluster/client.py @@ -107,7 +107,7 @@ class RedisCluster(Redis): 'ACL SETUSER', 'ACL USERS', 'ACL WHOAMI', - 'OBJECT', + 'BITOP', 'MOVE', 'SCRIPT KILL', 'SENTINEL GET-MASTER-ADDR-BY-NAME', diff --git a/tests/test_cluster_obj.py b/tests/test_cluster_obj.py index 979ab37c..825db8eb 100644 --- a/tests/test_cluster_obj.py +++ b/tests/test_cluster_obj.py @@ -163,7 +163,19 @@ def test_blocked_commands(r): These commands should be blocked and raise RedisClusterException """ blocked_commands = [ - 'BITOP', + 'ACL CAT', + 'ACL DELUSER', + 'ACL GENPASS', + 'ACL GETUSER', + 'ACL HELP', + 'ACL LIST', + 'ACL LOAD', + 'ACL LOG', + 'ACL SAVE', + 'ACL SETUSER', + 'ACL USERS', + 'ACL WHOAMI', + 'OBJECT', 'MOVE', 'SCRIPT KILL', 'SENTINEL GET-MASTER-ADDR-BY-NAME', @@ -176,6 +188,7 @@ def test_blocked_commands(r): 'SENTINEL SLAVES', 'SHUTDOWN', 'SLAVEOF', + 'WAIT', ] for command in blocked_commands: From 73f27edf7ceb4a408b3008ef7d82dac570ab9c6a Mon Sep 17 00:00:00 2001 From: Grokzen Date: Sat, 26 Sep 2020 00:58:58 +0200 Subject: [PATCH 237/263] Set correct release version. Add a few lines to release notes with changes done recently. --- docs/release-notes.rst | 4 ++++ setup.py | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/docs/release-notes.rst b/docs/release-notes.rst index 7d6b6c84..e3590f34 100644 --- a/docs/release-notes.rst +++ b/docs/release-notes.rst @@ -29,6 +29,10 @@ Release Notes * New feature, host_port_remap. Send in a remapping configuration to RedisCluster instance where the nodes configuration recieved from the redis cluster can be altered to allow for connection in certain circumstances. See new section in client.rst in docs/ for usage example. * When a slot is not covered by the cluster, it will not raise SlotNotCoveredError instead of the old generic RedisClusterException. The client will not attempt to rebuild the cluster layout a few times before giving up and raising that exception to the user. (#350) * CLIENT SETNAME is now possible to use from the client instance. For setting the name for all connections from the client by default, see issue #802 in redis-py repo for the change that was implemented in redis-py 3.4.0. + * Rewrote implemented commands documentation to mimic the redis.io commands documentation and describe each command and any additional implementation that has been made. + * Added RTD theme to the rendered output when running the documentation in local dev mode. + * Added some basic logging to the client that should make it easier to debug and track down minor issues around the main execution loop. See docs/logging.rst for implementation example into your own code. + * Seperated some of the exception handling inside the main execution loop to get more fine grained controll what to do at certain errors. 2.0.0 (Aug 12, 2019) diff --git a/setup.py b/setup.py index bb9a9921..f58c7f17 100644 --- a/setup.py +++ b/setup.py @@ -20,7 +20,7 @@ setup( name="redis-py-cluster", - version="2.0.99rc2", + version="2.1.0", description="Library for communicating with Redis Clusters. 
Built on top of redis-py lib", long_description=readme + '\n\n' + history, long_description_content_type="text/markdown", From 3f853f25eb8adf46856289a3d280147bd20817b4 Mon Sep 17 00:00:00 2001 From: Matt Robenolt Date: Mon, 5 Oct 2020 17:13:14 -0700 Subject: [PATCH 238/263] fix(client): Possible UnboundLocalError within execute finally block I'm not sure what all cases trigger this, but within the finally block, there is no guarantee previously that the variable `connection` was even defined. In out case, we hit a SlotNotCoveredError, which cascaded into an UnboundLocalError from within the finally block. This change explicitly declares the `connection` variable so it can safely be checked later if it needs to be freed. --- rediscluster/client.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/rediscluster/client.py b/rediscluster/client.py index cb8f59de..62ed2ec7 100644 --- a/rediscluster/client.py +++ b/rediscluster/client.py @@ -417,7 +417,7 @@ def from_url(cls, url, db=None, skip_full_coverage_check=False, readonly_mode=Fa connection_pool_cls = ClusterConnectionPool connection_pool = connection_pool_cls.from_url(url, db=db, skip_full_coverage_check=skip_full_coverage_check, **kwargs) - + if connection_pool.connection_class == SSLConnection: connection_pool.connection_class = SSLClusterConnection @@ -543,7 +543,7 @@ def execute_command(self, *args, **kwargs): It will try the number of times specified by the config option "self.cluster_down_retry_attempts" which defaults to 3 unless manually configured. - + If it reaches the number of times, the command will raises ClusterDownException. """ for _ in range(0, self.cluster_down_retry_attempts): @@ -587,6 +587,7 @@ def _execute_command(self, *args, **kwargs): while ttl > 0: ttl -= 1 + connection = None try: if asking: @@ -699,7 +700,8 @@ def _execute_command(self, *args, **kwargs): redirect_addr, asking = "{0}:{1}".format(e.host, e.port), True finally: - self.connection_pool.release(connection) + if connection is not None: + self.connection_pool.release(connection) log.debug("TTL loop : " + str(ttl)) From f0aaaa4e539bc62e38ce6e23839a12ff192cb7ea Mon Sep 17 00:00:00 2001 From: Tyler Lubeck Date: Fri, 16 Oct 2020 16:10:35 -0700 Subject: [PATCH 239/263] Reduce log level of ClusterConnection creation --- rediscluster/connection.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rediscluster/connection.py b/rediscluster/connection.py index 561e7657..977937d9 100644 --- a/rediscluster/connection.py +++ b/rediscluster/connection.py @@ -45,7 +45,7 @@ class ClusterConnection(Connection): "Manages TCP communication to and from a Redis server" def __init__(self, *args, **kwargs): - log.info("Createing new ClusterConnection instance") + log.debug("Creating new ClusterConnection instance") log.debug(str(args) + " : " + str(kwargs)) self.readonly = kwargs.pop('readonly', False) From 570d7f23faa9bde8031f5e2622e4c01aef3c4f7a Mon Sep 17 00:00:00 2001 From: Benji Visser Date: Thu, 24 Dec 2020 12:14:11 -0500 Subject: [PATCH 240/263] Update connection.py --- rediscluster/connection.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rediscluster/connection.py b/rediscluster/connection.py index 977937d9..a9765ec0 100644 --- a/rediscluster/connection.py +++ b/rediscluster/connection.py @@ -78,7 +78,7 @@ class SSLClusterConnection(SSLConnection): """ def __init__(self, *args, **kwargs): - log.info("Createing new SSLClusterConnection instance") + log.info("Creating new SSLClusterConnection instance") 
log.debug(str(args) + " : " + str(kwargs)) self.readonly = kwargs.pop('readonly', False) From f8f521e8ef5ff6edc5abdd80d885843e65a0944d Mon Sep 17 00:00:00 2001 From: Lev Kokotov Date: Wed, 3 Feb 2021 08:28:49 -0800 Subject: [PATCH 241/263] Fix "RedisCluster object has no attribute connection" error --- rediscluster/client.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/rediscluster/client.py b/rediscluster/client.py index 62ed2ec7..d057492c 100644 --- a/rediscluster/client.py +++ b/rediscluster/client.py @@ -362,6 +362,11 @@ def __init__(self, host=None, port=None, startup_nodes=None, max_connections=Non log.debug("Connection pool class " + str(connection_pool_cls)) + # If connection pool fails to initialize, parent class (Redis) __del__ + # will try to access self.connection before it's defined + # throwing an AttributeError. + self.connection = None + pool = connection_pool_cls( startup_nodes=startup_nodes, init_slot_cache=init_slot_cache, From 277aa62cb82ca7d577dc737fd078fee524e51ae4 Mon Sep 17 00:00:00 2001 From: Fran Garcia Date: Tue, 9 Mar 2021 11:59:26 +0000 Subject: [PATCH 242/263] Export ClusterPipeline as part of rediscluster --- rediscluster/__init__.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/rediscluster/__init__.py b/rediscluster/__init__.py index a1704c3a..a3e4f1a1 100644 --- a/rediscluster/__init__.py +++ b/rediscluster/__init__.py @@ -23,6 +23,7 @@ MovedError, MasterDownError, ) +from rediscluster.pipeline import ClusterPipeline def int_or_str(value): @@ -45,6 +46,7 @@ def int_or_str(value): ClusterDownError, ClusterDownException, ClusterError, + ClusterPipeline, MasterDownError, MovedError, RedisCluster, From 9eb7f62b74d2ae4cb06eda58e5d309f64fb23bba Mon Sep 17 00:00:00 2001 From: Grokzen Date: Sat, 17 Apr 2021 11:22:20 +0200 Subject: [PATCH 243/263] Code cleanup --- rediscluster/__init__.py | 1 - rediscluster/client.py | 24 ++++---- rediscluster/connection.py | 13 +++-- rediscluster/exceptions.py | 3 + rediscluster/nodemanager.py | 10 ++-- rediscluster/pipeline.py | 2 +- rediscluster/utils.py | 5 +- tests/conftest.py | 52 +++++++++-------- tests/test_cluster_connection_pool.py | 82 ++++++++++++++------------- tests/test_cluster_node_manager.py | 64 ++++++++++++--------- tests/test_cluster_obj.py | 48 ++++++++++++---- tests/test_commands.py | 16 ++++-- tests/test_commands_cluster.py | 5 -- tests/test_encoding_cluster.py | 3 +- tests/test_lock.py | 1 - tests/test_pipeline.py | 6 +- tests/test_pipeline_cluster.py | 18 +++--- tests/test_pubsub.py | 4 +- tests/test_utils.py | 4 +- 19 files changed, 200 insertions(+), 161 deletions(-) diff --git a/rediscluster/__init__.py b/rediscluster/__init__.py index a3e4f1a1..85a1dd4c 100644 --- a/rediscluster/__init__.py +++ b/rediscluster/__init__.py @@ -2,7 +2,6 @@ # python std lib import logging -import sys # rediscluster imports from rediscluster.client import RedisCluster diff --git a/rediscluster/client.py b/rediscluster/client.py index d057492c..f648ba90 100644 --- a/rediscluster/client.py +++ b/rediscluster/client.py @@ -2,7 +2,6 @@ from __future__ import unicode_literals # python std lib -import datetime import json import logging import random @@ -45,11 +44,10 @@ from redis import Redis from redis.client import list_or_args, parse_info from redis.connection import Connection, SSLConnection -from redis._compat import iteritems, basestring, izip, nativestr, long +from redis._compat import iteritems, nativestr, long from redis.exceptions import ( BusyLoadingError, ConnectionError, - DataError, RedisError, 
ResponseError, TimeoutError, @@ -211,7 +209,7 @@ class RedisCluster(Redis): "ZCARD", "ZCOUNT", "ZRANGE", - "ZSCORE" + "ZSCORE", ] RESULT_CALLBACKS = dict_merge( @@ -377,7 +375,7 @@ def __init__(self, host=None, port=None, startup_nodes=None, max_connections=Non nodemanager_follow_cluster=nodemanager_follow_cluster, connection_class=connection_class, host_port_remap=host_port_remap, - **kwargs + **kwargs, ) super(RedisCluster, self).__init__(connection_pool=pool, **kwargs) @@ -447,7 +445,7 @@ def pubsub(self, **kwargs): """ return ClusterPubSub(self.connection_pool, **kwargs) - def pipeline(self, transaction=None, shard_hint=None): + def pipeline(self, transaction=None, shard_hint=None, read_from_replicas=False): """ Cluster impl: Pipelines do not work in cluster mode the same way they do in normal mode. @@ -466,6 +464,7 @@ def pipeline(self, transaction=None, shard_hint=None): result_callbacks=self.result_callbacks, response_callbacks=self.response_callbacks, cluster_down_retry_attempts=self.cluster_down_retry_attempts, + read_from_replicas=read_from_replicas, ) def transaction(self, *args, **kwargs): @@ -492,7 +491,7 @@ def _determine_slot(self, *args): if command in ['XREADGROUP', 'XREAD']: stream_idx = args.index(b'STREAMS') - keys_ids = list(args[stream_idx + 1: ]) + keys_ids = list(args[stream_idx + 1:]) idx_split = len(keys_ids) // 2 keys = keys_ids[: idx_split] slots = {self.connection_pool.nodes.keyslot(key) for key in keys} @@ -642,10 +641,10 @@ def _execute_command(self, *args, **kwargs): # This is the last attempt before we run out of TTL, raise the exception if ttl == 1: raise e - except (RedisClusterException, BusyLoadingError) as e: + except (RedisClusterException, BusyLoadingError): log.exception("RedisClusterException || BusyLoadingError") raise - except ConnectionError as e: + except ConnectionError: log.exception("ConnectionError") connection.disconnect() @@ -667,8 +666,7 @@ def _execute_command(self, *args, **kwargs): self.connection_pool.nodes.increment_reinitialize_counter( count=self.connection_pool.nodes.reinitialize_steps, ) - - except TimeoutError as e: + except TimeoutError: log.exception("TimeoutError") if ttl < self.RedisClusterRequestTTL / 2: @@ -695,7 +693,7 @@ def _execute_command(self, *args, **kwargs): node = self.connection_pool.nodes.set_node(e.host, e.port, server_type='master') self.connection_pool.nodes.slots[e.slot_id][0] = node - except TryAgainError as e: + except TryAgainError: log.exception("TryAgainError") if ttl < self.RedisClusterRequestTTL / 2: @@ -733,7 +731,7 @@ def _execute_command_on_nodes(self, nodes, *args, **kwargs): connection.send_command(*args) res[node["name"]] = self.parse_response(connection, command, **kwargs) - except ClusterDownError as e: + except ClusterDownError: self.connection_pool.disconnect() self.connection_pool.reset() self.refresh_table_asap = True diff --git a/rediscluster/connection.py b/rediscluster/connection.py index a9765ec0..d61c3055 100644 --- a/rediscluster/connection.py +++ b/rediscluster/connection.py @@ -102,7 +102,7 @@ def on_connect(self): if nativestr(self.read_response()) != 'OK': raise ConnectionError('READONLY command failed') - + class ClusterConnectionPool(ConnectionPool): """ @@ -328,7 +328,7 @@ def get_connection_by_slot(self, slot): try: return self.get_connection_by_node(self.get_node_by_slot(slot)) - except (KeyError, RedisClusterException) as exc: + except (KeyError, RedisClusterException): return self.get_random_connection() def get_connection_by_node(self, node): @@ -353,7 +353,7 @@ def 
get_master_node_by_slot(self, slot): """ try: return self.nodes.slots[slot][0] - except KeyError as ke: + except KeyError: raise SlotNotCoveredError('Slot "{slot}" not covered by the cluster. "skip_full_coverage_check={skip_full_coverage_check}"'.format( slot=slot, skip_full_coverage_check=self.nodes._skip_full_coverage_check, )) @@ -397,11 +397,11 @@ class ClusterBlockingConnectionPool(ClusterConnectionPool): # not available. >>> pool = ClusterBlockingConnectionPool(timeout=5) """ + def __init__(self, startup_nodes=None, init_slot_cache=True, connection_class=None, max_connections=None, max_connections_per_node=False, reinitialize_steps=None, skip_full_coverage_check=False, nodemanager_follow_cluster=False, timeout=20, **connection_kwargs): - self.timeout = timeout super(ClusterBlockingConnectionPool, self).__init__( @@ -413,7 +413,7 @@ def __init__(self, startup_nodes=None, init_slot_cache=True, connection_class=No reinitialize_steps=reinitialize_steps, skip_full_coverage_check=skip_full_coverage_check, nodemanager_follow_cluster=nodemanager_follow_cluster, - **connection_kwargs + **connection_kwargs, ) def _blocking_pool_factory(self): @@ -557,7 +557,8 @@ def __init__(self, startup_nodes=None, init_slot_cache=True, connection_class=No max_connections=max_connections, readonly=True, nodemanager_follow_cluster=nodemanager_follow_cluster, - **connection_kwargs) + **connection_kwargs, + ) self.master_node_commands = ('SCAN', 'SSCAN', 'HSCAN', 'ZSCAN') diff --git a/rediscluster/exceptions.py b/rediscluster/exceptions.py index af204d79..b5e804ea 100644 --- a/rediscluster/exceptions.py +++ b/rediscluster/exceptions.py @@ -44,6 +44,7 @@ class ClusterCrossSlotError(ResponseError): class ClusterDownError(ClusterError, ResponseError): """ """ + def __init__(self, resp): self.args = (resp, ) self.message = resp @@ -72,6 +73,7 @@ def __init__(self, resp): class TryAgainError(ResponseError): """ """ + def __init__(self, *args, **kwargs): pass @@ -81,6 +83,7 @@ class MovedError(AskError): """ pass + class MasterDownError(ClusterDownError): """ """ diff --git a/rediscluster/nodemanager.py b/rediscluster/nodemanager.py index 0a53e309..7ec40a28 100644 --- a/rediscluster/nodemanager.py +++ b/rediscluster/nodemanager.py @@ -21,7 +21,7 @@ class NodeManager(object): """ """ - RedisClusterHashSlots = 16384 + REDIS_CLUSTER_HASH_SLOTS = 16384 def __init__(self, startup_nodes=None, reinitialize_steps=None, skip_full_coverage_check=False, nodemanager_follow_cluster=False, host_port_remap=None, **connection_kwargs): @@ -86,7 +86,7 @@ def _validate_host_port_remap(self, host_port_remap): socket.inet_aton(item.get('to_host', '0.0.0.0').strip()) except socket.error: raise RedisClusterConfigError("Both from_host and to_host in host_port_remap rule must be a valid ip address") - if len(item.get('from_host', '0.0.0.0').split('.')) < 4 or len(item.get('to_host', '0.0.0.0').split('.')) < 4 : + if len(item.get('from_host', '0.0.0.0').split('.')) < 4 or len(item.get('to_host', '0.0.0.0').split('.')) < 4: raise RedisClusterConfigError( "Both from_host and to_host in host_port_remap rule must must have all octets specified") @@ -110,7 +110,7 @@ def keyslot(self, key): if end > -1 and end != start + 1: k = k[start + 1:end] - return crc16(k) % self.RedisClusterHashSlots + return crc16(k) % self.REDIS_CLUSTER_HASH_SLOTS def node_from_slot(self, slot): """ @@ -286,7 +286,7 @@ def initialize(self): need_full_slots_coverage = self.cluster_require_full_coverage(nodes_cache) # Validate if all slots are covered or if we should try 
next startup node - for i in range(0, self.RedisClusterHashSlots): + for i in range(0, self.REDIS_CLUSTER_HASH_SLOTS): if i not in tmp_slots and need_full_slots_coverage: all_slots_covered = False @@ -299,7 +299,7 @@ def initialize(self): if not all_slots_covered: raise RedisClusterException("All slots are not covered after query all startup_nodes. {0} of {1} covered...".format( - len(tmp_slots), self.RedisClusterHashSlots)) + len(tmp_slots), self.REDIS_CLUSTER_HASH_SLOTS)) # Set the tmp variables to the real variables self.slots = tmp_slots diff --git a/rediscluster/pipeline.py b/rediscluster/pipeline.py index 1b7065fc..3671737a 100644 --- a/rediscluster/pipeline.py +++ b/rediscluster/pipeline.py @@ -156,7 +156,7 @@ def send_cluster_commands(self, stack, raise_on_error=True, allow_redirections=T It will try the number of times specified by the config option "self.cluster_down_retry_attempts" which defaults to 3 unless manually configured. - + If it reaches the number of times, the command will raises ClusterDownException. """ for _ in range(0, self.cluster_down_retry_attempts): diff --git a/rediscluster/utils.py b/rediscluster/utils.py index fcd1e7f3..d63edee8 100644 --- a/rediscluster/utils.py +++ b/rediscluster/utils.py @@ -1,11 +1,8 @@ # -*- coding: utf-8 -*- from socket import gethostbyaddr -from functools import wraps # rediscluster imports -from .exceptions import ( - RedisClusterException, ClusterDownError -) +from .exceptions import RedisClusterException # 3rd party imports from redis._compat import basestring, nativestr diff --git a/tests/conftest.py b/tests/conftest.py index 9eaa3db4..2e156cb5 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -2,8 +2,8 @@ # python std lib import os +import random import sys -import json # rediscluster imports from rediscluster import RedisCluster @@ -13,7 +13,6 @@ from distutils.version import StrictVersion from mock import Mock from redis import Redis -from redis.exceptions import ResponseError # put our path in front so we can be sure we are testing locally not against the global package basepath = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) @@ -223,35 +222,38 @@ def mock_cluster_resp_int(request, **kwargs): @pytest.fixture() def mock_cluster_resp_info(request, **kwargs): r = _get_client(RedisCluster, request, **kwargs) - response = ('cluster_state:ok\r\ncluster_slots_assigned:16384\r\n' - 'cluster_slots_ok:16384\r\ncluster_slots_pfail:0\r\n' - 'cluster_slots_fail:0\r\ncluster_known_nodes:7\r\n' - 'cluster_size:3\r\ncluster_current_epoch:7\r\n' - 'cluster_my_epoch:2\r\ncluster_stats_messages_sent:170262\r\n' - 'cluster_stats_messages_received:105653\r\n') + response = ( + 'cluster_state:ok\r\ncluster_slots_assigned:16384\r\n' + 'cluster_slots_ok:16384\r\ncluster_slots_pfail:0\r\n' + 'cluster_slots_fail:0\r\ncluster_known_nodes:7\r\n' + 'cluster_size:3\r\ncluster_current_epoch:7\r\n' + 'cluster_my_epoch:2\r\ncluster_stats_messages_sent:170262\r\n' + 'cluster_stats_messages_received:105653\r\n' + ) return _gen_cluster_mock_resp(r, response) @pytest.fixture() def mock_cluster_resp_nodes(request, **kwargs): r = _get_client(RedisCluster, request, **kwargs) - response = ('c8253bae761cb1ecb2b61857d85dfe455a0fec8b 172.17.0.7:7006 ' - 'slave aa90da731f673a99617dfe930306549a09f83a6b 0 ' - '1447836263059 5 connected\n' - '9bd595fe4821a0e8d6b99d70faa660638a7612b3 172.17.0.7:7008 ' - 'master - 0 1447836264065 0 connected\n' - 'aa90da731f673a99617dfe930306549a09f83a6b 172.17.0.7:7003 ' - 'myself,master - 0 0 2 connected 5461-10922\n' 
- '1df047e5a594f945d82fc140be97a1452bcbf93e 172.17.0.7:7007 ' - 'slave 19efe5a631f3296fdf21a5441680f893e8cc96ec 0 ' - '1447836262556 3 connected\n' - '4ad9a12e63e8f0207025eeba2354bcf4c85e5b22 172.17.0.7:7005 ' - 'master - 0 1447836262555 7 connected 0-5460\n' - '19efe5a631f3296fdf21a5441680f893e8cc96ec 172.17.0.7:7004 ' - 'master - 0 1447836263562 3 connected 10923-16383\n' - 'fbb23ed8cfa23f17eaf27ff7d0c410492a1093d6 172.17.0.7:7002 ' - 'master,fail - 1447829446956 1447829444948 1 disconnected\n' - ) + response = ( + 'c8253bae761cb1ecb2b61857d85dfe455a0fec8b 172.17.0.7:7006 ' + 'slave aa90da731f673a99617dfe930306549a09f83a6b 0 ' + '1447836263059 5 connected\n' + '9bd595fe4821a0e8d6b99d70faa660638a7612b3 172.17.0.7:7008 ' + 'master - 0 1447836264065 0 connected\n' + 'aa90da731f673a99617dfe930306549a09f83a6b 172.17.0.7:7003 ' + 'myself,master - 0 0 2 connected 5461-10922\n' + '1df047e5a594f945d82fc140be97a1452bcbf93e 172.17.0.7:7007 ' + 'slave 19efe5a631f3296fdf21a5441680f893e8cc96ec 0 ' + '1447836262556 3 connected\n' + '4ad9a12e63e8f0207025eeba2354bcf4c85e5b22 172.17.0.7:7005 ' + 'master - 0 1447836262555 7 connected 0-5460\n' + '19efe5a631f3296fdf21a5441680f893e8cc96ec 172.17.0.7:7004 ' + 'master - 0 1447836263562 3 connected 10923-16383\n' + 'fbb23ed8cfa23f17eaf27ff7d0c410492a1093d6 172.17.0.7:7002 ' + 'master,fail - 1447829446956 1447829444948 1 disconnected\n' + ) return _gen_cluster_mock_resp(r, response) diff --git a/tests/test_cluster_connection_pool.py b/tests/test_cluster_connection_pool.py index 654eb6fe..41965a06 100644 --- a/tests/test_cluster_connection_pool.py +++ b/tests/test_cluster_connection_pool.py @@ -7,7 +7,6 @@ from threading import Thread # rediscluster imports -from rediscluster.client import RedisCluster from rediscluster.connection import ( ClusterConnectionPool, ClusterBlockingConnectionPool, ClusterReadOnlyConnectionPool, ClusterConnection, UnixDomainSocketConnection) @@ -213,7 +212,7 @@ def test_master_node_by_slot(self): def test_from_url_connection_classes(self): from rediscluster.client import RedisCluster from rediscluster.connection import ClusterConnectionPool, ClusterConnection, SSLClusterConnection - + r = RedisCluster.from_url('redis://localhost:7000') assert isinstance(r.connection_pool, ClusterConnectionPool) # connection_class is not an object but a ref to the class @@ -224,7 +223,7 @@ def test_from_url_connection_classes(self): assert r.connection_pool.connection_class == SSLClusterConnection # Unix socket connections do not work in cluster environment - with pytest.raises(RedisClusterException) as ex: + with pytest.raises(RedisClusterException): r = RedisCluster.from_url('unix://foobar@/tmp/random.sock') @@ -243,7 +242,8 @@ def get_pool(self, connection_kwargs=None, max_connections=100, max_connections_ max_connections=max_connections, max_connections_per_node=max_connections_per_node, timeout=timeout, - **connection_kwargs) + **connection_kwargs, + ) return pool def test_connection_creation(self): @@ -370,9 +370,11 @@ def test_repr_contains_db_info_tcp(self): 'db': 0, 'client_name': 'test-client' } - pool = self.get_pool(connection_kwargs=connection_kwargs, - connection_class=ClusterConnection, - init_slot_cache=False) + pool = self.get_pool( + connection_kwargs=connection_kwargs, + connection_class=ClusterConnection, + init_slot_cache=False, + ) expected = 'ClusterBlockingConnectionPool>' assert repr(pool) == expected @@ -386,9 +388,11 @@ def test_repr_contains_db_info_unix(self): 'db': 1, 'client_name': 'test-client', } - pool = 
self.get_pool(connection_kwargs=connection_kwargs, - connection_class=UnixDomainSocketConnection, - init_slot_cache=False) + pool = self.get_pool( + connection_kwargs=connection_kwargs, + connection_class=UnixDomainSocketConnection, + init_slot_cache=False, + ) expected = 'ClusterBlockingConnectionPool>' assert repr(pool) == expected @@ -401,7 +405,8 @@ def get_pool(self, connection_kwargs=None, max_connections=None, init_slot_cache init_slot_cache=init_slot_cache, max_connections=max_connections, startup_nodes=startup_nodes, - **connection_kwargs) + **connection_kwargs, + ) return pool @pytest.mark.xfail(reason="Broken, needs repair") @@ -483,10 +488,12 @@ def test_get_node_by_slot_random(self): class TestBlockingConnectionPool(object): def get_pool(self, connection_kwargs=None, max_connections=10, timeout=20): connection_kwargs = connection_kwargs or {} - pool = redis.BlockingConnectionPool(connection_class=DummyConnection, - max_connections=max_connections, - timeout=timeout, - **connection_kwargs) + pool = redis.BlockingConnectionPool( + connection_class=DummyConnection, + max_connections=max_connections, + timeout=timeout, + **connection_kwargs, + ) return pool def test_connection_creation(self): @@ -688,15 +695,17 @@ def test_extra_typed_querystring_options(self): assert pool.max_connections == 10 def test_boolean_parsing(self): - for expected, value in ( - (None, None), - (None, ''), - (False, 0), (False, '0'), - (False, 'f'), (False, 'F'), (False, 'False'), - (False, 'n'), (False, 'N'), (False, 'No'), - (True, 1), (True, '1'), - (True, 'y'), (True, 'Y'), (True, 'Yes'), - ): + test_data = ( + (None, None), + (None, ''), + (False, 0), (False, '0'), + (False, 'f'), (False, 'F'), (False, 'False'), + (False, 'n'), (False, 'N'), (False, 'No'), + (True, 1), (True, '1'), + (True, 'y'), (True, 'Y'), (True, 'Yes'), + ) + + for expected, value in test_data: assert expected is to_bool(value) def test_extra_querystring_options(self): @@ -709,7 +718,7 @@ def test_extra_querystring_options(self): 'username': None, 'password': None, 'a': '1', - 'b': '2' + 'b': '2', } def test_calling_from_subclass_returns_correct_instance(self): @@ -788,7 +797,7 @@ def test_extra_querystring_options(self): 'username': None, 'password': None, 'a': '1', - 'b': '2' + 'b': '2', } @@ -813,24 +822,19 @@ class DummyConnectionPool(redis.ConnectionPool): def get_connection(self, *args, **kwargs): return self.make_connection() - pool = DummyConnectionPool.from_url( - 'rediss://?ssl_cert_reqs=none') + pool = DummyConnectionPool.from_url('rediss://?ssl_cert_reqs=none') assert pool.get_connection('_').cert_reqs == ssl.CERT_NONE - pool = DummyConnectionPool.from_url( - 'rediss://?ssl_cert_reqs=optional') + pool = DummyConnectionPool.from_url('rediss://?ssl_cert_reqs=optional') assert pool.get_connection('_').cert_reqs == ssl.CERT_OPTIONAL - pool = DummyConnectionPool.from_url( - 'rediss://?ssl_cert_reqs=required') + pool = DummyConnectionPool.from_url('rediss://?ssl_cert_reqs=required') assert pool.get_connection('_').cert_reqs == ssl.CERT_REQUIRED - pool = DummyConnectionPool.from_url( - 'rediss://?ssl_check_hostname=False') + pool = DummyConnectionPool.from_url('rediss://?ssl_check_hostname=False') assert pool.get_connection('_').check_hostname is False - pool = DummyConnectionPool.from_url( - 'rediss://?ssl_check_hostname=True') + pool = DummyConnectionPool.from_url('rediss://?ssl_check_hostname=True') assert pool.get_connection('_').check_hostname is True @@ -874,8 +878,10 @@ def 
test_busy_loading_from_pipeline_immediate_command(self, r): """ pipe = r.pipeline() with pytest.raises(redis.BusyLoadingError): - pipe.immediate_execute_command('DEBUG', 'ERROR', - 'LOADING fake message') + pipe.immediate_execute_command( + 'DEBUG', 'ERROR', + 'LOADING fake message', + ) pool = r.connection_pool assert not pipe.connection assert len(pool._available_connections) == 1 diff --git a/tests/test_cluster_node_manager.py b/tests/test_cluster_node_manager.py index f126f101..ac740629 100644 --- a/tests/test_cluster_node_manager.py +++ b/tests/test_cluster_node_manager.py @@ -140,7 +140,7 @@ def patch_execute_command(*args, **kwargs): execute_command_mock.side_effect = patch_execute_command s.connection_pool.nodes.initialize() - assert len(s.connection_pool.nodes.slots) == NodeManager.RedisClusterHashSlots + assert len(s.connection_pool.nodes.slots) == NodeManager.REDIS_CLUSTER_HASH_SLOTS for slot_info in good_slots_resp: all_hosts = ['127.0.0.1', '127.0.0.2'] all_ports = [7000, 7001, 7002, 7003, 7004, 7005] @@ -452,7 +452,7 @@ def test_host_port_remap(): host_port_remap=None, ) # Test that providing wrong root level object type will raise config exception. List is expected - with pytest.raises(RedisClusterConfigError) as excp: + with pytest.raises(RedisClusterConfigError): n = NodeManager( startup_nodes=[{"host": "127.0.0.1", "port": 7000}], host_port_remap={}, @@ -463,7 +463,7 @@ def test_host_port_remap(): host_port_remap=[], ) # A wrong object type inside host_port_remap list shold raise error - with pytest.raises(RedisClusterConfigError) as excp: + with pytest.raises(RedisClusterConfigError): n = NodeManager( startup_nodes=[{"host": "127.0.0.1", "port": 7000}], host_port_remap=[None], @@ -474,42 +474,42 @@ def test_host_port_remap(): host_port_remap=[{}, {}], ) # If we only have either or from_host or to_host set we should get an error - with pytest.raises(RedisClusterConfigError) as excp: + with pytest.raises(RedisClusterConfigError): n = NodeManager( startup_nodes=[{"host": "127.0.0.1", "port": 7000}], host_port_remap=[{'from_host': ''}], ) - with pytest.raises(RedisClusterConfigError) as excp: + with pytest.raises(RedisClusterConfigError): n = NodeManager( startup_nodes=[{"host": "127.0.0.1", "port": 7000}], host_port_remap=[{'to_host': ''}], ) # If we only have either or from_port or to_port set we should get an error - with pytest.raises(RedisClusterConfigError) as excp: + with pytest.raises(RedisClusterConfigError): n = NodeManager( startup_nodes=[{"host": "127.0.0.1", "port": 7000}], host_port_remap=[{'from_port': ''}], ) - with pytest.raises(RedisClusterConfigError) as excp: + with pytest.raises(RedisClusterConfigError): n = NodeManager( startup_nodes=[{"host": "127.0.0.1", "port": 7000}], host_port_remap=[{'to_port': ''}], ) # Invalid keys in the rules should also raise exception - with pytest.raises(RedisClusterConfigError) as excp: + with pytest.raises(RedisClusterConfigError): n = NodeManager( startup_nodes=[{"host": "127.0.0.1", "port": 7000}], host_port_remap=[{'invalid_key': ''}], ) # Invalid ips in the rules should raise exception - with pytest.raises(RedisClusterConfigError) as excp: + with pytest.raises(RedisClusterConfigError): n = NodeManager( startup_nodes=[{"host": "127.0.0.1", "port": 7000}], host_port_remap=[{'from_host': '127.2.x.w', 'to_host': '127.0.0.1'}], ) # Incomplete ips in the rules should raise exception - with pytest.raises(RedisClusterConfigError) as excp: + with pytest.raises(RedisClusterConfigError): n = NodeManager( startup_nodes=[{"host": 
"127.0.0.1", "port": 7000}], host_port_remap=[{'from_host': '127.2', 'to_host': '127.0.0.1'}], @@ -527,7 +527,7 @@ def test_host_port_remap(): # If no host_port_remap is set then a node obj should not be modified in any way when remapping it n = NodeManager( host_port_remap=None, - startup_nodes=[{"host": "127.0.0.1", "port": 7000}] + startup_nodes=[{"host": "127.0.0.1", "port": 7000}], ) initial_node_obj = ['127.0.0.1', 7000, 'xyz'] unmodified_remapped_obj = n.remap_internal_node_object(initial_node_obj) @@ -535,9 +535,11 @@ def test_host_port_remap(): # Test that modifying both host and port works n = NodeManager( - host_port_remap=[{'from_host': '127.1.1.1', 'to_host': '128.0.0.1', 'from_port': 7000, 'to_port': 7001}, - {'from_host': '127.2.2.2', 'to_host': '128.0.0.1', 'from_port': 7000, 'to_port': 7005}], - startup_nodes=[{"host": "128.0.0.1", "port": 7000}] + host_port_remap=[ + {'from_host': '127.1.1.1', 'to_host': '128.0.0.1', 'from_port': 7000, 'to_port': 7001}, + {'from_host': '127.2.2.2', 'to_host': '128.0.0.1', 'from_port': 7000, 'to_port': 7005}, + ], + startup_nodes=[{"host": "128.0.0.1", "port": 7000}], ) initial_node_obj = ['127.1.1.1', 7000, 'xyz'] remapped_obj = n.remap_internal_node_object(initial_node_obj) @@ -546,9 +548,11 @@ def test_host_port_remap(): # Validate that ports are NOT remapped in isolation if hosts are also present n = NodeManager( - host_port_remap=[{'from_host': '127.2.2.2', 'to_host': '127.0.0.1', 'from_port': 7000, 'to_port': 7001}, - {'from_host': '127.3.3.3', 'to_host': '127.0.0.1', 'from_port': 7000, 'to_port': 7005}], - startup_nodes=[{"host": "127.0.0.1", "port": 7000}] + host_port_remap=[ + {'from_host': '127.2.2.2', 'to_host': '127.0.0.1', 'from_port': 7000, 'to_port': 7001}, + {'from_host': '127.3.3.3', 'to_host': '127.0.0.1', 'from_port': 7000, 'to_port': 7005}, + ], + startup_nodes=[{"host": "127.0.0.1", "port": 7000}], ) initial_node_obj = ['127.0.0.1', 7000, 'xyz'] remapped_obj = n.remap_internal_node_object(initial_node_obj) @@ -557,10 +561,12 @@ def test_host_port_remap(): # Validate that first applicable rule is applied n = NodeManager( - host_port_remap=[{'from_host': '127.2.2.2', 'to_host': '127.0.0.1', 'from_port': 7000, 'to_port': 7001}, - {'from_host': '127.3.3.3', 'to_host': '127.0.0.1', 'from_port': 7000, 'to_port': 7005}, - {'from_host': '127.2.2.2', 'to_host': '127.0.0.1', 'from_port': 7000, 'to_port': 7006}], - startup_nodes=[{"host": "127.0.0.1", "port": 7000}] + host_port_remap=[ + {'from_host': '127.2.2.2', 'to_host': '127.0.0.1', 'from_port': 7000, 'to_port': 7001}, + {'from_host': '127.3.3.3', 'to_host': '127.0.0.1', 'from_port': 7000, 'to_port': 7005}, + {'from_host': '127.2.2.2', 'to_host': '127.0.0.1', 'from_port': 7000, 'to_port': 7006}, + ], + startup_nodes=[{"host": "127.0.0.1", "port": 7000}], ) initial_node_obj = ['127.2.2.2', 7000, 'xyz'] remapped_obj = n.remap_internal_node_object(initial_node_obj) @@ -569,9 +575,11 @@ def test_host_port_remap(): # Validate just port mapping works n = NodeManager( - host_port_remap=[{'from_port': 7000, 'to_port': 7001}, - {'from_port': 7002, 'to_port': 7005}], - startup_nodes=[{"host": "127.0.0.1", "port": 7000}] + host_port_remap=[ + {'from_port': 7000, 'to_port': 7001}, + {'from_port': 7002, 'to_port': 7005}, + ], + startup_nodes=[{"host": "127.0.0.1", "port": 7000}], ) initial_node_obj = ['127.0.0.1', 7000, 'xyz'] remapped_obj = n.remap_internal_node_object(initial_node_obj) @@ -580,9 +588,11 @@ def test_host_port_remap(): # Validate just host mapping works n = NodeManager( 
- host_port_remap=[{'from_host': '127.2.2.2', 'to_host': '127.0.0.1'}, - {'from_host': '127.3.3.3', 'to_host': '127.0.0.2'}], - startup_nodes=[{"host": "127.0.0.1", "port": 7000}] + host_port_remap=[ + {'from_host': '127.2.2.2', 'to_host': '127.0.0.1'}, + {'from_host': '127.3.3.3', 'to_host': '127.0.0.2'}, + ], + startup_nodes=[{"host": "127.0.0.1", "port": 7000}], ) initial_node_obj = ['127.3.3.3', 7000, 'xyz'] remapped_obj = n.remap_internal_node_object(initial_node_obj) diff --git a/tests/test_cluster_obj.py b/tests/test_cluster_obj.py index 825db8eb..02c669d6 100644 --- a/tests/test_cluster_obj.py +++ b/tests/test_cluster_obj.py @@ -3,7 +3,6 @@ # python std lib from __future__ import with_statement import re -import time # rediscluster imports from rediscluster import RedisCluster @@ -116,7 +115,7 @@ def test_empty_startup_nodes(): Test that exception is raised when empty providing empty startup_nodes """ with pytest.raises(RedisClusterException) as ex: - r = RedisCluster(startup_nodes=[]) + RedisCluster(startup_nodes=[]) assert unicode(ex.value).startswith("No startup nodes provided"), unicode(ex.value) @@ -230,7 +229,12 @@ def ok_call(self, *args, **kwargs): def side_effect_rebuild_slots_cache(self): # make new node cache that points to 7007 instead of 7006 - self.nodes = [{'host': '127.0.0.1', 'server_type': 'master', 'port': 7006, 'name': '127.0.0.1:7006'}] + self.nodes = [{ + 'host': '127.0.0.1', + 'server_type': 'master', + 'port': 7006, + 'name': '127.0.0.1:7006', + }] self.slots = {} for i in range(0, 16383): @@ -243,7 +247,12 @@ def side_effect_rebuild_slots_cache(self): # Second call should map all to 7007 def map_7007(self): - self.nodes = [{'host': '127.0.0.1', 'server_type': 'master', 'port': 7007, 'name': '127.0.0.1:7007'}] + self.nodes = [{ + 'host': '127.0.0.1', + 'server_type': 'master', + 'port': 7007, + 'name': '127.0.0.1:7007', + }] self.slots = {} for i in range(0, 16383): @@ -452,11 +461,14 @@ def test_access_correct_slave_with_readonly_mode_client(sr): 'server_type': 'slave', } - master_value = {'host': '127.0.0.1', 'name': '127.0.0.1:7000', 'port': 7000, 'server_type': 'master'} - with patch.object( - ClusterConnectionPool, - 'get_master_node_by_slot', - return_value=master_value) as return_master_mock: + master_value = { + 'host': '127.0.0.1', + 'name': '127.0.0.1:7000', + 'port': 7000, + 'server_type': 'master', + } + + with patch.object(ClusterConnectionPool, 'get_master_node_by_slot', return_value=master_value): readonly_client = RedisCluster(host="127.0.0.1", port=7000, readonly_mode=True) assert b'foo' == readonly_client.get('foo16706') @@ -481,7 +493,14 @@ def side_effect(self, *args, **kwargs): def side_effect_rebuild_slots_cache(self): # start with all slots mapped to 7006 - self.nodes = {'127.0.0.1:7006': {'host': '127.0.0.1', 'server_type': 'master', 'port': 7006, 'name': '127.0.0.1:7006'}} + self.nodes = { + '127.0.0.1:7006': { + 'host': '127.0.0.1', + 'server_type': 'master', + 'port': 7006, + 'name': '127.0.0.1:7006', + }, + } self.slots = {} for i in range(0, 16383): @@ -494,7 +513,14 @@ def side_effect_rebuild_slots_cache(self): # After the first connection fails, a reinitialize should follow the cluster to 7007 def map_7007(self): - self.nodes = {'127.0.0.1:7007': {'host': '127.0.0.1', 'server_type': 'master', 'port': 7007, 'name': '127.0.0.1:7007'}} + self.nodes = { + '127.0.0.1:7007': { + 'host': '127.0.0.1', + 'server_type': 'master', + 'port': 7007, + 'name': '127.0.0.1:7007', + }, + } self.slots = {} for i in range(0, 16383): diff --git 
a/tests/test_commands.py b/tests/test_commands.py index 63a7f0e4..b85d069c 100644 --- a/tests/test_commands.py +++ b/tests/test_commands.py @@ -6,14 +6,20 @@ import redis import time -from redis._compat import (unichr, ascii_letters, iteritems, iterkeys, - itervalues, long, basestring) +from redis._compat import ( + unichr, ascii_letters, iteritems, iterkeys, + itervalues, long, basestring, +) from redis.client import parse_info from redis import exceptions -from .conftest import (skip_if_server_version_lt, skip_if_server_version_gte, - skip_unless_arch_bits, REDIS_6_VERSION, - skip_for_no_cluster_impl) +from .conftest import ( + REDIS_6_VERSION, + skip_for_no_cluster_impl, + skip_if_server_version_gte, + skip_if_server_version_lt, + skip_unless_arch_bits, +) @pytest.fixture() diff --git a/tests/test_commands_cluster.py b/tests/test_commands_cluster.py index 089bc3ea..ed338566 100644 --- a/tests/test_commands_cluster.py +++ b/tests/test_commands_cluster.py @@ -10,16 +10,11 @@ from rediscluster.utils import dict_merge from .conftest import ( skip_if_server_version_lt, - skip_if_redis_py_version_lt, - skip_if_server_version_gte, - skip_for_no_cluster_impl, - skip_unless_arch_bits, REDIS_6_VERSION, ) # 3rd party imports import pytest -from redis.exceptions import RedisError from redis import exceptions diff --git a/tests/test_encoding_cluster.py b/tests/test_encoding_cluster.py index 089cb75b..c8a2577a 100644 --- a/tests/test_encoding_cluster.py +++ b/tests/test_encoding_cluster.py @@ -1,11 +1,10 @@ from __future__ import unicode_literals import pytest -import redis from rediscluster import RedisCluster from redis._compat import unichr, unicode -from .conftest import _get_client, _init_client +from .conftest import _get_client class TestEncodingCluster(object): diff --git a/tests/test_lock.py b/tests/test_lock.py index 82ec43a2..b61e26c9 100644 --- a/tests/test_lock.py +++ b/tests/test_lock.py @@ -4,7 +4,6 @@ from rediscluster import RedisCluster from redis.exceptions import LockError, LockNotOwnedError -from redis.client import Redis from redis.lock import Lock from .conftest import _get_client diff --git a/tests/test_pipeline.py b/tests/test_pipeline.py index e1b3eb28..fd80cbc2 100644 --- a/tests/test_pipeline.py +++ b/tests/test_pipeline.py @@ -34,11 +34,7 @@ def test_pipeline_memoryview(self, r): with r.pipeline() as pipe: (pipe.set('a', memoryview(b'a1')) .get('a')) - assert pipe.execute() == \ - [ - True, - b'a1', - ] + assert pipe.execute() == [True, b'a1'] def test_pipeline_length(self, r): with r.pipeline() as pipe: diff --git a/tests/test_pipeline_cluster.py b/tests/test_pipeline_cluster.py index e4a7f320..a38eb6d6 100644 --- a/tests/test_pipeline_cluster.py +++ b/tests/test_pipeline_cluster.py @@ -8,13 +8,13 @@ from rediscluster.client import RedisCluster from rediscluster.connection import ClusterConnectionPool, ClusterReadOnlyConnectionPool from rediscluster.exceptions import RedisClusterException -from tests.conftest import _get_client, skip_if_server_version_lt +from tests.conftest import _get_client # 3rd party imports import pytest from mock import patch -from redis._compat import unichr, unicode -from redis.exceptions import WatchError, ResponseError, ConnectionError +from redis._compat import unicode +from redis.exceptions import ResponseError, ConnectionError class TestPipeline(object): @@ -336,11 +336,13 @@ def test_access_correct_slave_with_readonly_mode_client(self, sr): 'server_type': 'slave', } - master_value = {'host': '127.0.0.1', 'name': '127.0.0.1:7001', 'port': 
7001, 'server_type': 'master'} - with patch.object( - ClusterConnectionPool, - 'get_master_node_by_slot', - return_value=master_value) as return_master_mock: + master_value = { + 'host': '127.0.0.1', + 'name': '127.0.0.1:7001', + 'port': 7001, + 'server_type': 'master', + } + with patch.object(ClusterConnectionPool, 'get_master_node_by_slot', return_value=master_value): readonly_client = RedisCluster(host="127.0.0.1", port=7000, readonly_mode=True) with readonly_client.pipeline() as readonly_pipe: assert readonly_pipe.get('foo88').get('foo87').execute() == [b'bar', b'foo'] diff --git a/tests/test_pubsub.py b/tests/test_pubsub.py index dcbf85a2..a7d0ed57 100644 --- a/tests/test_pubsub.py +++ b/tests/test_pubsub.py @@ -12,12 +12,14 @@ import pytest # import redis +import redis from redis import Redis from redis.exceptions import ConnectionError from redis._compat import basestring, unichr from .conftest import _get_client -from .conftest import skip_if_server_version_lt, skip_if_redis_py_version_lt +from .conftest import skip_if_server_version_lt + def wait_for_message(pubsub, timeout=0.1, ignore_subscribe_messages=False): now = time.time() diff --git a/tests/test_utils.py b/tests/test_utils.py index 4e0c03db..62cde136 100644 --- a/tests/test_utils.py +++ b/tests/test_utils.py @@ -4,9 +4,7 @@ from __future__ import with_statement # rediscluster imports -from rediscluster.exceptions import ( - RedisClusterException, ClusterDownError -) +from rediscluster.exceptions import RedisClusterException from rediscluster.utils import ( string_keys_to_dict, dict_merge, From 95fee797dac63501f2933c8a0b46d72dbc5250d9 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Sat, 17 Apr 2021 11:56:33 +0200 Subject: [PATCH 244/263] Add in safety checks and better connection cleanup in certain situations when executing commands --- rediscluster/client.py | 6 ++++++ rediscluster/connection.py | 21 +++++++++++++++++++++ 2 files changed, 27 insertions(+) diff --git a/rediscluster/client.py b/rediscluster/client.py index f648ba90..a97c8594 100644 --- a/rediscluster/client.py +++ b/rediscluster/client.py @@ -668,6 +668,7 @@ def _execute_command(self, *args, **kwargs): ) except TimeoutError: log.exception("TimeoutError") + connection.disconnect() if ttl < self.RedisClusterRequestTTL / 2: time.sleep(0.05) @@ -679,6 +680,7 @@ def _execute_command(self, *args, **kwargs): self.connection_pool.disconnect() self.connection_pool.reset() self.refresh_table_asap = True + connection = None raise e except MovedError as e: @@ -702,6 +704,10 @@ def _execute_command(self, *args, **kwargs): log.exception("AskError") redirect_addr, asking = "{0}:{1}".format(e.host, e.port), True + except BaseException as e: + log.exception("BaseException") + connection.disconnect() + raise e finally: if connection is not None: self.connection_pool.release(connection) diff --git a/rediscluster/connection.py b/rediscluster/connection.py index d61c3055..f864f3a7 100644 --- a/rediscluster/connection.py +++ b/rediscluster/connection.py @@ -234,6 +234,27 @@ def get_connection(self, command_name, *keys, **options): self._in_use_connections[node['name']].add(connection) + try: + # ensure this connection is connected to Redis + connection.connect() + # connections that the pool provides should be ready to send + # a command. if not, the connection was either returned to the + # pool before all data has been read or the socket has been + # closed. either way, reconnect and verify everything is good. 
+ try: + if connection.can_read(): + raise ConnectionError('Connection has data') + except ConnectionError: + connection.disconnect() + connection.connect() + if connection.can_read(): + raise ConnectionError('Connection not ready') + except BaseException: + # release the connection back to the pool so that we don't + # leak it + self.release(connection) + raise + return connection def make_connection(self, node): From 96f2172eb737abf82e73c6bef1205c0fc7e5e4ba Mon Sep 17 00:00:00 2001 From: Grokzen Date: Sat, 17 Apr 2021 12:34:36 +0200 Subject: [PATCH 245/263] Make option argument in cluster_failover optional argument --- rediscluster/client.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/rediscluster/client.py b/rediscluster/client.py index a97c8594..79ac60ae 100644 --- a/rediscluster/client.py +++ b/rediscluster/client.py @@ -804,14 +804,19 @@ def cluster_delslots(self, *slots): for slot in slots ] - def cluster_failover(self, node_id, option): + def cluster_failover(self, node_id, option=None): """ Forces a slave to perform a manual failover of its master Sends to specefied node """ - assert option.upper() in ('FORCE', 'TAKEOVER') # TODO: change this option handling - return self.execute_command('CLUSTER FAILOVER', option, node_id=node_id) + if option: + if option.upper() not in ['FORCE', 'TAKEOVER']: + raise RedisError('Invalid option for CLUSTER FAILOVER command: {0}'.format(option)) + else: + return self.execute_command('CLUSTER FAILOVER', option, node_id=node_id) + else: + return self.execute_command('CLUSTER FAILOVER', node_id=node_id) def cluster_info(self): """ From 3756bf116d6f047e4445c4a9ef84885d5c612024 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Sat, 17 Apr 2021 12:55:14 +0200 Subject: [PATCH 246/263] Add logging of ReseponseError when doing the cluster initialization steps so the true error is visible in older python versions that do not merge and propegate the original exception automatically --- rediscluster/nodemanager.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/rediscluster/nodemanager.py b/rediscluster/nodemanager.py index 7ec40a28..9fe2b13a 100644 --- a/rediscluster/nodemanager.py +++ b/rediscluster/nodemanager.py @@ -228,6 +228,8 @@ def initialize(self): except (ConnectionError, TimeoutError): continue except ResponseError as e: + log.exception("ReseponseError sending 'cluster slots' to redis server") + # Isn't a cluster connection, so it won't parse these exceptions automatically message = e.__str__() if 'CLUSTERDOWN' in message or 'MASTERDOWN' in message: From 4961bd86f176c2d6667bca1eac982f033710a2c4 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Sat, 17 Apr 2021 13:10:01 +0200 Subject: [PATCH 247/263] Fix bug where slowlog_get command would fail on TypeError due to lambda implementation --- rediscluster/client.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rediscluster/client.py b/rediscluster/client.py index 79ac60ae..e00be82c 100644 --- a/rediscluster/client.py +++ b/rediscluster/client.py @@ -512,7 +512,7 @@ def _merge_result(self, command, res, **kwargs): `res` is a dict with the following structure Dict(NodeName, CommandResult) """ if command in self.result_callbacks: - return self.result_callbacks[command](command, res, **kwargs) + return self.result_callbacks[command](command, res) # Default way to handle result return first_key(command, res) From 63e57b4ed32e7b7c796097a3c9f37d78f9dffb2b Mon Sep 17 00:00:00 2001 From: rafie Date: Sat, 17 Apr 2021 22:37:10 +0300 Subject: [PATCH 248/263] Remove 
arg list training commas (breaks py2) --- rediscluster/client.py | 2 +- rediscluster/connection.py | 4 ++-- tests/test_cluster_connection_pool.py | 6 +++--- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/rediscluster/client.py b/rediscluster/client.py index e00be82c..6f9dd986 100644 --- a/rediscluster/client.py +++ b/rediscluster/client.py @@ -375,7 +375,7 @@ def __init__(self, host=None, port=None, startup_nodes=None, max_connections=Non nodemanager_follow_cluster=nodemanager_follow_cluster, connection_class=connection_class, host_port_remap=host_port_remap, - **kwargs, + **kwargs ) super(RedisCluster, self).__init__(connection_pool=pool, **kwargs) diff --git a/rediscluster/connection.py b/rediscluster/connection.py index f864f3a7..abafbfb7 100644 --- a/rediscluster/connection.py +++ b/rediscluster/connection.py @@ -434,7 +434,7 @@ def __init__(self, startup_nodes=None, init_slot_cache=True, connection_class=No reinitialize_steps=reinitialize_steps, skip_full_coverage_check=skip_full_coverage_check, nodemanager_follow_cluster=nodemanager_follow_cluster, - **connection_kwargs, + **connection_kwargs ) def _blocking_pool_factory(self): @@ -578,7 +578,7 @@ def __init__(self, startup_nodes=None, init_slot_cache=True, connection_class=No max_connections=max_connections, readonly=True, nodemanager_follow_cluster=nodemanager_follow_cluster, - **connection_kwargs, + **connection_kwargs ) self.master_node_commands = ('SCAN', 'SSCAN', 'HSCAN', 'ZSCAN') diff --git a/tests/test_cluster_connection_pool.py b/tests/test_cluster_connection_pool.py index 41965a06..f5ad9a6a 100644 --- a/tests/test_cluster_connection_pool.py +++ b/tests/test_cluster_connection_pool.py @@ -242,7 +242,7 @@ def get_pool(self, connection_kwargs=None, max_connections=100, max_connections_ max_connections=max_connections, max_connections_per_node=max_connections_per_node, timeout=timeout, - **connection_kwargs, + **connection_kwargs ) return pool @@ -405,7 +405,7 @@ def get_pool(self, connection_kwargs=None, max_connections=None, init_slot_cache init_slot_cache=init_slot_cache, max_connections=max_connections, startup_nodes=startup_nodes, - **connection_kwargs, + **connection_kwargs ) return pool @@ -492,7 +492,7 @@ def get_pool(self, connection_kwargs=None, max_connections=10, timeout=20): connection_class=DummyConnection, max_connections=max_connections, timeout=timeout, - **connection_kwargs, + **connection_kwargs ) return pool From 03414b06883029ea73e157dc9324c02e88c03641 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Sun, 18 Apr 2021 22:03:30 +0200 Subject: [PATCH 249/263] Update basic.py example with alternate way of using just host & port the regular redis-py way to connect to a cluster and not have to specify startup_nodes as a more complex object --- examples/basic.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/examples/basic.py b/examples/basic.py index d64a277e..acfdd1fb 100644 --- a/examples/basic.py +++ b/examples/basic.py @@ -4,7 +4,14 @@ # Note: decode_responses must be set to True when used with python3 rc = RedisCluster(startup_nodes=startup_nodes, decode_responses=True) - rc.set("foo", "bar") +print(rc.get("foo")) +# Alternate simple mode of pointing to one startup node +rc = RedisCluster( + host="127.0.0.1", + port=7000, + decode_responses=True, +) +rc.set("foo", "bar") print(rc.get("foo")) From 5f7c903cf1aae49fc418491938f5fc4162068dbf Mon Sep 17 00:00:00 2001 From: Grokzen Date: Sun, 18 Apr 2021 22:13:21 +0200 Subject: [PATCH 250/263] Move logging commands to debug to 
avoid unwanted log spamming on regular info level

---
 rediscluster/connection.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/rediscluster/connection.py b/rediscluster/connection.py
index abafbfb7..90d5c58c 100644
--- a/rediscluster/connection.py
+++ b/rediscluster/connection.py
@@ -78,7 +78,7 @@ class SSLClusterConnection(SSLConnection):
     """
     def __init__(self, *args, **kwargs):
-        log.info("Creating new SSLClusterConnection instance")
+        log.debug("Creating new SSLClusterConnection instance")
         log.debug(str(args) + " : " + str(kwargs))

         self.readonly = kwargs.pop('readonly', False)
@@ -123,7 +123,7 @@ def __init__(self, startup_nodes=None, init_slot_cache=True, connection_class=No
            it was operating on. This will allow the client to drift along side the cluster
            if the cluster nodes move around a lot.
         """
-        log.info("Creating new ClusterConnectionPool instance")
+        log.debug("Creating new ClusterConnectionPool instance")

         if connection_class is None:
             connection_class = ClusterConnection

From 94eedfb16fb71c82717c37f20a942b3f5b1429b0 Mon Sep 17 00:00:00 2001
From: Grokzen
Date: Sun, 18 Apr 2021 22:17:22 +0200
Subject: [PATCH 251/263] Prepare for 2.1.1 release

---
 LICENSE                  |  2 +-
 README.md                |  2 +-
 docs/conf.py             |  4 ++--
 docs/release-notes.rst   | 12 +++++++++++-
 rediscluster/__init__.py |  2 +-
 setup.py                 |  2 +-
 6 files changed, 17 insertions(+), 7 deletions(-)

diff --git a/LICENSE b/LICENSE
index 130e281a..c3591721 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,4 +1,4 @@
-Copyright (c) 2014-2020 Johan Andersson
+Copyright (c) 2014-2021 Johan Andersson

 Permission is hereby granted, free of charge, to any person
 obtaining a copy of this software and associated documentation

diff --git a/README.md b/README.md
index d12b5b52..5b73e740 100644
--- a/README.md
+++ b/README.md
@@ -66,7 +66,7 @@ True

 ## License & Authors

-Copyright (c) 2013-2020 Johan Andersson
+Copyright (c) 2013-2021 Johan Andersson

 MIT (See docs/License.txt file)

diff --git a/docs/conf.py b/docs/conf.py
index d64b91fe..547dda77 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -59,9 +59,9 @@
 # built documents.
 #
 # The short X.Y version.
-version = u'2.1.0'
+version = u'2.1.1'
 # The full version, including alpha/beta/rc tags.
-release = u'2.1.0'
+release = u'2.1.1'

 # The language for content autogenerated by Sphinx. Refer to documentation
 # for a list of supported languages.

diff --git a/docs/release-notes.rst b/docs/release-notes.rst
index e3590f34..50795eef 100644
--- a/docs/release-notes.rst
+++ b/docs/release-notes.rst
@@ -1,7 +1,17 @@
 Release Notes
 =============

-2.1.0 (May **, 2020)
+2.1.1 (Apr 18 2021)
+
+ * ClusterPipeline is now exposed when doing "from rediscluster import *"
+ * Fix issue where connection would be None in some cases when connection pool fails to initialize
+ * Ported in a fix from redis-py where it now checks if a connection is ready or not before returning the connection for usage
+ * ClusterFailover command option is no longer mandatory but optional, as intended
+ * Fixed "SLOWLOG GET" kwarg command where it failed on decode_responses
+ * BaseException is now caught when executing commands and it will disconnect the connection before raising the exception.
+ * Logging exception on ResponseError when doing the initial connection to the startup_nodes instances
+
+2.1.0 (Sept 26, 2020)
 --------------------

 * Add new config option for Client and Pipeline classes to controll how many attempts will be made before bailing out from a ClusterDownError.
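For reference, the now-optional failover option listed in the 2.1.1 notes above can be used roughly as follows. This is only a sketch, assuming a reachable cluster node on 127.0.0.1:7000; the node id below is a placeholder and should be taken from real CLUSTER NODES output:

.. code-block:: python

    from rediscluster import RedisCluster

    rc = RedisCluster(host="127.0.0.1", port=7000, decode_responses=True)

    # Placeholder id - use a real node id from your own cluster.
    node_id = "19efe5a631f3296fdf21a5441680f893e8cc96ec"

    # The option argument may now be omitted entirely...
    rc.cluster_failover(node_id)

    # ...and when it is given it must be either 'FORCE' or 'TAKEOVER',
    # otherwise a RedisError is raised.
    rc.cluster_failover(node_id, 'TAKEOVER')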
diff --git a/rediscluster/__init__.py b/rediscluster/__init__.py index 85a1dd4c..103cc122 100644 --- a/rediscluster/__init__.py +++ b/rediscluster/__init__.py @@ -33,7 +33,7 @@ def int_or_str(value): # Major, Minor, Fix version -__version__ = '2.1.0' +__version__ = '2.1.1' VERSION = tuple(map(int_or_str, __version__.split('.'))) __all__ = [ diff --git a/setup.py b/setup.py index f58c7f17..a49ca2ba 100644 --- a/setup.py +++ b/setup.py @@ -20,7 +20,7 @@ setup( name="redis-py-cluster", - version="2.1.0", + version="2.1.1", description="Library for communicating with Redis Clusters. Built on top of redis-py lib", long_description=readme + '\n\n' + history, long_description_content_type="text/markdown", From 3b68c18810c2e8cea20d7e900064b1f8ec811260 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Sun, 18 Apr 2021 22:43:43 +0200 Subject: [PATCH 252/263] Prepare for 2.1.2 hotfix release --- docs/License.txt | 2 +- docs/conf.py | 4 ++-- docs/license.rst | 2 +- docs/release-notes.rst | 6 ++++++ rediscluster/__init__.py | 32 ++++++++++++++++---------------- setup.py | 2 +- 6 files changed, 27 insertions(+), 21 deletions(-) diff --git a/docs/License.txt b/docs/License.txt index a690495b..b3f0532a 100644 --- a/docs/License.txt +++ b/docs/License.txt @@ -1,4 +1,4 @@ -Copyright (c) 2014-2020 Johan Andersson +Copyright (c) 2014-2021 Johan Andersson Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation diff --git a/docs/conf.py b/docs/conf.py index 547dda77..a180ced3 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -59,9 +59,9 @@ # built documents. # # The short X.Y version. -version = u'2.1.1' +version = u'2.1.2' # The full version, including alpha/beta/rc tags. -release = u'2.1.1' +release = u'2.1.2' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. 
diff --git a/docs/license.rst b/docs/license.rst index 854e0526..6c7dcebd 100644 --- a/docs/license.rst +++ b/docs/license.rst @@ -1,7 +1,7 @@ Licensing --------- -Copyright (c) 2013-2020 Johan Andersson +Copyright (c) 2013-2021 Johan Andersson MIT (See docs/License.txt file) diff --git a/docs/release-notes.rst b/docs/release-notes.rst index 50795eef..a8786083 100644 --- a/docs/release-notes.rst +++ b/docs/release-notes.rst @@ -1,7 +1,13 @@ Release Notes ============= +2.1.2 (Apr 18 2021) +------------------- + + * Fixed bug where "from rediscluster import *" would not work correct + 2.1.1 (Apr 18 2021) +------------------- * ClusterPipeline is now exposed when doing "from rediscluster import *" * Fix issue where connection would be None in some cases when connection pool fails to initialize diff --git a/rediscluster/__init__.py b/rediscluster/__init__.py index 103cc122..e22e3e86 100644 --- a/rediscluster/__init__.py +++ b/rediscluster/__init__.py @@ -33,25 +33,25 @@ def int_or_str(value): # Major, Minor, Fix version -__version__ = '2.1.1' +__version__ = '2.1.2' VERSION = tuple(map(int_or_str, __version__.split('.'))) __all__ = [ - AskError, - ClusterBlockingConnectionPool, - ClusterConnection, - ClusterConnectionPool, - ClusterCrossSlotError, - ClusterDownError, - ClusterDownException, - ClusterError, - ClusterPipeline, - MasterDownError, - MovedError, - RedisCluster, - RedisClusterError, - RedisClusterException, - TryAgainError, + 'AskError', + 'ClusterBlockingConnectionPool', + 'ClusterConnection', + 'ClusterConnectionPool', + 'ClusterCrossSlotError', + 'ClusterDownError', + 'ClusterDownException', + 'ClusterError', + 'ClusterPipeline', + 'MasterDownError', + 'MovedError', + 'RedisCluster', + 'RedisClusterError', + 'RedisClusterException', + 'TryAgainError', ] # Set default logging handler to avoid "No handler found" warnings. diff --git a/setup.py b/setup.py index a49ca2ba..d88b1354 100644 --- a/setup.py +++ b/setup.py @@ -20,7 +20,7 @@ setup( name="redis-py-cluster", - version="2.1.1", + version="2.1.2", description="Library for communicating with Redis Clusters. 
Built on top of redis-py lib", long_description=readme + '\n\n' + history, long_description_content_type="text/markdown", From 2a4c77dfccd5cbcf834ed1c514e5c3c9b2cd3f25 Mon Sep 17 00:00:00 2001 From: Fran Garcia Date: Tue, 20 Apr 2021 13:50:27 +0100 Subject: [PATCH 253/263] Support reading from replicas on pipeline executions --- rediscluster/client.py | 90 ++++++++++++++++++++-------------------- rediscluster/pipeline.py | 4 +- 2 files changed, 47 insertions(+), 47 deletions(-) diff --git a/rediscluster/client.py b/rediscluster/client.py index 6f9dd986..58e6d1a6 100644 --- a/rediscluster/client.py +++ b/rediscluster/client.py @@ -53,6 +53,50 @@ TimeoutError, ) +# Not complete, but covers the major ones +# https://redis.io/commands +READ_COMMANDS = frozenset([ + "BITCOUNT", + "BITPOS", + "EXISTS", + "GEODIST", + "GEOHASH", + "GEOPOS", + "GEORADIUS", + "GEORADIUSBYMEMBER", + "GET", + "GETBIT", + "GETRANGE", + "HEXISTS", + "HGET", + "HGETALL", + "HKEYS", + "HLEN", + "HMGET", + "HSTRLEN", + "HVALS", + "KEYS", + "LINDEX", + "LLEN", + "LRANGE", + "MGET", + "PTTL", + "RANDOMKEY", + "SCARD", + "SDIFF", + "SINTER", + "SISMEMBER", + "SMEMBERS", + "SRANDMEMBER", + "STRLEN", + "SUNION", + "TTL", + "ZCARD", + "ZCOUNT", + "ZRANGE", + "ZSCORE", +]) + log = logging.getLogger(__name__) @@ -168,50 +212,6 @@ class RedisCluster(Redis): ], 'slot-id'), ) - # Not complete, but covers the major ones - # https://redis.io/commands - READ_COMMANDS = [ - "BITCOUNT", - "BITPOS", - "EXISTS", - "GEODIST", - "GEOHASH", - "GEOPOS", - "GEORADIUS", - "GEORADIUSBYMEMBER", - "GET", - "GETBIT", - "GETRANGE", - "HEXISTS", - "HGET", - "HGETALL", - "HKEYS", - "HLEN", - "HMGET", - "HSTRLEN", - "HVALS", - "KEYS", - "LINDEX", - "LLEN", - "LRANGE", - "MGET", - "PTTL", - "RANDOMKEY", - "SCARD", - "SDIFF", - "SINTER", - "SISMEMBER", - "SMEMBERS", - "SRANDMEMBER", - "STRLEN", - "SUNION", - "TTL", - "ZCARD", - "ZCOUNT", - "ZRANGE", - "ZSCORE", - ] - RESULT_CALLBACKS = dict_merge( string_keys_to_dict([ "BGREWRITEAOF", @@ -608,7 +608,7 @@ def _execute_command(self, *args, **kwargs): else: node = self.connection_pool.get_node_by_slot( slot, - self.read_from_replicas and (command in self.READ_COMMANDS) + self.read_from_replicas and (command in READ_COMMANDS) ) is_read_replica = node['server_type'] == 'slave' diff --git a/rediscluster/pipeline.py b/rediscluster/pipeline.py index 3671737a..3eae7a39 100644 --- a/rediscluster/pipeline.py +++ b/rediscluster/pipeline.py @@ -4,7 +4,7 @@ import sys # rediscluster imports -from .client import RedisCluster +from .client import RedisCluster, READ_COMMANDS from .exceptions import ( RedisClusterException, AskError, MovedError, TryAgainError, ClusterDownError, ) @@ -194,7 +194,7 @@ def _send_cluster_commands(self, stack, raise_on_error=True, allow_redirections= # refer to our internal node -> slot table that tells us where a given # command should route to. slot = self._determine_slot(*c.args) - node = self.connection_pool.get_node_by_slot(slot) + node = self.connection_pool.get_node_by_slot(slot, self.read_from_replicas and c.args[0] in READ_COMMANDS) # little hack to make sure the node name is populated. probably could clean this up. 
             self.connection_pool.nodes.set_node_name(node)

From ac4f36a175227f5d85874cf8411bc56fb3299810 Mon Sep 17 00:00:00 2001
From: Grokzen
Date: Sat, 24 Apr 2021 11:20:11 +0200
Subject: [PATCH 254/263] Add new docs section about release-process and document the package building and uploading using twine tool

---
 docs/index.rst           |  1 +
 docs/release-process.rst | 60 ++++++++++++++++++++++++++++++++++++++++
 2 files changed, 61 insertions(+)
 create mode 100644 docs/release-process.rst

diff --git a/docs/index.rst b/docs/index.rst
index f4ae3300..981ddaf0 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -172,6 +172,7 @@ The Community Guide
    testing
    development
    upgrading
+   release-process
    release-notes
    authors
    license

diff --git a/docs/release-process.rst b/docs/release-process.rst
new file mode 100644
index 00000000..88a6cb22
--- /dev/null
+++ b/docs/release-process.rst
@@ -0,0 +1,60 @@
+Release process
+===============
+
+This section describes the process of how a release of this package is made.
+
+All steps for the twine tool can be found here: https://twine.readthedocs.io/en/latest/
+
+
+Install helper tools
+--------------------
+
+We use the standard sdist package build solution to package the source dist and wheel package into the format that pip and pypi understand.
+
+We then use `twine` as the helper tool to upload and interact with pypi to submit the package to both pypi & testpypi.
+
+First create a new venv that uses at least python3.7, but it is recommended to always use the latest python version. Published releases will be built with python 3.9.0+
+
+Install twine with
+
+.. code-block::
+
+    pip install twine
+
+
+Build python package
+--------------------
+
+First ensure that your `dist/` folder is empty so that you will not attempt to upload a dev version or other packages to the public index.
+
+Create the source dist and wheel dist by running
+
+.. code-block::
+
+    python setup.py sdist bdist_wheel
+
+The built python packages can be found in `dist/`
+
+
+Submit to testpypi
+------------------
+
+It is always good to test out the build first locally so there are no obvious code problems, but also to submit the build to testpypi to verify that the upload works and that you get the version number and `README` section working correctly.
+
+To upload to `testpypi` run
+
+.. code-block::
+
+    twine upload -r testpypi dist/*
+
+It will upload everything to https://test.pypi.org/project/redis-py-cluster/
+
+
+Submit build to public pypi
+---------------------------
+
+To submit the final package to the official public pypi run
+
+.. code-block::
+
+    twine upload dist/*

From 0c6361ce1bad8e2396cb216e7e26dc7e1f8b10df Mon Sep 17 00:00:00 2001
From: Grokzen
Date: Sat, 24 Apr 2021 11:55:15 +0200
Subject: [PATCH 255/263] Add new example script pipeline-readonly-replicas.py that will help to demonstrate and test out read_from_replica feature inside a pipeline object that can greatly reduce the load on master servers and offload some of the work if possible to slave nodes.
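Condensed, the behaviour that this example script demonstrates looks roughly like the following sketch, assuming a local test cluster with a node on 127.0.0.1:7000; the full demonstration script is added in the diff below:

.. code-block:: python

    from rediscluster import RedisCluster

    # read_from_replicas opts in to routing eligible read commands to replicas.
    rc = RedisCluster(host="127.0.0.1", port=7000, decode_responses=True,
                      read_from_replicas=True)

    rc.set("foo", "bar")  # writes still go to the master owning the slot

    # Inside a pipeline the same opt-in flag is used, so the GET calls may be
    # served by a replica (slave) node instead of the master.
    with rc.pipeline(read_from_replicas=True) as pipe:
        pipe.get("foo")
        pipe.get("foo")
        print(pipe.execute())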
--- examples/pipeline-readonly-replicas.py | 72 ++++++++++++++++++++++++++ 1 file changed, 72 insertions(+) create mode 100644 examples/pipeline-readonly-replicas.py diff --git a/examples/pipeline-readonly-replicas.py b/examples/pipeline-readonly-replicas.py new file mode 100644 index 00000000..315361d9 --- /dev/null +++ b/examples/pipeline-readonly-replicas.py @@ -0,0 +1,72 @@ +from rediscluster import RedisCluster +import threading +from time import sleep + +""" +This file will show the difference and how to use the READONLY feature to offload READ specific commands +to replica nodes in your cluster. The script will do two runs with 10 sets of commands each in a threaded environment +both with read_from_replica feature turned off and turned on so you can simulate both cases and test out your code +and ensure that it works before opting in to that feature. + +The absolute best way to show what node is used inside the pipeline is to add a print(node) here + +# pipeline.py +def _send_cluster_command(...): + ... + slot = self._determine_slot(*c.args) + node = self.connection_pool.get_node_by_slot(slot, self.read_from_replicas and c.args[0] in READ_COMMANDS) + print(node) + ... + +and when you run this test script it will show you what node is used in both cases and the first scenario it should show +only "master" as the node type all commands will be sent to. In the second run with read_from_replica=True it should +be a mix of "master" and "slave". +""" + + +def test_run(read_from_replica): + print(f"########\nStarting test run with read_from_replica={read_from_replica}") + rc = RedisCluster(host="127.0.0.1", port=7000, decode_responses=True, read_from_replicas=read_from_replica) + + print(rc.set("foo1", "bar")) + print(rc.set("foo2", "bar")) + print(rc.set("foo3", "bar")) + print(rc.set("foo4", "bar")) + print(rc.set("foo5", "bar")) + print(rc.set("foo6", "bar")) + print(rc.set("foo7", "bar")) + print(rc.set("foo8", "bar")) + print(rc.set("foo9", "bar")) + + print(rc.get("foo1")) + print(rc.get("foo2")) + print(rc.get("foo3")) + print(rc.get("foo4")) + print(rc.get("foo5")) + print(rc.get("foo6")) + print(rc.get("foo7")) + print(rc.get("foo8")) + print(rc.get("foo9")) + + def thread_func(num): + # sleep(0.1) + pipe = rc.pipeline(read_from_replicas=read_from_replica) + pipe.set(f"foo{num}", "bar") + pipe.get(f"foo{num}") + pipe.get(f"foo{num}") + pipe.get(f"foo{num}") + pipe.get(f"foo{num}") + pipe.get(f"foo{num}") + pipe.get(f"foo{num}") + pipe.get(f"foo{num}") + pipe.get(f"foo{num}") + print(threading.current_thread().getName(), pipe.execute()) + + for i in range(0, 15): + x = threading.Thread(target=thread_func, args=(i,), name=f"{i}") + x.start() + + +test_run(False) +sleep(2) +test_run(True) From 7514ec8d90c3909b3fcd51551dfcc4aafbcec029 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Sat, 24 Apr 2021 12:01:20 +0200 Subject: [PATCH 256/263] Minor update to simples example inside README.md --- README.md | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 5b73e740..4b828f22 100644 --- a/README.md +++ b/README.md @@ -52,10 +52,12 @@ Small sample script that shows how to get started with RedisCluster. It can also >>> from rediscluster import RedisCluster >>> # Requires at least one node for cluster discovery. Multiple nodes is recommended. 
->>> startup_nodes = [{"host": "127.0.0.1", "port": "7000"}] - +>>> startup_nodes = [{"host": "127.0.0.1", "port": "7000"}, {"host": "127.0.0.1", "port": "7001"}] >>> rc = RedisCluster(startup_nodes=startup_nodes, decode_responses=True) +# Or you can use the simpler format of providing one node same way as with a Redis() instance +<<< rc = RedisCluster(host="127.0.0.1", port=7000, decode_responses=True) + >>> rc.set("foo", "bar") True >>> print(rc.get("foo")) From f19479bd263a5afc57a4f00f7d3564a07dd2718a Mon Sep 17 00:00:00 2001 From: Grokzen Date: Sat, 24 Apr 2021 17:16:16 +0200 Subject: [PATCH 257/263] Set a default max_connection to 50 in ClusterBlockingConnectionPool to avoid issue where it would create infinite loop when used Fixes #435 --- rediscluster/connection.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rediscluster/connection.py b/rediscluster/connection.py index 90d5c58c..6429c408 100644 --- a/rediscluster/connection.py +++ b/rediscluster/connection.py @@ -420,7 +420,7 @@ class ClusterBlockingConnectionPool(ClusterConnectionPool): """ def __init__(self, startup_nodes=None, init_slot_cache=True, connection_class=None, - max_connections=None, max_connections_per_node=False, reinitialize_steps=None, + max_connections=50, max_connections_per_node=False, reinitialize_steps=None, skip_full_coverage_check=False, nodemanager_follow_cluster=False, timeout=20, **connection_kwargs): self.timeout = timeout From 58bafe478f0d77d3c7a77a0533d2b11fcd1d1c9b Mon Sep 17 00:00:00 2001 From: Grokzen Date: Sat, 24 Apr 2021 17:23:35 +0200 Subject: [PATCH 258/263] Prepare for 2.1.3 release --- docs/conf.py | 4 ++-- docs/release-notes.rst | 7 +++++++ rediscluster/__init__.py | 2 +- setup.py | 2 +- 4 files changed, 11 insertions(+), 4 deletions(-) diff --git a/docs/conf.py b/docs/conf.py index a180ced3..9daff1aa 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -59,9 +59,9 @@ # built documents. # # The short X.Y version. -version = u'2.1.2' +version = u'2.1.3' # The full version, including alpha/beta/rc tags. -release = u'2.1.2' +release = u'2.1.3' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/docs/release-notes.rst b/docs/release-notes.rst index a8786083..a5a87fc8 100644 --- a/docs/release-notes.rst +++ b/docs/release-notes.rst @@ -1,6 +1,13 @@ Release Notes ============= +2.1.3 (Apr 24 2021) +------------------- + + * Add example script pipelin-readonly-replica.py to show how to use replica nodes to offload read commands from primary node + * max_connection now defaults to 50 in ClusterBlockingConnectionPool to avoid issue with infinite loop in queue mechanism + * Using read replica for read commands inside pipeline is now better supported. Feature might be unstable to use as own risk. + 2.1.2 (Apr 18 2021) ------------------- diff --git a/rediscluster/__init__.py b/rediscluster/__init__.py index e22e3e86..4380d3a7 100644 --- a/rediscluster/__init__.py +++ b/rediscluster/__init__.py @@ -33,7 +33,7 @@ def int_or_str(value): # Major, Minor, Fix version -__version__ = '2.1.2' +__version__ = '2.1.3' VERSION = tuple(map(int_or_str, __version__.split('.'))) __all__ = [ diff --git a/setup.py b/setup.py index d88b1354..d9fdc0e6 100644 --- a/setup.py +++ b/setup.py @@ -20,7 +20,7 @@ setup( name="redis-py-cluster", - version="2.1.2", + version="2.1.3", description="Library for communicating with Redis Clusters. 
Built on top of redis-py lib", long_description=readme + '\n\n' + history, long_description_content_type="text/markdown", From 4b55305b2f4d20e6ca4356bce3708c07284ffad0 Mon Sep 17 00:00:00 2001 From: Fran Garcia Date: Sun, 2 May 2021 22:07:29 +0100 Subject: [PATCH 259/263] Do not attempt to disconnect if connection is none When handling a ConnectionError we could be faced with the situation that we still don't have a connection, so attempting to disconnect would result in an error. To prevent this, we first check if the connection is None before attempting to disconnect. --- rediscluster/client.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/rediscluster/client.py b/rediscluster/client.py index 58e6d1a6..a4d80f5e 100644 --- a/rediscluster/client.py +++ b/rediscluster/client.py @@ -647,7 +647,11 @@ def _execute_command(self, *args, **kwargs): except ConnectionError: log.exception("ConnectionError") - connection.disconnect() + # ConnectionError can also be raised if we couldn't get a connection + # from the pool before timing out, so check that this is an actual + # connection before attempting to disconnect. + if connection is not None: + connection.disconnect() connection_error_retry_counter += 1 # Give the node 0.1 seconds to get back up and retry again with same From 672dcc737dc100ebb278f31e08a46d9f796b0fed Mon Sep 17 00:00:00 2001 From: Grokzen Date: Sun, 30 May 2021 11:06:48 +0200 Subject: [PATCH 260/263] Add line to releasenotes about catching connection error --- docs/release-notes.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/release-notes.rst b/docs/release-notes.rst index a5a87fc8..2e0e444f 100644 --- a/docs/release-notes.rst +++ b/docs/release-notes.rst @@ -7,6 +7,7 @@ Release Notes * Add example script pipelin-readonly-replica.py to show how to use replica nodes to offload read commands from primary node * max_connection now defaults to 50 in ClusterBlockingConnectionPool to avoid issue with infinite loop in queue mechanism * Using read replica for read commands inside pipeline is now better supported. Feature might be unstable to use as own risk. + * Fixed that in some cases where ConnectionError is raised, a non existing connection was attempted to be disconnected and caused a sub exception to be raised. 2.1.2 (Apr 18 2021) ------------------- From d94f2feee0bc0e404a4d0680c21a9b2712842fcb Mon Sep 17 00:00:00 2001 From: Grokzen Date: Sun, 30 May 2021 11:25:55 +0200 Subject: [PATCH 261/263] Update date in changelog file --- docs/release-notes.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/release-notes.rst b/docs/release-notes.rst index 2e0e444f..ef1a5201 100644 --- a/docs/release-notes.rst +++ b/docs/release-notes.rst @@ -1,7 +1,7 @@ Release Notes ============= -2.1.3 (Apr 24 2021) +2.1.3 (May 30 2021) ------------------- * Add example script pipelin-readonly-replica.py to show how to use replica nodes to offload read commands from primary node From f0627c91ce23e8784dbc996078428c9bdbacb20b Mon Sep 17 00:00:00 2001 From: Tim Gates Date: Tue, 22 Jun 2021 07:09:06 +1000 Subject: [PATCH 262/263] docs: fix a few simple typos There are small typos in: - CONTRIBUTING.md - docs/development.rst - docs/index.rst - docs/logging.rst - docs/pubsub.rst - docs/upgrading.rst - rediscluster/client.py - rediscluster/connection.py - tests/test_cluster_node_manager.py - tests/test_commands.py - tests/test_encoding_cluster.py Fixes: - Should read `specified` rather than `specefied`. 
- Should read `implementation` rather than `impelmentation`. - Should read `upstream` rather than `uptream`. - Should read `iterate` rather than `itterate`. - Should read `recommended` rather than `reccommended`. - Should read `received` rather than `recieved`. - Should read `preferred` rather than `preffered`. - Should read `possibility` rather than `posiblity`. - Should read `interval` rather than `intervall`. - Should read `examples` rather than `exmaples`. - Should read `documentation` rather than `documentaion`. Closes #461 --- CONTRIBUTING.md | 2 +- docs/development.rst | 2 +- docs/index.rst | 2 +- docs/logging.rst | 2 +- docs/pubsub.rst | 4 ++-- docs/upgrading.rst | 2 +- rediscluster/client.py | 20 ++++++++++---------- rediscluster/connection.py | 4 ++-- tests/test_cluster_node_manager.py | 6 +++--- tests/test_commands.py | 8 ++++---- tests/test_encoding_cluster.py | 2 +- 11 files changed, 27 insertions(+), 27 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index e7c454b8..b78bb87c 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -88,7 +88,7 @@ All tests should be assumed to work against the test environment that is impleme ## Testing strategy and how to implement cluster specific tests -A new way of having the old upstream tests from redis-py combined with the cluster specific and unique tests that is needed to validate cluster functionality. This has been designed to improve the speed of which tests is updated from uptream as new redis-py releases is made and to make it easier to port them into the cluster variant. +A new way of having the old upstream tests from redis-py combined with the cluster specific and unique tests that is needed to validate cluster functionality. This has been designed to improve the speed of which tests is updated from upstream as new redis-py releases is made and to make it easier to port them into the cluster variant. How do you implement a test for this code? diff --git a/docs/development.rst b/docs/development.rst index 6f85f1d8..16dea53e 100644 --- a/docs/development.rst +++ b/docs/development.rst @@ -19,4 +19,4 @@ To start the local development server run from the root folder of this git repo sphinx-autobuild docs docs/_build/html -Open up `localhost:8000` in your web-browser to view the online documentaion +Open up `localhost:8000` in your web-browser to view the online documentation diff --git a/docs/index.rst b/docs/index.rst index 981ddaf0..8072e977 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -31,7 +31,7 @@ or from source code Basic usage example ------------------- -Small sample script that shows how to get started with RedisCluster. It can also be found in the file `exmaples/basic.py`. +Small sample script that shows how to get started with RedisCluster. It can also be found in the file `examples/basic.py`. Additional code examples of more advance functionality can be found in the `examples/` folder in the source code git repo. diff --git a/docs/logging.rst b/docs/logging.rst index 8b897ad7..ebda1b93 100644 --- a/docs/logging.rst +++ b/docs/logging.rst @@ -14,4 +14,4 @@ To setup logging for debugging inside the client during development you can add logger.setLevel(logging.DEBUG) logger.propagate = True -Note that this logging is not reccommended to be used inside production as it can cause a performance drain and a slowdown of your client. +Note that this logging is not recommended to be used inside production as it can cause a performance drain and a slowdown of your client. 
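Spelled out, the debug logging setup that docs/logging.rst describes amounts to roughly the snippet below. This is a sketch only; it assumes the library's loggers are created with logging.getLogger(__name__) and therefore all live under the `rediscluster` name:

.. code-block:: python

    import logging

    # Assumption: rediscluster modules create their loggers via
    # logging.getLogger(__name__), so one parent logger covers them all.
    logger = logging.getLogger('rediscluster')
    logger.addHandler(logging.StreamHandler())
    logger.setLevel(logging.DEBUG)
    logger.propagate = True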
diff --git a/docs/pubsub.rst b/docs/pubsub.rst index 9bb76074..a6eeb045 100644 --- a/docs/pubsub.rst +++ b/docs/pubsub.rst @@ -7,7 +7,7 @@ According to the current official redis documentation on `PUBLISH`:: Integer reply: the number of clients that received the message. -It was initially assumed that if we had clients connected to different nodes in the cluster it would still report back the correct number of clients that recieved the message. +It was initially assumed that if we had clients connected to different nodes in the cluster it would still report back the correct number of clients that received the message. However after some testing of this command it was discovered that it would only report the number of clients that have subscribed on the same server the `PUBLISH` command was executed on. @@ -60,7 +60,7 @@ This new solution is probably future safe and it will probably be a similar solu Known limitations with pubsub ----------------------------- -Pattern subscribe and publish do not work properly because if we hash a pattern like `fo*` we will get a keyslot for that string but there is a endless posiblity of channel names based on that pattern that we can't know in advance. This feature is not limited but the commands is not recommended to use right now. +Pattern subscribe and publish do not work properly because if we hash a pattern like `fo*` we will get a keyslot for that string but there is a endless possibility of channel names based on that pattern that we can't know in advance. This feature is not limited but the commands is not recommended to use right now. The implemented solution will only work if other clients use/adopt the same behaviour. If some other client behaves differently, there might be problems with `PUBLISH` and `SUBSCRIBE` commands behaving wrong. diff --git a/docs/upgrading.rst b/docs/upgrading.rst index e4b6c0c8..a0ebef68 100644 --- a/docs/upgrading.rst +++ b/docs/upgrading.rst @@ -108,7 +108,7 @@ Added new `ClusterCrossSlotError` exception class. Added optional `max_connections_per_node` parameter to `ClusterConnectionPool` which changes behavior of `max_connections` so that it applies per-node rather than across the whole cluster. The new feature is opt-in, and the existing default behavior is unchanged. Users are recommended to opt-in as the feature fixes two important problems. First is that some nodes could be starved for connections after max_connections is used up by connecting to other nodes. Second is that the asymmetric number of connections across nodes makes it challenging to configure file descriptor and redis max client settings. Reinitialize on `MOVED` errors will not run on every error but instead on every -25 error to avoid excessive cluster reinitialize when used in multiple threads and resharding at the same time. If you want to go back to the old behaviour with reinitialize on every error you should pass in `reinitialize_steps=1` to the client constructor. If you want to increase or decrease the intervall of this new behaviour you should set `reinitialize_steps` in the client constructor to a value that you want. +25 error to avoid excessive cluster reinitialize when used in multiple threads and resharding at the same time. If you want to go back to the old behaviour with reinitialize on every error you should pass in `reinitialize_steps=1` to the client constructor. If you want to increase or decrease the interval of this new behaviour you should set `reinitialize_steps` in the client constructor to a value that you want. 
Pipelines in general have received a lot of attention so if you are using pipelines in your code, ensure that you test the new code out a lot before using it to make sure it still works as you expect. diff --git a/rediscluster/client.py b/rediscluster/client.py index a4d80f5e..844cb4c9 100644 --- a/rediscluster/client.py +++ b/rediscluster/client.py @@ -774,7 +774,7 @@ def cluster_addslots(self, node_id, *slots): """ Assign new hash slots to receiving node - Sends to specefied node + Sends to specified node """ return self.execute_command('CLUSTER ADDSLOTS', *slots, node_id=node_id) @@ -782,7 +782,7 @@ def cluster_countkeysinslot(self, slot_id): """ Return the number of local keys in the specified hash slot - Send to node based on specefied slot_id + Send to node based on specified slot_id """ return self.execute_command('CLUSTER COUNTKEYSINSLOT', slot_id) @@ -790,7 +790,7 @@ def cluster_count_failure_report(self, node_id): """ Return the number of failure reports active for a given node - Sends to specefied node + Sends to specified node """ return self.execute_command('CLUSTER COUNT-FAILURE-REPORTS', node_id=node_id) @@ -812,7 +812,7 @@ def cluster_failover(self, node_id, option=None): """ Forces a slave to perform a manual failover of its master - Sends to specefied node + Sends to specified node """ if option: if option.upper() not in ['FORCE', 'TAKEOVER']: @@ -842,7 +842,7 @@ def cluster_meet(self, node_id, host, port): """ Force a node cluster to handshake with another node. - Sends to specefied node + Sends to specified node """ return self.execute_command('CLUSTER MEET', host, port, node_id=node_id) @@ -858,7 +858,7 @@ def cluster_replicate(self, target_node_id): """ Reconfigure a node as a slave of the specified master node - Sends to specefied node + Sends to specified node """ return self.execute_command('CLUSTER REPLICATE', target_node_id) @@ -869,7 +869,7 @@ def cluster_reset(self, node_id, soft=True): If 'soft' is True then it will send 'SOFT' argument If 'soft' is False then it will send 'HARD' argument - Sends to specefied node + Sends to specified node """ return self.execute_command('CLUSTER RESET', b'SOFT' if soft else b'HARD', node_id=node_id) @@ -901,7 +901,7 @@ def cluster_save_config(self): def cluster_get_keys_in_slot(self, slot, num_keys): """ - Returns the number of keys in the specefied cluster slot + Returns the number of keys in the specified cluster slot """ return self.execute_command('CLUSTER GETKEYSINSLOT', slot, num_keys) @@ -909,7 +909,7 @@ def cluster_set_config_epoch(self, node_id, epoch): """ Set the configuration epoch in a new node - Sends to specefied node + Sends to specified node """ return self.execute_command('CLUSTER SET-CONFIG-EPOCH', epoch, node_id=node_id) @@ -918,7 +918,7 @@ def cluster_setslot(self, node_id, slot_id, state, bind_to_node_id=None): """ Bind an hash slot to a specific node - Sends to specefied node + Sends to specified node """ if state.upper() in ('IMPORTING', 'MIGRATING', 'NODE') and node_id is not None: return self.execute_command('CLUSTER SETSLOT', slot_id, state, node_id) diff --git a/rediscluster/connection.py b/rediscluster/connection.py index 6429c408..70dadd0b 100644 --- a/rediscluster/connection.py +++ b/rediscluster/connection.py @@ -596,7 +596,7 @@ def get_connection_by_key(self, key, command): def get_master_connection_by_slot(self, slot): """ - Returns a connection for the Master node for the specefied slot. + Returns a connection for the Master node for the specified slot. 
Do not return a random node if master node is not available for any reason. """ @@ -606,7 +606,7 @@ def get_master_connection_by_slot(self, slot): def get_random_master_slave_connection_by_slot(self, slot): """ Returns a random connection from the set of (master + slaves) for the - specefied slot. If connection is not reachable then return a random connection. + specified slot. If connection is not reachable then return a random connection. """ self._checkpid() diff --git a/tests/test_cluster_node_manager.py b/tests/test_cluster_node_manager.py index ac740629..3b25392f 100644 --- a/tests/test_cluster_node_manager.py +++ b/tests/test_cluster_node_manager.py @@ -158,7 +158,7 @@ def patch_execute_command(*args, **kwargs): def test_empty_startup_nodes(): """ - It should not be possible to create a node manager with no nodes specefied + It should not be possible to create a node manager with no nodes specified """ with pytest.raises(RedisClusterException): NodeManager() @@ -252,7 +252,7 @@ def execute_command(*args, **kwargs): def test_all_nodes(): """ - Set a list of nodes and it should be possible to itterate over all + Set a list of nodes and it should be possible to iterate over all """ n = NodeManager(startup_nodes=[{"host": "127.0.0.1", "port": 7000}]) n.initialize() @@ -266,7 +266,7 @@ def test_all_nodes(): def test_all_nodes_masters(): """ Set a list of nodes with random masters/slaves config and it shold be possible - to itterate over all of them. + to iterate over all of them. """ n = NodeManager( startup_nodes=[ diff --git a/tests/test_commands.py b/tests/test_commands.py index b85d069c..b9f3b743 100644 --- a/tests/test_commands.py +++ b/tests/test_commands.py @@ -39,7 +39,7 @@ def cleanup(): def redis_server_time(client): """ - Method adapted from uptream to return the server timestamp from the main + Method adapted from upstream to return the server timestamp from the main cluster node that we assigned as port 7000 node. This is not ideal but will be done for now. """ @@ -1987,7 +1987,7 @@ def test_cluster_slaves(self, mock_cluster_resp_slaves): @skip_for_no_cluster_impl() def test_readwrite(self, r): """ - FIXME: Needs cluster impelmentation + FIXME: Needs cluster implementation """ assert r.readwrite() @@ -1995,7 +1995,7 @@ def test_readwrite(self, r): @skip_for_no_cluster_impl() def test_readonly_invalid_cluster_state(self, r): """ - FIXME: Needs cluster impelmentation + FIXME: Needs cluster implementation """ with pytest.raises(exceptions.RedisError): r.readonly() @@ -2004,7 +2004,7 @@ def test_readonly_invalid_cluster_state(self, r): @skip_for_no_cluster_impl() def test_readonly(self, mock_cluster_resp_ok): """ - FIXME: Needs cluster impelmentation + FIXME: Needs cluster implementation """ assert mock_cluster_resp_ok.readonly() is True diff --git a/tests/test_encoding_cluster.py b/tests/test_encoding_cluster.py index c8a2577a..166afa21 100644 --- a/tests/test_encoding_cluster.py +++ b/tests/test_encoding_cluster.py @@ -12,7 +12,7 @@ class TestEncodingCluster(object): We must import the entire class due to the seperate fixture that uses RedisCluster as client class instead of the normal Redis instance. 
- FIXME: If possible, monkeypatching TestEncoding class would be preffered but kinda impossible in reality
+ FIXME: If possible, monkeypatching TestEncoding class would be preferred but kinda impossible in reality
 """
 @pytest.fixture()
 def r(self, request):

From 8a8102a9d758d61a7ec1e2ac9050fcd34029ff3f Mon Sep 17 00:00:00 2001
From: Grokzen
Date: Sat, 12 Mar 2022 12:50:41 +0100
Subject: [PATCH 263/263] Update README.md

---
 README.md | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/README.md b/README.md
index 4b828f22..e605ad9c 100644
--- a/README.md
+++ b/README.md
@@ -1,3 +1,10 @@
+# redis-py-cluster EOL
+
+In the upstream package *redis-py* that this library extends, this code base has been ported into the main branch as of version `4.1.0 (Dec 26, 2021)`. That effectively ends the need for this package for any version after that release, since cluster support is available there natively. If you are upgrading your redis-py version, you should plan time to migrate from this package to redis-py itself. The move to the first released version should be seamless, with only a few small changes required. This means that the `2.1.x` release is the very last major release of this package. A small support release may still happen if one is needed to sort out some critical issue here, but this is not expected, as the development time spent on this package in the last few years has been very low. This repo will not be put into GitHub's archive mode, but it should be considered to be in an archived state.
+
+I want to give a big thanks to some of the people who have put contributions, work, time and effort into making this project what it is today. First, one of the main contributors, 72Squared, and his team, who helped build many of the core features, tried out new and untested code and provided many optimizations. The team over at AWS for putting time, effort and skill into porting this over to `redis-py`. The team at RedisLabs for all of their support and time in building a fantastic redis community over the last few years. Antirez for making the reference client that this repo was written from and based on, and for making one of my favorite databases in the ecosystem. And lastly, the entire community for all of its contributions to and use of this repo.
+
+
 # redis-py-cluster

 This client provides a client for redis cluster that was added in redis 3.0.
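For anyone planning the migration described in the note above, below is a minimal sketch of what it might look like. It assumes redis-py `4.1.0` or later, where the cluster client is exposed as `redis.cluster.RedisCluster`, and a cluster node reachable at `127.0.0.1:7000`; adjust the connection details to your own setup.

```python
# Minimal migration sketch, assuming redis-py >= 4.1.0 and a cluster node on 127.0.0.1:7000.

# Before: the client from this package (redis-py-cluster 2.x).
# from rediscluster import RedisCluster
# rc = RedisCluster(startup_nodes=[{"host": "127.0.0.1", "port": "7000"}], decode_responses=True)

# After: the cluster client that ships with redis-py 4.1.0+.
from redis.cluster import RedisCluster

# Connect through any reachable cluster node; the client discovers the other nodes itself.
rc = RedisCluster(host="127.0.0.1", port=7000, decode_responses=True)

rc.set("foo", "bar")
print(rc.get("foo"))
```

Most command methods keep the same names, so for straightforward usage the change is usually limited to the import and the constructor arguments.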