author    Tim Burke <tim.burke@gmail.com>    2020-04-20 17:40:52 -0700
committer Pete Zaitcev <zaitcev@kotori.zaitcev.us>    2021-02-23 17:43:37 -0600
commit    1b3b776964651acae6c17f581855b0fc7094acee (patch)
tree      fe9f21ad418bee164a21f39b5b449f6e7b6c3629
parent    b484c1e1cf299a71bdb80188ccd3dbde8dc73e93 (diff)
download  swift-1b3b776964651acae6c17f581855b0fc7094acee.tar.gz
sharding: Add probe test that exercises swift-manage-shard-ranges
Cherry-Picked-From: Ic7c40589679c290e5565f9581f70b9a1c070f6ab
Change-Id: I0af92f6a54aaccd38ef82dd072f7e10fdff7184c
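
The new probe test drives the operator workflow for manually sharding a
container via the swift-manage-shard-ranges CLI. As a minimal sketch of the
invocation it exercises, mirroring the subprocess calls in the diff below
(the db path here is a placeholder; the test resolves the real one through
its get_db_file() helper):

    import subprocess

    # Scan the container DB for shard ranges of at most 2 objects each,
    # replace any existing ranges with them, and mark the DB ready to
    # shard (--enable). Placeholder path, for illustration only.
    db_file = '/srv/node1/containers/.../<hash>.db'
    subprocess.check_output([
        'swift-manage-shard-ranges', db_file,
        'find_and_replace', '2', '--enable'],
        stderr=subprocess.STDOUT)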
-rw-r--r--  test/probe/test_sharder.py | 93
1 file changed, 91 insertions(+), 2 deletions(-)
diff --git a/test/probe/test_sharder.py b/test/probe/test_sharder.py
index 3d06b0bc1..b453af565 100644
--- a/test/probe/test_sharder.py
+++ b/test/probe/test_sharder.py
@@ -16,6 +16,7 @@ import hashlib
import json
import os
import shutil
+import subprocess
import uuid
from nose import SkipTest
@@ -195,12 +196,16 @@ class BaseTestContainerSharding(ReplProbeTest):
return (utils.storage_directory(datadir, part, container_hash),
container_hash)
- def get_broker(self, part, node, account=None, container=None):
+ def get_db_file(self, part, node, account=None, container=None):
container_dir, container_hash = self.get_storage_dir(
part, node, account=account, container=container)
db_file = os.path.join(container_dir, container_hash + '.db')
self.assertTrue(get_db_files(db_file)) # sanity check
- return ContainerBroker(db_file)
+ return db_file
+
+ def get_broker(self, part, node, account=None, container=None):
+ return ContainerBroker(
+ self.get_db_file(part, node, account, container))
def categorize_container_dir_content(self, account=None, container=None):
account = account or self.brain.account
@@ -2409,3 +2414,87 @@ class TestContainerShardingMoreUTF8(TestContainerSharding):
self.container_name = cont_name.encode('utf8').ljust(name_length, b'x')
if not six.PY2:
self.container_name = self.container_name.decode('utf8')
+
+
+class TestManagedContainerSharding(BaseTestContainerSharding):
+ '''Test sharding using swift-manage-shard-ranges'''
+ def test_manage_shard_ranges(self):
+ obj_names = self._make_object_names(4)
+ self.put_objects(obj_names)
+
+ client.post_container(self.url, self.admin_token, self.container_name,
+ headers={'X-Container-Sharding': 'on'})
+
+ # run replicators first time to get sync points set
+ self.replicators.once()
+
+ # sanity check: we don't have nearly enough objects for this to shard
+ # automatically
+ self.sharders.once(number=self.brain.node_numbers[0],
+ additional_args='--partitions=%s' % self.brain.part)
+ self.assert_container_state(self.brain.nodes[0], 'unsharded', 0)
+
+ subprocess.check_output([
+ 'swift-manage-shard-ranges',
+ self.get_db_file(self.brain.part, self.brain.nodes[0]),
+ 'find_and_replace', '2', '--enable'], stderr=subprocess.STDOUT)
+ self.assert_container_state(self.brain.nodes[0], 'unsharded', 2)
+
+ # "Run container-replicator to replicate them to other nodes."
+ self.replicators.once()
+ # "Run container-sharder on all nodes to shard the container."
+ self.sharders.once(additional_args='--partitions=%s' % self.brain.part)
+
+ # Everybody's settled
+ self.assert_container_state(self.brain.nodes[0], 'sharded', 2)
+ self.assert_container_state(self.brain.nodes[1], 'sharded', 2)
+ self.assert_container_state(self.brain.nodes[2], 'sharded', 2)
+ self.assert_container_listing(obj_names)
+
+ def test_manage_shard_ranges_used_poorly(self):
+ obj_names = self._make_object_names(8)
+ self.put_objects(obj_names)
+
+ client.post_container(self.url, self.admin_token, self.container_name,
+ headers={'X-Container-Sharding': 'on'})
+
+ # run replicators first time to get sync points set
+ self.replicators.once()
+
+ subprocess.check_output([
+ 'swift-manage-shard-ranges',
+ self.get_db_file(self.brain.part, self.brain.nodes[0]),
+ 'find_and_replace', '2', '--enable'], stderr=subprocess.STDOUT)
+ self.assert_container_state(self.brain.nodes[0], 'unsharded', 4)
+
+ # *Also* go find shard ranges on *another node*, like a dumb-dumb
+ subprocess.check_output([
+ 'swift-manage-shard-ranges',
+ self.get_db_file(self.brain.part, self.brain.nodes[1]),
+ 'find_and_replace', '3', '--enable'], stderr=subprocess.STDOUT)
+ self.assert_container_state(self.brain.nodes[1], 'unsharded', 3)
+
+ # Run things out of order (they were likely running as daemons anyway)
+ self.sharders.once(number=self.brain.node_numbers[0],
+ additional_args='--partitions=%s' % self.brain.part)
+ self.sharders.once(number=self.brain.node_numbers[1],
+ additional_args='--partitions=%s' % self.brain.part)
+ self.replicators.once()
+
+ # Uh-oh
+ self.assert_container_state(self.brain.nodes[0], 'sharding', 7)
+ self.assert_container_state(self.brain.nodes[1], 'sharding', 7)
+ # There's a race: the third replica may be sharding, may be unsharded
+
+ # Try it again a few times
+ self.sharders.once(additional_args='--partitions=%s' % self.brain.part)
+ self.replicators.once()
+ self.sharders.once(additional_args='--partitions=%s' % self.brain.part)
+
+ # It's not really fixing itself...
+ self.assert_container_state(self.brain.nodes[0], 'sharding', 7)
+ self.assert_container_state(self.brain.nodes[1], 'sharding', 7)
+
+ # But hey, at least listings still work! They're just going to get
+ # horribly out of date as more objects are added
+ self.assert_container_listing(obj_names)
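
For reference, the assert_container_state checks above boil down to
inspecting the container DB on the target node. A rough sketch, under the
assumption that it relies on ContainerBroker's get_db_state() and
get_shard_ranges() (the real helper in the probe-test base class also
resolves the node's DB file and inspects shard-range states):

    from swift.container.backend import ContainerBroker

    def check_state(db_file, expected_state, expected_range_count):
        # Load the on-disk container DB and confirm both its sharding
        # state ('unsharded'/'sharding'/'sharded') and how many shard
        # ranges it currently holds. Simplified sketch, not the actual
        # probe-test helper.
        broker = ContainerBroker(db_file)
        assert broker.get_db_state() == expected_state
        assert len(broker.get_shard_ranges()) == expected_range_count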