author    Alistair Coles <alistairncoles@gmail.com>  2022-07-07 18:29:06 +0100
committer Alistair Coles <alistairncoles@gmail.com>  2022-07-27 15:48:40 +0100
commit    38271142eb403a1cdf587616b6d26bfc6b7fb382 (patch)
tree      b78893b75a5de51e44de533200989f2e3d736a0c /swift/container
parent    c3aa1ce66c20c0c9148a4d3e22d3ff5bcd12bfdb (diff)
download  swift-38271142eb403a1cdf587616b6d26bfc6b7fb382.tar.gz
sharder: process deleted DBs
It is possible for some replicas of a container DB to be sharded, and
for the container to then be emptied and deleted, before another
replica of the DB has started sharding. Previously, the unsharded
replica would remain in the unsharded state because the sharder would
not process deleted DBs.

This patch modifies the sharder to always process deleted DBs; this
will result in the sharder making some additional DB queries for shard
ranges in order to determine whether any processing is required for
deleted DBs.

Auto-sharding will not act on deleted DBs.

Change-Id: Ia6ad92042aa9a51e3ddefec0b9b8acae44e9e9d7
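As an aid to review, here is a minimal, self-contained sketch of the control-flow change described above. ToyBroker, the state string and the return values are illustrative stand-ins, not the real swift.container APIs:

    # Illustrative sketch only: toy stand-ins showing where the old early
    # return sat in the per-DB processing, and how deleted DBs now proceed.
    UNSHARDED = 'unsharded'

    class ToyBroker(object):
        def __init__(self, state, deleted, shard_ranges=()):
            self.state = state
            self.deleted = deleted
            self.shard_ranges = list(shard_ranges)

    def process_broker(broker, node, auto_shard):
        is_deleted = broker.deleted  # now checked once, up front

        # ... misplaced objects are moved here, for deleted DBs too ...

        # Old behaviour: `if is_deleted: return` at this point, so an
        # unsharded replica of an already-deleted container could never
        # catch up with peers that sharded before the delete.

        # New behaviour: carry on, but auto-sharding never initiates
        # work on a deleted DB.
        is_leader = node['index'] == 0 and auto_shard and not is_deleted

        if broker.state == UNSHARDED:
            if is_leader:
                return 'leader may bootstrap sharding'
            if broker.shard_ranges:
                # Deleted replicas now reach this path: shard ranges
                # replicated from sharded peers let them catch up.
                return 'shard using replicated shard ranges'
        return 'no sharding action'

    # A deleted, unsharded replica is no longer stuck:
    assert process_broker(ToyBroker(UNSHARDED, deleted=True,
                                    shard_ranges=['sr']),
                          {'index': 1}, auto_shard=True
                          ) == 'shard using replicated shard ranges'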
Diffstat (limited to 'swift/container')
-rw-r--r--  swift/container/sharder.py  72
1 file changed, 36 insertions(+), 36 deletions(-)
diff --git a/swift/container/sharder.py b/swift/container/sharder.py
index 1812de74b..98ec1bbd3 100644
--- a/swift/container/sharder.py
+++ b/swift/container/sharder.py
@@ -1004,12 +1004,14 @@ class ContainerSharder(ContainerSharderConf, ContainerReplicator):
headers=headers, contents=body)
except DirectClientException as err:
self.logger.warning(
- 'Failed to put shard ranges to %s:%s/%s: %s',
- node['ip'], node['port'], node['device'], err.http_status)
+ 'Failed to put shard ranges to %s:%s/%s %s/%s: %s',
+ node['ip'], node['port'], node['device'],
+ quote(account), quote(container), err.http_status)
except (Exception, Timeout) as err:
self.logger.exception(
- 'Failed to put shard ranges to %s:%s/%s: %s',
- node['ip'], node['port'], node['device'], err)
+ 'Failed to put shard ranges to %s:%s/%s %s/%s: %s',
+ node['ip'], node['port'], node['device'],
+ quote(account), quote(container), err)
else:
return True
return False
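The hunk above only enriches the failure logging with the destination account and container. With made-up values, and standard urllib quoting assumed in place of Swift's own quote helper, the new message renders like:

    from urllib.parse import quote

    # Illustration only: how the new format string renders; the address,
    # device, account, container and status code are all invented.
    print('Failed to put shard ranges to %s:%s/%s %s/%s: %s' % (
        '10.0.0.1', 6201, 'sdb1',
        quote('.shards_AUTH_test'), quote('container-abc'), 404))
    # Failed to put shard ranges to 10.0.0.1:6201/sdb1
    #     .shards_AUTH_test/container-abc: 404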
@@ -2029,8 +2031,10 @@ class ContainerSharder(ContainerSharderConf, ContainerReplicator):
def _process_broker(self, broker, node, part):
broker.get_info() # make sure account/container are populated
state = broker.get_db_state()
- self.logger.debug('Starting processing %s state %s',
- quote(broker.path), state)
+ is_deleted = broker.is_deleted()
+ self.logger.debug('Starting processing %s state %s%s',
+ quote(broker.path), state,
+ ' (deleted)' if is_deleted else '')
if not self._audit_container(broker):
return
@@ -2038,13 +2042,7 @@ class ContainerSharder(ContainerSharderConf, ContainerReplicator):
# now look and deal with misplaced objects.
self._move_misplaced_objects(broker)
- if broker.is_deleted():
- # This container is deleted so we can skip it. We still want
- # deleted containers to go via misplaced items because they may
- # have new objects sitting in them that may need to move.
- return
-
- is_leader = node['index'] == 0 and self.auto_shard
+ is_leader = node['index'] == 0 and self.auto_shard and not is_deleted
if state in (UNSHARDED, COLLAPSED):
if is_leader and broker.is_root_container():
# bootstrap sharding of root container
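The revised is_leader assignment above is the line that keeps auto-sharding away from deleted DBs; reduced to its bare logic (stand-in function, illustration only):

    # Only the first primary node acts as leader, and only when
    # auto-sharding is enabled and the DB is not deleted.
    def is_leader(node_index, auto_shard, is_deleted):
        return node_index == 0 and auto_shard and not is_deleted

    assert is_leader(0, True, False)
    assert not is_leader(0, True, True)   # deleted DB: never auto-shard
    assert not is_leader(1, True, False)  # non-leader replica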
@@ -2096,29 +2094,31 @@ class ContainerSharder(ContainerSharderConf, ContainerReplicator):
self.logger.debug('Remaining in sharding state %s',
quote(broker.path))
- if state == SHARDED and broker.is_root_container():
- # look for shrink stats
- self._identify_shrinking_candidate(broker, node)
- if is_leader:
- self._find_and_enable_shrinking_candidates(broker)
- self._find_and_enable_sharding_candidates(broker)
- for shard_range in broker.get_shard_ranges(
- states=[ShardRange.SHARDING]):
- self._send_shard_ranges(
- shard_range.account, shard_range.container,
- [shard_range])
-
- if not broker.is_root_container():
- # Update the root container with this container's shard range
- # info; do this even when sharded in case previous attempts
- # failed; don't do this if there is no own shard range. When
- # sharding a shard, this is when the root will see the new
- # shards move to ACTIVE state and the sharded shard
- # simultaneously become deleted.
- self._update_root_container(broker)
-
- self.logger.debug('Finished processing %s state %s',
- quote(broker.path), broker.get_db_state())
+ if not broker.is_deleted():
+ if state == SHARDED and broker.is_root_container():
+ # look for shrink stats
+ self._identify_shrinking_candidate(broker, node)
+ if is_leader:
+ self._find_and_enable_shrinking_candidates(broker)
+ self._find_and_enable_sharding_candidates(broker)
+ for shard_range in broker.get_shard_ranges(
+ states=[ShardRange.SHARDING]):
+ self._send_shard_ranges(
+ shard_range.account, shard_range.container,
+ [shard_range])
+
+ if not broker.is_root_container():
+ # Update the root container with this container's shard range
+ # info; do this even when sharded in case previous attempts
+ # failed; don't do this if there is no own shard range. When
+ # sharding a shard, this is when the root will see the new
+ # shards move to ACTIVE state and the sharded shard
+ # simultaneously become deleted.
+ self._update_root_container(broker)
+
+ self.logger.debug('Finished processing %s state %s%s',
+ quote(broker.path), broker.get_db_state(),
+ ' (deleted)' if is_deleted else '')
def _one_shard_cycle(self, devices_to_shard, partitions_to_shard):
"""