summaryrefslogtreecommitdiff
path: root/swift/common/db_replicator.py
diff options
context:
space:
mode:
authorClay Gerrard <clay.gerrard@gmail.com>2018-10-29 14:49:48 -0500
committerTim Burke <tim@swiftstack.com>2018-10-30 22:28:05 +0000
commit06cf5d298fb6b103899aa358e1cb4b828f502dc5 (patch)
tree04b8a4c952134a1965b327cc969de4be4feec2d4 /swift/common/db_replicator.py
parent24bf5eea8cd5d8d4ce3d3cb166a48bf8781ccad9 (diff)
downloadswift-06cf5d298fb6b103899aa358e1cb4b828f502dc5.tar.gz
Add databases_per_second to db daemons
Most daemons have a "go as fast as you can then sleep for 30 seconds" strategy towards resource utilization; the object-updater and object-auditor, however, have some "X_per_second" options that allow operators much better control over how they spend their I/O budget. This change extends that pattern into the account-replicator, container-replicator, and container-sharder, which have been known to peg CPUs when they're not IO limited. Partial-Bug: #1784753 Change-Id: Ib7f2497794fa2f384a1a6ab500b657c624426384
Diffstat (limited to 'swift/common/db_replicator.py')
-rw-r--r--swift/common/db_replicator.py11
1 file changed, 9 insertions, 2 deletions
diff --git a/swift/common/db_replicator.py b/swift/common/db_replicator.py
index 478d1f83b..d0edb4b25 100644
--- a/swift/common/db_replicator.py
+++ b/swift/common/db_replicator.py
@@ -33,7 +33,7 @@ from swift.common.utils import get_logger, whataremyips, storage_directory, \
renamer, mkdirs, lock_parent_directory, config_true_value, \
unlink_older_than, dump_recon_cache, rsync_module_interpolation, \
json, parse_override_options, round_robin_iter, Everything, get_db_files, \
- parse_db_filename, quote
+ parse_db_filename, quote, RateLimitedIterator
from swift.common import ring
from swift.common.ring.utils import is_local_device
from swift.common.http import HTTP_NOT_FOUND, HTTP_INSUFFICIENT_STORAGE, \
@@ -204,6 +204,8 @@ class Replicator(Daemon):
' to use option %(type)s-replicator/'
'interval.'
% {'type': self.server_type})
+ self.databases_per_second = int(
+ conf.get('databases_per_second', 50))
self.node_timeout = float(conf.get('node_timeout', 10))
self.conn_timeout = float(conf.get('conn_timeout', 0.5))
self.rsync_compress = config_true_value(
@@ -733,6 +735,11 @@ class Replicator(Daemon):
def report_up_to_date(self, full_info):
return True
+ def roundrobin_datadirs(self, dirs):
+ return RateLimitedIterator(
+ roundrobin_datadirs(dirs),
+ elements_per_second=self.databases_per_second)
+
def run_once(self, *args, **kwargs):
"""Run a replication pass once."""
override_options = parse_override_options(once=True, **kwargs)
@@ -789,7 +796,7 @@ class Replicator(Daemon):
"file, not replicating",
", ".join(ips), self.port)
self.logger.info(_('Beginning replication run'))
- for part, object_file, node_id in roundrobin_datadirs(dirs):
+ for part, object_file, node_id in self.roundrobin_datadirs(dirs):
self.cpool.spawn_n(
self._replicate_object, part, object_file, node_id)
self.cpool.waitall()