author     Zhao Chao <zhaochao1984@gmail.com>  2018-01-22 13:55:36 +0800
committer  Manoj Kumar <kumarmn@us.ibm.com>    2018-01-24 17:21:26 +0000
commit     2fd8c66f64b086104a9b00334f0eab71d4f49a9a (patch)
tree       84834a950a0e850ac83f92fce75943f3ccd69f9f
parent     a70d6b9cbebb8cce380ec387798e3e86fcde075f (diff)
download   trove-2fd8c66f64b086104a9b00334f0eab71d4f49a9a.tar.gz
Remove log translations
According to discussions on the ML, log messages should not be translated any more. This patch also:

* removes all usage of _LI, _LW, _LE, _LC;
* updates the log translation hacking rule.

ML discussions:
http://lists.openstack.org/pipermail/openstack-i18n/2016-November/002574.html
http://lists.openstack.org/pipermail/openstack-dev/2017-March/113365.html

The original commit message is kept below as a historical note:

remove unwanted log translations

Recent discussions on the ML have led to the decision to eliminate all translations for messages that are destined for log files, but retain them for messages that are going to be returned as exceptions and potentially shown to end users via an API. See [1], [2]. This change does that as follows.

1. If a string is being used to generate an exception, it will still be translated. Since Trove used both _LE and _ for these translations, there are some cases where _LE has simply been changed to _ rather than removed.

2. If a string is used for a logging message, the use of _, _LE, _LW, _LI is removed.

Also, I have long felt that we have had a creep of checks in the pep8 tests that apparently make the code easier to read. I strongly believe that these kinds of "easier to read" things make sense if they are followed across all projects and not just gratuitously added one project at a time. I've taken this opportunity, to reduce the merge mess caused by this change, to sync up our ignore flags with the long list of ignores from Nova. When Nova made the change to remove log translations, they could do it in an automated way, as I have here, because they did not have to deal with under- and over-indented lines for visual indent (E127). The same applies to Cinder.

Changes 448443 [3] and 447839 [4] were inadequate because they only addressed a small part of the problem, namely removing the use of _LE, _LI, and _LW, and I think this is a change we don't need to dribble in a few files at a time. The changes are straightforward and should be taken in a single lump to make it easy to deal with the merges coming.

[1] http://lists.openstack.org/pipermail/openstack-operators/2017-March/012887.html
[2] http://lists.openstack.org/pipermail/openstack-dev/2017-March/113365.html
[3] https://review.openstack.org/448443
[4] https://review.openstack.org/447839

Co-Authored-By: Amrith Kumar <amrith@amrith.org>
Co-Authored-By: Valencia Serrao <vserrao@us.ibm.com>
Change-Id: I5f86c982469e625997fc8bd15c6fae0fc77a8c64
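The pattern repeated throughout the diff below is easiest to see in a small sketch. This is illustrative only: the function names and messages are hypothetical (loosely based on strings appearing in this patch), not copied from any single Trove module. Log calls lose the translation marker, user-facing exception text keeps _(), and a string that feeds both a log call and an exception is split into an untranslated log format and a translated exception format.

    # Illustrative sketch only; imports mirror the ones used in this patch.
    from oslo_log import log as logging

    from trove.common.i18n import _   # still used for user-facing text

    LOG = logging.getLogger(__name__)


    def register_manager(name):
        # Log messages are now plain, untranslated strings
        # (before this patch: LOG.error(_LE("..."), name)).
        LOG.error("Manager class not registered for datastore manager %s",
                  name)

        # Exception text can be returned to end users via the API,
        # so it keeps the _() translation marker.
        raise RuntimeError(
            _("Manager class not registered for datastore manager %s") % name)


    def check_task(task_name):
        # Where the same text is both logged and raised, the patch splits it
        # into an untranslated log format and a translated exception format.
        log_fmt = "This action cannot be performed while the task is '%s'."
        exc_fmt = _("This action cannot be performed while the task is '%s'.")
        LOG.error(log_fmt, task_name)
        raise RuntimeError(exc_fmt % task_name)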
-rw-r--r--  HACKING.rst | 3
-rw-r--r--  trove/backup/models.py | 6
-rw-r--r--  trove/backup/service.py | 7
-rw-r--r--  trove/cluster/models.py | 16
-rw-r--r--  trove/cmd/guest.py | 6
-rw-r--r--  trove/common/auth.py | 15
-rw-r--r--  trove/common/base_exception.py | 4
-rw-r--r--  trove/common/cfg.py | 4
-rw-r--r--  trove/common/debug_utils.py | 11
-rw-r--r--  trove/common/exception.py | 10
-rw-r--r--  trove/common/extensions.py | 2
-rw-r--r--  trove/common/i18n.py | 10
-rw-r--r--  trove/common/profile.py | 19
-rw-r--r--  trove/common/rpc/service.py | 3
-rw-r--r--  trove/common/server_group.py | 3
-rw-r--r--  trove/common/strategies/cluster/experimental/cassandra/taskmanager.py | 17
-rw-r--r--  trove/common/strategies/cluster/experimental/galera_common/taskmanager.py | 18
-rw-r--r--  trove/common/strategies/cluster/experimental/mongodb/api.py | 29
-rw-r--r--  trove/common/strategies/cluster/experimental/mongodb/taskmanager.py | 19
-rw-r--r--  trove/common/strategies/cluster/experimental/redis/taskmanager.py | 8
-rw-r--r--  trove/common/strategies/cluster/experimental/vertica/taskmanager.py | 10
-rw-r--r--  trove/common/strategies/storage/swift.py | 32
-rw-r--r--  trove/common/utils.py | 25
-rw-r--r--  trove/common/wsgi.py | 4
-rw-r--r--  trove/conductor/manager.py | 21
-rw-r--r--  trove/configuration/service.py | 14
-rw-r--r--  trove/db/models.py | 15
-rw-r--r--  trove/db/sqlalchemy/session.py | 14
-rw-r--r--  trove/extensions/account/service.py | 11
-rw-r--r--  trove/extensions/common/service.py | 25
-rw-r--r--  trove/extensions/mgmt/clusters/service.py | 12
-rw-r--r--  trove/extensions/mgmt/configuration/service.py | 6
-rw-r--r--  trove/extensions/mgmt/datastores/service.py | 15
-rw-r--r--  trove/extensions/mgmt/host/instance/service.py | 8
-rw-r--r--  trove/extensions/mgmt/host/models.py | 6
-rw-r--r--  trove/extensions/mgmt/host/service.py | 11
-rw-r--r--  trove/extensions/mgmt/instances/models.py | 6
-rw-r--r--  trove/extensions/mgmt/instances/service.py | 54
-rw-r--r--  trove/extensions/mgmt/quota/service.py | 8
-rw-r--r--  trove/extensions/mgmt/upgrade/service.py | 5
-rw-r--r--  trove/extensions/mgmt/volume/service.py | 5
-rw-r--r--  trove/extensions/mysql/service.py | 58
-rw-r--r--  trove/extensions/redis/service.py | 40
-rw-r--r--  trove/extensions/security_group/models.py | 12
-rw-r--r--  trove/extensions/security_group/service.py | 9
-rw-r--r--  trove/guestagent/api.py | 9
-rw-r--r--  trove/guestagent/backup/backupagent.py | 6
-rw-r--r--  trove/guestagent/datastore/experimental/cassandra/manager.py | 7
-rw-r--r--  trove/guestagent/datastore/experimental/cassandra/service.py | 22
-rw-r--r--  trove/guestagent/datastore/experimental/couchbase/manager.py | 7
-rw-r--r--  trove/guestagent/datastore/experimental/couchbase/service.py | 28
-rw-r--r--  trove/guestagent/datastore/experimental/couchdb/manager.py | 7
-rw-r--r--  trove/guestagent/datastore/experimental/couchdb/service.py | 44
-rw-r--r--  trove/guestagent/datastore/experimental/db2/manager.py | 7
-rw-r--r--  trove/guestagent/datastore/experimental/db2/service.py | 42
-rw-r--r--  trove/guestagent/datastore/experimental/mariadb/service.py | 5
-rw-r--r--  trove/guestagent/datastore/experimental/mongodb/manager.py | 7
-rw-r--r--  trove/guestagent/datastore/experimental/mongodb/service.py | 40
-rw-r--r--  trove/guestagent/datastore/experimental/percona/service.py | 5
-rw-r--r--  trove/guestagent/datastore/experimental/postgresql/manager.py | 6
-rw-r--r--  trove/guestagent/datastore/experimental/postgresql/service.py | 50
-rw-r--r--  trove/guestagent/datastore/experimental/redis/manager.py | 14
-rw-r--r--  trove/guestagent/datastore/experimental/redis/service.py | 32
-rw-r--r--  trove/guestagent/datastore/experimental/vertica/manager.py | 8
-rw-r--r--  trove/guestagent/datastore/experimental/vertica/service.py | 89
-rw-r--r--  trove/guestagent/datastore/galera_common/manager.py | 3
-rw-r--r--  trove/guestagent/datastore/galera_common/service.py | 8
-rw-r--r--  trove/guestagent/datastore/manager.py | 89
-rw-r--r--  trove/guestagent/datastore/mysql/service.py | 7
-rw-r--r--  trove/guestagent/datastore/mysql_common/manager.py | 9
-rw-r--r--  trove/guestagent/datastore/mysql_common/service.py | 72
-rw-r--r--  trove/guestagent/datastore/service.py | 34
-rw-r--r--  trove/guestagent/dbaas.py | 2
-rw-r--r--  trove/guestagent/guest_log.py | 4
-rw-r--r--  trove/guestagent/models.py | 3
-rw-r--r--  trove/guestagent/module/driver_manager.py | 12
-rw-r--r--  trove/guestagent/module/drivers/module_driver.py | 9
-rw-r--r--  trove/guestagent/module/drivers/new_relic_license_driver.py | 17
-rw-r--r--  trove/guestagent/module/drivers/ping_driver.py | 7
-rw-r--r--  trove/guestagent/module/module_manager.py | 6
-rw-r--r--  trove/guestagent/pkg.py | 8
-rw-r--r--  trove/guestagent/strategies/backup/experimental/couchbase_impl.py | 9
-rw-r--r--  trove/guestagent/strategies/backup/experimental/db2_impl.py | 17
-rw-r--r--  trove/guestagent/strategies/backup/experimental/postgresql_impl.py | 8
-rw-r--r--  trove/guestagent/strategies/backup/mysql_impl.py | 6
-rw-r--r--  trove/guestagent/strategies/replication/experimental/postgresql_impl.py | 6
-rw-r--r--  trove/guestagent/strategies/replication/mysql_base.py | 5
-rw-r--r--  trove/guestagent/strategies/replication/mysql_binlog.py | 4
-rw-r--r--  trove/guestagent/strategies/replication/mysql_gtid.py | 4
-rw-r--r--  trove/guestagent/strategies/restore/experimental/couchbase_impl.py | 5
-rw-r--r--  trove/guestagent/strategies/restore/experimental/db2_impl.py | 5
-rw-r--r--  trove/guestagent/strategies/restore/experimental/postgresql_impl.py | 9
-rw-r--r--  trove/guestagent/strategies/restore/experimental/redis_impl.py | 3
-rw-r--r--  trove/guestagent/strategies/restore/mysql_impl.py | 19
-rw-r--r--  trove/guestagent/volume.py | 79
-rw-r--r--  trove/hacking/checks.py | 97
-rw-r--r--  trove/instance/models.py | 79
-rw-r--r--  trove/instance/service.py | 29
-rw-r--r--  trove/module/models.py | 2
-rw-r--r--  trove/module/service.py | 12
-rw-r--r--  trove/network/neutron.py | 15
-rw-r--r--  trove/network/nova.py | 11
-rw-r--r--  trove/quota/quota.py | 9
-rw-r--r--  trove/taskmanager/manager.py | 55
-rwxr-xr-x  trove/taskmanager/models.py | 296
-rw-r--r--  trove/tests/fakes/nova.py | 9
-rw-r--r--  trove/tests/unittests/hacking/test_check.py | 59
-rw-r--r--  trove/tests/util/utils.py | 6
108 files changed, 1099 insertions, 1120 deletions
diff --git a/HACKING.rst b/HACKING.rst
index 8aa52182..c5773879 100644
--- a/HACKING.rst
+++ b/HACKING.rst
@@ -1,8 +1,7 @@
Trove Library Specific Commandments
-------------------------------------
-- [T101] Validate that LOG messages, except debug ones, are translated
-- [T102] Validate that debug level logs are not translated
- [T103] Exception messages should be translated
- [T104] Python 3 is not support basestring,replace basestring with
six.string_types
+- [T105] Validate no LOG translations
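The actual T105 enforcement lives in trove/hacking/checks.py, which this patch also updates (see the diffstat above). Purely as an illustration of the general shape of a hacking/flake8 check, a minimal sketch that would flag translated log calls could look like the following; the function name and regex here are assumptions, not the real Trove implementation.

    import re

    # Hypothetical sketch of a T105-style check; the real rule is defined in
    # trove/hacking/checks.py and wired up through the project's flake8 config.
    _TRANSLATED_LOG = re.compile(
        r"LOG\.(audit|debug|info|warning|error|exception|critical)"
        r"\(\s*(_\(|_LE\(|_LI\(|_LW\(|_LC\()")


    def no_translate_logs(logical_line):
        # Hacking checks receive each logical line and yield (offset, message)
        # tuples for every violation found.
        if _TRANSLATED_LOG.search(logical_line):
            yield (0, "T105: Log messages should not be translated")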
diff --git a/trove/backup/models.py b/trove/backup/models.py
index a7aba0f1..42d49713 100644
--- a/trove/backup/models.py
+++ b/trove/backup/models.py
@@ -108,8 +108,8 @@ class Backup(object):
datastore_version_id=ds_version.id,
deleted=False)
except exception.InvalidModelError as ex:
- LOG.exception(_("Unable to create backup record for "
- "instance: %s"), instance_id)
+ LOG.exception("Unable to create backup record for "
+ "instance: %s", instance_id)
raise exception.BackupCreationError(str(ex))
backup_info = {'id': db_info.id,
@@ -268,7 +268,7 @@ class Backup(object):
try:
cls.delete(context, child.id)
except exception.NotFound:
- LOG.exception(_("Backup %s cannot be found."), backup_id)
+ LOG.exception("Backup %s cannot be found.", backup_id)
def _delete_resources():
backup = cls.get_by_id(context, backup_id)
diff --git a/trove/backup/service.py b/trove/backup/service.py
index f5ed6c77..ddb3e99e 100644
--- a/trove/backup/service.py
+++ b/trove/backup/service.py
@@ -18,7 +18,6 @@ from oslo_log import log as logging
from trove.backup.models import Backup
from trove.backup import views
from trove.common import apischema
-from trove.common.i18n import _
from trove.common import notification
from trove.common.notification import StartNotification
from trove.common import pagination
@@ -59,7 +58,7 @@ class BackupController(wsgi.Controller):
return wsgi.Result(views.BackupView(backup).data(), 200)
def create(self, req, body, tenant_id):
- LOG.info(_("Creating a backup for tenant %s"), tenant_id)
+ LOG.info("Creating a backup for tenant %s", tenant_id)
context = req.environ[wsgi.CONTEXT_KEY]
policy.authorize_on_tenant(context, 'backup:create')
data = body['backup']
@@ -77,8 +76,8 @@ class BackupController(wsgi.Controller):
return wsgi.Result(views.BackupView(backup).data(), 202)
def delete(self, req, tenant_id, id):
- LOG.info(_('Deleting backup for tenant %(tenant_id)s '
- 'ID: %(backup_id)s'),
+ LOG.info('Deleting backup for tenant %(tenant_id)s '
+ 'ID: %(backup_id)s',
{'tenant_id': tenant_id, 'backup_id': id})
context = req.environ[wsgi.CONTEXT_KEY]
backup = Backup.get_by_id(context, id)
diff --git a/trove/cluster/models.py b/trove/cluster/models.py
index 78cf6af5..9380c82c 100644
--- a/trove/cluster/models.py
+++ b/trove/cluster/models.py
@@ -147,11 +147,11 @@ class Cluster(object):
self.db_info.save()
def reset_task(self):
- LOG.info(_("Setting task to NONE on cluster %s"), self.id)
+ LOG.info("Setting task to NONE on cluster %s", self.id)
self.update_db(task_status=ClusterTasks.NONE)
def reset_status(self):
- LOG.info(_("Resetting status to NONE on cluster %s"), self.id)
+ LOG.info("Resetting status to NONE on cluster %s", self.id)
self.reset_task()
instances = inst_models.DBInstance.find_all(cluster_id=self.id,
deleted=False).all()
@@ -271,11 +271,13 @@ class Cluster(object):
def validate_cluster_available(self, valid_states=[ClusterTasks.NONE]):
if self.db_info.task_status not in valid_states:
- msg = (_("This action cannot be performed on the cluster while "
- "the current cluster task is '%s'.") %
- self.db_info.task_status.name)
- LOG.error(msg)
- raise exception.UnprocessableEntity(msg)
+ log_fmt = ("This action cannot be performed on the cluster while "
+ "the current cluster task is '%s'.")
+ exc_fmt = _("This action cannot be performed on the cluster while "
+ "the current cluster task is '%s'.")
+ LOG.error(log_fmt, self.db_info.task_status.name)
+ raise exception.UnprocessableEntity(
+ exc_fmt % self.db_info.task_status.name)
def delete(self):
diff --git a/trove/cmd/guest.py b/trove/cmd/guest.py
index 19692d14..687cebe2 100644
--- a/trove/cmd/guest.py
+++ b/trove/cmd/guest.py
@@ -24,7 +24,7 @@ from oslo_service import service as openstack_service
from trove.common import cfg
from trove.common import debug_utils
-from trove.common.i18n import _LE
+from trove.common.i18n import _
from trove.guestagent import api as guest_api
CONF = cfg.CONF
@@ -44,12 +44,12 @@ def main():
from trove.guestagent import dbaas
manager = dbaas.datastore_registry().get(CONF.datastore_manager)
if not manager:
- msg = (_LE("Manager class not registered for datastore manager %s") %
+ msg = (_("Manager class not registered for datastore manager %s") %
CONF.datastore_manager)
raise RuntimeError(msg)
if not CONF.guest_id:
- msg = (_LE("The guest_id parameter is not set. guest_info.conf "
+ msg = (_("The guest_id parameter is not set. guest_info.conf "
"was not injected into the guest or not read by guestagent"))
raise RuntimeError(msg)
diff --git a/trove/common/auth.py b/trove/common/auth.py
index 9db8a7ab..2d709f4a 100644
--- a/trove/common/auth.py
+++ b/trove/common/auth.py
@@ -63,17 +63,16 @@ class TenantBasedAuth(object):
if (match_for_tenant and
tenant_id == match_for_tenant.group('tenant_id')):
LOG.debug(strutils.mask_password(
- _("Authorized tenant '%(tenant_id)s' request: "
- "%(request)s") %
- {'tenant_id': tenant_id,
- 'request': req_to_text(request)}))
+ ("Authorized tenant '%(tenant_id)s' request: "
+ "%(request)s") %
+ {'tenant_id': tenant_id, 'request': req_to_text(request)}))
return True
- msg = _(
- "User with tenant id %s cannot access this resource.") % tenant_id
+ log_fmt = "User with tenant id %s cannot access this resource."
+ exc_fmt = _("User with tenant id %s cannot access this resource.")
- LOG.error(msg)
- raise webob.exc.HTTPForbidden(msg)
+ LOG.error(log_fmt, tenant_id)
+ raise webob.exc.HTTPForbidden(exc_fmt % tenant_id)
def admin_context(f):
diff --git a/trove/common/base_exception.py b/trove/common/base_exception.py
index 011131df..1a2a3cd5 100644
--- a/trove/common/base_exception.py
+++ b/trove/common/base_exception.py
@@ -19,8 +19,6 @@ Exceptions common to OpenStack projects
from oslo_log import log as logging
-from trove.common.i18n import _
-
_FATAL_EXCEPTION_FORMAT_ERRORS = False
LOG = logging.getLogger(__name__)
@@ -100,7 +98,7 @@ def wrap_exception(f):
return f(*args, **kw)
except Exception as e:
if not isinstance(e, Error):
- LOG.exception(_('Uncaught exception'))
+ LOG.exception('Uncaught exception')
raise Error(str(e))
raise
_wrap.func_name = f.func_name
diff --git a/trove/common/cfg.py b/trove/common/cfg.py
index 970088d6..a3384a95 100644
--- a/trove/common/cfg.py
+++ b/trove/common/cfg.py
@@ -1600,8 +1600,8 @@ def get_configuration_property(property_name):
datastore_manager = CONF.datastore_manager
if not datastore_manager:
datastore_manager = 'mysql'
- LOG.warning(_("Manager name ('datastore_manager') not defined, "
- "using '%s' options instead."), datastore_manager)
+ LOG.warning("Manager name ('datastore_manager') not defined, "
+ "using '%s' options instead.", datastore_manager)
try:
return CONF.get(datastore_manager).get(property_name)
diff --git a/trove/common/debug_utils.py b/trove/common/debug_utils.py
index a2d48137..d115f8f6 100644
--- a/trove/common/debug_utils.py
+++ b/trove/common/debug_utils.py
@@ -21,7 +21,6 @@ import sys
from oslo_config import cfg
from oslo_log import log as logging
-from trove.common.i18n import _
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
@@ -97,8 +96,8 @@ def enabled():
if __debug_state:
import threading
if threading.current_thread.__module__ == 'eventlet.green.threading':
- LOG.warning(_("Enabling debugging with eventlet monkey"
- " patched produce unexpected behavior."))
+ LOG.warning("Enabling debugging with eventlet monkey"
+ " patched produce unexpected behavior.")
return __debug_state
@@ -118,9 +117,9 @@ def __setup_remote_pydev_debug_safe(pydev_debug_host=None,
pydev_debug_port=pydev_debug_port,
pydev_path=pydev_path)
except Exception as e:
- LOG.warning(_("Can't connect to remote debug server."
- " Continuing to work in standard mode."
- " Error: %s."), e)
+ LOG.warning("Can't connect to remote debug server."
+ " Continuing to work in standard mode."
+ " Error: %s.", e)
return False
diff --git a/trove/common/exception.py b/trove/common/exception.py
index 1448f52f..9ba5a6a1 100644
--- a/trove/common/exception.py
+++ b/trove/common/exception.py
@@ -93,8 +93,9 @@ class DatabaseNotFound(NotFound):
class ComputeInstanceNotFound(NotFound):
- internal_message = _("Cannot find compute instance %(server_id)s for "
- "instance %(instance_id)s.")
+ # internal_message is used for log, stop translating.
+ internal_message = ("Cannot find compute instance %(server_id)s for "
+ "instance %(instance_id)s.")
message = _("Resource %(instance_id)s can not be retrieved.")
@@ -202,8 +203,9 @@ class NoUniqueMatch(TroveError):
class OverLimit(TroveError):
- internal_message = _("The server rejected the request due to its size or "
- "rate.")
+ # internal_message is used for log, stop translating.
+ internal_message = ("The server rejected the request due to its size or "
+ "rate.")
class QuotaLimitTooSmall(TroveError):
diff --git a/trove/common/extensions.py b/trove/common/extensions.py
index 1cd8c4f0..fe8b96c4 100644
--- a/trove/common/extensions.py
+++ b/trove/common/extensions.py
@@ -397,7 +397,7 @@ class ExtensionManager(object):
LOG.debug('Ext namespace: %s', extension.get_namespace())
LOG.debug('Ext updated: %s', extension.get_updated())
except AttributeError as ex:
- LOG.exception(_("Exception loading extension: %s"),
+ LOG.exception("Exception loading extension: %s",
encodeutils.exception_to_unicode(ex))
return False
return True
diff --git a/trove/common/i18n.py b/trove/common/i18n.py
index 7cc071e1..bc3ae6c3 100644
--- a/trove/common/i18n.py
+++ b/trove/common/i18n.py
@@ -29,13 +29,3 @@ _translators = oslo_i18n.TranslatorFactory(domain='trove')
# The primary translation function using the well-known name "_"
_ = _translators.primary
-
-# Translators for log levels.
-#
-# The abbreviated names are meant to reflect the usual use of a short
-# name like '_'. The "L" is for "log" and the other letter comes from
-# the level.
-_LI = _translators.log_info
-_LW = _translators.log_warning
-_LE = _translators.log_error
-_LC = _translators.log_critical
diff --git a/trove/common/profile.py b/trove/common/profile.py
index 1450995c..3eafe842 100644
--- a/trove/common/profile.py
+++ b/trove/common/profile.py
@@ -20,7 +20,6 @@ from osprofiler import notifier
from osprofiler import web
from trove.common import cfg
-from trove.common.i18n import _LW
from trove import rpc
@@ -35,14 +34,14 @@ def setup_profiler(binary, host):
rpc.TRANSPORT, "trove", binary, host)
notifier.set(_notifier)
web.enable(CONF.profiler.hmac_keys)
- LOG.warning(_LW("The OpenStack Profiler is enabled. Using one"
- " of the hmac_keys specified in the trove.conf file "
- "(typically in /etc/trove), a trace can be made of "
- "all requests. Only an admin user can retrieve "
- "the trace information, however.\n"
- "To disable the profiler, add the following to the "
- "configuration file:\n"
- "[profiler]\n"
- "enabled=false"))
+ LOG.warning("The OpenStack Profiler is enabled. Using one"
+ " of the hmac_keys specified in the trove.conf file "
+ "(typically in /etc/trove), a trace can be made of "
+ "all requests. Only an admin user can retrieve "
+ "the trace information, however.\n"
+ "To disable the profiler, add the following to the "
+ "configuration file:\n"
+ "[profiler]\n"
+ "enabled=false")
else:
web.disable()
diff --git a/trove/common/rpc/service.py b/trove/common/rpc/service.py
index ed3924c0..3bc11971 100644
--- a/trove/common/rpc/service.py
+++ b/trove/common/rpc/service.py
@@ -27,7 +27,6 @@ from oslo_utils import importutils
from osprofiler import profiler
from trove.common import cfg
-from trove.common.i18n import _
from trove.common import profile
from trove.common.rpc import secure_serializer as ssz
from trove import rpc
@@ -83,7 +82,7 @@ class RpcService(service.Service):
try:
self.rpcserver.stop()
except Exception:
- LOG.info(_("Failed to stop RPC server before shutdown. "))
+ LOG.info("Failed to stop RPC server before shutdown. ")
pass
super(RpcService, self).stop()
diff --git a/trove/common/server_group.py b/trove/common/server_group.py
index a51a6930..8f59b823 100644
--- a/trove/common/server_group.py
+++ b/trove/common/server_group.py
@@ -18,7 +18,6 @@ import six
from oslo_log import log as logging
-from trove.common.i18n import _
from trove.common.remote import create_nova_client
@@ -36,7 +35,7 @@ class ServerGroup(object):
if compute_id in sg.members:
server_group = sg
except Exception:
- LOG.exception(_("Could not load server group for compute %s"),
+ LOG.exception("Could not load server group for compute %s",
compute_id)
return server_group
diff --git a/trove/common/strategies/cluster/experimental/cassandra/taskmanager.py b/trove/common/strategies/cluster/experimental/cassandra/taskmanager.py
index 694989e0..29bb9e0f 100644
--- a/trove/common/strategies/cluster/experimental/cassandra/taskmanager.py
+++ b/trove/common/strategies/cluster/experimental/cassandra/taskmanager.py
@@ -17,7 +17,6 @@ from eventlet.timeout import Timeout
from oslo_log import log as logging
from trove.common import cfg
-from trove.common.i18n import _
from trove.common.strategies.cluster import base
from trove.common import utils
from trove.instance.models import DBInstance
@@ -98,7 +97,7 @@ class CassandraClusterTasks(task_models.ClusterTasks):
LOG.debug("Cluster configuration finished successfully.")
except Exception:
- LOG.exception(_("Error creating cluster."))
+ LOG.exception("Error creating cluster.")
self.update_statuses_on_failure(cluster_id)
timeout = Timeout(CONF.cluster_usage_timeout)
@@ -108,7 +107,7 @@ class CassandraClusterTasks(task_models.ClusterTasks):
except Timeout as t:
if t is not timeout:
raise # not my timeout
- LOG.exception(_("Timeout for building cluster."))
+ LOG.exception("Timeout for building cluster.")
self.update_statuses_on_failure(cluster_id)
finally:
timeout.cancel()
@@ -240,12 +239,12 @@ class CassandraClusterTasks(task_models.ClusterTasks):
LOG.debug("Waiting for node to finish its "
"cleanup: %s", nid)
if not self._all_instances_running([nid], cluster_id):
- LOG.warning(_("Node did not complete cleanup "
- "successfully: %s"), nid)
+ LOG.warning("Node did not complete cleanup "
+ "successfully: %s", nid)
LOG.debug("Cluster configuration finished successfully.")
except Exception:
- LOG.exception(_("Error growing cluster."))
+ LOG.exception("Error growing cluster.")
self.update_statuses_on_failure(
cluster_id, status=inst_tasks.InstanceTasks.GROWING_ERROR)
@@ -256,7 +255,7 @@ class CassandraClusterTasks(task_models.ClusterTasks):
except Timeout as t:
if t is not timeout:
raise # not my timeout
- LOG.exception(_("Timeout for growing cluster."))
+ LOG.exception("Timeout for growing cluster.")
self.update_statuses_on_failure(
cluster_id, status=inst_tasks.InstanceTasks.GROWING_ERROR)
finally:
@@ -327,7 +326,7 @@ class CassandraClusterTasks(task_models.ClusterTasks):
LOG.debug("Cluster configuration finished successfully.")
except Exception:
- LOG.exception(_("Error shrinking cluster."))
+ LOG.exception("Error shrinking cluster.")
self.update_statuses_on_failure(
cluster_id,
status=inst_tasks.InstanceTasks.SHRINKING_ERROR)
@@ -339,7 +338,7 @@ class CassandraClusterTasks(task_models.ClusterTasks):
except Timeout as t:
if t is not timeout:
raise # not my timeout
- LOG.exception(_("Timeout for shrinking cluster."))
+ LOG.exception("Timeout for shrinking cluster.")
self.update_statuses_on_failure(
cluster_id, status=inst_tasks.InstanceTasks.SHRINKING_ERROR)
finally:
diff --git a/trove/common/strategies/cluster/experimental/galera_common/taskmanager.py b/trove/common/strategies/cluster/experimental/galera_common/taskmanager.py
index 47972ea9..a6ad1a45 100644
--- a/trove/common/strategies/cluster/experimental/galera_common/taskmanager.py
+++ b/trove/common/strategies/cluster/experimental/galera_common/taskmanager.py
@@ -133,7 +133,7 @@ class GaleraCommonClusterTasks(task_models.ClusterTasks):
for guest in instance_guests:
guest.cluster_complete()
except Exception:
- LOG.exception(_("Error creating cluster."))
+ LOG.exception("Error creating cluster.")
self.update_statuses_on_failure(cluster_id)
timeout = Timeout(CONF.cluster_usage_timeout)
@@ -143,10 +143,10 @@ class GaleraCommonClusterTasks(task_models.ClusterTasks):
except Timeout as t:
if t is not timeout:
raise # not my timeout
- LOG.exception(_("Timeout for building cluster."))
+ LOG.exception("Timeout for building cluster.")
self.update_statuses_on_failure(cluster_id)
except TroveError:
- LOG.exception(_("Error creating cluster %s."), cluster_id)
+ LOG.exception("Error creating cluster %s.", cluster_id)
self.update_statuses_on_failure(cluster_id)
finally:
timeout.cancel()
@@ -245,11 +245,11 @@ class GaleraCommonClusterTasks(task_models.ClusterTasks):
except Timeout as t:
if t is not timeout:
raise # not my timeout
- LOG.exception(_("Timeout for growing cluster."))
+ LOG.exception("Timeout for growing cluster.")
self.update_statuses_on_failure(
cluster_id, status=inst_tasks.InstanceTasks.GROWING_ERROR)
except Exception:
- LOG.exception(_("Error growing cluster %s."), cluster_id)
+ LOG.exception("Error growing cluster %s.", cluster_id)
self.update_statuses_on_failure(
cluster_id, status=inst_tasks.InstanceTasks.GROWING_ERROR)
finally:
@@ -277,12 +277,12 @@ class GaleraCommonClusterTasks(task_models.ClusterTasks):
set(non_deleted_ids))
)
try:
- LOG.info(_("Deleting instances (%s)"), removal_instance_ids)
+ LOG.info("Deleting instances (%s)", removal_instance_ids)
utils.poll_until(all_instances_marked_deleted,
sleep_time=2,
time_out=CONF.cluster_delete_time_out)
except PollTimeOut:
- LOG.error(_("timeout for instances to be marked as deleted."))
+ LOG.error("timeout for instances to be marked as deleted.")
return
db_instances = DBInstance.find_all(
@@ -317,11 +317,11 @@ class GaleraCommonClusterTasks(task_models.ClusterTasks):
except Timeout as t:
if t is not timeout:
raise # not my timeout
- LOG.exception(_("Timeout for shrinking cluster."))
+ LOG.exception("Timeout for shrinking cluster.")
self.update_statuses_on_failure(
cluster_id, status=inst_tasks.InstanceTasks.SHRINKING_ERROR)
except Exception:
- LOG.exception(_("Error shrinking cluster %s."), cluster_id)
+ LOG.exception("Error shrinking cluster %s.", cluster_id)
self.update_statuses_on_failure(
cluster_id, status=inst_tasks.InstanceTasks.SHRINKING_ERROR)
finally:
diff --git a/trove/common/strategies/cluster/experimental/mongodb/api.py b/trove/common/strategies/cluster/experimental/mongodb/api.py
index ed177846..5de3c261 100644
--- a/trove/common/strategies/cluster/experimental/mongodb/api.py
+++ b/trove/common/strategies/cluster/experimental/mongodb/api.py
@@ -250,10 +250,12 @@ class MongoDbCluster(models.Cluster):
if self.db_info.task_status != ClusterTasks.NONE:
current_task = self.db_info.task_status.name
- msg = _("This action cannot be performed on the cluster while "
- "the current cluster task is '%s'.") % current_task
- LOG.error(msg)
- raise exception.UnprocessableEntity(msg)
+ log_fmt = ("This action cannot be performed on the cluster while "
+ "the current cluster task is '%s'.")
+ exc_fmt = _("This action cannot be performed on the cluster while "
+ "the current cluster task is '%s'.")
+ LOG.error(log_fmt, current_task)
+ raise exception.UnprocessableEntity(exc_fmt % current_task)
db_insts = inst_models.DBInstance.find_all(cluster_id=self.id,
deleted=False,
@@ -261,10 +263,11 @@ class MongoDbCluster(models.Cluster):
num_unique_shards = len(set([db_inst.shard_id for db_inst
in db_insts]))
if num_unique_shards == 0:
- msg = _("This action cannot be performed on the cluster as no "
- "reference shard exists.")
- LOG.error(msg)
- raise exception.UnprocessableEntity(msg)
+ LOG.error("This action cannot be performed on the cluster as no "
+ "reference shard exists.")
+ raise exception.UnprocessableEntity(
+ _("This action cannot be performed on the cluster as no "
+ "reference shard exists."))
arbitrary_shard_id = db_insts[0].shard_id
members_in_shard = [db_inst for db_inst in db_insts
@@ -461,10 +464,12 @@ class MongoDbCluster(models.Cluster):
"""Get information about the cluster's current state."""
if self.db_info.task_status != ClusterTasks.NONE:
current_task = self.db_info.task_status.name
- msg = _("This action cannot be performed on the cluster while "
- "the current cluster task is '%s'.") % current_task
- LOG.error(msg)
- raise exception.UnprocessableEntity(msg)
+ log_fmt = ("This action cannot be performed on the cluster while "
+ "the current cluster task is '%s'.")
+ exc_fmt = _("This action cannot be performed on the cluster while "
+ "the current cluster task is '%s'.")
+ LOG.error(log_fmt, current_task)
+ raise exception.UnprocessableEntity(exc_fmt % current_task)
def _instances_of_type(instance_type):
return [db_inst for db_inst in self.db_instances
diff --git a/trove/common/strategies/cluster/experimental/mongodb/taskmanager.py b/trove/common/strategies/cluster/experimental/mongodb/taskmanager.py
index 659a5408..7e680d9d 100644
--- a/trove/common/strategies/cluster/experimental/mongodb/taskmanager.py
+++ b/trove/common/strategies/cluster/experimental/mongodb/taskmanager.py
@@ -18,7 +18,6 @@ from oslo_log import log as logging
from trove.common import cfg
from trove.common.exception import PollTimeOut
-from trove.common.i18n import _
from trove.common.instance import ServiceStatuses
from trove.common.strategies.cluster import base
from trove.common import utils
@@ -121,7 +120,7 @@ class MongoDbClusterTasks(task_models.ClusterTasks):
except Timeout as t:
if t is not timeout:
raise # not my timeout
- LOG.exception(_("timeout for building cluster."))
+ LOG.exception("timeout for building cluster.")
self.update_statuses_on_failure(cluster_id)
finally:
timeout.cancel()
@@ -170,7 +169,7 @@ class MongoDbClusterTasks(task_models.ClusterTasks):
except Timeout as t:
if t is not timeout:
raise # not my timeout
- LOG.exception(_("timeout for building shard."))
+ LOG.exception("timeout for building shard.")
self.update_statuses_on_failure(cluster_id, shard_id)
finally:
timeout.cancel()
@@ -250,7 +249,7 @@ class MongoDbClusterTasks(task_models.ClusterTasks):
except Timeout as t:
if t is not timeout:
raise # not my timeout
- LOG.exception(_("timeout for growing cluster."))
+ LOG.exception("timeout for growing cluster.")
self.update_statuses_on_failure(
cluster_id, status=inst_tasks.InstanceTasks.GROWING_ERROR)
finally:
@@ -275,7 +274,7 @@ class MongoDbClusterTasks(task_models.ClusterTasks):
sleep_time=2,
time_out=CONF.cluster_delete_time_out)
except PollTimeOut:
- LOG.error(_("timeout for instances to be marked as deleted."))
+ LOG.error("timeout for instances to be marked as deleted.")
return
cluster_usage_timeout = CONF.cluster_usage_timeout
@@ -286,7 +285,7 @@ class MongoDbClusterTasks(task_models.ClusterTasks):
except Timeout as t:
if t is not timeout:
raise # not my timeout
- LOG.exception(_("timeout for shrinking cluster."))
+ LOG.exception("timeout for shrinking cluster.")
self.update_statuses_on_failure(
cluster_id, status=inst_tasks.InstanceTasks.SHRINKING_ERROR)
finally:
@@ -314,7 +313,7 @@ class MongoDbClusterTasks(task_models.ClusterTasks):
self.get_guest(primary_member).prep_primary()
self.get_guest(primary_member).add_members(other_members_ips)
except Exception:
- LOG.exception(_("error initializing replica set"))
+ LOG.exception("error initializing replica set")
self.update_statuses_on_failure(self.id,
shard_id=primary_member.shard_id)
return False
@@ -337,7 +336,7 @@ class MongoDbClusterTasks(task_models.ClusterTasks):
self.get_guest(query_router).add_shard(
replica_set, self.get_ip(primary_member))
except Exception:
- LOG.exception(_("error adding shard"))
+ LOG.exception("error adding shard")
self.update_statuses_on_failure(self.id,
shard_id=primary_member.shard_id)
return False
@@ -351,7 +350,7 @@ class MongoDbClusterTasks(task_models.ClusterTasks):
instance_id=instance_id).get_status()
if status == ServiceStatuses.RUNNING:
return instance_id
- LOG.exception(_("no query routers ready to accept requests"))
+ LOG.exception("no query routers ready to accept requests")
self.update_statuses_on_failure(self.id)
return False
@@ -378,7 +377,7 @@ class MongoDbClusterTasks(task_models.ClusterTasks):
else:
guest.store_admin_password(admin_password)
except Exception:
- LOG.exception(_("error adding config servers"))
+ LOG.exception("error adding config servers")
self.update_statuses_on_failure(self.id)
return False
return True
diff --git a/trove/common/strategies/cluster/experimental/redis/taskmanager.py b/trove/common/strategies/cluster/experimental/redis/taskmanager.py
index 1ec2dcd4..a82d65d8 100644
--- a/trove/common/strategies/cluster/experimental/redis/taskmanager.py
+++ b/trove/common/strategies/cluster/experimental/redis/taskmanager.py
@@ -85,7 +85,7 @@ class RedisClusterTasks(task_models.ClusterTasks):
for guest in guests:
guest.cluster_complete()
except Exception:
- LOG.exception(_("Error creating cluster."))
+ LOG.exception("Error creating cluster.")
self.update_statuses_on_failure(cluster_id)
timeout = Timeout(CONF.cluster_usage_timeout)
@@ -95,7 +95,7 @@ class RedisClusterTasks(task_models.ClusterTasks):
except Timeout as t:
if t is not timeout:
raise # not my timeout
- LOG.exception(_("Timeout for building cluster."))
+ LOG.exception("Timeout for building cluster.")
self.update_statuses_on_failure(cluster_id)
finally:
timeout.cancel()
@@ -142,11 +142,11 @@ class RedisClusterTasks(task_models.ClusterTasks):
except Timeout as t:
if t is not timeout:
raise # not my timeout
- LOG.exception(_("Timeout for growing cluster."))
+ LOG.exception("Timeout for growing cluster.")
self.update_statuses_on_failure(
cluster_id, status=inst_tasks.InstanceTasks.GROWING_ERROR)
except Exception:
- LOG.exception(_("Error growing cluster %s."), cluster_id)
+ LOG.exception("Error growing cluster %s.", cluster_id)
self.update_statuses_on_failure(
cluster_id, status=inst_tasks.InstanceTasks.GROWING_ERROR)
finally:
diff --git a/trove/common/strategies/cluster/experimental/vertica/taskmanager.py b/trove/common/strategies/cluster/experimental/vertica/taskmanager.py
index 96fabfcd..1094dbbd 100644
--- a/trove/common/strategies/cluster/experimental/vertica/taskmanager.py
+++ b/trove/common/strategies/cluster/experimental/vertica/taskmanager.py
@@ -92,7 +92,7 @@ class VerticaClusterTasks(task_models.ClusterTasks):
for guest in guests:
guest.cluster_complete()
except Exception:
- LOG.exception(_("Error creating cluster."))
+ LOG.exception("Error creating cluster.")
self.update_statuses_on_failure(cluster_id)
timeout = Timeout(CONF.cluster_usage_timeout)
@@ -102,7 +102,7 @@ class VerticaClusterTasks(task_models.ClusterTasks):
except Timeout as t:
if t is not timeout:
raise # not my timeout
- LOG.exception(_("Timeout for building cluster."))
+ LOG.exception("Timeout for building cluster.")
self.update_statuses_on_failure(cluster_id)
finally:
timeout.cancel()
@@ -162,11 +162,11 @@ class VerticaClusterTasks(task_models.ClusterTasks):
except Timeout as t:
if t is not timeout:
raise # not my timeout
- LOG.exception(_("Timeout for growing cluster."))
+ LOG.exception("Timeout for growing cluster.")
self.update_statuses_on_failure(
cluster_id, status=inst_tasks.InstanceTasks.GROWING_ERROR)
except Exception:
- LOG.exception(_("Error growing cluster %s."), cluster_id)
+ LOG.exception("Error growing cluster %s.", cluster_id)
self.update_statuses_on_failure(
cluster_id, status=inst_tasks.InstanceTasks.GROWING_ERROR)
finally:
@@ -214,7 +214,7 @@ class VerticaClusterTasks(task_models.ClusterTasks):
except Timeout as t:
if t is not timeout:
raise
- LOG.exception(_("Timeout for shrinking cluster."))
+ LOG.exception("Timeout for shrinking cluster.")
self.update_statuses_on_failure(
cluster_id, status=inst_tasks.InstanceTasks.SHRINKING_ERROR)
finally:
diff --git a/trove/common/strategies/storage/swift.py b/trove/common/strategies/storage/swift.py
index d9d289a7..dc2c57a2 100644
--- a/trove/common/strategies/storage/swift.py
+++ b/trove/common/strategies/storage/swift.py
@@ -112,7 +112,7 @@ class SwiftStorage(base.Storage):
which is typically in the format '<backup_id>.<ext>.gz'
"""
- LOG.info(_('Saving %(filename)s to %(container)s in swift.'),
+ LOG.info('Saving %(filename)s to %(container)s in swift.',
{'filename': filename, 'container': BACKUP_CONTAINER})
# Create the container if it doesn't already exist
@@ -146,8 +146,8 @@ class SwiftStorage(base.Storage):
# Check each segment MD5 hash against swift etag
# Raise an error and mark backup as failed
if etag != segment_checksum:
- LOG.error(_("Error saving data segment to swift. "
- "ETAG: %(tag)s Segment MD5: %(checksum)s."),
+ LOG.error("Error saving data segment to swift. "
+ "ETAG: %(tag)s Segment MD5: %(checksum)s.",
{'tag': etag, 'checksum': segment_checksum})
return False, "Error saving data to Swift!", None, location
@@ -180,7 +180,7 @@ class SwiftStorage(base.Storage):
LOG.debug('Metadata headers: %s', str(headers))
if large_object:
- LOG.info(_('Creating the manifest file.'))
+ LOG.info('Creating the manifest file.')
manifest_data = json.dumps(segment_results)
LOG.debug('Manifest contents: %s', manifest_data)
# The etag returned from the manifest PUT is the checksum of the
@@ -194,8 +194,8 @@ class SwiftStorage(base.Storage):
# Validation checksum is the Swift Checksum
final_swift_checksum = swift_checksum.hexdigest()
else:
- LOG.info(_('Backup fits in a single segment. Moving segment '
- '%(segment)s to %(filename)s.'),
+ LOG.info('Backup fits in a single segment. Moving segment '
+ '%(segment)s to %(filename)s.',
{'segment': stream_reader.first_segment,
'filename': filename})
segment_result = segment_results[0]
@@ -221,8 +221,8 @@ class SwiftStorage(base.Storage):
# Raise an error and mark backup as failed
if etag != final_swift_checksum:
LOG.error(
- _("Error saving data to swift. Manifest "
- "ETAG: %(tag)s Swift MD5: %(checksum)s"),
+ ("Error saving data to swift. Manifest "
+ "ETAG: %(tag)s Swift MD5: %(checksum)s"),
{'tag': etag, 'checksum': final_swift_checksum})
return False, "Error saving data to Swift!", None, location
@@ -238,11 +238,15 @@ class SwiftStorage(base.Storage):
def _verify_checksum(self, etag, checksum):
etag_checksum = etag.strip('"')
if etag_checksum != checksum:
- msg = (_("Original checksum: %(original)s does not match"
- " the current checksum: %(current)s") %
- {'original': etag_checksum, 'current': checksum})
- LOG.error(msg)
- raise SwiftDownloadIntegrityError(msg)
+ log_fmt = ("Original checksum: %(original)s does not match"
+ " the current checksum: %(current)s")
+ exc_fmt = _("Original checksum: %(original)s does not match"
+ " the current checksum: %(current)s")
+ msg_content = {
+ 'original': etag_checksum,
+ 'current': checksum}
+ LOG.error(log_fmt, msg_content)
+ raise SwiftDownloadIntegrityError(exc_fmt % msg_content)
return True
def load(self, location, backup_checksum):
@@ -294,5 +298,5 @@ class SwiftStorage(base.Storage):
for key, value in metadata.items():
headers[self._set_attr(key)] = value
- LOG.info(_("Writing metadata: %s"), str(headers))
+ LOG.info("Writing metadata: %s", str(headers))
self.connection.post_object(container, filename, headers=headers)
diff --git a/trove/common/utils.py b/trove/common/utils.py
index fa5b1fc2..029bb580 100644
--- a/trove/common/utils.py
+++ b/trove/common/utils.py
@@ -238,24 +238,29 @@ def execute_with_timeout(*args, **kwargs):
except exception.ProcessExecutionError as e:
if log_output_on_error:
LOG.error(
- _("Command '%(cmd)s' failed. %(description)s "
- "Exit code: %(exit_code)s\nstderr: %(stderr)s\n"
- "stdout: %(stdout)s"),
+ ("Command '%(cmd)s' failed. %(description)s "
+ "Exit code: %(exit_code)s\nstderr: %(stderr)s\n"
+ "stdout: %(stdout)s"),
{'cmd': e.cmd, 'description': e.description or '',
'exit_code': e.exit_code, 'stderr': e.stderr,
'stdout': e.stdout})
raise
except Timeout as t:
if t is not timeout:
- LOG.error(_("Got a timeout but not the one expected."))
+ LOG.error("Got a timeout but not the one expected.")
raise
else:
- msg = (_("Time out after waiting "
- "%(time)s seconds when running proc: %(args)s"
- " %(kwargs)s.") % {'time': time, 'args': args,
- 'kwargs': kwargs})
- LOG.error(msg)
- raise exception.ProcessExecutionError(msg)
+ log_fmt = ("Time out after waiting "
+ "%(time)s seconds when running proc: %(args)s"
+ " %(kwargs)s.")
+ exc_fmt = _("Time out after waiting "
+ "%(time)s seconds when running proc: %(args)s"
+ " %(kwargs)s.")
+ msg_content = {
+ 'time': time, 'args': args,
+ 'kwargs': kwargs}
+ LOG.error(log_fmt, msg_content)
+ raise exception.ProcessExecutionError(exc_fmt % msg_content)
finally:
timeout.cancel()
diff --git a/trove/common/wsgi.py b/trove/common/wsgi.py
index a3bf5994..cd624d96 100644
--- a/trove/common/wsgi.py
+++ b/trove/common/wsgi.py
@@ -301,7 +301,7 @@ class Resource(base_wsgi.Resource):
# If action_result is not a Fault then there really was a
# serialization error which we log. Otherwise return the Fault.
if not isinstance(action_result, Fault):
- LOG.exception(_("Unserializable result detected."))
+ LOG.exception("Unserializable result detected.")
raise
return action_result
@@ -578,7 +578,7 @@ class FaultWrapper(base_wsgi.Middleware):
return resp
return resp
except Exception as ex:
- LOG.exception(_("Caught error: %s."),
+ LOG.exception("Caught error: %s.",
encodeutils.exception_to_unicode(ex))
exc = webob.exc.HTTPInternalServerError()
return Fault(exc)
diff --git a/trove/conductor/manager.py b/trove/conductor/manager.py
index 1f0f5756..3674a0ad 100644
--- a/trove/conductor/manager.py
+++ b/trove/conductor/manager.py
@@ -19,7 +19,6 @@ from oslo_service import periodic_task
from trove.backup import models as bkup_models
from trove.common import cfg
from trove.common import exception as trove_exception
-from trove.common.i18n import _
from trove.common.instance import ServiceStatus
from trove.common.rpc import version as rpc_version
from trove.common.serializable_notification import SerializableNotification
@@ -46,8 +45,8 @@ class Manager(periodic_task.PeriodicTasks):
}
if sent is None:
- LOG.error(_("[Instance %s] sent field not present. Cannot "
- "compare."), instance_id)
+ LOG.error("[Instance %s] sent field not present. Cannot "
+ "compare.", instance_id)
return False
LOG.debug("Instance %(instance)s sent %(method)s at %(sent)s ", fields)
@@ -77,8 +76,8 @@ class Manager(periodic_task.PeriodicTasks):
seen.save()
return False
- LOG.info(_("[Instance %s] Rec'd message is older than last seen. "
- "Discarding."), instance_id)
+ LOG.info("[Instance %s] Rec'd message is older than last seen. "
+ "Discarding.", instance_id)
return True
def heartbeat(self, context, instance_id, payload, sent=None):
@@ -112,8 +111,8 @@ class Manager(periodic_task.PeriodicTasks):
'found': backup.id,
'instance': str(instance_id),
}
- LOG.error(_("[Instance: %(instance)s] Backup IDs mismatch! "
- "Expected %(expected)s, found %(found)s"), fields)
+ LOG.error("[Instance: %(instance)s] Backup IDs mismatch! "
+ "Expected %(expected)s, found %(found)s", fields)
return
if instance_id != backup.instance_id:
fields = {
@@ -121,9 +120,9 @@ class Manager(periodic_task.PeriodicTasks):
'found': backup.instance_id,
'instance': str(instance_id),
}
- LOG.error(_("[Instance: %(instance)s] Backup instance IDs "
- "mismatch! Expected %(expected)s, found "
- "%(found)s"), fields)
+ LOG.error("[Instance: %(instance)s] Backup instance IDs "
+ "mismatch! Expected %(expected)s, found "
+ "%(found)s", fields)
return
for k, v in backup_fields.items():
@@ -148,6 +147,6 @@ class Manager(periodic_task.PeriodicTasks):
message, exception):
notification = SerializableNotification.deserialize(
context, serialized_notification)
- LOG.error(_("Guest exception on request %(req)s:\n%(exc)s"),
+ LOG.error("Guest exception on request %(req)s:\n%(exc)s",
{'req': notification.request_id, 'exc': exception})
notification.notify_exc_info(message, exception)
diff --git a/trove/configuration/service.py b/trove/configuration/service.py
index 463cae93..43acd52a 100644
--- a/trove/configuration/service.py
+++ b/trove/configuration/service.py
@@ -106,8 +106,8 @@ class ConfigurationsController(wsgi.Controller):
description = body['configuration'].get('description')
values = body['configuration']['values']
- msg = _("Creating configuration group on tenant "
- "%(tenant_id)s with name: %(cfg_name)s")
+ msg = ("Creating configuration group on tenant "
+ "%(tenant_id)s with name: %(cfg_name)s")
LOG.info(msg, {"tenant_id": tenant_id, "cfg_name": name})
datastore_args = body['configuration'].get('datastore', {})
@@ -143,8 +143,8 @@ class ConfigurationsController(wsgi.Controller):
return wsgi.Result(view_data.data(), 200)
def delete(self, req, tenant_id, id):
- msg = _("Deleting configuration group %(cfg_id)s on tenant: "
- "%(tenant_id)s")
+ msg = ("Deleting configuration group %(cfg_id)s on tenant: "
+ "%(tenant_id)s")
LOG.info(msg, {"tenant_id": tenant_id, "cfg_id": id})
context = req.environ[wsgi.CONTEXT_KEY]
@@ -163,8 +163,8 @@ class ConfigurationsController(wsgi.Controller):
return wsgi.Result(None, 202)
def update(self, req, body, tenant_id, id):
- msg = _("Updating configuration group %(cfg_id)s for tenant "
- "id %(tenant_id)s")
+ msg = ("Updating configuration group %(cfg_id)s for tenant "
+ "id %(tenant_id)s")
LOG.info(msg, {"tenant_id": tenant_id, "cfg_id": id})
context = req.environ[wsgi.CONTEXT_KEY]
@@ -269,7 +269,7 @@ class ConfigurationsController(wsgi.Controller):
@staticmethod
def _validate_configuration(values, datastore_version, config_rules):
- LOG.info(_("Validating configuration values"))
+ LOG.info("Validating configuration values")
# create rules dictionary based on parameter name
rules_lookup = {}
diff --git a/trove/db/models.py b/trove/db/models.py
index 8d8c7ad5..50815039 100644
--- a/trove/db/models.py
+++ b/trove/db/models.py
@@ -105,13 +105,16 @@ class DatabaseModelBase(models.ModelBase):
if ((context and not context.is_admin and hasattr(model, 'tenant_id')
and model.tenant_id != context.tenant)):
- msg = _("Tenant %(s_tenant)s tried to access "
- "%(s_name)s, owned by %(s_owner)s.") % {
- "s_tenant": context.tenant, "s_name": cls.__name__,
+ log_fmt = ("Tenant %(s_tenant)s tried to access "
+ "%(s_name)s, owned by %(s_owner)s.")
+ exc_fmt = _("Tenant %(s_tenant)s tried to access "
+ "%(s_name)s, owned by %(s_owner)s.")
+ msg_content = {
+ "s_tenant": context.tenant,
+ "s_name": cls.__name__,
"s_owner": model.tenant_id}
-
- LOG.error(msg)
- raise exception.ModelNotFoundError(msg)
+ LOG.error(log_fmt, msg_content)
+ raise exception.ModelNotFoundError(exc_fmt % msg_content)
return model
diff --git a/trove/db/sqlalchemy/session.py b/trove/db/sqlalchemy/session.py
index 188b7d51..2253f8a5 100644
--- a/trove/db/sqlalchemy/session.py
+++ b/trove/db/sqlalchemy/session.py
@@ -85,10 +85,10 @@ def _create_facade(options):
if conf.query_log:
if conf.connection_debug < 50:
conf['connection_debug'] = 50
- LOG.warning(_('Configuration option "query_log" has been '
- 'depracated. Use "connection_debug" '
- 'instead. Setting connection_debug = '
- '%(debug_level)s instead.'),
+ LOG.warning(('Configuration option "query_log" has been '
+ 'depracated. Use "connection_debug" '
+ 'instead. Setting connection_debug = '
+ '%(debug_level)s instead.'),
conf.get('connection_debug'))
# TODO(mvandijk): once query_log is removed,
# use enginefacade.from_config() instead
@@ -103,9 +103,9 @@ def _create_facade(options):
def _check_facade():
if _FACADE is None:
- msg = _("***The Database has not been setup!!!***")
- LOG.exception(msg)
- raise RuntimeError(msg)
+ LOG.exception("***The Database has not been setup!!!***")
+ raise RuntimeError(
+ _("***The Database has not been setup!!!***"))
def get_facade():
diff --git a/trove/extensions/account/service.py b/trove/extensions/account/service.py
index 025c53cc..525b7d99 100644
--- a/trove/extensions/account/service.py
+++ b/trove/extensions/account/service.py
@@ -17,7 +17,6 @@ from oslo_log import log as logging
import trove.common.apischema as apischema
from trove.common.auth import admin_context
-from trove.common.i18n import _
from trove.common import wsgi
from trove.extensions.account import models
from trove.extensions.account import views
@@ -32,9 +31,9 @@ class AccountController(wsgi.Controller):
@admin_context
def show(self, req, tenant_id, id):
"""Return a account and instances associated with a single account."""
- LOG.info(_("req : '%s'\n\n"), req)
- LOG.info(_("Showing account information for '%(account)s' "
- "to '%(tenant)s'"), {'account': id, 'tenant': tenant_id})
+ LOG.info("req : '%s'\n\n", req)
+ LOG.info("Showing account information for '%(account)s' "
+ "to '%(tenant)s'", {'account': id, 'tenant': tenant_id})
context = req.environ[wsgi.CONTEXT_KEY]
account = models.Account.load(context, id)
@@ -43,7 +42,7 @@ class AccountController(wsgi.Controller):
@admin_context
def index(self, req, tenant_id):
"""Return a list of all accounts with non-deleted instances."""
- LOG.info(_("req : '%s'\n\n"), req)
- LOG.info(_("Showing all accounts with instances for '%s'"), tenant_id)
+ LOG.info("req : '%s'\n\n", req)
+ LOG.info("Showing all accounts with instances for '%s'", tenant_id)
accounts_summary = models.AccountsSummary.load()
return wsgi.Result(views.AccountsView(accounts_summary).data(), 200)
diff --git a/trove/extensions/common/service.py b/trove/extensions/common/service.py
index 7023f1f6..4d4629d5 100644
--- a/trove/extensions/common/service.py
+++ b/trove/extensions/common/service.py
@@ -26,7 +26,6 @@ from trove.cluster.models import DBCluster
from trove.common import cfg
from trove.common import exception
from trove.common.i18n import _
-from trove.common.i18n import _LI
from trove.common import policy
from trove.common import wsgi
from trove.datastore import models as datastore_models
@@ -93,8 +92,8 @@ class DefaultRootController(BaseDatastoreRootController):
if is_cluster:
raise exception.ClusterOperationNotSupported(
operation='show_root')
- LOG.info(_LI("Getting root enabled for instance '%s'."), instance_id)
- LOG.info(_LI("req : '%s'\n\n"), req)
+ LOG.info("Getting root enabled for instance '%s'.", instance_id)
+ LOG.info("req : '%s'\n\n", req)
context = req.environ[wsgi.CONTEXT_KEY]
is_root_enabled = models.Root.load(context, instance_id)
return wsgi.Result(views.RootEnabledView(is_root_enabled).data(), 200)
@@ -103,8 +102,8 @@ class DefaultRootController(BaseDatastoreRootController):
if is_cluster:
raise exception.ClusterOperationNotSupported(
operation='enable_root')
- LOG.info(_LI("Enabling root for instance '%s'."), instance_id)
- LOG.info(_LI("req : '%s'\n\n"), req)
+ LOG.info("Enabling root for instance '%s'.", instance_id)
+ LOG.info("req : '%s'\n\n", req)
context = req.environ[wsgi.CONTEXT_KEY]
user_name = context.user
password = DefaultRootController._get_password_from_body(body)
@@ -116,8 +115,8 @@ class DefaultRootController(BaseDatastoreRootController):
if is_cluster:
raise exception.ClusterOperationNotSupported(
operation='disable_root')
- LOG.info(_LI("Disabling root for instance '%s'."), instance_id)
- LOG.info(_LI("req : '%s'\n\n"), req)
+ LOG.info("Disabling root for instance '%s'.", instance_id)
+ LOG.info("req : '%s'\n\n", req)
context = req.environ[wsgi.CONTEXT_KEY]
try:
found_user = self._find_root_user(context, instance_id)
@@ -139,8 +138,8 @@ class ClusterRootController(DefaultRootController):
return self.instance_root_index(req, tenant_id, instance_id)
def instance_root_index(self, req, tenant_id, instance_id):
- LOG.info(_LI("Getting root enabled for instance '%s'."), instance_id)
- LOG.info(_LI("req : '%s'\n\n"), req)
+ LOG.info("Getting root enabled for instance '%s'.", instance_id)
+ LOG.info("req : '%s'\n\n", req)
context = req.environ[wsgi.CONTEXT_KEY]
try:
is_root_enabled = models.ClusterRoot.load(context, instance_id)
@@ -150,7 +149,7 @@ class ClusterRootController(DefaultRootController):
return wsgi.Result(views.RootEnabledView(is_root_enabled).data(), 200)
def cluster_root_index(self, req, tenant_id, cluster_id):
- LOG.info(_LI("Getting root enabled for cluster '%s'."), cluster_id)
+ LOG.info("Getting root enabled for cluster '%s'.", cluster_id)
single_instance_id, cluster_instances = self._get_cluster_instance_id(
tenant_id, cluster_id)
return self.instance_root_index(req, tenant_id, single_instance_id)
@@ -173,8 +172,8 @@ class ClusterRootController(DefaultRootController):
def instance_root_create(self, req, body, instance_id,
cluster_instances=None):
- LOG.info(_LI("Enabling root for instance '%s'."), instance_id)
- LOG.info(_LI("req : '%s'\n\n"), req)
+ LOG.info("Enabling root for instance '%s'.", instance_id)
+ LOG.info("req : '%s'\n\n", req)
context = req.environ[wsgi.CONTEXT_KEY]
user_name = context.user
password = ClusterRootController._get_password_from_body(body)
@@ -183,7 +182,7 @@ class ClusterRootController(DefaultRootController):
return wsgi.Result(views.RootCreatedView(root).data(), 200)
def cluster_root_create(self, req, body, tenant_id, cluster_id):
- LOG.info(_LI("Enabling root for cluster '%s'."), cluster_id)
+ LOG.info("Enabling root for cluster '%s'.", cluster_id)
single_instance_id, cluster_instances = self._get_cluster_instance_id(
tenant_id, cluster_id)
return self.instance_root_create(req, body, single_instance_id,
diff --git a/trove/extensions/mgmt/clusters/service.py b/trove/extensions/mgmt/clusters/service.py
index a955eeef..35b2214f 100644
--- a/trove/extensions/mgmt/clusters/service.py
+++ b/trove/extensions/mgmt/clusters/service.py
@@ -41,7 +41,7 @@ class MgmtClusterController(ClusterController):
def index(self, req, tenant_id):
"""Return a list of clusters."""
LOG.debug("Showing a list of clusters for tenant '%s'.", tenant_id)
- LOG.info(_("req : '%s'\n\n"), req)
+ LOG.info("req : '%s'\n\n", req)
context = req.environ[wsgi.CONTEXT_KEY]
deleted = None
deleted_q = req.GET.get('deleted', '').lower()
@@ -56,10 +56,10 @@ class MgmtClusterController(ClusterController):
@admin_context
def show(self, req, tenant_id, id):
"""Return a single cluster."""
- LOG.info(_("Showing cluster for tenant '%(tenant_id)s'.\n"
- "req : '%(req)s'\n"
- "id : '%(id)s'"), {
- "tenant_id": tenant_id, "req": req, "id": id})
+ LOG.info("Showing cluster for tenant '%(tenant_id)s'.\n"
+ "req : '%(req)s'\n"
+ "id : '%(id)s'", {
+ "tenant_id": tenant_id, "req": req, "id": id})
context = req.environ[wsgi.CONTEXT_KEY]
cluster = models.MgmtCluster.load(context, id)
@@ -72,7 +72,7 @@ class MgmtClusterController(ClusterController):
LOG.debug("Committing an action against cluster %(cluster)s for "
"tenant '%(tenant)s'.", {'cluster': id,
'tenant': tenant_id})
- LOG.info(_("req : '%s'\n\n"), req)
+ LOG.info("req : '%s'\n\n", req)
if not body:
raise exception.BadRequest(_("Invalid request body."))
context = req.environ[wsgi.CONTEXT_KEY]
diff --git a/trove/extensions/mgmt/configuration/service.py b/trove/extensions/mgmt/configuration/service.py
index c4d84052..85de2c8b 100644
--- a/trove/extensions/mgmt/configuration/service.py
+++ b/trove/extensions/mgmt/configuration/service.py
@@ -74,7 +74,7 @@ class ConfigurationsParameterController(wsgi.Controller):
@admin_context
def create(self, req, body, tenant_id, version_id):
"""Create configuration parameter for datastore version."""
- LOG.info(_("Creating configuration parameter for datastore"))
+ LOG.info("Creating configuration parameter for datastore")
LOG.debug("req : '%s'\n\n", req)
LOG.debug("body : '%s'\n\n", body)
if not body:
@@ -101,7 +101,7 @@ class ConfigurationsParameterController(wsgi.Controller):
@admin_context
def update(self, req, body, tenant_id, version_id, id):
"""Updating configuration parameter for datastore version."""
- LOG.info(_("Updating configuration parameter for datastore"))
+ LOG.info("Updating configuration parameter for datastore")
LOG.debug("req : '%s'\n\n", req)
LOG.debug("body : '%s'\n\n", body)
if not body:
@@ -126,7 +126,7 @@ class ConfigurationsParameterController(wsgi.Controller):
@admin_context
def delete(self, req, tenant_id, version_id, id):
"""Delete configuration parameter for datastore version."""
- LOG.info(_("Deleting configuration parameter for datastore"))
+ LOG.info("Deleting configuration parameter for datastore")
LOG.debug("req : '%s'\n\n", req)
ds_config_params = config_models.DatastoreConfigurationParameters
try:
diff --git a/trove/extensions/mgmt/datastores/service.py b/trove/extensions/mgmt/datastores/service.py
index 23643ae0..0f09dfcd 100644
--- a/trove/extensions/mgmt/datastores/service.py
+++ b/trove/extensions/mgmt/datastores/service.py
@@ -20,7 +20,6 @@ from trove.common import apischema
from trove.common.auth import admin_context
from trove.common import exception
from trove.common import glance_remote
-from trove.common.i18n import _
from trove.common import utils
from trove.common import wsgi
from trove.datastore import models
@@ -48,8 +47,8 @@ class DatastoreVersionController(wsgi.Controller):
active = body['version']['active']
default = body['version']['default']
- LOG.info(_("Tenant: '%(tenant)s' is adding the datastore "
- "version: '%(version)s' to datastore: '%(datastore)s'"),
+ LOG.info("Tenant: '%(tenant)s' is adding the datastore "
+ "version: '%(version)s' to datastore: '%(datastore)s'",
{'tenant': tenant_id, 'version': version_name,
'datastore': datastore_name})
@@ -63,7 +62,7 @@ class DatastoreVersionController(wsgi.Controller):
datastore = models.Datastore.load(datastore_name)
except exception.DatastoreNotFound:
# Create the datastore if datastore_name does not exists.
- LOG.info(_("Creating datastore %s"), datastore_name)
+ LOG.info("Creating datastore %s", datastore_name)
datastore = models.DBDatastore()
datastore.id = utils.generate_uuid()
datastore.name = datastore_name
@@ -106,8 +105,8 @@ class DatastoreVersionController(wsgi.Controller):
context = req.environ[wsgi.CONTEXT_KEY]
datastore_version = models.DatastoreVersion.load_by_uuid(id)
- LOG.info(_("Tenant: '%(tenant)s' is updating the datastore "
- "version: '%(version)s' for datastore: '%(datastore)s'"),
+ LOG.info("Tenant: '%(tenant)s' is updating the datastore "
+ "version: '%(version)s' for datastore: '%(datastore)s'",
{'tenant': tenant_id, 'version': datastore_version.name,
'datastore': datastore_version.datastore_name})
@@ -144,8 +143,8 @@ class DatastoreVersionController(wsgi.Controller):
datastore_version = models.DatastoreVersion.load_by_uuid(id)
datastore = models.Datastore.load(datastore_version.datastore_id)
- LOG.info(_("Tenant: '%(tenant)s' is removing the datastore "
- "version: '%(version)s' for datastore: '%(datastore)s'"),
+ LOG.info("Tenant: '%(tenant)s' is removing the datastore "
+ "version: '%(version)s' for datastore: '%(datastore)s'",
{'tenant': tenant_id, 'version': datastore_version.name,
'datastore': datastore.name})
diff --git a/trove/extensions/mgmt/host/instance/service.py b/trove/extensions/mgmt/host/instance/service.py
index b923dc0b..ff29fffb 100644
--- a/trove/extensions/mgmt/host/instance/service.py
+++ b/trove/extensions/mgmt/host/instance/service.py
@@ -28,10 +28,10 @@ class HostInstanceController(wsgi.Controller):
"""Controller for all instances on specific hosts."""
def action(self, req, body, tenant_id, host_id):
- LOG.info(_("Committing an ACTION against host %(host_id)s for "
- "tenant '%(tenant_id)s'\n"
- "req : '%(req)s'\n\n"), {"req": req, "host_id": host_id,
- "tenant_id": tenant_id})
+ LOG.info("Committing an ACTION against host %(host_id)s for "
+ "tenant '%(tenant_id)s'\n"
+ "req : '%(req)s'\n\n", {"req": req, "host_id": host_id,
+ "tenant_id": tenant_id})
if not body:
raise exception.BadRequest(_("Invalid request body."))
diff --git a/trove/extensions/mgmt/host/models.py b/trove/extensions/mgmt/host/models.py
index 8778c5ac..e26ac4fe 100644
--- a/trove/extensions/mgmt/host/models.py
+++ b/trove/extensions/mgmt/host/models.py
@@ -72,8 +72,8 @@ class DetailedHost(object):
instance['status'] = instance_info.status
except exception.TroveError as re:
LOG.error(re)
- LOG.error(_("Compute Instance ID found with no associated RD "
- "instance: %s."), instance['server_id'])
+ LOG.error("Compute Instance ID found with no associated RD "
+ "instance: %s.", instance['server_id'])
instance['id'] = None
def update_all(self, context):
@@ -87,7 +87,7 @@ class DetailedHost(object):
client.update_guest()
except exception.TroveError as re:
LOG.error(re)
- LOG.error(_("Unable to update instance: %s."), instance['id'])
+ LOG.error("Unable to update instance: %s.", instance['id'])
failed_instances.append(instance['id'])
if len(failed_instances) > 0:
msg = _("Failed to update instances: %s.") % failed_instances
diff --git a/trove/extensions/mgmt/host/service.py b/trove/extensions/mgmt/host/service.py
index 15b21278..9e0d0bf2 100644
--- a/trove/extensions/mgmt/host/service.py
+++ b/trove/extensions/mgmt/host/service.py
@@ -16,7 +16,6 @@
from oslo_log import log as logging
from trove.common.auth import admin_context
-from trove.common.i18n import _
from trove.common import wsgi
from trove.extensions.mgmt.host import models
from trove.extensions.mgmt.host import views
@@ -31,8 +30,8 @@ class HostController(InstanceController):
@admin_context
def index(self, req, tenant_id, detailed=False):
"""Return all hosts."""
- LOG.info(_("req : '%s'\n\n"), req)
- LOG.info(_("Indexing a host for tenant '%s'"), tenant_id)
+ LOG.info("req : '%s'\n\n", req)
+ LOG.info("Indexing a host for tenant '%s'", tenant_id)
context = req.environ[wsgi.CONTEXT_KEY]
hosts = models.SimpleHost.load_all(context)
return wsgi.Result(views.HostsView(hosts).data(), 200)
@@ -40,9 +39,9 @@ class HostController(InstanceController):
@admin_context
def show(self, req, tenant_id, id):
"""Return a single host."""
- LOG.info(_("req : '%s'\n\n"), req)
- LOG.info(_("Showing a host for tenant '%s'"), tenant_id)
- LOG.info(_("id : '%s'\n\n"), id)
+ LOG.info("req : '%s'\n\n", req)
+ LOG.info("Showing a host for tenant '%s'", tenant_id)
+ LOG.info("id : '%s'\n\n", id)
context = req.environ[wsgi.CONTEXT_KEY]
host = models.DetailedHost.load(context, id)
return wsgi.Result(views.HostDetailedView(host).data(), 200)
diff --git a/trove/extensions/mgmt/instances/models.py b/trove/extensions/mgmt/instances/models.py
index 64da1a1e..605704b5 100644
--- a/trove/extensions/mgmt/instances/models.py
+++ b/trove/extensions/mgmt/instances/models.py
@@ -36,7 +36,7 @@ def load_mgmt_instances(context, deleted=None, client=None,
mgmt_servers = client.rdservers.list()
except AttributeError:
mgmt_servers = client.servers.list(search_opts={'all_tenants': 1})
- LOG.info(_("Found %d servers in Nova"),
+ LOG.info("Found %d servers in Nova",
len(mgmt_servers if mgmt_servers else []))
args = {}
if deleted is not None:
@@ -198,7 +198,7 @@ class NotificationTransformer(object):
datastore_manager_id = id_map[datastore_manager]
else:
datastore_manager_id = cfg.UNKNOWN_SERVICE_ID
- LOG.error(_("Datastore ID for Manager (%s) is not configured"),
+ LOG.error("Datastore ID for Manager (%s) is not configured",
datastore_manager)
return datastore_manager_id
@@ -257,7 +257,7 @@ class NovaNotificationTransformer(NotificationTransformer):
LOG.debug("Flavor cache hit for %s", flavor_id)
return self._flavor_cache[flavor_id]
# fetch flavor resource from nova
- LOG.info(_("Flavor cache miss for %s"), flavor_id)
+ LOG.info("Flavor cache miss for %s", flavor_id)
flavor = self.nova_client.flavors.get(flavor_id)
self._flavor_cache[flavor_id] = flavor.name if flavor else 'unknown'
return self._flavor_cache[flavor_id]
diff --git a/trove/extensions/mgmt/instances/service.py b/trove/extensions/mgmt/instances/service.py
index 9e3fae6e..914dc9a6 100644
--- a/trove/extensions/mgmt/instances/service.py
+++ b/trove/extensions/mgmt/instances/service.py
@@ -49,9 +49,9 @@ class MgmtInstanceController(InstanceController):
@admin_context
def index(self, req, tenant_id, detailed=False):
"""Return all instances."""
- LOG.info(_("Indexing a database instance for tenant '%(tenant_id)s'\n"
- "req : '%(req)s'\n\n"), {
- "tenant_id": tenant_id, "req": req})
+ LOG.info("Indexing a database instance for tenant '%(tenant_id)s'\n"
+ "req : '%(req)s'\n\n", {
+ "tenant_id": tenant_id, "req": req})
context = req.environ[wsgi.CONTEXT_KEY]
deleted = None
deleted_q = req.GET.get('deleted', '').lower()
@@ -74,10 +74,10 @@ class MgmtInstanceController(InstanceController):
@admin_context
def show(self, req, tenant_id, id):
"""Return a single instance."""
- LOG.info(_("Showing a database instance %(id)s for tenant "
- "'%(tenant_id)s'\n"
- "req : '%(req)s'\n\n"), {
- "tenant_id": tenant_id, "req": req, "id": id})
+ LOG.info("Showing a database instance %(id)s for tenant "
+ "'%(tenant_id)s'\n"
+ "req : '%(req)s'\n\n", {
+ "tenant_id": tenant_id, "req": req, "id": id})
context = req.environ[wsgi.CONTEXT_KEY]
deleted_q = req.GET.get('deleted', '').lower()
include_deleted = deleted_q == 'true'
@@ -94,10 +94,10 @@ class MgmtInstanceController(InstanceController):
@admin_context
def action(self, req, body, tenant_id, id):
- LOG.info(_("Committing an ACTION against a database "
- "instance %(id)s for tenant '%(tenant_id)s'\n"
- "req : '%(req)s'\n\n"), {
- "tenant_id": tenant_id, "req": req, "id": id})
+ LOG.info("Committing an ACTION against a database "
+ "instance %(id)s for tenant '%(tenant_id)s'\n"
+ "req : '%(req)s'\n\n", {
+ "tenant_id": tenant_id, "req": req, "id": id})
if not body:
raise exception.BadRequest(_("Invalid request body."))
context = req.environ[wsgi.CONTEXT_KEY]
@@ -159,10 +159,10 @@ class MgmtInstanceController(InstanceController):
"""Return the date and time root was enabled on an instance,
if ever.
"""
- LOG.info(_("Showing root history for a database "
- "instance %(id)s for tenant '%(tenant_id)s'\n"
- "req : '%(req)s'\n\n"), {
- "tenant_id": tenant_id, "req": req, "id": id})
+ LOG.info("Showing root history for a database "
+ "instance %(id)s for tenant '%(tenant_id)s'\n"
+ "req : '%(req)s'\n\n", {
+ "tenant_id": tenant_id, "req": req, "id": id})
context = req.environ[wsgi.CONTEXT_KEY]
try:
@@ -180,10 +180,10 @@ class MgmtInstanceController(InstanceController):
@admin_context
def hwinfo(self, req, tenant_id, id):
"""Return a single instance hardware info."""
- LOG.info(_("Showing hardware info for a database "
- "instance %(id)s for tenant '%(tenant_id)s'\n"
- "req : '%(req)s'\n\n"), {
- "tenant_id": tenant_id, "req": req, "id": id})
+ LOG.info("Showing hardware info for a database "
+ "instance %(id)s for tenant '%(tenant_id)s'\n"
+ "req : '%(req)s'\n\n", {
+ "tenant_id": tenant_id, "req": req, "id": id})
context = req.environ[wsgi.CONTEXT_KEY]
instance = models.MgmtInstance.load(context=context, id=id)
@@ -194,10 +194,10 @@ class MgmtInstanceController(InstanceController):
@admin_context
def diagnostics(self, req, tenant_id, id):
"""Return instance diagnostics for a single instance."""
- LOG.info(_("Showing diagnostic info for a database "
- "instance %(id)s for tenant '%(tenant_id)s'\n"
- "req : '%(req)s'\n\n"), {
- "tenant_id": tenant_id, "req": req, "id": id})
+ LOG.info("Showing diagnostic info for a database "
+ "instance %(id)s for tenant '%(tenant_id)s'\n"
+ "req : '%(req)s'\n\n", {
+ "tenant_id": tenant_id, "req": req, "id": id})
context = req.environ[wsgi.CONTEXT_KEY]
instance = models.MgmtInstance.load(context=context, id=id)
@@ -208,10 +208,10 @@ class MgmtInstanceController(InstanceController):
@admin_context
def rpc_ping(self, req, tenant_id, id):
"""Checks if instance is reachable via rpc."""
- LOG.info(_("Sending RPC PING for a database "
- "instance %(id)s for tenant '%(tenant_id)s'\n"
- "req : '%(req)s'\n\n"), {
- "tenant_id": tenant_id, "req": req, "id": id})
+ LOG.info("Sending RPC PING for a database "
+ "instance %(id)s for tenant '%(tenant_id)s'\n"
+ "req : '%(req)s'\n\n", {
+ "tenant_id": tenant_id, "req": req, "id": id})
context = req.environ[wsgi.CONTEXT_KEY]
instance = models.MgmtInstance.load(context=context, id=id)
diff --git a/trove/extensions/mgmt/quota/service.py b/trove/extensions/mgmt/quota/service.py
index b61f4fb2..1879f165 100644
--- a/trove/extensions/mgmt/quota/service.py
+++ b/trove/extensions/mgmt/quota/service.py
@@ -32,8 +32,8 @@ class QuotaController(wsgi.Controller):
@admin_context
def show(self, req, tenant_id, id):
"""Return all quotas for this tenant."""
- LOG.info(_("Indexing quota info for tenant '%(id)s'\n"
- "req : '%(req)s'\n\n"), {"id": id, "req": req})
+ LOG.info("Indexing quota info for tenant '%(id)s'\n"
+ "req : '%(req)s'\n\n", {"id": id, "req": req})
usages = quota_engine.get_all_quota_usages_by_tenant(id)
limits = quota_engine.get_all_quotas_by_tenant(id)
@@ -43,8 +43,8 @@ class QuotaController(wsgi.Controller):
@admin_context
def update(self, req, body, tenant_id, id):
- LOG.info(_("Updating quota limits for tenant '%(id)s'\n"
- "req : '%(req)s'\n\n"), {"id": id, "req": req})
+ LOG.info("Updating quota limits for tenant '%(id)s'\n"
+ "req : '%(req)s'\n\n", {"id": id, "req": req})
if not body:
raise exception.BadRequest(_("Invalid request body."))
diff --git a/trove/extensions/mgmt/upgrade/service.py b/trove/extensions/mgmt/upgrade/service.py
index d4eba194..036d9d3c 100644
--- a/trove/extensions/mgmt/upgrade/service.py
+++ b/trove/extensions/mgmt/upgrade/service.py
@@ -17,7 +17,6 @@ from oslo_log import log as logging
import trove.common.apischema as apischema
from trove.common.auth import admin_context
-from trove.common.i18n import _
from trove.common import wsgi
from trove.extensions.mgmt.upgrade.models import UpgradeMessageSender
@@ -33,8 +32,8 @@ class UpgradeController(wsgi.Controller):
@admin_context
def create(self, req, body, tenant_id, instance_id):
- LOG.info(_("Sending upgrade notifications\nreq : '%(req)s'\n"
- "Admin tenant_id: %(tenant_id)s"),
+ LOG.info("Sending upgrade notifications\nreq : '%(req)s'\n"
+ "Admin tenant_id: %(tenant_id)s",
{"tenant_id": tenant_id, "req": req})
context = req.environ.get(wsgi.CONTEXT_KEY)
diff --git a/trove/extensions/mgmt/volume/service.py b/trove/extensions/mgmt/volume/service.py
index 2761e8e2..a756258b 100644
--- a/trove/extensions/mgmt/volume/service.py
+++ b/trove/extensions/mgmt/volume/service.py
@@ -18,7 +18,6 @@ from oslo_log import log as logging
from trove.common.auth import admin_context
from trove.common import cfg
-from trove.common.i18n import _
from trove.common import wsgi
from trove.extensions.mgmt.volume import models
from trove.extensions.mgmt.volume import views
@@ -33,8 +32,8 @@ class StorageController(wsgi.Controller):
@admin_context
def index(self, req, tenant_id):
"""Return all storage devices."""
- LOG.info(_("req : '%s'\n\n"), req)
- LOG.info(_("Indexing storage info for tenant '%s'"), tenant_id)
+ LOG.info("req : '%s'\n\n", req)
+ LOG.info("Indexing storage info for tenant '%s'", tenant_id)
context = req.environ[wsgi.CONTEXT_KEY]
storages = models.StorageDevices.load(context, CONF.os_region_name)
return wsgi.Result(views.StoragesView(storages).data(), 200)
diff --git a/trove/extensions/mysql/service.py b/trove/extensions/mysql/service.py
index b5784a02..306ebd6f 100644
--- a/trove/extensions/mysql/service.py
+++ b/trove/extensions/mysql/service.py
@@ -57,8 +57,8 @@ class UserController(ExtensionController):
def index(self, req, tenant_id, instance_id):
"""Return all users."""
- LOG.info(_("Listing users for instance '%(id)s'\n"
- "req : '%(req)s'\n\n"),
+ LOG.info("Listing users for instance '%(id)s'\n"
+ "req : '%(req)s'\n\n",
{"id": instance_id, "req": req})
context = req.environ[wsgi.CONTEXT_KEY]
self.authorize_target_action(context, 'user:index', instance_id)
@@ -70,9 +70,9 @@ class UserController(ExtensionController):
def create(self, req, body, tenant_id, instance_id):
"""Creates a set of users."""
- LOG.info(_("Creating users for instance '%(id)s'\n"
- "req : '%(req)s'\n\n"
- "body: '%(body)s'\n'n"),
+ LOG.info("Creating users for instance '%(id)s'\n"
+ "req : '%(req)s'\n\n"
+                  "body: '%(body)s'\n\n",
{"id": instance_id,
"req": strutils.mask_password(req),
"body": strutils.mask_password(body)})
@@ -93,8 +93,8 @@ class UserController(ExtensionController):
return wsgi.Result(None, 202)
def delete(self, req, tenant_id, instance_id, id):
- LOG.info(_("Delete instance '%(id)s'\n"
- "req : '%(req)s'\n\n"),
+ LOG.info("Delete instance '%(id)s'\n"
+ "req : '%(req)s'\n\n",
{"id": instance_id, "req": req})
context = req.environ[wsgi.CONTEXT_KEY]
self.authorize_target_action(context, 'user:delete', instance_id)
@@ -122,8 +122,8 @@ class UserController(ExtensionController):
def show(self, req, tenant_id, instance_id, id):
"""Return a single user."""
- LOG.info(_("Showing a user for instance '%(id)s'\n"
- "req : '%(req)s'\n\n"),
+ LOG.info("Showing a user for instance '%(id)s'\n"
+ "req : '%(req)s'\n\n",
{"id": instance_id, "req": req})
context = req.environ[wsgi.CONTEXT_KEY]
self.authorize_target_action(context, 'user:show', instance_id)
@@ -142,8 +142,8 @@ class UserController(ExtensionController):
def update(self, req, body, tenant_id, instance_id, id):
"""Change attributes for one user."""
- LOG.info(_("Updating user attributes for instance '%(id)s'\n"
- "req : '%(req)s'\n\n"),
+ LOG.info("Updating user attributes for instance '%(id)s'\n"
+ "req : '%(req)s'\n\n",
{"id": instance_id, "req": strutils.mask_password(req)})
context = req.environ[wsgi.CONTEXT_KEY]
self.authorize_target_action(context, 'user:update', instance_id)
@@ -173,8 +173,8 @@ class UserController(ExtensionController):
def update_all(self, req, body, tenant_id, instance_id):
"""Change the password of one or more users."""
- LOG.info(_("Updating user password for instance '%(id)s'\n"
- "req : '%(req)s'\n\n"),
+ LOG.info("Updating user password for instance '%(id)s'\n"
+ "req : '%(req)s'\n\n",
{"id": instance_id, "req": strutils.mask_password(req)})
context = req.environ[wsgi.CONTEXT_KEY]
self.authorize_target_action(context, 'user:update_all', instance_id)
@@ -234,8 +234,8 @@ class UserAccessController(ExtensionController):
def index(self, req, tenant_id, instance_id, user_id):
"""Show permissions for the given user."""
- LOG.info(_("Showing user access for instance '%(id)s'\n"
- "req : '%(req)s'\n\n"),
+ LOG.info("Showing user access for instance '%(id)s'\n"
+ "req : '%(req)s'\n\n",
{"id": instance_id, "req": req})
context = req.environ[wsgi.CONTEXT_KEY]
@@ -245,7 +245,7 @@ class UserAccessController(ExtensionController):
user_id = correct_id_with_req(user_id, req)
user = self._get_user(context, instance_id, user_id)
if not user:
- LOG.error(_("No such user: %(user)s "), {'user': user})
+ LOG.error("No such user: %(user)s ", {'user': user})
raise exception.UserNotFound(uuid=user)
username, hostname = unquote_user_host(user_id)
access = models.User.access(context, instance_id, username, hostname)
@@ -254,8 +254,8 @@ class UserAccessController(ExtensionController):
def update(self, req, body, tenant_id, instance_id, user_id):
"""Grant access for a user to one or more databases."""
- LOG.info(_("Granting user access for instance '%(id)s'\n"
- "req : '%(req)s'\n\n"),
+ LOG.info("Granting user access for instance '%(id)s'\n"
+ "req : '%(req)s'\n\n",
{"id": instance_id, "req": req})
context = req.environ[wsgi.CONTEXT_KEY]
self.authorize_target_action(
@@ -265,7 +265,7 @@ class UserAccessController(ExtensionController):
user_id = correct_id_with_req(user_id, req)
user = self._get_user(context, instance_id, user_id)
if not user:
- LOG.error(_("No such user: %(user)s "), {'user': user})
+ LOG.error("No such user: %(user)s ", {'user': user})
raise exception.UserNotFound(uuid=user)
username, hostname = unquote_user_host(user_id)
databases = [db['name'] for db in body['databases']]
@@ -277,8 +277,8 @@ class UserAccessController(ExtensionController):
def delete(self, req, tenant_id, instance_id, user_id, id):
"""Revoke access for a user."""
- LOG.info(_("Revoking user access for instance '%(id)s'\n"
- "req : '%(req)s'\n\n"),
+ LOG.info("Revoking user access for instance '%(id)s'\n"
+ "req : '%(req)s'\n\n",
{"id": instance_id, "req": req})
context = req.environ[wsgi.CONTEXT_KEY]
self.authorize_target_action(
@@ -288,7 +288,7 @@ class UserAccessController(ExtensionController):
user_id = correct_id_with_req(user_id, req)
user = self._get_user(context, instance_id, user_id)
if not user:
- LOG.error(_("No such user: %(user)s "), {'user': user})
+ LOG.error("No such user: %(user)s ", {'user': user})
raise exception.UserNotFound(uuid=user)
username, hostname = unquote_user_host(user_id)
access = models.User.access(context, instance_id, username, hostname)
@@ -307,8 +307,8 @@ class SchemaController(ExtensionController):
def index(self, req, tenant_id, instance_id):
"""Return all schemas."""
- LOG.info(_("Listing schemas for instance '%(id)s'\n"
- "req : '%(req)s'\n\n"),
+ LOG.info("Listing schemas for instance '%(id)s'\n"
+ "req : '%(req)s'\n\n",
{"id": instance_id, "req": req})
context = req.environ[wsgi.CONTEXT_KEY]
@@ -322,9 +322,9 @@ class SchemaController(ExtensionController):
def create(self, req, body, tenant_id, instance_id):
"""Creates a set of schemas."""
- LOG.info(_("Creating schema for instance '%(id)s'\n"
- "req : '%(req)s'\n\n"
- "body: '%(body)s'\n'n"),
+ LOG.info("Creating schema for instance '%(id)s'\n"
+ "req : '%(req)s'\n\n"
+                  "body: '%(body)s'\n\n",
{"id": instance_id,
"req": req,
"body": body})
@@ -347,8 +347,8 @@ class SchemaController(ExtensionController):
return wsgi.Result(None, 202)
def delete(self, req, tenant_id, instance_id, id):
- LOG.info(_("Deleting schema for instance '%(id)s'\n"
- "req : '%(req)s'\n\n"),
+ LOG.info("Deleting schema for instance '%(id)s'\n"
+ "req : '%(req)s'\n\n",
{"id": instance_id, "req": req})
context = req.environ[wsgi.CONTEXT_KEY]
self.authorize_target_action(
diff --git a/trove/extensions/redis/service.py b/trove/extensions/redis/service.py
index 6788a3de..8bee2d4d 100644
--- a/trove/extensions/redis/service.py
+++ b/trove/extensions/redis/service.py
@@ -18,8 +18,6 @@ from oslo_log import log as logging
from trove.common import cfg
from trove.common import exception
from trove.common.i18n import _
-from trove.common.i18n import _LE
-from trove.common.i18n import _LI
from trove.common import wsgi
from trove.extensions.common.service import DefaultRootController
from trove.extensions.redis.models import RedisRoot
@@ -52,9 +50,9 @@ class RedisRootController(DefaultRootController):
def _instance_root_create(self, req, instance_id, password,
slave_instances=None):
- LOG.info(_LI("Enabling authentication for instance '%s'."),
+ LOG.info("Enabling authentication for instance '%s'.",
instance_id)
- LOG.info(_LI("req : '%s'\n\n"), req)
+ LOG.info("req : '%s'\n\n", req)
context = req.environ[wsgi.CONTEXT_KEY]
user_name = context.user
@@ -70,15 +68,15 @@ class RedisRootController(DefaultRootController):
except exception.TroveError:
self._rollback_once(req, instance_id, original_auth_password)
raise exception.TroveError(
- _LE("Failed to do root-enable for instance "
- "'%(instance_id)s'.") % {'instance_id': instance_id}
+ _("Failed to do root-enable for instance "
+ "'%(instance_id)s'.") % {'instance_id': instance_id}
)
failed_slaves = []
for slave_id in slave_instances:
try:
- LOG.info(_LI("Enabling authentication for slave instance "
- "'%s'."), slave_id)
+ LOG.info("Enabling authentication for slave instance "
+ "'%s'.", slave_id)
RedisRoot.create(context, slave_id, user_name, password)
except exception.TroveError:
failed_slaves.append(slave_id)
@@ -87,9 +85,9 @@ class RedisRootController(DefaultRootController):
RedisRootCreatedView(root, failed_slaves).data(), 200)
def _instance_root_delete(self, req, instance_id, slave_instances=None):
- LOG.info(_LI("Disabling authentication for instance '%s'."),
+ LOG.info("Disabling authentication for instance '%s'.",
instance_id)
- LOG.info(_LI("req : '%s'\n\n"), req)
+ LOG.info("req : '%s'\n\n", req)
context = req.environ[wsgi.CONTEXT_KEY]
original_auth_password = self._get_original_auth_password(
@@ -101,15 +99,15 @@ class RedisRootController(DefaultRootController):
except exception.TroveError:
self._rollback_once(req, instance_id, original_auth_password)
raise exception.TroveError(
- _LE("Failed to do root-disable for instance "
- "'%(instance_id)s'.") % {'instance_id': instance_id}
+ _("Failed to do root-disable for instance "
+ "'%(instance_id)s'.") % {'instance_id': instance_id}
)
failed_slaves = []
for slave_id in slave_instances:
try:
- LOG.info(_LI("Disabling authentication for slave instance "
- "'%s'."), slave_id)
+ LOG.info("Disabling authentication for slave instance "
+ "'%s'.", slave_id)
RedisRoot.delete(context, slave_id)
except exception.TroveError:
failed_slaves.append(slave_id)
@@ -124,8 +122,8 @@ class RedisRootController(DefaultRootController):
@staticmethod
def _rollback_once(req, instance_id, original_auth_password):
- LOG.info(_LI("Rolling back enable/disable authentication "
- "for instance '%s'."), instance_id)
+ LOG.info("Rolling back enable/disable authentication "
+ "for instance '%s'.", instance_id)
context = req.environ[wsgi.CONTEXT_KEY]
user_name = context.user
try:
@@ -138,7 +136,7 @@ class RedisRootController(DefaultRootController):
RedisRoot.create(context, instance_id, user_name,
original_auth_password)
except exception.TroveError:
- LOG.exception(_("Rolling back failed for instance '%s'"),
+ LOG.exception("Rolling back failed for instance '%s'",
instance_id)
@staticmethod
@@ -149,8 +147,8 @@ class RedisRootController(DefaultRootController):
@staticmethod
def _get_slaves(tenant_id, instance_or_cluster_id, deleted=False):
- LOG.info(_LI("Getting non-deleted slaves of instance '%s', "
- "if any."), instance_or_cluster_id)
+ LOG.info("Getting non-deleted slaves of instance '%s', "
+ "if any.", instance_or_cluster_id)
args = {'slave_of_id': instance_or_cluster_id, 'tenant_id': tenant_id,
'deleted': deleted}
db_infos = DBInstance.find_all(**args)
@@ -168,8 +166,8 @@ class RedisRootController(DefaultRootController):
password = RedisRoot.get_auth_password(context, instance_id)
except exception.TroveError:
raise exception.TroveError(
- _LE("Failed to get original auth password of instance "
- "'%(instance_id)s'.") % {'instance_id': instance_id}
+ _("Failed to get original auth password of instance "
+ "'%(instance_id)s'.") % {'instance_id': instance_id}
)
return password
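The Redis root controller above illustrates the split this change enforces between log text and exception text; roughly (enable_auth is an invented helper, and the create callable stands in for RedisRoot.create):

    from oslo_log import log as logging

    from trove.common import exception
    from trove.common.i18n import _

    LOG = logging.getLogger(__name__)


    def enable_auth(context, instance_id, user_name, password, create):
        # Operational progress is logged untranslated.
        LOG.info("Enabling authentication for instance '%s'.", instance_id)
        try:
            create(context, instance_id, user_name, password)
        except exception.TroveError:
            # The re-raised message can surface through the API, so it
            # is still built with _() and % interpolation.
            raise exception.TroveError(
                _("Failed to do root-enable for instance "
                  "'%(instance_id)s'.") % {'instance_id': instance_id})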
diff --git a/trove/extensions/security_group/models.py b/trove/extensions/security_group/models.py
index 5e191437..ac8c327c 100644
--- a/trove/extensions/security_group/models.py
+++ b/trove/extensions/security_group/models.py
@@ -66,7 +66,7 @@ class SecurityGroup(DatabaseModelBase):
tenant_id=context.tenant)
except exception.SecurityGroupCreationError:
- LOG.exception(_("Failed to create remote security group."))
+ LOG.exception("Failed to create remote security group.")
raise
@classmethod
@@ -112,7 +112,7 @@ class SecurityGroup(DatabaseModelBase):
super(SecurityGroup, self).delete()
except exception.TroveError:
- LOG.exception(_('Failed to delete security group.'))
+ LOG.exception('Failed to delete security group.')
raise exception.TroveError("Failed to delete Security Group")
@classmethod
@@ -128,8 +128,8 @@ class SecurityGroup(DatabaseModelBase):
association.delete()
except (exception.ModelNotFoundError,
exception.TroveError):
- LOG.info(_('Security Group with id: %(id)s '
- 'already had been deleted'),
+ LOG.info('Security Group with id: %(id)s '
+ 'already had been deleted',
{'id': instance_id})
@@ -165,7 +165,7 @@ class SecurityGroupRule(DatabaseModelBase):
group_id=sec_group['id'])
except exception.SecurityGroupRuleCreationError:
- LOG.exception(_("Failed to create remote security group rule."))
+ LOG.exception("Failed to create remote security group rule.")
raise
def get_security_group(self, tenant_id):
@@ -179,7 +179,7 @@ class SecurityGroupRule(DatabaseModelBase):
RemoteSecurityGroup.delete_rule(self.id, context, region_name)
super(SecurityGroupRule, self).delete()
except exception.TroveError:
- LOG.exception(_('Failed to delete remote security group rule.'))
+ LOG.exception('Failed to delete remote security group rule.')
raise exception.SecurityGroupRuleDeletionError(
"Failed to delete Remote Security Group Rule")
diff --git a/trove/extensions/security_group/service.py b/trove/extensions/security_group/service.py
index 99073dc4..05100f12 100644
--- a/trove/extensions/security_group/service.py
+++ b/trove/extensions/security_group/service.py
@@ -18,7 +18,6 @@ from oslo_log import log as logging
from trove.common import cfg
from trove.common import exception
-from trove.common.i18n import _
from trove.common import wsgi
from trove.datastore.models import DatastoreVersion
from trove.extensions.security_group import models
@@ -74,8 +73,8 @@ class SecurityGroupRuleController(wsgi.Controller):
sec_group = sec_group_rule.get_security_group(tenant_id)
if sec_group is None:
- LOG.error(_("Attempting to delete Group Rule that does not "
- "exist or does not belong to tenant %s"), tenant_id)
+ LOG.error("Attempting to delete Group Rule that does not "
+ "exist or does not belong to tenant %s", tenant_id)
raise exception.Forbidden("Unauthorized")
sec_group_rule.delete(context, CONF.os_region_name)
@@ -130,8 +129,8 @@ class SecurityGroupRuleController(wsgi.Controller):
body['security_group_rule']['group_id']
body['security_group_rule']['cidr']
except KeyError as e:
- LOG.error(_("Create Security Group Rules Required field(s) "
- "- %s"), e)
+ LOG.error("Create Security Group Rules Required field(s) "
+ "- %s", e)
raise exception.SecurityGroupRuleCreationError(
"Required element/key - %s was not specified" % e)
diff --git a/trove/guestagent/api.py b/trove/guestagent/api.py
index a654297d..b0619813 100644
--- a/trove/guestagent/api.py
+++ b/trove/guestagent/api.py
@@ -24,7 +24,6 @@ from oslo_messaging.rpc.client import RemoteError
from trove.common import cfg
from trove.common import exception
-from trove.common.i18n import _
from trove.common.notification import NotificationCastWrapper
from trove import rpc
@@ -93,10 +92,10 @@ class API(object):
LOG.debug("Result is %s.", result)
return result
except RemoteError as r:
- LOG.exception(_("Error calling %s"), method_name)
+ LOG.exception("Error calling %s", method_name)
raise exception.GuestError(original_message=r.value)
except Exception as e:
- LOG.exception(_("Error calling %s"), method_name)
+ LOG.exception("Error calling %s", method_name)
raise exception.GuestError(original_message=str(e))
except Timeout:
raise exception.GuestTimeout()
@@ -108,10 +107,10 @@ class API(object):
cctxt = self.client.prepare(version=version)
cctxt.cast(self.context, method_name, **kwargs)
except RemoteError as r:
- LOG.exception(_("Error calling %s"), method_name)
+ LOG.exception("Error calling %s", method_name)
raise exception.GuestError(original_message=r.value)
except Exception as e:
- LOG.exception(_("Error calling %s"), method_name)
+ LOG.exception("Error calling %s", method_name)
raise exception.GuestError(original_message=str(e))
def _get_routing_key(self):
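The guest agent API keeps its error handling, minus the _() wrappers on log calls; approximately (call_guest is an invented standalone version of the RPC call wrapper changed above):

    from oslo_log import log as logging
    from oslo_messaging.rpc.client import RemoteError

    from trove.common import exception

    LOG = logging.getLogger(__name__)


    def call_guest(cctxt, context, method_name, **kwargs):
        # Failures are logged with LOG.exception as plain strings, and
        # the original message is preserved in the GuestError raised to
        # callers.
        try:
            return cctxt.call(context, method_name, **kwargs)
        except RemoteError as r:
            LOG.exception("Error calling %s", method_name)
            raise exception.GuestError(original_message=r.value)
        except Exception as e:
            LOG.exception("Error calling %s", method_name)
            raise exception.GuestError(original_message=str(e))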
diff --git a/trove/guestagent/backup/backupagent.py b/trove/guestagent/backup/backupagent.py
index 9d13e746..8220abc8 100644
--- a/trove/guestagent/backup/backupagent.py
+++ b/trove/guestagent/backup/backupagent.py
@@ -114,11 +114,11 @@ class BackupAgent(object):
except Exception:
LOG.exception(
- _("Error saving backup: %(backup_id)s."), backup_state)
+ "Error saving backup: %(backup_id)s.", backup_state)
backup_state.update({'state': BackupState.FAILED})
raise
finally:
- LOG.info(_("Completed backup %(backup_id)s."), backup_state)
+ LOG.info("Completed backup %(backup_id)s.", backup_state)
conductor.update_backup(CONF.guest_id,
sent=timeutils.utcnow_ts(
microsecond=True),
@@ -176,7 +176,7 @@ class BackupAgent(object):
LOG.debug("Restore size: %s.", content_size)
except Exception:
- LOG.exception(_("Error restoring backup %(id)s."), backup_info)
+ LOG.exception("Error restoring backup %(id)s.", backup_info)
raise
else:
diff --git a/trove/guestagent/datastore/experimental/cassandra/manager.py b/trove/guestagent/datastore/experimental/cassandra/manager.py
index f584e16a..80ddcc56 100644
--- a/trove/guestagent/datastore/experimental/cassandra/manager.py
+++ b/trove/guestagent/datastore/experimental/cassandra/manager.py
@@ -19,7 +19,6 @@ import os
from oslo_log import log as logging
from trove.common import cfg
-from trove.common.i18n import _
from trove.common import instance as trove_instance
from trove.common.notification import EndNotification
from trove.guestagent import backup
@@ -239,17 +238,17 @@ class Manager(manager.Manager):
return self.app.is_root_enabled()
def _perform_restore(self, backup_info, context, restore_location):
- LOG.info(_("Restoring database from backup %s."), backup_info['id'])
+ LOG.info("Restoring database from backup %s.", backup_info['id'])
try:
backup.restore(context, backup_info, restore_location)
self.app._apply_post_restore_updates(backup_info)
except Exception as e:
LOG.error(e)
- LOG.error(_("Error performing restore from backup %s."),
+ LOG.error("Error performing restore from backup %s.",
backup_info['id'])
self.app.status.set_status(trove_instance.ServiceStatuses.FAILED)
raise
- LOG.info(_("Restored database successfully."))
+ LOG.info("Restored database successfully.")
def create_backup(self, context, backup_info):
"""
diff --git a/trove/guestagent/datastore/experimental/cassandra/service.py b/trove/guestagent/datastore/experimental/cassandra/service.py
index d366d272..66a73c07 100644
--- a/trove/guestagent/datastore/experimental/cassandra/service.py
+++ b/trove/guestagent/datastore/experimental/cassandra/service.py
@@ -159,7 +159,7 @@ class CassandraApp(object):
def install_if_needed(self, packages):
"""Prepare the guest machine with a Cassandra server installation."""
- LOG.info(_("Preparing Guest as a Cassandra Server"))
+ LOG.info("Preparing Guest as a Cassandra Server")
if not packager.pkg_is_installed(packages):
self._install_db(packages)
LOG.debug("Cassandra install_if_needed complete")
@@ -168,7 +168,7 @@ class CassandraApp(object):
try:
operating_system.create_directory(mount_point, as_root=True)
except exception.ProcessExecutionError:
- LOG.exception(_("Error while initiating storage structure."))
+ LOG.exception("Error while initiating storage structure.")
def start_db(self, update_db=False, enable_on_boot=True):
self.status.start_db_service(
@@ -208,7 +208,7 @@ class CassandraApp(object):
raise RuntimeError(_("Cannot remove system tables. "
"The service is still running."))
- LOG.info(_('Removing existing system tables.'))
+ LOG.info('Removing existing system tables.')
system_keyspace_dir = guestagent_utils.build_file_path(
self.cassandra_data_dir, 'system')
commitlog_file = guestagent_utils.build_file_path(
@@ -295,7 +295,7 @@ class CassandraApp(object):
Create a new one using the default database credentials
otherwise and drop the built-in user when finished.
"""
- LOG.info(_('Configuring Trove superuser.'))
+ LOG.info('Configuring Trove superuser.')
if password is None:
password = utils.generate_random_password()
@@ -443,8 +443,8 @@ class CassandraApp(object):
return self._load_current_superuser()
LOG.warning(
- _("Trove administrative user has not been configured yet. "
- "Using the built-in default: %s"),
+ "Trove administrative user has not been configured yet. "
+ "Using the built-in default: %s",
models.CassandraUser.root_username)
return models.CassandraUser(models.CassandraUser.root_username,
self.default_superuser_password)
@@ -560,7 +560,7 @@ class CassandraApp(object):
self.configuration_manager.remove_user_override()
def write_cluster_topology(self, data_center, rack, prefer_local=True):
- LOG.info(_('Saving Cassandra cluster topology configuration.'))
+ LOG.info('Saving Cassandra cluster topology configuration.')
config = {'dc': data_center,
'rack': rack,
@@ -664,7 +664,7 @@ class CassandraApp(object):
self._run_nodetool_command('cleanup')
self.status.set_status(rd_instance.ServiceStatuses.RUNNING)
except Exception:
- LOG.exception(_("The node failed to complete its cleanup."))
+ LOG.exception("The node failed to complete its cleanup.")
finally:
self.status.end_restart()
@@ -684,7 +684,7 @@ class CassandraApp(object):
try:
self._run_nodetool_command('decommission')
except Exception:
- LOG.exception(_("The node failed to decommission itself."))
+ LOG.exception("The node failed to decommission itself.")
self.status.set_status(rd_instance.ServiceStatuses.FAILED)
return
finally:
@@ -771,7 +771,7 @@ class CassandraAppStatus(service.BaseDbStatus):
except NoHostAvailable:
return rd_instance.ServiceStatuses.SHUTDOWN
except Exception:
- LOG.exception(_("Error getting Cassandra status."))
+ LOG.exception("Error getting Cassandra status.")
return rd_instance.ServiceStatuses.SHUTDOWN
@@ -1231,7 +1231,7 @@ class CassandraConnection(object):
data_values, timeout)
return rows or []
except OperationTimedOut:
- LOG.error(_("Query execution timed out."))
+ LOG.error("Query execution timed out.")
raise
LOG.debug("Cannot perform this operation on a closed connection.")
diff --git a/trove/guestagent/datastore/experimental/couchbase/manager.py b/trove/guestagent/datastore/experimental/couchbase/manager.py
index e34c323f..cca76afa 100644
--- a/trove/guestagent/datastore/experimental/couchbase/manager.py
+++ b/trove/guestagent/datastore/experimental/couchbase/manager.py
@@ -17,7 +17,6 @@ import os
from oslo_log import log as logging
-from trove.common.i18n import _
from trove.common import instance as rd_instance
from trove.common.notification import EndNotification
from trove.guestagent import backup
@@ -104,16 +103,16 @@ class Manager(manager.Manager):
Restores all couchbase buckets and their documents from the
backup.
"""
- LOG.info(_("Restoring database from backup %s"), backup_info['id'])
+ LOG.info("Restoring database from backup %s", backup_info['id'])
try:
backup.restore(context, backup_info, restore_location)
except Exception as e:
- LOG.error(_("Error performing restore from backup %s"),
+ LOG.error("Error performing restore from backup %s",
backup_info['id'])
LOG.error(e)
self.status.set_status(rd_instance.ServiceStatuses.FAILED)
raise
- LOG.info(_("Restored database successfully"))
+ LOG.info("Restored database successfully")
def create_backup(self, context, backup_info):
"""
diff --git a/trove/guestagent/datastore/experimental/couchbase/service.py b/trove/guestagent/datastore/experimental/couchbase/service.py
index 3d1b06d0..c10e8210 100644
--- a/trove/guestagent/datastore/experimental/couchbase/service.py
+++ b/trove/guestagent/datastore/experimental/couchbase/service.py
@@ -61,7 +61,7 @@ class CouchbaseApp(object):
"""
Install couchbase if needed, do nothing if it is already installed.
"""
- LOG.info(_('Preparing Guest as Couchbase Server.'))
+ LOG.info('Preparing Guest as Couchbase Server.')
if not packager.pkg_is_installed(packages):
LOG.debug('Installing Couchbase.')
self._install_couchbase(packages)
@@ -70,7 +70,7 @@ class CouchbaseApp(object):
self.ip_address = netutils.get_my_ipv4()
mount_point = CONF.couchbase.mount_point
try:
- LOG.info(_('Couchbase Server change data dir path.'))
+ LOG.info('Couchbase Server change data dir path.')
operating_system.chown(mount_point, 'couchbase', 'couchbase',
as_root=True)
pwd = CouchbaseRootAccess.get_password()
@@ -89,9 +89,9 @@ class CouchbaseApp(object):
utils.execute_with_timeout(system.cmd_set_swappiness, shell=True)
utils.execute_with_timeout(system.cmd_update_sysctl_conf,
shell=True)
- LOG.info(_('Couchbase Server initial setup finished.'))
+ LOG.info('Couchbase Server initial setup finished.')
except exception.ProcessExecutionError:
- LOG.exception(_('Error performing initial Couchbase setup.'))
+ LOG.exception('Error performing initial Couchbase setup.')
raise RuntimeError(_("Couchbase Server initial setup failed"))
def _install_couchbase(self, packages):
@@ -125,11 +125,11 @@ class CouchbaseApp(object):
return CouchbaseRootAccess.enable_root(root_password)
def start_db_with_conf_changes(self, config_contents):
- LOG.info(_("Starting Couchbase with configuration changes.\n"
- "Configuration contents:\n %s."), config_contents)
+ LOG.info("Starting Couchbase with configuration changes.\n"
+ "Configuration contents:\n %s.", config_contents)
if self.status.is_running:
- LOG.error(_("Cannot start Couchbase with configuration changes. "
- "Couchbase state == %s."), self.status)
+ LOG.error("Cannot start Couchbase with configuration changes. "
+ "Couchbase state == %s.", self.status)
raise RuntimeError(_("Couchbase is not stopped."))
self._write_config(config_contents)
self.start_db(True)
@@ -159,14 +159,14 @@ class CouchbaseAppStatus(service.BaseDbStatus):
return self._get_status_from_couchbase(pwd)
except exception.ProcessExecutionError:
# log the exception, but continue with native config approach
- LOG.exception(_("Error getting the Couchbase status."))
+ LOG.exception("Error getting the Couchbase status.")
try:
out, err = utils.execute_with_timeout(
system.cmd_get_password_from_config, shell=True)
except exception.ProcessExecutionError:
- LOG.exception(_("Error getting the root password from the "
- "native Couchbase config file."))
+ LOG.exception("Error getting the root password from the "
+ "native Couchbase config file.")
return rd_instance.ServiceStatuses.SHUTDOWN
config_pwd = out.strip() if out is not None else None
@@ -179,9 +179,9 @@ class CouchbaseAppStatus(service.BaseDbStatus):
try:
status = self._get_status_from_couchbase(config_pwd)
except exception.ProcessExecutionError:
- LOG.exception(_("Error getting Couchbase status using the "
- "password parsed from the native Couchbase "
- "config file."))
+ LOG.exception("Error getting Couchbase status using the "
+ "password parsed from the native Couchbase "
+ "config file.")
return rd_instance.ServiceStatuses.SHUTDOWN
# if the parsed root password worked, update the stored value to
diff --git a/trove/guestagent/datastore/experimental/couchdb/manager.py b/trove/guestagent/datastore/experimental/couchdb/manager.py
index 43f5c4f0..aeb3f9be 100644
--- a/trove/guestagent/datastore/experimental/couchdb/manager.py
+++ b/trove/guestagent/datastore/experimental/couchdb/manager.py
@@ -17,7 +17,6 @@ import os
from oslo_log import log as logging
-from trove.common.i18n import _
from trove.common import instance as rd_instance
from trove.guestagent import backup
from trove.guestagent.datastore.experimental.couchdb import service
@@ -92,16 +91,16 @@ class Manager(manager.Manager):
Restores all CouchDB databases and their documents from the
backup.
"""
- LOG.info(_("Restoring database from backup %s"),
+ LOG.info("Restoring database from backup %s",
backup_info['id'])
try:
backup.restore(context, backup_info, restore_location)
except Exception:
- LOG.exception(_("Error performing restore from backup %s"),
+ LOG.exception("Error performing restore from backup %s",
backup_info['id'])
self.status.set_status(rd_instance.ServiceStatuses.FAILED)
raise
- LOG.info(_("Restored database successfully"))
+ LOG.info("Restored database successfully")
def create_backup(self, context, backup_info):
LOG.debug("Creating backup for CouchDB.")
diff --git a/trove/guestagent/datastore/experimental/couchdb/service.py b/trove/guestagent/datastore/experimental/couchdb/service.py
index fcadb124..3dbb9e94 100644
--- a/trove/guestagent/datastore/experimental/couchdb/service.py
+++ b/trove/guestagent/datastore/experimental/couchdb/service.py
@@ -63,11 +63,11 @@ class CouchDBApp(object):
"""
Install CouchDB if needed, do nothing if it is already installed.
"""
- LOG.info(_('Preparing guest as a CouchDB server.'))
+ LOG.info('Preparing guest as a CouchDB server.')
if not packager.pkg_is_installed(packages):
LOG.debug("Installing packages: %s.", str(packages))
packager.pkg_install(packages, {}, system.TIME_OUT)
- LOG.info(_("Finished installing CouchDB server."))
+ LOG.info("Finished installing CouchDB server.")
def change_permissions(self):
"""
@@ -87,7 +87,7 @@ class CouchDBApp(object):
as_root=True)
LOG.debug("Successfully changed permissions.")
except exception.ProcessExecutionError:
- LOG.exception(_("Error changing permissions."))
+ LOG.exception("Error changing permissions.")
def stop_db(self, update_db=False, do_not_start_on_reboot=False):
self.status.stop_db_service(
@@ -112,8 +112,8 @@ class CouchDBApp(object):
)
self.start_db()
except exception.ProcessExecutionError:
- LOG.exception(_("Error while trying to update bind address of"
- " CouchDB server."))
+ LOG.exception("Error while trying to update bind address of"
+ " CouchDB server.")
def start_db_with_conf_changes(self, config_contents):
'''
@@ -123,7 +123,7 @@ class CouchDBApp(object):
this needs to be implemented to enable volume resize on the guest
agent side.
'''
- LOG.info(_("Starting CouchDB with configuration changes."))
+ LOG.info("Starting CouchDB with configuration changes.")
self.start_db(True)
def store_admin_password(self, password):
@@ -185,7 +185,7 @@ class CouchDBAppStatus(service.BaseDbStatus):
LOG.debug("Status of CouchDB is not active.")
return rd_instance.ServiceStatuses.SHUTDOWN
except exception.ProcessExecutionError:
- LOG.exception(_("Error getting CouchDB status."))
+ LOG.exception("Error getting CouchDB status.")
return rd_instance.ServiceStatuses.SHUTDOWN
@@ -230,7 +230,7 @@ class CouchDBAdmin(object):
'password': user.password},
shell=True)
except exception.ProcessExecutionError as pe:
- LOG.exception(_("Error creating user: %s."), user.name)
+ LOG.exception("Error creating user: %s.", user.name)
pass
for database in user.databases:
@@ -253,7 +253,7 @@ class CouchDBAdmin(object):
LOG.debug(pe)
pass
except exception.ProcessExecutionError as pe:
- LOG.exception(_("An error occurred creating users: %s."),
+ LOG.exception("An error occurred creating users: %s.",
pe.message)
pass
@@ -318,8 +318,8 @@ class CouchDBAdmin(object):
'revid': revid},
shell=True)
except exception.ProcessExecutionError as pe:
- LOG.exception(_(
- "There was an error while deleting user: %s."), pe)
+ LOG.exception(
+ "There was an error while deleting user: %s.", pe)
raise exception.GuestError(original_message=_(
"Unable to delete user: %s.") % couchdb_user.name)
@@ -413,8 +413,8 @@ class CouchDBAdmin(object):
else:
user = models.CouchDBUser(username)
if not self._is_modifiable_user(user.name):
- LOG.warning(_('Cannot grant access for reserved user '
- '%(user)s'), {'user': username})
+ LOG.warning('Cannot grant access for reserved user '
+ '%(user)s', {'user': username})
if not user:
raise exception.BadRequest(_(
'Cannot grant access for reserved or non-existant user '
@@ -499,16 +499,16 @@ class CouchDBAdmin(object):
'dbname': dbName},
shell=True)
except exception.ProcessExecutionError:
- LOG.exception(_(
- "There was an error creating database: %s."), dbName)
+ LOG.exception(
+ "There was an error creating database: %s.", dbName)
db_create_failed.append(dbName)
pass
else:
- LOG.warning(_('Cannot create database with a reserved name '
- '%(db)s'), {'db': dbName})
+ LOG.warning('Cannot create database with a reserved name '
+ '%(db)s', {'db': dbName})
db_create_failed.append(dbName)
if len(db_create_failed) > 0:
- LOG.exception(_("Creating the following databases failed: %s."),
+ LOG.exception("Creating the following databases failed: %s.",
db_create_failed)
def list_database_names(self):
@@ -548,13 +548,13 @@ class CouchDBAdmin(object):
'dbname': dbName},
shell=True)
except exception.ProcessExecutionError:
- LOG.exception(_(
- "There was an error while deleting database:%s."), dbName)
+ LOG.exception(
+ "There was an error while deleting database:%s.", dbName)
raise exception.GuestError(original_message=_(
"Unable to delete database: %s.") % dbName)
else:
- LOG.warning(_('Cannot delete a reserved database '
- '%(db)s'), {'db': dbName})
+ LOG.warning('Cannot delete a reserved database '
+ '%(db)s', {'db': dbName})
class CouchDBCredentials(object):
diff --git a/trove/guestagent/datastore/experimental/db2/manager.py b/trove/guestagent/datastore/experimental/db2/manager.py
index 0aa33fbf..ac4b0a06 100644
--- a/trove/guestagent/datastore/experimental/db2/manager.py
+++ b/trove/guestagent/datastore/experimental/db2/manager.py
@@ -17,7 +17,6 @@ import os
from oslo_log import log as logging
-from trove.common.i18n import _
from trove.common import instance as ds_instance
from trove.common.notification import EndNotification
from trove.guestagent import backup
@@ -131,15 +130,15 @@ class Manager(manager.Manager):
self.app.start_db_with_conf_changes(config_contents)
def _perform_restore(self, backup_info, context, restore_location):
- LOG.info(_("Restoring database from backup %s."), backup_info['id'])
+ LOG.info("Restoring database from backup %s.", backup_info['id'])
try:
backup.restore(context, backup_info, restore_location)
except Exception:
- LOG.exception(_("Error performing restore from backup %s."),
+ LOG.exception("Error performing restore from backup %s.",
backup_info['id'])
self.status.set_status(ds_instance.ServiceStatuses.FAILED)
raise
- LOG.info(_("Restored database successfully."))
+ LOG.info("Restored database successfully.")
def create_backup(self, context, backup_info):
LOG.debug("Creating backup.")
diff --git a/trove/guestagent/datastore/experimental/db2/service.py b/trove/guestagent/datastore/experimental/db2/service.py
index b2935cf3..94b9e34b 100644
--- a/trove/guestagent/datastore/experimental/db2/service.py
+++ b/trove/guestagent/datastore/experimental/db2/service.py
@@ -211,7 +211,7 @@ class DB2App(object):
"Command to disable DB2 server on boot failed."))
def start_db_with_conf_changes(self, config_contents):
- LOG.info(_("Starting DB2 with configuration changes."))
+ LOG.info("Starting DB2 with configuration changes.")
self.configuration_manager.save_configuration(config_contents)
self.start_db(True)
@@ -226,7 +226,7 @@ class DB2App(object):
if not self.status.wait_for_real_status_to_change_to(
rd_instance.ServiceStatuses.RUNNING,
self.state_change_wait_time, update_db):
- LOG.error(_("Start of DB2 server instance failed."))
+ LOG.error("Start of DB2 server instance failed.")
self.status.end_restart()
raise RuntimeError(_("Could not start DB2."))
@@ -242,7 +242,7 @@ class DB2App(object):
if not (self.status.wait_for_real_status_to_change_to(
rd_instance.ServiceStatuses.SHUTDOWN,
self.state_change_wait_time, update_db)):
- LOG.error(_("Could not stop DB2."))
+ LOG.error("Could not stop DB2.")
self.status.end_restart()
raise RuntimeError(_("Could not stop DB2."))
@@ -275,7 +275,7 @@ class DB2App(object):
"parameter": param,
"value": value})
except exception.ProcessExecutionError:
- LOG.exception(_("Failed to update config %s"), param)
+ LOG.exception("Failed to update config %s", param)
raise
def _reset_config(self, config):
@@ -284,18 +284,18 @@ class DB2App(object):
default_cfg_value = self.dbm_default_config[k]
self._update_dbm_config(k, default_cfg_value)
except Exception:
- LOG.exception(_("DB2 configuration reset failed."))
+ LOG.exception("DB2 configuration reset failed.")
raise RuntimeError(_("DB2 configuration reset failed."))
- LOG.info(_("DB2 configuration reset completed."))
+ LOG.info("DB2 configuration reset completed.")
def _apply_config(self, config):
try:
for k, v in config.items():
self._update_dbm_config(k, v)
except Exception:
- LOG.exception(_("DB2 configuration apply failed"))
+ LOG.exception("DB2 configuration apply failed")
raise RuntimeError(_("DB2 configuration apply failed"))
- LOG.info(_("DB2 config apply completed."))
+ LOG.info("DB2 config apply completed.")
class DB2AppStatus(service.BaseDbStatus):
@@ -312,7 +312,7 @@ class DB2AppStatus(service.BaseDbStatus):
else:
return rd_instance.ServiceStatuses.SHUTDOWN
except exception.ProcessExecutionError:
- LOG.exception(_("Error getting the DB2 server status."))
+ LOG.exception("Error getting the DB2 server status.")
return rd_instance.ServiceStatuses.CRASHED
@@ -353,8 +353,8 @@ class DB2Admin(object):
try:
run_command(system.CREATE_DB_COMMAND % {'dbname': dbName})
except exception.ProcessExecutionError:
- LOG.exception(_(
- "There was an error creating database: %s."), dbName)
+ LOG.exception(
+ "There was an error creating database: %s.", dbName)
db_create_failed.append(dbName)
pass
@@ -373,12 +373,12 @@ class DB2Admin(object):
run_command(system.RECOVER_FROM_BACKUP_PENDING_MODE % {
'dbname': dbName})
except exception.ProcessExecutionError:
- LOG.exception(_(
+ LOG.exception(
"There was an error while configuring the database for "
- "online backup: %s."), dbName)
+ "online backup: %s.", dbName)
if len(db_create_failed) > 0:
- LOG.exception(_("Creating the following databases failed: %s."),
+ LOG.exception("Creating the following databases failed: %s.",
db_create_failed)
def delete_database(self, database):
@@ -391,8 +391,8 @@ class DB2Admin(object):
LOG.debug("Deleting DB2 database: %s.", dbName)
run_command(system.DELETE_DB_COMMAND % {'dbname': dbName})
except exception.ProcessExecutionError:
- LOG.exception(_(
- "There was an error while deleting database:%s."), dbName)
+ LOG.exception(
+ "There was an error while deleting database:%s.", dbName)
raise exception.GuestError(original_message=_(
"Unable to delete database: %s.") % dbName)
@@ -436,7 +436,7 @@ class DB2Admin(object):
LOG.debug("databases = %s.", str(databases))
except exception.ProcessExecutionError as pe:
err_msg = encodeutils.exception_to_unicode(pe)
- LOG.exception(_("An error occurred listing databases: %s."),
+ LOG.exception("An error occurred listing databases: %s.",
err_msg)
pass
return databases, next_marker
@@ -454,7 +454,7 @@ class DB2Admin(object):
'login': user.name, 'login': user.name,
'passwd': user.password}, shell=True)
except exception.ProcessExecutionError as pe:
- LOG.exception(_("Error creating user: %s."), user.name)
+ LOG.exception("Error creating user: %s.", user.name)
continue
for database in user.databases:
@@ -472,7 +472,7 @@ class DB2Admin(object):
LOG.debug(pe)
pass
except exception.ProcessExecutionError as pe:
- LOG.exception(_("An error occurred creating users: %s."),
+ LOG.exception("An error occurred creating users: %s.",
pe.message)
pass
@@ -508,8 +508,8 @@ class DB2Admin(object):
utils.execute_with_timeout(system.DELETE_USER_COMMAND % {
'login': db2_user.name.lower()}, shell=True)
except exception.ProcessExecutionError as pe:
- LOG.exception(_(
- "There was an error while deleting user: %s."), pe)
+ LOG.exception(
+ "There was an error while deleting user: %s.", pe)
raise exception.GuestError(original_message=_(
"Unable to delete user: %s.") % userName)
diff --git a/trove/guestagent/datastore/experimental/mariadb/service.py b/trove/guestagent/datastore/experimental/mariadb/service.py
index f659ef99..bc2afd29 100644
--- a/trove/guestagent/datastore/experimental/mariadb/service.py
+++ b/trove/guestagent/datastore/experimental/mariadb/service.py
@@ -16,7 +16,6 @@
from oslo_log import log as logging
-from trove.common.i18n import _
from trove.guestagent.common import operating_system
from trove.guestagent.datastore.galera_common import service as galera_service
from trove.guestagent.datastore.mysql_common import service as mysql_service
@@ -84,11 +83,11 @@ class MariaDBApp(galera_service.GaleraApp):
return master_UUID, int(last_txn_id)
def get_latest_txn_id(self):
- LOG.info(_("Retrieving latest txn id."))
+ LOG.info("Retrieving latest txn id.")
return self._get_gtid_executed()
def wait_for_txn(self, txn):
- LOG.info(_("Waiting on txn '%s'."), txn)
+ LOG.info("Waiting on txn '%s'.", txn)
with self.local_sql_client(self.get_engine()) as client:
client.execute("SELECT MASTER_GTID_WAIT('%s')" % txn)
diff --git a/trove/guestagent/datastore/experimental/mongodb/manager.py b/trove/guestagent/datastore/experimental/mongodb/manager.py
index defefa1b..c26408c1 100644
--- a/trove/guestagent/datastore/experimental/mongodb/manager.py
+++ b/trove/guestagent/datastore/experimental/mongodb/manager.py
@@ -17,7 +17,6 @@ import os
from oslo_log import log as logging
-from trove.common.i18n import _
from trove.common import instance as ds_instance
from trove.common.notification import EndNotification
from trove.guestagent import backup
@@ -182,15 +181,15 @@ class Manager(manager.Manager):
return service.MongoDBAdmin().is_root_enabled()
def _perform_restore(self, backup_info, context, restore_location, app):
- LOG.info(_("Restoring database from backup %s."), backup_info['id'])
+ LOG.info("Restoring database from backup %s.", backup_info['id'])
try:
backup.restore(context, backup_info, restore_location)
except Exception:
- LOG.exception(_("Error performing restore from backup %s."),
+ LOG.exception("Error performing restore from backup %s.",
backup_info['id'])
self.status.set_status(ds_instance.ServiceStatuses.FAILED)
raise
- LOG.info(_("Restored database successfully."))
+ LOG.info("Restored database successfully.")
def create_backup(self, context, backup_info):
LOG.debug("Creating backup.")
diff --git a/trove/guestagent/datastore/experimental/mongodb/service.py b/trove/guestagent/datastore/experimental/mongodb/service.py
index c9e703a4..668bfeed 100644
--- a/trove/guestagent/datastore/experimental/mongodb/service.py
+++ b/trove/guestagent/datastore/experimental/mongodb/service.py
@@ -67,11 +67,11 @@ class MongoDBApp(object):
def install_if_needed(self, packages):
"""Prepare the guest machine with a MongoDB installation."""
- LOG.info(_("Preparing Guest as MongoDB."))
+ LOG.info("Preparing Guest as MongoDB.")
if not system.PACKAGER.pkg_is_installed(packages):
LOG.debug("Installing packages: %s.", str(packages))
system.PACKAGER.pkg_install(packages, {}, system.TIME_OUT)
- LOG.info(_("Finished installing MongoDB server."))
+ LOG.info("Finished installing MongoDB server.")
def _get_service_candidates(self):
if self.is_query_router:
@@ -100,12 +100,12 @@ class MongoDBApp(object):
self.configuration_manager.remove_user_override()
def start_db_with_conf_changes(self, config_contents):
- LOG.info(_('Starting MongoDB with configuration changes.'))
+ LOG.info('Starting MongoDB with configuration changes.')
if self.status.is_running:
format = 'Cannot start_db_with_conf_changes because status is %s.'
LOG.debug(format, self.status)
raise RuntimeError(format % self.status)
- LOG.info(_("Initiating config."))
+ LOG.info("Initiating config.")
self.configuration_manager.save_configuration(config_contents)
# The configuration template has to be updated with
# guestagent-controlled settings.
@@ -161,15 +161,15 @@ class MongoDBApp(object):
self._configure_as_cluster_member(
cluster_config['replica_set_name'])
else:
- LOG.error(_("Bad cluster configuration; instance type "
- "given as %s."), cluster_config['instance_type'])
+ LOG.error("Bad cluster configuration; instance type "
+ "given as %s.", cluster_config['instance_type'])
return ds_instance.ServiceStatuses.FAILED
if 'key' in cluster_config:
self._configure_cluster_security(cluster_config['key'])
def _configure_as_query_router(self):
- LOG.info(_("Configuring instance as a cluster query router."))
+ LOG.info("Configuring instance as a cluster query router.")
self.is_query_router = True
# FIXME(pmalik): We should really have a separate configuration
@@ -193,13 +193,13 @@ class MongoDBApp(object):
{'sharding.configDB': ''}, CNF_CLUSTER)
def _configure_as_config_server(self):
- LOG.info(_("Configuring instance as a cluster config server."))
+ LOG.info("Configuring instance as a cluster config server.")
self._configure_network(CONFIGSVR_PORT)
self.configuration_manager.apply_system_override(
{'sharding.clusterRole': 'configsvr'}, CNF_CLUSTER)
def _configure_as_cluster_member(self, replica_set_name):
- LOG.info(_("Configuring instance as a cluster member."))
+ LOG.info("Configuring instance as a cluster member.")
self.is_cluster_member = True
self._configure_network(MONGODB_PORT)
# we don't want these thinking they are in a replica set yet
@@ -240,7 +240,7 @@ class MongoDBApp(object):
try:
operating_system.remove(mount_point, force=True, as_root=True)
except exception.ProcessExecutionError:
- LOG.exception(_("Error clearing storage."))
+ LOG.exception("Error clearing storage.")
def _has_config_db(self):
value_string = self.configuration_manager.get_value(
@@ -256,7 +256,7 @@ class MongoDBApp(object):
"""
config_servers_string = ','.join(['%s:%s' % (host, CONFIGSVR_PORT)
for host in config_server_hosts])
- LOG.info(_("Setting config servers: %s"), config_servers_string)
+ LOG.info("Setting config servers: %s", config_servers_string)
self.configuration_manager.apply_system_override(
{'sharding.configDB': config_servers_string}, CNF_CLUSTER)
self.start_db(True)
@@ -454,7 +454,7 @@ class MongoDBAppStatus(service.BaseDbStatus):
pymongo.errors.AutoReconnect):
return ds_instance.ServiceStatuses.SHUTDOWN
except Exception:
- LOG.exception(_("Error getting MongoDB status."))
+ LOG.exception("Error getting MongoDB status.")
return ds_instance.ServiceStatuses.SHUTDOWN
@@ -528,8 +528,8 @@ class MongoDBAdmin(object):
self.create_validated_user(user, client=client)
except (ValueError, pymongo.errors.PyMongoError) as e:
LOG.error(e)
- LOG.warning(_('Skipping creation of user with name '
- '%(user)s'), {'user': user.name})
+ LOG.warning('Skipping creation of user with name '
+ '%(user)s', {'user': user.name})
def delete_validated_user(self, user):
"""Deletes a user from their database. The caller should ensure that
@@ -553,8 +553,8 @@ class MongoDBAdmin(object):
"""Get the user's record."""
user = models.MongoDBUser(name)
if user.is_ignored:
- LOG.warning(_('Skipping retrieval of user with reserved '
- 'name %(user)s'), {'user': user.name})
+ LOG.warning('Skipping retrieval of user with reserved '
+ 'name %(user)s', {'user': user.name})
return None
if client:
user_info = client.admin.system.users.find_one(
@@ -613,8 +613,8 @@ class MongoDBAdmin(object):
self._create_user_with_client(user, admin_client)
except (ValueError, pymongo.errors.PyMongoError) as e:
LOG.error(e)
- LOG.warning(_('Skipping password change for user with '
- 'name %(user)s'), {'user': user.name})
+ LOG.warning('Skipping password change for user with '
+ 'name %(user)s', {'user': user.name})
def update_attributes(self, name, user_attrs):
"""Update user attributes."""
@@ -624,9 +624,9 @@ class MongoDBAdmin(object):
user.password = password
self.change_passwords([user.serialize()])
if user_attrs.get('name'):
- LOG.warning(_('Changing user name is not supported.'))
+ LOG.warning('Changing user name is not supported.')
if user_attrs.get('host'):
- LOG.warning(_('Changing user host is not supported.'))
+ LOG.warning('Changing user host is not supported.')
def enable_root(self, password=None):
"""Create a user 'root' with role 'root'."""
diff --git a/trove/guestagent/datastore/experimental/percona/service.py b/trove/guestagent/datastore/experimental/percona/service.py
index cfcb88b8..a021e243 100644
--- a/trove/guestagent/datastore/experimental/percona/service.py
+++ b/trove/guestagent/datastore/experimental/percona/service.py
@@ -16,7 +16,6 @@
from oslo_log import log as logging
-from trove.common.i18n import _
from trove.guestagent.datastore.mysql_common import service
LOG = logging.getLogger(__name__)
@@ -63,11 +62,11 @@ class MySqlApp(service.BaseMySqlApp):
return master_UUID, int(last_txn_id)
def get_latest_txn_id(self):
- LOG.info(_("Retrieving latest txn id."))
+ LOG.info("Retrieving latest txn id.")
return self._get_gtid_executed()
def wait_for_txn(self, txn):
- LOG.info(_("Waiting on txn '%s'."), txn)
+ LOG.info("Waiting on txn '%s'.", txn)
with self.local_sql_client(self.get_engine()) as client:
client.execute("SELECT WAIT_UNTIL_SQL_THREAD_AFTER_GTIDS('%s')"
% txn)
diff --git a/trove/guestagent/datastore/experimental/postgresql/manager.py b/trove/guestagent/datastore/experimental/postgresql/manager.py
index 64d9864b..996d1cbc 100644
--- a/trove/guestagent/datastore/experimental/postgresql/manager.py
+++ b/trove/guestagent/datastore/experimental/postgresql/manager.py
@@ -234,7 +234,7 @@ class Manager(manager.Manager):
self.app.set_current_admin_user(os_admin)
if snapshot:
- LOG.info(_("Found snapshot info: %s"), str(snapshot))
+ LOG.info("Found snapshot info: %s", str(snapshot))
self.attach_replica(context, snapshot, snapshot['config'])
self.app.start_db()
@@ -284,7 +284,7 @@ class Manager(manager.Manager):
lsn = self.app.pg_last_xlog_replay_location()
else:
lsn = self.app.pg_current_xlog_location()
- LOG.info(_("Last xlog location found: %s"), lsn)
+ LOG.info("Last xlog location found: %s", lsn)
return lsn
def get_last_txn(self, context):
@@ -299,7 +299,7 @@ class Manager(manager.Manager):
def _wait_for_txn():
lsn = self.app.pg_last_xlog_replay_location()
- LOG.info(_("Last xlog location found: %s"), lsn)
+ LOG.info("Last xlog location found: %s", lsn)
return lsn >= txn
try:
utils.poll_until(_wait_for_txn, time_out=120)
diff --git a/trove/guestagent/datastore/experimental/postgresql/service.py b/trove/guestagent/datastore/experimental/postgresql/service.py
index 718c8a49..a18d76ed 100644
--- a/trove/guestagent/datastore/experimental/postgresql/service.py
+++ b/trove/guestagent/datastore/experimental/postgresql/service.py
@@ -257,7 +257,7 @@ class PgSqlApp(object):
if not self.configuration_manager.has_system_override(
BACKUP_CFG_OVERRIDE):
return
- LOG.info(_("Removing configuration changes for backups"))
+ LOG.info("Removing configuration changes for backups")
self.configuration_manager.remove_system_override(BACKUP_CFG_OVERRIDE)
self.remove_wal_archive_dir()
self.restart()
@@ -266,13 +266,13 @@ class PgSqlApp(object):
"""Apply necessary changes to config to enable WAL-based backups
if we are using the PgBaseBackup strategy
"""
- LOG.info(_("Checking if we need to apply changes to WAL config"))
+ LOG.info("Checking if we need to apply changes to WAL config")
if 'PgBaseBackup' not in self.backup_strategy:
return
if self.configuration_manager.has_system_override(BACKUP_CFG_OVERRIDE):
return
- LOG.info(_("Applying changes to WAL config for use by base backups"))
+ LOG.info("Applying changes to WAL config for use by base backups")
wal_arch_loc = self.wal_archive_location
if not os.path.isdir(wal_arch_loc):
raise RuntimeError(_("Cannot enable backup as WAL dir '%s' does "
@@ -323,7 +323,7 @@ class PgSqlApp(object):
if not packager.pkg_is_installed(packages):
try:
LOG.info(
- _("{guest_id}: Installing ({packages}).").format(
+ "{guest_id}: Installing ({packages}).".format(
guest_id=CONF.guest_id,
packages=packages,
)
@@ -335,8 +335,8 @@ class PgSqlApp(object):
pkg.PkgDownloadError, pkg.PkgSignError,
pkg.PkgBrokenError):
LOG.exception(
- _("{guest_id}: There was a package manager error while "
- "trying to install ({packages}).").format(
+ "{guest_id}: There was a package manager error while "
+ "trying to install ({packages}).".format(
guest_id=CONF.guest_id,
packages=packages,
)
@@ -344,8 +344,8 @@ class PgSqlApp(object):
raise
except Exception:
LOG.exception(
- _("{guest_id}: The package manager encountered an unknown "
- "error while trying to install ({packages}).").format(
+ "{guest_id}: The package manager encountered an unknown "
+ "error while trying to install ({packages}).".format(
guest_id=CONF.guest_id,
packages=packages,
)
@@ -583,7 +583,7 @@ class PgSqlAppStatus(service.BaseDbStatus):
except utils.Timeout:
return instance.ServiceStatuses.BLOCKED
except Exception:
- LOG.exception(_("Error getting Postgres status."))
+ LOG.exception("Error getting Postgres status.")
return instance.ServiceStatuses.CRASHED
return instance.ServiceStatuses.SHUTDOWN
@@ -610,11 +610,11 @@ class PgSqlAdmin(object):
"""
for database in databases:
LOG.info(
- _("{guest_id}: Granting user ({user}) access to database "
- "({database}).").format(
- guest_id=CONF.guest_id,
- user=username,
- database=database,)
+ "{guest_id}: Granting user ({user}) access to database "
+ "({database}).".format(
+ guest_id=CONF.guest_id,
+ user=username,
+ database=database,)
)
self.psql(
pgsql_query.AccessQuery.grant(
@@ -632,11 +632,11 @@ class PgSqlAdmin(object):
database.
"""
LOG.info(
- _("{guest_id}: Revoking user ({user}) access to database"
- "({database}).").format(
- guest_id=CONF.guest_id,
- user=username,
- database=database,)
+ "{guest_id}: Revoking user ({user}) access to database"
+ "({database}).".format(
+ guest_id=CONF.guest_id,
+ user=username,
+ database=database,)
)
self.psql(
pgsql_query.AccessQuery.revoke(
@@ -673,7 +673,7 @@ class PgSqlAdmin(object):
:type database: PostgreSQLSchema
"""
LOG.info(
- _("{guest_id}: Creating database {name}.").format(
+ "{guest_id}: Creating database {name}.".format(
guest_id=CONF.guest_id,
name=database.name,
)
@@ -700,7 +700,7 @@ class PgSqlAdmin(object):
:type database: PostgreSQLSchema
"""
LOG.info(
- _("{guest_id}: Dropping database {name}.").format(
+ "{guest_id}: Dropping database {name}.".format(
guest_id=CONF.guest_id,
name=database.name,
)
@@ -755,7 +755,7 @@ class PgSqlAdmin(object):
:type options: list
"""
LOG.info(
- _("{guest_id}: Creating user {user} {with_clause}.")
+ "{guest_id}: Creating user {user} {with_clause}."
.format(
guest_id=CONF.guest_id,
user=user.name,
@@ -844,7 +844,7 @@ class PgSqlAdmin(object):
self.revoke_access(context, user.name, None, db.name)
LOG.info(
- _("{guest_id}: Dropping user {name}.").format(
+ "{guest_id}: Dropping user {name}.".format(
guest_id=CONF.guest_id,
name=user.name,
)
@@ -907,7 +907,7 @@ class PgSqlAdmin(object):
:type options: list
"""
LOG.info(
- _("{guest_id}: Altering user {user} {with_clause}.")
+ "{guest_id}: Altering user {user} {with_clause}."
.format(
guest_id=CONF.guest_id,
user=user.name,
@@ -962,7 +962,7 @@ class PgSqlAdmin(object):
:type user: PostgreSQLUser
"""
LOG.info(
- _("{guest_id}: Changing username for {old} to {new}.").format(
+ "{guest_id}: Changing username for {old} to {new}.".format(
guest_id=CONF.guest_id,
old=user.name,
new=new_username,
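
The PostgreSQL hunks above keep building the message with str.format() before handing it to LOG.info(); oslo.log, like the standard logging module, also accepts a format string plus arguments and defers interpolation until the record is actually emitted. A small sketch of both spellings, using hypothetical values:

    from oslo_log import log as logging

    LOG = logging.getLogger(__name__)

    guest_id = "example-guest"   # hypothetical values, for illustration only
    user, database = "alice", "sales"

    # Eager: the string is fully built before the logger sees it, as in the
    # PgSqlAdmin hunks above.
    LOG.info(
        "{guest_id}: Granting user ({user}) access to database "
        "({database}).".format(
            guest_id=guest_id, user=user, database=database))

    # Lazy: the logger interpolates only if the record is emitted, as in most
    # other hunks in this patch.
    LOG.info("%(guest_id)s: Granting user (%(user)s) access to database "
             "(%(database)s).",
             {'guest_id': guest_id, 'user': user, 'database': database})
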
diff --git a/trove/guestagent/datastore/experimental/redis/manager.py b/trove/guestagent/datastore/experimental/redis/manager.py
index f3ecf84a..00931a81 100644
--- a/trove/guestagent/datastore/experimental/redis/manager.py
+++ b/trove/guestagent/datastore/experimental/redis/manager.py
@@ -50,15 +50,15 @@ class Manager(manager.Manager):
def _perform_restore(self, backup_info, context, restore_location, app):
"""Perform a restore on this instance."""
- LOG.info(_("Restoring database from backup %s."), backup_info['id'])
+ LOG.info("Restoring database from backup %s.", backup_info['id'])
try:
backup.restore(context, backup_info, restore_location)
except Exception:
- LOG.exception(_("Error performing restore from backup %s."),
+ LOG.exception("Error performing restore from backup %s.",
backup_info['id'])
app.status.set_status(rd_instance.ServiceStatuses.FAILED)
raise
- LOG.info(_("Restored database successfully."))
+ LOG.info("Restored database successfully.")
def do_prepare(self, context, packages, databases, memory_mb, users,
device_path, mount_point, backup_info,
@@ -75,7 +75,7 @@ class Manager(manager.Manager):
as_root=True)
LOG.debug('Mounted the volume.')
self._app.install_if_needed(packages)
- LOG.info(_('Writing redis configuration.'))
+ LOG.info('Writing redis configuration.')
if cluster_config:
config_contents = (config_contents + "\n"
+ "cluster-enabled yes\n"
@@ -192,7 +192,7 @@ class Manager(manager.Manager):
self.replication.enable_as_slave(self._app, replica_info,
slave_config)
except Exception:
- LOG.exception(_("Error enabling replication."))
+ LOG.exception("Error enabling replication.")
raise
def make_read_only(self, context, read_only):
@@ -221,11 +221,11 @@ class Manager(manager.Manager):
return master_host, repl_offset
def get_latest_txn_id(self, context):
- LOG.info(_("Retrieving latest repl offset."))
+ LOG.info("Retrieving latest repl offset.")
return self._get_repl_offset()
def wait_for_txn(self, context, txn):
- LOG.info(_("Waiting on repl offset '%s'."), txn)
+ LOG.info("Waiting on repl offset '%s'."), txn
def _wait_for_txn():
current_offset = self._get_repl_offset()
diff --git a/trove/guestagent/datastore/experimental/redis/service.py b/trove/guestagent/datastore/experimental/redis/service.py
index fa17cb18..dac6d644 100644
--- a/trove/guestagent/datastore/experimental/redis/service.py
+++ b/trove/guestagent/datastore/experimental/redis/service.py
@@ -63,7 +63,7 @@ class RedisAppStatus(service.BaseDbStatus):
except BusyLoadingError:
return rd_instance.ServiceStatuses.BLOCKED
except Exception:
- LOG.exception(_("Error getting Redis status."))
+ LOG.exception("Error getting Redis status.")
return rd_instance.ServiceStatuses.CRASHED
@@ -123,11 +123,11 @@ class RedisApp(object):
"""
Install redis if needed do nothing if it is already installed.
"""
- LOG.info(_('Preparing Guest as Redis Server.'))
+ LOG.info('Preparing Guest as Redis Server.')
if not packager.pkg_is_installed(packages):
- LOG.info(_('Installing Redis.'))
+ LOG.info('Installing Redis.')
self._install_redis(packages)
- LOG.info(_('Redis installed completely.'))
+ LOG.info('Redis installed completely.')
def _install_redis(self, packages):
"""
@@ -203,12 +203,12 @@ class RedisApp(object):
pass
def start_db_with_conf_changes(self, config_contents):
- LOG.info(_('Starting redis with conf changes.'))
+ LOG.info('Starting redis with conf changes.')
if self.status.is_running:
format = 'Cannot start_db_with_conf_changes because status is %s.'
LOG.debug(format, self.status)
raise RuntimeError(format % self.status)
- LOG.info(_("Initiating config."))
+ LOG.info("Initiating config.")
self.configuration_manager.save_configuration(config_contents)
# The configuration template has to be updated with
# guestagent-controlled settings.
@@ -350,7 +350,7 @@ class RedisApp(object):
utils.execute_with_timeout('redis-cli', 'cluster', 'meet',
ip, port)
except exception.ProcessExecutionError:
- LOG.exception(_('Error joining node to cluster at %s.'), ip)
+ LOG.exception('Error joining node to cluster at %s.', ip)
raise
def cluster_addslots(self, first_slot, last_slot):
@@ -367,8 +367,8 @@ class RedisApp(object):
% out)
del slots[0:group_size]
except exception.ProcessExecutionError:
- LOG.exception(_('Error adding slots %(first_slot)s-%(last_slot)s'
- ' to cluster.'),
+ LOG.exception('Error adding slots %(first_slot)s-%(last_slot)s'
+ ' to cluster.',
{'first_slot': first_slot, 'last_slot': last_slot})
raise
@@ -378,7 +378,7 @@ class RedisApp(object):
'cluster', 'nodes')
return [line.split(' ') for line in out.splitlines()]
except exception.ProcessExecutionError:
- LOG.exception(_('Error getting node info.'))
+ LOG.exception('Error getting node info.')
raise
def _get_node_details(self):
@@ -400,7 +400,7 @@ class RedisApp(object):
'cluster', 'slots')
return node_id if my_ip not in slots else None
except exception.ProcessExecutionError:
- LOG.exception(_('Error validating node to for removal.'))
+ LOG.exception('Error validating node for removal.')
raise
def remove_nodes(self, node_ids):
@@ -409,7 +409,7 @@ class RedisApp(object):
utils.execute_with_timeout('redis-cli', 'cluster',
'forget', node_id)
except exception.ProcessExecutionError:
- LOG.exception(_('Error removing node from cluster.'))
+ LOG.exception('Error removing node from cluster.')
raise
def enable_root(self, password=None):
@@ -423,7 +423,7 @@ class RedisApp(object):
self.apply_overrides(
self.admin, {'requirepass': password, 'masterauth': password})
except exception.TroveError:
- LOG.exception(_('Error enabling authentication for instance.'))
+ LOG.exception('Error enabling authentication for instance.')
raise
return redis_password.serialize()
@@ -434,7 +434,7 @@ class RedisApp(object):
self.apply_overrides(self.admin,
{'requirepass': '', 'masterauth': ''})
except exception.TroveError:
- LOG.exception(_('Error disabling authentication for instance.'))
+ LOG.exception('Error disabling authentication for instance.')
raise
@@ -473,7 +473,7 @@ class RedisAdmin(object):
# If an auto-save is in progress just use it, since it must have
# just happened
if "Background save already in progress" in str(re):
- LOG.info(_("Waiting for existing background save to finish"))
+ LOG.info("Waiting for existing background save to finish")
else:
raise
if save_ok:
@@ -538,7 +538,7 @@ class RedisAdmin(object):
LOG.debug("Found '%(value)s' for field %(key)s.",
{'value': current_value, 'key': key})
else:
- LOG.error(_('Output from Redis command: %s'), redis_info)
+ LOG.error('Output from Redis command: %s', redis_info)
raise RuntimeError(_("Field %(field)s not found "
"(Section: '%(sec)s').") %
({'field': key, 'sec': section}))
diff --git a/trove/guestagent/datastore/experimental/vertica/manager.py b/trove/guestagent/datastore/experimental/vertica/manager.py
index 58c62992..0d664628 100644
--- a/trove/guestagent/datastore/experimental/vertica/manager.py
+++ b/trove/guestagent/datastore/experimental/vertica/manager.py
@@ -115,7 +115,7 @@ class Manager(manager.Manager):
self.app.add_udls()
LOG.debug("install_cluster call has finished.")
except Exception:
- LOG.exception(_('Cluster installation failed.'))
+ LOG.exception('Cluster installation failed.')
self.appStatus.set_status(rd_ins.ServiceStatuses.FAILED)
raise
@@ -137,7 +137,7 @@ class Manager(manager.Manager):
self.app.grow_cluster(members)
LOG.debug("grow_cluster call has finished.")
except Exception:
- LOG.exception(_('Cluster grow failed.'))
+ LOG.exception('Cluster grow failed.')
self.appStatus.set_status(rd_ins.ServiceStatuses.FAILED)
raise
@@ -147,7 +147,7 @@ class Manager(manager.Manager):
self.app.shrink_cluster(members)
LOG.debug("shrink_cluster call has finished.")
except Exception:
- LOG.exception(_('Cluster shrink failed.'))
+ LOG.exception('Cluster shrink failed.')
self.appStatus.set_status(rd_ins.ServiceStatuses.FAILED)
raise
@@ -156,6 +156,6 @@ class Manager(manager.Manager):
LOG.debug("Setting vertica k-safety to %s.", k)
self.app.mark_design_ksafe(k)
except Exception:
- LOG.exception(_('K-safety setting failed.'))
+ LOG.exception('K-safety setting failed.')
self.appStatus.set_status(rd_ins.ServiceStatuses.FAILED)
raise
diff --git a/trove/guestagent/datastore/experimental/vertica/service.py b/trove/guestagent/datastore/experimental/vertica/service.py
index 77b0f0f8..00bdcdfe 100644
--- a/trove/guestagent/datastore/experimental/vertica/service.py
+++ b/trove/guestagent/datastore/experimental/vertica/service.py
@@ -23,7 +23,6 @@ from trove.common import cfg
from trove.common.db import models
from trove.common import exception
from trove.common.i18n import _
-from trove.common.i18n import _LI
from trove.common import instance as rd_instance
from trove.common.stream_codecs import PropertiesCodec
from trove.common import utils
@@ -56,13 +55,13 @@ class VerticaAppStatus(service.BaseDbStatus):
system.VERTICA_ADMIN)
if out.strip() == DB_NAME:
# UP status is confirmed
- LOG.info(_("Service Status is RUNNING."))
+ LOG.info("Service Status is RUNNING.")
return rd_instance.ServiceStatuses.RUNNING
else:
- LOG.info(_("Service Status is SHUTDOWN."))
+ LOG.info("Service Status is SHUTDOWN.")
return rd_instance.ServiceStatuses.SHUTDOWN
except exception.ProcessExecutionError:
- LOG.exception(_("Failed to get database status."))
+ LOG.exception("Failed to get database status.")
return rd_instance.ServiceStatuses.CRASHED
@@ -119,9 +118,9 @@ class VerticaApp(object):
raise RuntimeError(_("Failed to remove config %s") % k)
except Exception:
- LOG.exception(_("Vertica configuration remove failed."))
+ LOG.exception("Vertica configuration remove failed.")
raise RuntimeError(_("Vertica configuration remove failed."))
- LOG.info(_("Vertica configuration reset completed."))
+ LOG.info("Vertica configuration reset completed.")
def _apply_config(self, config):
try:
@@ -137,9 +136,9 @@ class VerticaApp(object):
raise RuntimeError(_("Failed to apply config %s") % k)
except Exception:
- LOG.exception(_("Vertica configuration apply failed"))
+ LOG.exception("Vertica configuration apply failed")
raise RuntimeError(_("Vertica configuration apply failed"))
- LOG.info(_("Vertica config apply completed."))
+ LOG.info("Vertica config apply completed.")
def _enable_db_on_boot(self):
try:
@@ -150,7 +149,7 @@ class VerticaApp(object):
(system.VERTICA_AGENT_SERVICE_COMMAND % "enable")]
subprocess.Popen(command)
except Exception:
- LOG.exception(_("Failed to enable database on boot."))
+ LOG.exception("Failed to enable database on boot.")
raise RuntimeError(_("Could not enable database on boot."))
def _disable_db_on_boot(self):
@@ -160,12 +159,12 @@ class VerticaApp(object):
command = (system.VERTICA_AGENT_SERVICE_COMMAND % "disable")
system.shell_execute(command)
except exception.ProcessExecutionError:
- LOG.exception(_("Failed to disable database on boot."))
+ LOG.exception("Failed to disable database on boot.")
raise RuntimeError(_("Could not disable database on boot."))
def stop_db(self, update_db=False, do_not_start_on_reboot=False):
"""Stop the database."""
- LOG.info(_("Stopping Vertica."))
+ LOG.info("Stopping Vertica.")
if do_not_start_on_reboot:
self._disable_db_on_boot()
@@ -184,19 +183,19 @@ class VerticaApp(object):
if not self.status.wait_for_real_status_to_change_to(
rd_instance.ServiceStatuses.SHUTDOWN,
self.state_change_wait_time, update_db):
- LOG.error(_("Could not stop Vertica."))
+ LOG.error("Could not stop Vertica.")
self.status.end_restart()
raise RuntimeError(_("Could not stop Vertica!"))
LOG.debug("Database stopped.")
else:
LOG.debug("Database is not running.")
except exception.ProcessExecutionError:
- LOG.exception(_("Failed to stop database."))
+ LOG.exception("Failed to stop database.")
raise RuntimeError(_("Could not stop database."))
def start_db(self, update_db=False):
"""Start the database."""
- LOG.info(_("Starting Vertica."))
+ LOG.info("Starting Vertica.")
try:
self._enable_db_on_boot()
# Start vertica-agent service
@@ -219,12 +218,12 @@ class VerticaApp(object):
Currently all that this method does is to start Vertica. This method
needs to be implemented to enable volume resize on guestagent side.
"""
- LOG.info(_("Starting Vertica with configuration changes."))
+ LOG.info("Starting Vertica with configuration changes.")
if self.status.is_running:
format = 'Cannot start_db_with_conf_changes because status is %s.'
LOG.debug(format, self.status)
raise RuntimeError(format % self.status)
- LOG.info(_("Initiating config."))
+ LOG.info("Initiating config.")
self.configuration_manager.save_configuration(config_contents)
self.start_db(True)
@@ -239,7 +238,7 @@ class VerticaApp(object):
def add_db_to_node(self, members=netutils.get_my_ipv4()):
"""Add db to host with admintools"""
- LOG.info(_("Calling admintools to add DB to host"))
+ LOG.info("Calling admintools to add DB to host")
try:
# Create db after install
db_password = self._get_database_password()
@@ -250,13 +249,13 @@ class VerticaApp(object):
except exception.ProcessExecutionError:
# Give vertica some time to get the node up, won't be available
# by the time adminTools -t db_add_node completes
- LOG.info(_("adminTools failed as expected - wait for node"))
+ LOG.info("adminTools failed as expected - wait for node")
self.wait_for_node_status()
- LOG.info(_("Vertica add db to host completed."))
+ LOG.info("Vertica add db to host completed.")
def remove_db_from_node(self, members=netutils.get_my_ipv4()):
"""Remove db from node with admintools"""
- LOG.info(_("Removing db from node"))
+ LOG.info("Removing db from node")
try:
# Create db after install
db_password = self._get_database_password()
@@ -267,16 +266,16 @@ class VerticaApp(object):
except exception.ProcessExecutionError:
# Give vertica some time to get the node up, won't be available
# by the time adminTools -t db_add_node completes
- LOG.info(_("adminTools failed as expected - wait for node"))
+ LOG.info("adminTools failed as expected - wait for node")
# Give vertica some time to take the node down - it won't be available
# by the time adminTools -t db_add_node completes
self.wait_for_node_status()
- LOG.info(_("Vertica remove host from db completed."))
+ LOG.info("Vertica remove host from db completed.")
def create_db(self, members=netutils.get_my_ipv4()):
"""Prepare the guest machine with a Vertica db creation."""
- LOG.info(_("Creating database on Vertica host."))
+ LOG.info("Creating database on Vertica host.")
try:
# Create db after install
db_password = self._get_database_password()
@@ -285,39 +284,39 @@ class VerticaApp(object):
db_password))
system.shell_execute(create_db_command, system.VERTICA_ADMIN)
except Exception:
- LOG.exception(_("Vertica database create failed."))
+ LOG.exception("Vertica database create failed.")
raise RuntimeError(_("Vertica database create failed."))
- LOG.info(_("Vertica database create completed."))
+ LOG.info("Vertica database create completed.")
def install_vertica(self, members=netutils.get_my_ipv4()):
"""Prepare the guest machine with a Vertica db creation."""
- LOG.info(_("Installing Vertica Server."))
+ LOG.info("Installing Vertica Server.")
try:
# Create db after install
install_vertica_cmd = (system.INSTALL_VERTICA % (members,
MOUNT_POINT))
system.shell_execute(install_vertica_cmd)
except exception.ProcessExecutionError:
- LOG.exception(_("install_vertica failed."))
+ LOG.exception("install_vertica failed.")
raise RuntimeError(_("install_vertica failed."))
self._generate_database_password()
- LOG.info(_("install_vertica completed."))
+ LOG.info("install_vertica completed.")
def update_vertica(self, command, members=netutils.get_my_ipv4()):
- LOG.info(_("Calling update_vertica with command %s"), command)
+ LOG.info("Calling update_vertica with command %s", command)
try:
update_vertica_cmd = (system.UPDATE_VERTICA % (command, members,
MOUNT_POINT))
system.shell_execute(update_vertica_cmd)
except exception.ProcessExecutionError:
- LOG.exception(_("update_vertica failed."))
+ LOG.exception("update_vertica failed.")
raise RuntimeError(_("update_vertica failed."))
# self._generate_database_password()
- LOG.info(_("update_vertica completed."))
+ LOG.info("update_vertica completed.")
def add_udls(self):
"""Load the user defined load libraries into the database."""
- LOG.info(_("Adding configured user defined load libraries."))
+ LOG.info("Adding configured user defined load libraries.")
password = self._get_database_password()
loaded_udls = []
for lib in system.UDL_LIBS:
@@ -354,9 +353,9 @@ class VerticaApp(object):
% func_name)
loaded_udls.append(func_name)
else:
- LOG.warning(_("Skipping %(func)s as path %(path)s not "
- "found."), {"func": func_name, "path": path})
- LOG.info(_("The following UDL functions are available for use: %s"),
+ LOG.warning("Skipping %(func)s as path %(path)s not "
+ "found.", {"func": func_name, "path": path})
+ LOG.info("The following UDL functions are available for use: %s",
loaded_udls)
def _generate_database_password(self):
@@ -392,7 +391,7 @@ class VerticaApp(object):
config.read(system.VERTICA_CONF)
return config
except Exception:
- LOG.exception(_("Failed to read config %s."), system.VERTICA_CONF)
+ LOG.exception("Failed to read config %s.", system.VERTICA_CONF)
raise RuntimeError
def _get_database_password(self):
@@ -401,7 +400,7 @@ class VerticaApp(object):
def install_if_needed(self, packages):
"""Install Vertica package if needed."""
- LOG.info(_("Preparing Guest as Vertica Server."))
+ LOG.info("Preparing Guest as Vertica Server.")
if not packager.pkg_is_installed(packages):
LOG.debug("Installing Vertica Package.")
packager.pkg_install(packages, None, system.INSTALL_TIMEOUT)
@@ -424,12 +423,12 @@ class VerticaApp(object):
self._set_readahead_for_disks()
system.shell_execute(command)
except exception.ProcessExecutionError:
- LOG.exception(_("Failed to prepare for install_vertica."))
+ LOG.exception("Failed to prepare for install_vertica.")
raise
def mark_design_ksafe(self, k):
"""Wrapper for mark_design_ksafe function for setting k-safety """
- LOG.info(_("Setting Vertica k-safety to %s"), str(k))
+ LOG.info("Setting Vertica k-safety to %s", str(k))
out, err = system.exec_vsql_command(self._get_database_password(),
system.MARK_DESIGN_KSAFE % k)
# Only fail if we get an ERROR as opposed to a warning complaining
@@ -440,7 +439,7 @@ class VerticaApp(object):
def _create_user(self, username, password, role=None):
"""Creates a user, granting and enabling the given role for it."""
- LOG.info(_("Creating user in Vertica database."))
+ LOG.info("Creating user in Vertica database.")
out, err = system.exec_vsql_command(self._get_database_password(),
system.CREATE_USER %
(username, password))
@@ -474,7 +473,7 @@ class VerticaApp(object):
def enable_root(self, root_password=None):
"""Resets the root password."""
- LOG.info(_LI("Enabling root."))
+ LOG.info("Enabling root.")
user = models.DatastoreUser.root(password=root_password)
if not self.is_root_enabled():
self._create_user(user.name, user.password, 'pseudosuperuser')
@@ -492,7 +491,7 @@ class VerticaApp(object):
raise RuntimeError(_("Failed to update %s "
"password.") % user.name)
except exception.ProcessExecutionError:
- LOG.error(_("Failed to update %s password."), user.name)
+ LOG.error("Failed to update %s password.", user.name)
raise RuntimeError(_("Failed to update %s password.")
% user.name)
return user.serialize()
@@ -527,7 +526,7 @@ class VerticaApp(object):
read_key_cmd = ("cat %(file)s" % {'file': public_key_file_name})
out, err = system.shell_execute(read_key_cmd)
except exception.ProcessExecutionError:
- LOG.exception(_("Cannot read public key."))
+ LOG.exception("Cannot read public key.")
raise
return out.strip()
@@ -554,7 +553,7 @@ class VerticaApp(object):
system.shell_execute(copy_key_cmd)
os.remove(tempkeyfile.name)
except exception.ProcessExecutionError:
- LOG.exception(_("Cannot install public keys."))
+ LOG.exception("Cannot install public keys.")
os.remove(tempkeyfile.name)
raise
@@ -567,7 +566,7 @@ class VerticaApp(object):
system.VERTICA_CONF))
system.shell_execute(COPY_CMD)
except exception.ProcessExecutionError:
- LOG.exception(_("Cannot export configuration."))
+ LOG.exception("Cannot export configuration.")
raise
def install_cluster(self, members):
diff --git a/trove/guestagent/datastore/galera_common/manager.py b/trove/guestagent/datastore/galera_common/manager.py
index a69756ec..6af86cf5 100644
--- a/trove/guestagent/datastore/galera_common/manager.py
+++ b/trove/guestagent/datastore/galera_common/manager.py
@@ -16,7 +16,6 @@
from oslo_log import log as logging
-from trove.common.i18n import _
from trove.common import instance as rd_instance
from trove.guestagent.datastore.mysql_common import manager
@@ -56,7 +55,7 @@ class GaleraManager(manager.MySqlManager):
replication_user, cluster_configuration, bootstrap)
LOG.debug("install_cluster call has finished.")
except Exception:
- LOG.exception(_('Cluster installation failed.'))
+ LOG.exception('Cluster installation failed.')
app.status.set_status(
rd_instance.ServiceStatuses.FAILED)
raise
diff --git a/trove/guestagent/datastore/galera_common/service.py b/trove/guestagent/datastore/galera_common/service.py
index 1e6a8bae..bee7e3c8 100644
--- a/trove/guestagent/datastore/galera_common/service.py
+++ b/trove/guestagent/datastore/galera_common/service.py
@@ -36,7 +36,7 @@ class GaleraApp(service.BaseMySqlApp):
keep_alive_connection_cls)
def _grant_cluster_replication_privilege(self, replication_user):
- LOG.info(_("Granting Replication Slave privilege."))
+ LOG.info("Granting Replication Slave privilege.")
with self.local_sql_client(self.get_engine()) as client:
perms = ['REPLICATION CLIENT', 'RELOAD', 'LOCK TABLES']
g = sql_query.Grant(permissions=perms,
@@ -46,13 +46,13 @@ class GaleraApp(service.BaseMySqlApp):
client.execute(t)
def _bootstrap_cluster(self, timeout=120):
- LOG.info(_("Bootstraping cluster."))
+ LOG.info("Bootstraping cluster.")
try:
utils.execute_with_timeout(
self.mysql_service['cmd_bootstrap_galera_cluster'],
shell=True, timeout=timeout)
except KeyError:
- LOG.exception(_("Error bootstrapping cluster."))
+ LOG.exception("Error bootstrapping cluster.")
raise RuntimeError(_("Service is not discovered."))
def write_cluster_configuration_overrides(self, cluster_configuration):
@@ -61,7 +61,7 @@ class GaleraApp(service.BaseMySqlApp):
def install_cluster(self, replication_user, cluster_configuration,
bootstrap=False):
- LOG.info(_("Installing cluster configuration."))
+ LOG.info("Installing cluster configuration.")
self._grant_cluster_replication_privilege(replication_user)
self.stop_db()
self.write_cluster_configuration_overrides(cluster_configuration)
diff --git a/trove/guestagent/datastore/manager.py b/trove/guestagent/datastore/manager.py
index d6311c5b..9f9dbcdb 100644
--- a/trove/guestagent/datastore/manager.py
+++ b/trove/guestagent/datastore/manager.py
@@ -273,7 +273,7 @@ class Manager(periodic_task.PeriodicTasks):
device_path, mount_point, backup_info,
config_contents, root_password, overrides,
cluster_config, snapshot, modules):
- LOG.info(_("Starting datastore prepare for '%s'."), self.manager)
+ LOG.info("Starting datastore prepare for '%s'.", self.manager)
self.status.begin_install()
post_processing = True if cluster_config else False
try:
@@ -284,23 +284,23 @@ class Manager(periodic_task.PeriodicTasks):
config_contents, root_password, overrides,
cluster_config, snapshot)
if overrides:
- LOG.info(_("Applying user-specified configuration "
- "(called from 'prepare')."))
+ LOG.info("Applying user-specified configuration "
+ "(called from 'prepare').")
self.apply_overrides_on_prepare(context, overrides)
except Exception as ex:
self.prepare_error = True
- LOG.exception(_("An error occurred preparing datastore: %s"),
+ LOG.exception("An error occurred preparing datastore: %s",
encodeutils.exception_to_unicode(ex))
raise
finally:
- LOG.info(_("Ending datastore prepare for '%s'."), self.manager)
+ LOG.info("Ending datastore prepare for '%s'.", self.manager)
self.status.end_install(error_occurred=self.prepare_error,
post_processing=post_processing)
# At this point critical 'prepare' work is done and the instance
# is now in the correct 'ACTIVE' 'INSTANCE_READY' or 'ERROR' state.
# Of cource if an error has occurred, none of the code that follows
# will run.
- LOG.info(_("Completed setup of '%s' datastore successfully."),
+ LOG.info("Completed setup of '%s' datastore successfully.",
self.manager)
# The following block performs additional instance initialization.
@@ -308,56 +308,56 @@ class Manager(periodic_task.PeriodicTasks):
# or change the instance state.
try:
if modules:
- LOG.info(_("Applying modules (called from 'prepare')."))
+ LOG.info("Applying modules (called from 'prepare').")
self.module_apply(context, modules)
- LOG.info(_('Module apply completed.'))
+ LOG.info('Module apply completed.')
except Exception as ex:
- LOG.exception(_("An error occurred applying modules: "
- "%s"), ex.message)
+ LOG.exception("An error occurred applying modules: "
+ "%s", ex.message)
# The following block performs single-instance initialization.
# Failures will be recorded, but won't stop the provisioning
# or change the instance state.
if not cluster_config:
try:
if databases:
- LOG.info(_("Creating databases (called from 'prepare')."))
+ LOG.info("Creating databases (called from 'prepare').")
self.create_database(context, databases)
- LOG.info(_('Databases created successfully.'))
+ LOG.info('Databases created successfully.')
except Exception as ex:
- LOG.exception(_("An error occurred creating databases: "
- "%s"), ex.message)
+ LOG.exception("An error occurred creating databases: "
+ "%s", ex.message)
try:
if users:
- LOG.info(_("Creating users (called from 'prepare')"))
+ LOG.info("Creating users (called from 'prepare')")
self.create_user(context, users)
- LOG.info(_('Users created successfully.'))
+ LOG.info('Users created successfully.')
except Exception as ex:
- LOG.exception(_("An error occurred creating users: "
- "%s"), ex.message)
+ LOG.exception("An error occurred creating users: "
+ "%s", ex.message)
# We only enable-root automatically if not restoring a backup
# that may already have root enabled in which case we keep it
# unchanged.
if root_password and not backup_info:
try:
- LOG.info(_("Enabling root user (with password)."))
+ LOG.info("Enabling root user (with password).")
self.enable_root_on_prepare(context, root_password)
- LOG.info(_('Root enabled successfully.'))
+ LOG.info('Root enabled successfully.')
except Exception as ex:
- LOG.exception(_("An error occurred enabling root user: "
- "%s"), ex.message)
+ LOG.exception("An error occurred enabling root user: "
+ "%s", ex.message)
try:
- LOG.info(_("Calling post_prepare for '%s' datastore."),
+ LOG.info("Calling post_prepare for '%s' datastore.",
self.manager)
self.post_prepare(context, packages, databases, memory_mb,
users, device_path, mount_point, backup_info,
config_contents, root_password, overrides,
cluster_config, snapshot)
- LOG.info(_("Post prepare for '%s' datastore completed."),
+ LOG.info("Post prepare for '%s' datastore completed.",
self.manager)
except Exception as ex:
- LOG.exception(_("An error occurred in post prepare: %s"),
+ LOG.exception("An error occurred in post prepare: %s",
ex.message)
raise
@@ -394,7 +394,7 @@ class Manager(periodic_task.PeriodicTasks):
however no status changes are made and the end-user will not be
informed of the error.
"""
- LOG.info(_('No post_prepare work has been defined.'))
+ LOG.info('No post_prepare work has been defined.')
pass
def pre_upgrade(self, context):
@@ -472,13 +472,13 @@ class Manager(periodic_task.PeriodicTasks):
# Log related
#############
def guest_log_list(self, context):
- LOG.info(_("Getting list of guest logs."))
+ LOG.info("Getting list of guest logs.")
self.guest_log_context = context
gl_cache = self.guest_log_cache
result = filter(None, [gl_cache[log_name].show()
if gl_cache[log_name].exposed else None
for log_name in gl_cache.keys()])
- LOG.info(_("Returning list of logs: %s"), result)
+ LOG.info("Returning list of logs: %s", result)
return result
def guest_log_action(self, context, log_name, enable, disable,
@@ -489,9 +489,9 @@ class Manager(periodic_task.PeriodicTasks):
# Enable if we are publishing, unless told to disable
if publish and not disable:
enable = True
- LOG.info(_("Processing guest log '%(log)s' "
- "(enable=%(en)s, disable=%(dis)s, "
- "publish=%(pub)s, discard=%(disc)s)."),
+ LOG.info("Processing guest log '%(log)s' "
+ "(enable=%(en)s, disable=%(dis)s, "
+ "publish=%(pub)s, discard=%(disc)s).",
{'log': log_name, 'en': enable, 'dis': disable,
'pub': publish, 'disc': discard})
self.guest_log_context = context
@@ -522,7 +522,7 @@ class Manager(periodic_task.PeriodicTasks):
log_details = gl_cache[log_name].discard_log()
if publish:
log_details = gl_cache[log_name].publish_log()
- LOG.info(_("Details for log '%(log)s': %(det)s"),
+ LOG.info("Details for log '%(log)s': %(det)s",
{'log': log_name, 'det': log_details})
return log_details
@@ -561,11 +561,14 @@ class Manager(periodic_task.PeriodicTasks):
gl_def.get(self.GUEST_LOG_SECTION_LABEL),
restart_required)
else:
- msg = (_("%(verb)s log '%(log)s' not supported - "
- "no configuration manager defined!") %
- {'verb': verb, 'log': log_name})
- LOG.error(msg)
- raise exception.GuestError(original_message=msg)
+ log_fmt = ("%(verb)s log '%(log)s' not supported - "
+ "no configuration manager defined!")
+ exc_fmt = _("%(verb)s log '%(log)s' not supported - "
+ "no configuration manager defined!")
+ msg_content = {'verb': verb, 'log': log_name}
+ LOG.error(log_fmt, msg_content)
+ raise exception.GuestError(
+ original_message=(exc_fmt % msg_content))
return restart_required
@@ -642,14 +645,14 @@ class Manager(periodic_task.PeriodicTasks):
# Module related
################
def module_list(self, context, include_contents=False):
- LOG.info(_("Getting list of modules."))
+ LOG.info("Getting list of modules.")
results = module_manager.ModuleManager.read_module_results(
is_admin=context.is_admin, include_contents=include_contents)
- LOG.info(_("Returning list of modules: %s"), results)
+ LOG.info("Returning list of modules: %s", results)
return results
def module_apply(self, context, modules=None):
- LOG.info(_("Applying modules."))
+ LOG.info("Applying modules.")
results = []
modules = [data['module'] for data in modules]
try:
@@ -699,11 +702,11 @@ class Manager(periodic_task.PeriodicTasks):
driver, module_type, name, tenant, datastore, ds_version,
contents, id, md5, auto_apply, visible, is_admin)
results.append(result)
- LOG.info(_("Returning list of modules: %s"), results)
+ LOG.info("Returning list of modules: %s", results)
return results
def module_remove(self, context, module=None):
- LOG.info(_("Removing module."))
+ LOG.info("Removing module.")
module = module['module']
id = module.get('id', None)
module_type = module.get('type', None)
@@ -719,7 +722,7 @@ class Manager(periodic_task.PeriodicTasks):
module_type)
module_manager.ModuleManager.remove_module(
driver, module_type, id, name, datastore, ds_version)
- LOG.info(_("Deleted module: %s"), name)
+ LOG.info("Deleted module: %s", name)
###############
# Not Supported
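
Where one message feeds both a log record and a user-visible exception, the manager.py hunk above keeps two copies of the format string. A minimal sketch of that split, with a hypothetical function name and the same imports the surrounding code already uses:

    from oslo_log import log as logging

    from trove.common import exception
    from trove.common.i18n import _

    LOG = logging.getLogger(__name__)


    def fail_unsupported_log(verb, log_name):
        # Plain copy for the log, translated copy for the exception.
        log_fmt = ("%(verb)s log '%(log)s' not supported - "
                   "no configuration manager defined!")
        exc_fmt = _("%(verb)s log '%(log)s' not supported - "
                    "no configuration manager defined!")
        msg_content = {'verb': verb, 'log': log_name}
        LOG.error(log_fmt, msg_content)
        raise exception.GuestError(original_message=(exc_fmt % msg_content))
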
diff --git a/trove/guestagent/datastore/mysql/service.py b/trove/guestagent/datastore/mysql/service.py
index 743e173a..fe040e58 100644
--- a/trove/guestagent/datastore/mysql/service.py
+++ b/trove/guestagent/datastore/mysql/service.py
@@ -18,7 +18,6 @@
from oslo_log import log as logging
-from trove.common.i18n import _
from trove.guestagent.datastore.mysql_common import service
LOG = logging.getLogger(__name__)
@@ -44,7 +43,7 @@ class MySqlApp(service.BaseMySqlApp):
# DEPRECATED: Mantain for API Compatibility
def get_txn_count(self):
- LOG.info(_("Retrieving latest txn id."))
+ LOG.info("Retrieving latest txn id.")
txn_count = 0
with self.local_sql_client(self.get_engine()) as client:
result = client.execute('SELECT @@global.gtid_executed').first()
@@ -81,11 +80,11 @@ class MySqlApp(service.BaseMySqlApp):
return master_UUID, int(last_txn_id)
def get_latest_txn_id(self):
- LOG.info(_("Retrieving latest txn id."))
+ LOG.info("Retrieving latest txn id.")
return self._get_gtid_executed()
def wait_for_txn(self, txn):
- LOG.info(_("Waiting on txn '%s'."), txn)
+ LOG.info("Waiting on txn '%s'.", txn)
with self.local_sql_client(self.get_engine()) as client:
client.execute("SELECT WAIT_UNTIL_SQL_THREAD_AFTER_GTIDS('%s')"
% txn)
diff --git a/trove/guestagent/datastore/mysql_common/manager.py b/trove/guestagent/datastore/mysql_common/manager.py
index 78b41071..a9e57335 100644
--- a/trove/guestagent/datastore/mysql_common/manager.py
+++ b/trove/guestagent/datastore/mysql_common/manager.py
@@ -23,7 +23,6 @@ from oslo_log import log as logging
from trove.common import cfg
from trove.common import configurations
from trove.common import exception
-from trove.common.i18n import _
from trove.common import instance as rd_instance
from trove.common.notification import EndNotification
from trove.guestagent import backup
@@ -185,15 +184,15 @@ class MySqlManager(manager.Manager):
return self.mysql_admin().disable_root()
def _perform_restore(self, backup_info, context, restore_location, app):
- LOG.info(_("Restoring database from backup %s."), backup_info['id'])
+ LOG.info("Restoring database from backup %s.", backup_info['id'])
try:
backup.restore(context, backup_info, restore_location)
except Exception:
- LOG.exception(_("Error performing restore from backup %s."),
+ LOG.exception("Error performing restore from backup %s.",
backup_info['id'])
app.status.set_status(rd_instance.ServiceStatuses.FAILED)
raise
- LOG.info(_("Restored database successfully."))
+ LOG.info("Restored database successfully.")
def do_prepare(self, context, packages, databases, memory_mb, users,
device_path, mount_point, backup_info,
@@ -415,7 +414,7 @@ class MySqlManager(manager.Manager):
self._validate_slave_for_replication(context, replica_info)
self.replication.enable_as_slave(app, replica_info, slave_config)
except Exception:
- LOG.exception(_("Error enabling replication."))
+ LOG.exception("Error enabling replication.")
app.status.set_status(rd_instance.ServiceStatuses.FAILED)
raise
diff --git a/trove/guestagent/datastore/mysql_common/service.py b/trove/guestagent/datastore/mysql_common/service.py
index 72e1cb59..ec27d606 100644
--- a/trove/guestagent/datastore/mysql_common/service.py
+++ b/trove/guestagent/datastore/mysql_common/service.py
@@ -95,7 +95,7 @@ def clear_expired_password():
out, err = utils.execute("cat", secret_file,
run_as_root=True, root_helper="sudo")
except exception.ProcessExecutionError:
- LOG.exception(_("/root/.mysql_secret does not exist."))
+ LOG.exception("/root/.mysql_secret does not exist.")
return
m = re.match('# The random password set for the root user at .*: (.*)',
out)
@@ -105,7 +105,7 @@ def clear_expired_password():
"password", "", run_as_root=True,
root_helper="sudo")
except exception.ProcessExecutionError:
- LOG.exception(_("Cannot change mysql password."))
+ LOG.exception("Cannot change mysql password.")
return
operating_system.remove(secret_file, force=True, as_root=True)
LOG.debug("Expired password removed.")
@@ -149,29 +149,29 @@ class BaseMySqlAppStatus(service.BaseDbStatus):
"/usr/bin/mysqladmin",
"ping", run_as_root=True, root_helper="sudo",
log_output_on_error=True)
- LOG.info(_("MySQL Service Status is RUNNING."))
+ LOG.info("MySQL Service Status is RUNNING.")
return rd_instance.ServiceStatuses.RUNNING
except exception.ProcessExecutionError:
- LOG.exception(_("Failed to get database status."))
+ LOG.exception("Failed to get database status.")
try:
out, err = utils.execute_with_timeout("/bin/ps", "-C",
"mysqld", "h")
pid = out.split()[0]
# TODO(rnirmal): Need to create new statuses for instances
# where the mysql service is up, but unresponsive
- LOG.info(_('MySQL Service Status %(pid)s is BLOCKED.'),
+ LOG.info('MySQL Service Status %(pid)s is BLOCKED.',
{'pid': pid})
return rd_instance.ServiceStatuses.BLOCKED
except exception.ProcessExecutionError:
- LOG.exception(_("Process execution failed."))
+ LOG.exception("Process execution failed.")
mysql_args = load_mysqld_options()
pid_file = mysql_args.get('pid_file',
['/var/run/mysqld/mysqld.pid'])[0]
if os.path.exists(pid_file):
- LOG.info(_("MySQL Service Status is CRASHED."))
+ LOG.info("MySQL Service Status is CRASHED.")
return rd_instance.ServiceStatuses.CRASHED
else:
- LOG.info(_("MySQL Service Status is SHUTDOWN."))
+ LOG.info("MySQL Service Status is SHUTDOWN.")
return rd_instance.ServiceStatuses.SHUTDOWN
@@ -358,7 +358,7 @@ class BaseMySqlAdmin(object):
user = models.MySQLUser(name=username)
user.check_reserved()
except ValueError as ve:
- LOG.exception(_("Error Getting user information"))
+ LOG.exception("Error Getting user information")
err_msg = encodeutils.exception_to_unicode(ve)
raise exception.BadRequest(_("Username %(user)s is not valid"
": %(reason)s") %
@@ -395,7 +395,7 @@ class BaseMySqlAdmin(object):
mydb = models.MySQLSchema(name=database)
mydb.check_reserved()
except ValueError:
- LOG.exception(_("Error granting access"))
+ LOG.exception("Error granting access")
raise exception.BadRequest(_(
"Grant access to %s is not allowed") % database)
@@ -689,7 +689,7 @@ class BaseMySqlApp(object):
"""Prepare the guest machine with a secure
mysql server installation.
"""
- LOG.info(_("Preparing Guest as MySQL Server."))
+ LOG.info("Preparing Guest as MySQL Server.")
if not packager.pkg_is_installed(packages):
LOG.debug("Installing MySQL server.")
self._clear_mysql_config()
@@ -698,7 +698,7 @@ class BaseMySqlApp(object):
'root_password_again': ''}
packager.pkg_install(packages, pkg_opts, self.TIME_OUT)
self._create_mysql_confd_dir()
- LOG.info(_("Finished installing MySQL server."))
+ LOG.info("Finished installing MySQL server.")
self.start_mysql()
def secure(self, config_contents):
@@ -743,7 +743,7 @@ class BaseMySqlApp(object):
def secure_root(self, secure_remote_root=True):
with self.local_sql_client(self.get_engine()) as client:
- LOG.info(_("Preserving root access from restore."))
+ LOG.info("Preserving root access from restore.")
self._generate_root_password(client)
if secure_remote_root:
self._remove_remote_root_access(client)
@@ -774,7 +774,7 @@ class BaseMySqlApp(object):
utils.execute_with_timeout(self.mysql_service['cmd_enable'],
shell=True)
except KeyError:
- LOG.exception(_("Error enabling MySQL start on boot."))
+ LOG.exception("Error enabling MySQL start on boot.")
raise RuntimeError(_("Service is not discovered."))
def _disable_mysql_on_boot(self):
@@ -782,23 +782,23 @@ class BaseMySqlApp(object):
utils.execute_with_timeout(self.mysql_service['cmd_disable'],
shell=True)
except KeyError:
- LOG.exception(_("Error disabling MySQL start on boot."))
+ LOG.exception("Error disabling MySQL start on boot.")
raise RuntimeError(_("Service is not discovered."))
def stop_db(self, update_db=False, do_not_start_on_reboot=False):
- LOG.info(_("Stopping MySQL."))
+ LOG.info("Stopping MySQL.")
if do_not_start_on_reboot:
self._disable_mysql_on_boot()
try:
utils.execute_with_timeout(self.mysql_service['cmd_stop'],
shell=True)
except KeyError:
- LOG.exception(_("Error stopping MySQL."))
+ LOG.exception("Error stopping MySQL.")
raise RuntimeError(_("Service is not discovered."))
if not self.status.wait_for_real_status_to_change_to(
rd_instance.ServiceStatuses.SHUTDOWN,
self.state_change_wait_time, update_db):
- LOG.error(_("Could not stop MySQL."))
+ LOG.error("Could not stop MySQL.")
self.status.end_restart()
raise RuntimeError(_("Could not stop MySQL!"))
@@ -844,8 +844,8 @@ class BaseMySqlApp(object):
client.execute(t)
except exc.OperationalError:
output = {'key': k, 'value': byte_value}
- LOG.exception(_("Unable to set %(key)s with value "
- "%(value)s."), output)
+ LOG.exception("Unable to set %(key)s with value "
+ "%(value)s.", output)
def make_read_only(self, read_only):
with self.local_sql_client(self.get_engine()) as client:
@@ -859,7 +859,7 @@ class BaseMySqlApp(object):
current size of the files MySQL will fail to start, so we delete the
files to be safe.
"""
- LOG.info(_("Wiping ib_logfiles."))
+ LOG.info("Wiping ib_logfiles.")
for index in range(2):
try:
# On restarts, sometimes these are wiped. So it can be a race
@@ -870,14 +870,14 @@ class BaseMySqlApp(object):
% (self.get_data_dir(), index),
force=True, as_root=True)
except exception.ProcessExecutionError:
- LOG.exception(_("Could not delete logfile."))
+ LOG.exception("Could not delete logfile.")
raise
def remove_overrides(self):
self.configuration_manager.remove_user_override()
def _remove_replication_overrides(self, cnf_file):
- LOG.info(_("Removing replication configuration file."))
+ LOG.info("Removing replication configuration file.")
if os.path.exists(cnf_file):
operating_system.remove(cnf_file, as_root=True)
@@ -899,7 +899,7 @@ class BaseMySqlApp(object):
self.configuration_manager.remove_system_override(CNF_SLAVE)
def grant_replication_privilege(self, replication_user):
- LOG.info(_("Granting Replication Slave privilege."))
+ LOG.info("Granting Replication Slave privilege.")
LOG.debug("grant_replication_privilege: %s", replication_user)
@@ -931,14 +931,14 @@ class BaseMySqlApp(object):
return client.execute(sql_statement)
def start_slave(self):
- LOG.info(_("Starting slave replication."))
+ LOG.info("Starting slave replication.")
with self.local_sql_client(self.get_engine()) as client:
client.execute('START SLAVE')
self._wait_for_slave_status("ON", client, 60)
def stop_slave(self, for_failover):
replication_user = None
- LOG.info(_("Stopping slave replication."))
+ LOG.info("Stopping slave replication.")
with self.local_sql_client(self.get_engine()) as client:
result = client.execute('SHOW SLAVE STATUS')
replication_user = result.first()['Master_User']
@@ -952,7 +952,7 @@ class BaseMySqlApp(object):
}
def stop_master(self):
- LOG.info(_("Stopping replication master."))
+ LOG.info("Stopping replication master.")
with self.local_sql_client(self.get_engine()) as client:
client.execute('RESET MASTER')
@@ -967,14 +967,14 @@ class BaseMySqlApp(object):
try:
utils.poll_until(verify_slave_status, sleep_time=3,
time_out=max_time)
- LOG.info(_("Replication is now %s."), status.lower())
+ LOG.info("Replication is now %s.", status.lower())
except PollTimeOut:
raise RuntimeError(
_("Replication is not %(status)s after %(max)d seconds.") % {
'status': status.lower(), 'max': max_time})
def start_mysql(self, update_db=False, disable_on_boot=False, timeout=120):
- LOG.info(_("Starting MySQL."))
+ LOG.info("Starting MySQL.")
# This is the site of all the trouble in the restart tests.
# Essentially what happens is that mysql start fails, but does not
# die. It is then impossible to kill the original, so
@@ -999,32 +999,32 @@ class BaseMySqlApp(object):
if not self.status.wait_for_real_status_to_change_to(
rd_instance.ServiceStatuses.RUNNING,
self.state_change_wait_time, update_db):
- LOG.error(_("Start up of MySQL failed."))
+ LOG.error("Start up of MySQL failed.")
# If it won't start, but won't die either, kill it by hand so we
# don't let a rouge process wander around.
try:
utils.execute_with_timeout("sudo", "pkill", "-9", "mysql")
except exception.ProcessExecutionError:
- LOG.exception(_("Error killing stalled MySQL start command."))
+ LOG.exception("Error killing stalled MySQL start command.")
# There's nothing more we can do...
self.status.end_restart()
raise RuntimeError(_("Could not start MySQL!"))
def start_db_with_conf_changes(self, config_contents):
- LOG.info(_("Starting MySQL with conf changes."))
+ LOG.info("Starting MySQL with conf changes.")
LOG.debug("Inside the guest - Status is_running = (%s).",
self.status.is_running)
if self.status.is_running:
- LOG.error(_("Cannot execute start_db_with_conf_changes because "
- "MySQL state == %s."), self.status)
+ LOG.error("Cannot execute start_db_with_conf_changes because "
+ "MySQL state == %s.", self.status)
raise RuntimeError(_("MySQL not stopped."))
- LOG.info(_("Resetting configuration."))
+ LOG.info("Resetting configuration.")
self._reset_configuration(config_contents)
self.start_mysql(True)
def reset_configuration(self, configuration):
config_contents = configuration['config_contents']
- LOG.info(_("Resetting configuration."))
+ LOG.info("Resetting configuration.")
self._reset_configuration(config_contents)
def reset_admin_password(self, admin_password):
diff --git a/trove/guestagent/datastore/service.py b/trove/guestagent/datastore/service.py
index bedd63c3..8a2dcdb2 100644
--- a/trove/guestagent/datastore/service.py
+++ b/trove/guestagent/datastore/service.py
@@ -119,14 +119,14 @@ class BaseDbStatus(object):
final_status = instance.ServiceStatuses.INSTANCE_READY
if final_status:
- LOG.info(_("Set final status to %s."), final_status)
+ LOG.info("Set final status to %s.", final_status)
self.set_status(final_status, force=True)
else:
self._end_install_or_restart(True)
def end_restart(self):
self.restart_mode = False
- LOG.info(_("Ending restart."))
+ LOG.info("Ending restart.")
self._end_install_or_restart(False)
def _end_install_or_restart(self, force):
@@ -134,7 +134,7 @@ class BaseDbStatus(object):
Updates the database with the actual DB server status.
"""
real_status = self._get_actual_db_status()
- LOG.info(_("Current database status is '%s'."), real_status)
+ LOG.info("Current database status is '%s'.", real_status)
self.set_status(real_status, force=force)
def _get_actual_db_status(self):
@@ -184,9 +184,9 @@ class BaseDbStatus(object):
status = self._get_actual_db_status()
self.set_status(status)
else:
- LOG.info(_("DB server is not installed or is in restart mode, so "
- "for now we'll skip determining the status of DB on "
- "this instance."))
+ LOG.info("DB server is not installed or is in restart mode, so "
+ "for now we'll skip determining the status of DB on "
+ "this instance.")
def restart_db_service(self, service_candidates, timeout):
"""Restart the database.
@@ -241,13 +241,13 @@ class BaseDbStatus(object):
:raises: :class:`RuntimeError` on failure.
"""
- LOG.info(_("Starting database service."))
+ LOG.info("Starting database service.")
operating_system.start_service(service_candidates, timeout=timeout)
self.wait_for_database_service_start(timeout, update_db=update_db)
if enable_on_boot:
- LOG.info(_("Enable service auto-start on boot."))
+ LOG.info("Enable service auto-start on boot.")
operating_system.enable_service_on_boot(service_candidates)
def wait_for_database_service_start(self, timeout, update_db=False):
@@ -266,7 +266,7 @@ class BaseDbStatus(object):
instance.ServiceStatuses.RUNNING, timeout, update_db):
raise RuntimeError(_("Database failed to start."))
- LOG.info(_("Database has started successfully."))
+ LOG.info("Database has started successfully.")
def stop_db_service(self, service_candidates, timeout,
disable_on_boot=False, update_db=False):
@@ -288,7 +288,7 @@ class BaseDbStatus(object):
:raises: :class:`RuntimeError` on failure.
"""
- LOG.info(_("Stopping database service."))
+ LOG.info("Stopping database service.")
operating_system.stop_service(service_candidates, timeout=timeout)
LOG.debug("Waiting for database to shutdown.")
@@ -296,10 +296,10 @@ class BaseDbStatus(object):
instance.ServiceStatuses.SHUTDOWN, timeout, update_db):
raise RuntimeError(_("Database failed to stop."))
- LOG.info(_("Database has stopped successfully."))
+ LOG.info("Database has stopped successfully.")
if disable_on_boot:
- LOG.info(_("Disable service auto-start on boot."))
+ LOG.info("Disable service auto-start on boot.")
operating_system.disable_service_on_boot(service_candidates)
def _wait_for_database_service_status(self, status, timeout, update_db):
@@ -318,8 +318,8 @@ class BaseDbStatus(object):
"""
if not self.wait_for_real_status_to_change_to(
status, timeout, update_db):
- LOG.info(_("Service status did not change to %(status)s "
- "within the given timeout: %(timeout)ds"),
+ LOG.info("Service status did not change to %(status)s "
+ "within the given timeout: %(timeout)ds",
{'status': status, 'timeout': timeout})
LOG.debug("Attempting to cleanup stalled services.")
try:
@@ -367,9 +367,9 @@ class BaseDbStatus(object):
time.sleep(CONF.state_change_poll_time)
- LOG.error(_("Timeout while waiting for database status to change."
- "Expected state %(status)s, "
- "current state is %(actual_status)s"),
+ LOG.error("Timeout while waiting for database status to change."
+ "Expected state %(status)s, "
+ "current state is %(actual_status)s",
{"status": status, "actual_status": self.status})
return False
diff --git a/trove/guestagent/dbaas.py b/trove/guestagent/dbaas.py
index fc975efd..4399216b 100644
--- a/trove/guestagent/dbaas.py
+++ b/trove/guestagent/dbaas.py
@@ -76,7 +76,7 @@ def get_filesystem_volume_stats(fs_path):
try:
stats = os.statvfs(fs_path)
except OSError:
- LOG.exception(_("Error getting volume stats."))
+ LOG.exception("Error getting volume stats.")
raise RuntimeError(_("Filesystem not found (%s)") % fs_path)
total = stats.f_blocks * stats.f_bsize
diff --git a/trove/guestagent/guest_log.py b/trove/guestagent/guest_log.py
index bc1cc000..b7b4ab58 100644
--- a/trove/guestagent/guest_log.py
+++ b/trove/guestagent/guest_log.py
@@ -178,7 +178,7 @@ class GuestLog(object):
self.swift_client.put_container(
container_name, headers=self._get_headers())
else:
- LOG.exception(_("Could not retrieve container '%s'"),
+ LOG.exception("Could not retrieve container '%s'",
container_name)
raise
self._container_name = container_name
@@ -230,7 +230,7 @@ class GuestLog(object):
self._name)
self._published_size = 0
else:
- LOG.exception(_("Could not get meta details for log '%s'"),
+ LOG.exception("Could not get meta details for log '%s'",
self._name)
raise
except ConnectionError as e:
diff --git a/trove/guestagent/models.py b/trove/guestagent/models.py
index 4e4a981d..b81df0a0 100644
--- a/trove/guestagent/models.py
+++ b/trove/guestagent/models.py
@@ -19,7 +19,6 @@ from oslo_log import log as logging
from trove.common import cfg
from trove.common import exception
-from trove.common.i18n import _
from trove.common import timeutils
from trove.common import utils
from trove.db import get_db_api
@@ -83,7 +82,7 @@ class AgentHeartBeat(dbmodels.DatabaseModelBase):
return cls.find_by(instance_id=instance_id)
except exception.NotFound:
- LOG.exception(_("Error finding instance %s"), instance_id)
+ LOG.exception("Error finding instance %s", instance_id)
raise exception.ModelNotFoundError(instance_id=instance_id)
@staticmethod
diff --git a/trove/guestagent/module/driver_manager.py b/trove/guestagent/module/driver_manager.py
index a9258793..3580d61a 100644
--- a/trove/guestagent/module/driver_manager.py
+++ b/trove/guestagent/module/driver_manager.py
@@ -31,7 +31,7 @@ class ModuleDriverManager(object):
MODULE_DRIVER_NAMESPACE = 'trove.guestagent.module.drivers'
def __init__(self):
- LOG.info(_('Initializing module driver manager.'))
+ LOG.info('Initializing module driver manager.')
self._drivers = {}
self._module_types = [mt.lower() for mt in CONF.module_types]
@@ -47,14 +47,14 @@ class ModuleDriverManager(object):
try:
manager.map(self.add_driver_extension)
except stevedore.exception.NoMatches:
- LOG.info(_("No module drivers loaded"))
+ LOG.info("No module drivers loaded")
def _check_extension(self, extension):
"""Checks for required methods in driver objects."""
driver = extension.obj
supported = False
try:
- LOG.info(_('Loading Module driver: %s'), driver.get_type())
+ LOG.info('Loading Module driver: %s', driver.get_type())
if driver.get_type() != driver.get_type().lower():
raise AttributeError(_("Driver 'type' must be lower-case"))
LOG.debug(' description: %s', driver.get_description())
@@ -68,10 +68,10 @@ class ModuleDriverManager(object):
if driver.get_type() in self._module_types:
supported = True
else:
- LOG.info(_("Driver '%s' not supported, skipping"),
+ LOG.info("Driver '%s' not supported, skipping",
driver.get_type())
except AttributeError as ex:
- LOG.exception(_("Exception loading module driver: %s"),
+ LOG.exception("Exception loading module driver: %s",
encodeutils.exception_to_unicode(ex))
return supported
@@ -83,7 +83,7 @@ class ModuleDriverManager(object):
# by calling self._check_extension(extension)
driver = extension.obj
driver_type = driver.get_type()
- LOG.info(_('Loaded module driver: %s'), driver_type)
+ LOG.info('Loaded module driver: %s', driver_type)
if driver_type in self._drivers:
raise exception.Error(_("Found duplicate driver: %s") %
diff --git a/trove/guestagent/module/drivers/module_driver.py b/trove/guestagent/module/drivers/module_driver.py
index 6ae74c74..851e7b53 100644
--- a/trove/guestagent/module/drivers/module_driver.py
+++ b/trove/guestagent/module/drivers/module_driver.py
@@ -22,7 +22,6 @@ import six
from oslo_log import log as logging
from trove.common import exception
-from trove.common.i18n import _
LOG = logging.getLogger(__name__)
@@ -168,7 +167,7 @@ def output(log_message=None, success_message=None,
fail_msg = fail_msg % message_args
except Exception:
# if there's a problem, just log it and drive on
- LOG.warning(_("Could not apply message args: %s"),
+ LOG.warning("Could not apply message args: %s",
message_args)
pass
@@ -184,7 +183,7 @@ def output(log_message=None, success_message=None,
success = True
message = success_msg
except exception.ProcessExecutionError as ex:
- message = (_("%(msg)s: %(out)s\n%(err)s") %
+ message = ("%(msg)s: %(out)s\n%(err)s" %
{'msg': fail_msg,
'out': ex.stdout,
'err': ex.stderr})
@@ -192,11 +191,11 @@ def output(log_message=None, success_message=None,
message = message.rstrip()
LOG.exception(message)
except exception.TroveError as ex:
- message = (_("%(msg)s: %(err)s") %
+ message = ("%(msg)s: %(err)s" %
{'msg': fail_msg, 'err': ex._error_string})
LOG.exception(message)
except Exception as ex:
- message = (_("%(msg)s: %(err)s") %
+ message = ("%(msg)s: %(err)s" %
{'msg': fail_msg, 'err': ex.message})
LOG.exception(message)
return success, message
diff --git a/trove/guestagent/module/drivers/new_relic_license_driver.py b/trove/guestagent/module/drivers/new_relic_license_driver.py
index c2cf1267..a8fb3e75 100644
--- a/trove/guestagent/module/drivers/new_relic_license_driver.py
+++ b/trove/guestagent/module/drivers/new_relic_license_driver.py
@@ -18,7 +18,6 @@ from datetime import date
from oslo_log import log as logging
-from trove.common.i18n import _
from trove.common import stream_codecs
from trove.common import utils
from trove.guestagent.common import operating_system
@@ -42,9 +41,9 @@ class NewRelicLicenseDriver(module_driver.ModuleDriver):
return date(2016, 4, 12)
@module_driver.output(
- log_message=_('Installing New Relic license key'),
- success_message=_('New Relic license key installed'),
- fail_message=_('New Relic license key not installed'))
+ log_message='Installing New Relic license key',
+ success_message='New Relic license key installed',
+ fail_message='New Relic license key not installed')
def apply(self, name, datastore, ds_version, data_file, admin_module):
license_key = None
data = operating_system.read_file(
@@ -68,7 +67,7 @@ class NewRelicLicenseDriver(module_driver.ModuleDriver):
cmd[-1] = cmd[-1] % license_key
utils.execute_with_timeout(*cmd, **exec_args)
except Exception:
- LOG.exception(_("Could not install license key '%s'"),
+ LOG.exception("Could not install license key '%s'",
license_key)
raise
@@ -81,13 +80,13 @@ class NewRelicLicenseDriver(module_driver.ModuleDriver):
cmd.append(command)
utils.execute_with_timeout(*cmd, **exec_args)
except Exception:
- LOG.exception(_("Could not %s New Relic server"), command)
+ LOG.exception("Could not %s New Relic server", command)
raise
@module_driver.output(
- log_message=_('Removing New Relic license key'),
- success_message=_('New Relic license key removed'),
- fail_message=_('New Relic license key not removed'))
+ log_message='Removing New Relic license key',
+ success_message='New Relic license key removed',
+ fail_message='New Relic license key not removed')
def remove(self, name, datastore, ds_version, data_file):
self._add_license_key("bad_key_that_is_exactly_40_characters_xx")
self._server_control('stop')
diff --git a/trove/guestagent/module/drivers/ping_driver.py b/trove/guestagent/module/drivers/ping_driver.py
index 7738bc8c..76f283f6 100644
--- a/trove/guestagent/module/drivers/ping_driver.py
+++ b/trove/guestagent/module/drivers/ping_driver.py
@@ -16,7 +16,6 @@
from datetime import date
-from trove.common.i18n import _
from trove.common import stream_codecs
from trove.guestagent.common import operating_system
from trove.guestagent.module.drivers import module_driver
@@ -37,8 +36,8 @@ class PingDriver(module_driver.ModuleDriver):
return date(2016, 3, 4)
@module_driver.output(
- log_message=_('Extracting %(type)s message'),
- fail_message=_('Could not extract %(type)s message'))
+ log_message='Extracting %(type)s message',
+ fail_message='Could not extract %(type)s message')
def apply(self, name, datastore, ds_version, data_file, admin_module):
data = operating_system.read_file(
data_file, codec=stream_codecs.KeyValueCodec())
@@ -48,6 +47,6 @@ class PingDriver(module_driver.ModuleDriver):
return False, 'Message not found in contents file'
@module_driver.output(
- log_message=_('Removing %(type)s module'))
+ log_message='Removing %(type)s module')
def remove(self, name, datastore, ds_version, data_file):
return True, ""
diff --git a/trove/guestagent/module/module_manager.py b/trove/guestagent/module/module_manager.py
index 569db9fb..52237334 100644
--- a/trove/guestagent/module/module_manager.py
+++ b/trove/guestagent/module/module_manager.py
@@ -66,7 +66,7 @@ class ModuleManager(object):
applied, message = driver.apply(
name, datastore, ds_version, data_file, admin_module)
except Exception as ex:
- LOG.exception(_("Could not apply module '%s'"), name)
+ LOG.exception("Could not apply module '%s'", name)
applied = False
message = ex.message
finally:
@@ -149,7 +149,7 @@ class ModuleManager(object):
result_file, codec=stream_codecs.JsonCodec())
except Exception:
if not result:
- LOG.exception(_("Could not find module result in %s"),
+ LOG.exception("Could not find module result in %s",
result_file)
raise
return result
@@ -217,7 +217,7 @@ class ModuleManager(object):
name, datastore, ds_version, contents_file)
cls.remove_module_result(module_dir)
except Exception:
- LOG.exception(_("Could not remove module '%s'"), name)
+ LOG.exception("Could not remove module '%s'", name)
raise
return removed, message
diff --git a/trove/guestagent/pkg.py b/trove/guestagent/pkg.py
index 01f50d45..06734c41 100644
--- a/trove/guestagent/pkg.py
+++ b/trove/guestagent/pkg.py
@@ -134,7 +134,7 @@ class RPMPackagerMixin(BasePackagerMixin):
utils.execute("rpm", "-e", "--nodeps", package_name,
run_as_root=True, root_helper="sudo")
except ProcessExecutionError:
- LOG.exception(_("Error removing conflict %(package)s"),
+ LOG.exception("Error removing conflict %(package)s",
package_name)
def _install(self, packages, time_out):
@@ -179,7 +179,7 @@ class RPMPackagerMixin(BasePackagerMixin):
line = matches.group()
return line
- LOG.error(_("Unexpected output from rpm command. (%(output)s)"),
+ LOG.error("Unexpected output from rpm command. (%(output)s)",
{'output': std_out})
def pkg_remove(self, package_name, time_out):
@@ -265,7 +265,7 @@ class DebianPackagerMixin(BasePackagerMixin):
utils.execute("dpkg", "--configure", "-a", run_as_root=True,
root_helper="sudo")
except ProcessExecutionError:
- LOG.exception(_("Error fixing dpkg"))
+ LOG.exception("Error fixing dpkg")
def _fix_package_selections(self, packages, config_opts):
"""
@@ -377,7 +377,7 @@ class DebianPackagerMixin(BasePackagerMixin):
utils.execute("apt-get", "update", run_as_root=True,
root_helper="sudo")
except ProcessExecutionError:
- LOG.exception(_("Error updating the apt sources"))
+ LOG.exception("Error updating the apt sources")
result = self._install(packages, time_out)
if result != OK:
diff --git a/trove/guestagent/strategies/backup/experimental/couchbase_impl.py b/trove/guestagent/strategies/backup/experimental/couchbase_impl.py
index dba08596..80e70cda 100644
--- a/trove/guestagent/strategies/backup/experimental/couchbase_impl.py
+++ b/trove/guestagent/strategies/backup/experimental/couchbase_impl.py
@@ -19,7 +19,6 @@ import json
from oslo_log import log as logging
from trove.common import exception
-from trove.common.i18n import _
from trove.common import utils
from trove.guestagent.common import operating_system
from trove.guestagent.datastore.experimental.couchbase import service
@@ -88,8 +87,8 @@ class CbBackup(base.BackupRunner):
if not all_memcached:
self._backup(pw)
else:
- LOG.info(_("All buckets are memcached. "
- "Skipping backup."))
+ LOG.info("All buckets are memcached. "
+ "Skipping backup.")
operating_system.move(OUTFILE, system.COUCHBASE_DUMP_DIR)
if pw != "password":
# Not default password, backup generated root password
@@ -97,7 +96,7 @@ class CbBackup(base.BackupRunner):
system.COUCHBASE_DUMP_DIR,
preserve=True, as_root=True)
except exception.ProcessExecutionError:
- LOG.exception(_("Error during pre-backup phase."))
+ LOG.exception("Error during pre-backup phase.")
raise
def _run_post_backup(self):
@@ -105,5 +104,5 @@ class CbBackup(base.BackupRunner):
for cmd in self.post_backup_commands:
utils.execute_with_timeout(*cmd)
except exception.ProcessExecutionError:
- LOG.exception(_("Error during post-backup phase."))
+ LOG.exception("Error during post-backup phase.")
raise
diff --git a/trove/guestagent/strategies/backup/experimental/db2_impl.py b/trove/guestagent/strategies/backup/experimental/db2_impl.py
index 51b434ff..f308f4d4 100644
--- a/trove/guestagent/strategies/backup/experimental/db2_impl.py
+++ b/trove/guestagent/strategies/backup/experimental/db2_impl.py
@@ -16,7 +16,6 @@
from oslo_log import log as logging
from trove.common.db import models
from trove.common import exception
-from trove.common.i18n import _
from trove.common import utils
from trove.guestagent.common import operating_system
from trove.guestagent.datastore.experimental.db2 import service
@@ -57,8 +56,8 @@ class DB2Backup(base.BackupRunner):
dbname})
size = size + int(out[0])
except exception.ProcessExecutionError:
- LOG.exception(_("An error occurred while trying to "
- "estimate backup size"))
+ LOG.exception("An error occurred while trying to "
+ "estimate backup size")
LOG.debug("Estimated size for databases: %d", size)
return size
@@ -130,8 +129,8 @@ class DB2OnlineBackup(DB2Backup):
log_size = log_size + int(out[0])
log_size = log_size * 1024
except exception.ProcessExecutionError:
- LOG.exception(_("An error occurred while trying to estimate log "
- "size"))
+ LOG.exception("An error occurred while trying to estimate log "
+ "size")
LOG.debug("Estimated log size for all databases: %d", log_size)
return log_size
@@ -139,8 +138,8 @@ class DB2OnlineBackup(DB2Backup):
try:
self.execute_backup_cmd(system.ONLINE_BACKUP_DB)
except exception.ProcessExecutionError:
- LOG.exception(_("An exception occurred while doing an online "
- "backup."))
+ LOG.exception("An exception occurred while doing an online "
+ "backup.")
self.cleanup()
raise
@@ -172,7 +171,7 @@ class DB2OfflineBackup(DB2Backup):
self.execute_backup_cmd(system.OFFLINE_BACKUP_DB)
service.run_command(system.UNQUIESCE_DB2)
except exception.ProcessExecutionError:
- LOG.exception(_("An exception occurred while doing an offline "
- "backup."))
+ LOG.exception("An exception occurred while doing an offline "
+ "backup.")
self.cleanup()
raise
diff --git a/trove/guestagent/strategies/backup/experimental/postgresql_impl.py b/trove/guestagent/strategies/backup/experimental/postgresql_impl.py
index 10050b5d..672223b7 100644
--- a/trove/guestagent/strategies/backup/experimental/postgresql_impl.py
+++ b/trove/guestagent/strategies/backup/experimental/postgresql_impl.py
@@ -161,13 +161,13 @@ class PgBaseBackup(base.BackupRunner, PgBaseBackupUtil):
# or direct retrieval from the pgsql backup commands, then something
# has gone wrong
if not self.start_segment or not self.start_wal_file:
- LOG.info(_("Unable to determine starting WAL file/segment"))
+ LOG.info("Unable to determine starting WAL file/segment")
return False
if not self.stop_segment or not self.stop_wal_file:
- LOG.info(_("Unable to determine ending WAL file/segment"))
+ LOG.info("Unable to determine ending WAL file/segment")
return False
if not self.label:
- LOG.info(_("No backup label found"))
+ LOG.info("No backup label found")
return False
return True
@@ -191,7 +191,7 @@ class PgBaseBackup(base.BackupRunner, PgBaseBackupUtil):
"label found: %(label)s",
{'base_filename': self.base_filename,
'label': metadata['label']})
- LOG.info(_("Metadata for backup: %s."), str(metadata))
+ LOG.info("Metadata for backup: %s.", str(metadata))
return metadata['label'] == self.base_filename
try:
diff --git a/trove/guestagent/strategies/backup/mysql_impl.py b/trove/guestagent/strategies/backup/mysql_impl.py
index f650d474..8c51fc7a 100644
--- a/trove/guestagent/strategies/backup/mysql_impl.py
+++ b/trove/guestagent/strategies/backup/mysql_impl.py
@@ -73,11 +73,11 @@ class InnoBackupEx(base.BackupRunner):
output = backup_log.read()
LOG.info(output)
if not output:
- LOG.error(_("Innobackupex log file empty."))
+ LOG.error("Innobackupex log file empty.")
return False
last_line = output.splitlines()[-1].strip()
if not re.search('completed OK!', last_line):
- LOG.error(_("Innobackupex did not complete successfully."))
+ LOG.error("Innobackupex did not complete successfully.")
return False
return True
@@ -91,7 +91,7 @@ class InnoBackupEx(base.BackupRunner):
match = lsn.search(output)
if match:
meta = {'lsn': match.group(1)}
- LOG.info(_("Metadata for backup: %s."), str(meta))
+ LOG.info("Metadata for backup: %s.", str(meta))
return meta
@property
diff --git a/trove/guestagent/strategies/replication/experimental/postgresql_impl.py b/trove/guestagent/strategies/replication/experimental/postgresql_impl.py
index d6fdf72c..f7d791b1 100644
--- a/trove/guestagent/strategies/replication/experimental/postgresql_impl.py
+++ b/trove/guestagent/strategies/replication/experimental/postgresql_impl.py
@@ -86,7 +86,7 @@ class PostgresqlReplicationStreaming(base.Replication):
extra_opts=self.repl_backup_extra_opts,
incremental_runner=self.repl_incr_backup_runner)
else:
- LOG.info(_("Using existing backup created for previous replica."))
+ LOG.info("Using existing backup created for previous replica.")
repl_user_info = self._get_or_create_replication_user(service)
@@ -185,7 +185,7 @@ class PostgresqlReplicationStreaming(base.Replication):
def detach_slave(self, service, for_failover):
"""Touch trigger file in to disable recovery mode"""
- LOG.info(_("Detaching slave, use trigger to disable recovery mode"))
+ LOG.info("Detaching slave, use trigger to disable recovery mode")
operating_system.write_file(TRIGGER_FILE, '')
operating_system.chown(TRIGGER_FILE, user=service.pgsql_owner,
group=service.pgsql_owner, as_root=True)
@@ -252,7 +252,7 @@ class PostgresqlReplicationStreaming(base.Replication):
def _write_standby_recovery_file(self, service, snapshot,
sslmode='prefer'):
- LOG.info(_("Snapshot data received: %s"), str(snapshot))
+ LOG.info("Snapshot data received: %s", str(snapshot))
logging_config = snapshot['log_position']
conninfo_params = \
diff --git a/trove/guestagent/strategies/replication/mysql_base.py b/trove/guestagent/strategies/replication/mysql_base.py
index eb18841e..549198a5 100644
--- a/trove/guestagent/strategies/replication/mysql_base.py
+++ b/trove/guestagent/strategies/replication/mysql_base.py
@@ -22,7 +22,6 @@ from oslo_utils import netutils
from trove.common import cfg
from trove.common.db.mysql import models
-from trove.common.i18n import _
from trove.common import utils
from trove.guestagent.backup.backupagent import BackupAgent
from trove.guestagent.datastore.mysql.service import MySqlAdmin
@@ -89,7 +88,7 @@ class MysqlReplicationBase(base.Replication):
except Exception:
retry_count += 1
if retry_count > 5:
- LOG.error(_("Replication user retry count exceeded"))
+ LOG.error("Replication user retry count exceeded")
raise
return replication_user
@@ -138,7 +137,7 @@ class MysqlReplicationBase(base.Replication):
service.restart()
self.connect_to_master(service, snapshot)
except Exception:
- LOG.exception(_("Exception enabling guest as replica"))
+ LOG.exception("Exception enabling guest as replica")
raise
def detach_slave(self, service, for_failover):
diff --git a/trove/guestagent/strategies/replication/mysql_binlog.py b/trove/guestagent/strategies/replication/mysql_binlog.py
index 7a7f043f..854cd16d 100644
--- a/trove/guestagent/strategies/replication/mysql_binlog.py
+++ b/trove/guestagent/strategies/replication/mysql_binlog.py
@@ -62,9 +62,9 @@ class MysqlBinlogReplication(mysql_base.MysqlReplicationBase):
def _read_log_position(self):
INFO_FILE = ('%s/xtrabackup_binlog_info' % MySqlApp.get_data_dir())
- LOG.info(_("Setting read permissions on %s"), INFO_FILE)
+ LOG.info("Setting read permissions on %s", INFO_FILE)
operating_system.chmod(INFO_FILE, FileMode.ADD_READ_ALL, as_root=True)
- LOG.info(_("Reading log position from %s"), INFO_FILE)
+ LOG.info("Reading log position from %s", INFO_FILE)
try:
with open(INFO_FILE, 'rb') as f:
row = next(csv.reader(f, delimiter='\t',
diff --git a/trove/guestagent/strategies/replication/mysql_gtid.py b/trove/guestagent/strategies/replication/mysql_gtid.py
index 0f171dcd..8097f1ac 100644
--- a/trove/guestagent/strategies/replication/mysql_gtid.py
+++ b/trove/guestagent/strategies/replication/mysql_gtid.py
@@ -66,9 +66,9 @@ class MysqlGTIDReplication(mysql_base.MysqlReplicationBase):
def _read_last_master_gtid(self):
INFO_FILE = ('%s/xtrabackup_binlog_info' % MySqlApp.get_data_dir())
- LOG.info(_("Setting read permissions on %s"), INFO_FILE)
+ LOG.info("Setting read permissions on %s", INFO_FILE)
operating_system.chmod(INFO_FILE, FileMode.ADD_READ_ALL, as_root=True)
- LOG.info(_("Reading last master GTID from %s"), INFO_FILE)
+ LOG.info("Reading last master GTID from %s", INFO_FILE)
try:
with open(INFO_FILE, 'rb') as f:
row = f.read().split('\t')
diff --git a/trove/guestagent/strategies/restore/experimental/couchbase_impl.py b/trove/guestagent/strategies/restore/experimental/couchbase_impl.py
index f2aad173..674c3d98 100644
--- a/trove/guestagent/strategies/restore/experimental/couchbase_impl.py
+++ b/trove/guestagent/strategies/restore/experimental/couchbase_impl.py
@@ -21,7 +21,6 @@ import time
from oslo_log import log as logging
from trove.common import exception
-from trove.common.i18n import _
from trove.common import utils
from trove.guestagent.common import operating_system
from trove.guestagent.datastore.experimental.couchbase import service
@@ -46,7 +45,7 @@ class CbBackup(base.RestoreRunner):
try:
operating_system.remove(system.COUCHBASE_DUMP_DIR, force=True)
except exception.ProcessExecutionError:
- LOG.exception(_("Error during pre-restore phase."))
+ LOG.exception("Error during pre-restore phase.")
raise
def post_restore(self):
@@ -191,7 +190,7 @@ class CbBackup(base.RestoreRunner):
# cbrestore fails or hangs at times:
# http://www.couchbase.com/issues/browse/MB-10832
# Retrying typically works
- LOG.exception(_("cbrestore failed. Retrying..."))
+ LOG.exception("cbrestore failed. Retrying...")
utils.execute_with_timeout(restore_cmd,
shell=True,
timeout=300)
diff --git a/trove/guestagent/strategies/restore/experimental/db2_impl.py b/trove/guestagent/strategies/restore/experimental/db2_impl.py
index 14a4a5e3..7cf2cb33 100644
--- a/trove/guestagent/strategies/restore/experimental/db2_impl.py
+++ b/trove/guestagent/strategies/restore/experimental/db2_impl.py
@@ -16,7 +16,6 @@
from oslo_log import log as logging
from trove.common import exception
-from trove.common.i18n import _
from trove.common import utils
from trove.guestagent.datastore.experimental.db2 import service
from trove.guestagent.datastore.experimental.db2 import system
@@ -47,7 +46,7 @@ class DB2Backup(base.RestoreRunner):
out, err = utils.execute_with_timeout(system.GET_DB_NAMES,
shell=True)
except exception.ProcessExecutionError:
- LOG.exception(_("Couldn't find any databases."))
+ LOG.exception("Couldn't find any databases.")
dbNames = out.split()
for dbName in dbNames:
@@ -56,7 +55,7 @@ class DB2Backup(base.RestoreRunner):
service.run_command(system.ROLL_FORWARD_DB % {'dbname':
dbName})
- LOG.info(_("Cleaning out restore location: %s."),
+ LOG.info("Cleaning out restore location: %s.",
system.DB2_BACKUP_DIR)
service.remove_db2_dir(system.DB2_BACKUP_DIR)
diff --git a/trove/guestagent/strategies/restore/experimental/postgresql_impl.py b/trove/guestagent/strategies/restore/experimental/postgresql_impl.py
index 405c9c36..f5080c73 100644
--- a/trove/guestagent/strategies/restore/experimental/postgresql_impl.py
+++ b/trove/guestagent/strategies/restore/experimental/postgresql_impl.py
@@ -20,7 +20,6 @@ from eventlet.green import subprocess
from oslo_log import log as logging
from trove.common import cfg
-from trove.common.i18n import _
from trove.common import stream_codecs
from trove.guestagent.common import operating_system
from trove.guestagent.common.operating_system import FileMode
@@ -68,7 +67,7 @@ class PgDump(base.RestoreRunner):
content_length += len(chunk)
process.stdin.close()
self._handle_errors(process)
- LOG.info(_("Restored %s bytes from stream."), content_length)
+ LOG.info("Restored %s bytes from stream.", content_length)
return content_length
@@ -119,7 +118,7 @@ class PgBaseBackup(base.RestoreRunner):
def pre_restore(self):
self.app.stop_db()
- LOG.info(_("Preparing WAL archive dir"))
+ LOG.info("Preparing WAL archive dir")
self.app.recreate_wal_archive_dir()
datadir = self.app.pgsql_data_dir
operating_system.remove(datadir, force=True, recursive=True,
@@ -178,7 +177,7 @@ class PgBaseBackupIncremental(PgBaseBackup):
metadata = self.storage.load_metadata(location, checksum)
if 'parent_location' in metadata:
- LOG.info(_("Found parent at %s"), metadata['parent_location'])
+ LOG.info("Found parent at %s", metadata['parent_location'])
parent_location = metadata['parent_location']
parent_checksum = metadata['parent_checksum']
self._incremental_restore(parent_location, parent_checksum)
@@ -187,7 +186,7 @@ class PgBaseBackupIncremental(PgBaseBackup):
else:
# For the parent base backup, revert to the default restore cmd
- LOG.info(_("Recursed back to full backup."))
+ LOG.info("Recursed back to full backup.")
super(PgBaseBackupIncremental, self).pre_restore()
cmd = self._incremental_restore_cmd(incr=False)
diff --git a/trove/guestagent/strategies/restore/experimental/redis_impl.py b/trove/guestagent/strategies/restore/experimental/redis_impl.py
index e48b43ab..14dff436 100644
--- a/trove/guestagent/strategies/restore/experimental/redis_impl.py
+++ b/trove/guestagent/strategies/restore/experimental/redis_impl.py
@@ -16,7 +16,6 @@
import os
from oslo_log import log as logging
-from trove.common.i18n import _
from trove.guestagent.common import operating_system
from trove.guestagent.common.operating_system import FileMode
from trove.guestagent.datastore.experimental.redis import service
@@ -44,7 +43,7 @@ class RedisBackup(base.RestoreRunner):
def pre_restore(self):
self.app.stop_db()
- LOG.info(_("Removing old persistence file: %s."),
+ LOG.info("Removing old persistence file: %s.",
self.restore_location)
operating_system.remove(self.restore_location, force=True,
as_root=True)
diff --git a/trove/guestagent/strategies/restore/mysql_impl.py b/trove/guestagent/strategies/restore/mysql_impl.py
index ebc5ca38..27faef4e 100644
--- a/trove/guestagent/strategies/restore/mysql_impl.py
+++ b/trove/guestagent/strategies/restore/mysql_impl.py
@@ -24,7 +24,6 @@ import pexpect
from trove.common import cfg
from trove.common import exception
-from trove.common.i18n import _
from trove.common import utils
from trove.guestagent.common import operating_system
from trove.guestagent.common.operating_system import FileMode
@@ -58,7 +57,7 @@ class MySQLRestoreMixin(object):
def mysql_is_not_running(self):
try:
utils.execute_with_timeout("/usr/bin/pgrep", "mysqld")
- LOG.info(_("MySQL is still running."))
+ LOG.info("MySQL is still running.")
return False
except exception.ProcessExecutionError:
LOG.debug("MySQL is not running.")
@@ -92,9 +91,9 @@ class MySQLRestoreMixin(object):
try:
index = child.expect(['Starting mysqld daemon'])
if index == 0:
- LOG.info(_("Starting MySQL"))
+ LOG.info("Starting MySQL")
except pexpect.TIMEOUT:
- LOG.exception(_("Got a timeout launching mysqld_safe"))
+ LOG.exception("Got a timeout launching mysqld_safe")
finally:
# There is a race condition here where we kill mysqld before
# the init file has been executed. We need to ensure mysqld is up.
@@ -110,7 +109,7 @@ class MySQLRestoreMixin(object):
raise base.RestoreError("Reset root password failed: %s"
% first_err_message)
- LOG.info(_("Root password reset successfully."))
+ LOG.info("Root password reset successfully.")
LOG.debug("Cleaning up the temp mysqld process.")
utils.execute_with_timeout("mysqladmin", "-uroot",
"--protocol=tcp", "shutdown")
@@ -206,7 +205,7 @@ class InnoBackupEx(base.RestoreRunner, MySQLRestoreMixin):
def pre_restore(self):
self.app.stop_db()
- LOG.info(_("Cleaning out restore location: %s."),
+ LOG.info("Cleaning out restore location: %s.",
self.restore_location)
operating_system.chmod(self.restore_location, FileMode.SET_FULL,
as_root=True)
@@ -215,7 +214,7 @@ class InnoBackupEx(base.RestoreRunner, MySQLRestoreMixin):
def _run_prepare(self):
LOG.debug("Running innobackupex prepare: %s.", self.prepare_cmd)
self.prep_retcode = utils.execute(self.prepare_cmd, shell=True)
- LOG.info(_("Innobackupex prepare finished successfully."))
+ LOG.info("Innobackupex prepare finished successfully.")
def post_restore(self):
self._run_prepare()
@@ -271,7 +270,7 @@ class InnoBackupExIncremental(InnoBackupEx):
prepare_cmd = self._incremental_prepare_cmd(incremental_dir)
LOG.debug("Running innobackupex prepare: %s.", prepare_cmd)
utils.execute(prepare_cmd, shell=True)
- LOG.info(_("Innobackupex prepare finished successfully."))
+ LOG.info("Innobackupex prepare finished successfully.")
def _incremental_restore(self, location, checksum):
"""Recursively apply backups from all parents.
@@ -286,8 +285,8 @@ class InnoBackupExIncremental(InnoBackupEx):
metadata = self.storage.load_metadata(location, checksum)
incremental_dir = None
if 'parent_location' in metadata:
- LOG.info(_("Restoring parent: %(parent_location)s"
- " checksum: %(parent_checksum)s."), metadata)
+ LOG.info("Restoring parent: %(parent_location)s"
+ " checksum: %(parent_checksum)s.", metadata)
parent_location = metadata['parent_location']
parent_checksum = metadata['parent_checksum']
# Restore parents recursively so backups are applied sequentially
diff --git a/trove/guestagent/volume.py b/trove/guestagent/volume.py
index 9c15f19a..20706b78 100644
--- a/trove/guestagent/volume.py
+++ b/trove/guestagent/volume.py
@@ -32,9 +32,20 @@ LOG = logging.getLogger(__name__)
CONF = cfg.CONF
-def log_and_raise(message):
- LOG.exception(message)
- raise_msg = message + _("\nExc: %s") % traceback.format_exc()
+# We removed all translation for messages destined for log files.
+# However, the H701 pep8 check only allows string literals inside _(),
+# so we cannot wrap a pre-built message variable in _(). Callers must
+# therefore pass the untranslated log format, the translated exception
+# format, and the format content separately.
+def log_and_raise(log_fmt, exc_fmt, fmt_content=None):
+ if fmt_content is not None:
+ LOG.exception(log_fmt, fmt_content)
+ raise_msg = exc_fmt % fmt_content
+ else:
+ # if fmt_content is not provided, log_fmt and
+ # exc_fmt are just plain string messages
+ LOG.exception(log_fmt)
+ raise_msg = exc_fmt
+ raise_msg += _("\nExc: %s") % traceback.format_exc()
raise exception.GuestError(original_message=raise_msg)
@@ -59,8 +70,9 @@ class VolumeDevice(object):
"--sparse", source_dir, target_dir,
run_as_root=True, root_helper="sudo")
except exception.ProcessExecutionError:
- msg = _("Could not migrate data.")
- log_and_raise(msg)
+ log_msg = "Could not migrate data."
+ exc_msg = _("Could not migrate date.")
+ log_and_raise(log_msg, exc_msg)
self.unmount(TMP_MOUNT_POINT)
def _check_device_exists(self):
@@ -78,8 +90,9 @@ class VolumeDevice(object):
run_as_root=True, root_helper="sudo",
attempts=num_tries)
except exception.ProcessExecutionError:
- msg = _("Device '%s' is not ready.") % self.device_path
- log_and_raise(msg)
+ log_fmt = "Device '%s' is not ready."
+ exc_fmt = _("Device '%s' is not ready.")
+ log_and_raise(log_fmt, exc_fmt, self.device_path)
def _check_format(self):
"""Checks that a volume is formatted."""
@@ -95,11 +108,13 @@ class VolumeDevice(object):
except exception.ProcessExecutionError as pe:
if 'Wrong magic number' in pe.stderr:
volume_fstype = CONF.volume_fstype
- msg = _("'Device '%(dev)s' did not seem to be '%(type)s'.") % (
- {'dev': self.device_path, 'type': volume_fstype})
- log_and_raise(msg)
- msg = _("Volume '%s' was not formatted.") % self.device_path
- log_and_raise(msg)
+ log_fmt = "'Device '%(dev)s' did not seem to be '%(type)s'."
+ exc_fmt = _("'Device '%(dev)s' did not seem to be '%(type)s'.")
+ log_and_raise(log_fmt, exc_fmt, {'dev': self.device_path,
+ 'type': volume_fstype})
+ log_fmt = "Volume '%s' was not formatted."
+ exc_fmt = _("Volume '%s' was not formatted.")
+ log_and_raise(log_fmt, exc_fmt, self.device_path)
def _format(self):
"""Calls mkfs to format the device at device_path."""
@@ -114,8 +129,9 @@ class VolumeDevice(object):
run_as_root=True, root_helper="sudo",
timeout=volume_format_timeout)
except exception.ProcessExecutionError:
- msg = _("Could not format '%s'.") % self.device_path
- log_and_raise(msg)
+ log_fmt = "Could not format '%s'."
+ exc_fmt = _("Could not format '%s'.")
+ log_and_raise(log_fmt, exc_fmt, self.device_path)
def format(self):
"""Formats the device at device_path and checks the filesystem."""
@@ -161,9 +177,9 @@ class VolumeDevice(object):
utils.execute("resize2fs", self.device_path,
run_as_root=True, root_helper="sudo")
except exception.ProcessExecutionError:
- msg = _("Error resizing the filesystem with device '%s'.") % (
- self.device_path)
- log_and_raise(msg)
+ log_fmt = "Error resizing the filesystem with device '%s'."
+ exc_fmt = _("Error resizing the filesystem with device '%s'.")
+ log_and_raise(log_fmt, exc_fmt, self.device_path)
def unmount(self, mount_point):
if operating_system.is_mount(mount_point):
@@ -171,8 +187,9 @@ class VolumeDevice(object):
utils.execute("umount", mount_point,
run_as_root=True, root_helper='sudo')
except exception.ProcessExecutionError:
- msg = _("Error unmounting '%s'.") % mount_point
- log_and_raise(msg)
+ log_fmt = "Error unmounting '%s'."
+ exc_fmt = _("Error unmounting '%s'.")
+ log_and_raise(log_fmt, exc_fmt, mount_point)
else:
LOG.debug("'%s' is not a mounted fs, cannot unmount", mount_point)
@@ -180,8 +197,8 @@ class VolumeDevice(object):
# unmount if device is already mounted
mount_points = self.mount_points(device_path)
for mnt in mount_points:
- LOG.info(_("Device '%(device)s' is mounted on "
- "'%(mount_point)s'. Unmounting now."),
+ LOG.info("Device '%(device)s' is mounted on "
+ "'%(mount_point)s'. Unmounting now.",
{'device': device_path, 'mount_point': mnt})
self.unmount(mnt)
@@ -200,10 +217,12 @@ class VolumeDevice(object):
readahead_size, self.device_path,
run_as_root=True, root_helper="sudo")
except exception.ProcessExecutionError:
- msg = _("Error setting readahead size to %(size)s "
- "for device %(device)s.") % {
- 'size': readahead_size, 'device': self.device_path}
- log_and_raise(msg)
+ log_fmt = ("Error setting readahead size to %(size)s "
+ "for device %(device)s.")
+ exc_fmt = _("Error setting readahead size to %(size)s "
+ "for device %(device)s.")
+ log_and_raise(log_fmt, exc_fmt, {'size': readahead_size,
+ 'device': self.device_path})
class VolumeMountPoint(object):
@@ -228,8 +247,9 @@ class VolumeMountPoint(object):
self.device_path, self.mount_point,
run_as_root=True, root_helper="sudo")
except exception.ProcessExecutionError:
- msg = _("Could not mount '%s'.") % self.mount_point
- log_and_raise(msg)
+ log_fmt = "Could not mount '%s'."
+ exc_fmt = _("Could not mount '%s'.")
+ log_and_raise(log_fmt, exc_fmt, self.mount_point)
def write_to_fstab(self):
fstab_line = ("%s\t%s\t%s\t%s\t0\t0" %
@@ -245,6 +265,7 @@ class VolumeMountPoint(object):
"-m", "644", tempfstab.name, "/etc/fstab",
run_as_root=True, root_helper="sudo")
except exception.ProcessExecutionError:
- msg = _("Could not add '%s' to fstab.") % self.mount_point
- log_and_raise(msg)
+ log_fmt = "Could not add '%s' to fstab."
+ exc_fmt = _("Could not add '%s' to fstab.")
+ log_and_raise(log_fmt, exc_fmt, self.mount_point)
os.remove(tempfstab.name)
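For reference, a minimal sketch (not part of the patch) of the calling convention for the reworked log_and_raise() helper above: the log format stays an untranslated literal, the exception format keeps its _() wrapper, and the optional format content is interpolated lazily into the log call but eagerly into the exception message. The _run_mkfs name below is a placeholder standing in for the real mkfs command shown in the _format() hunk.

    # Sketch only; mirrors the call sites changed in volume.py above.
    def _format(self):
        try:
            self._run_mkfs()  # placeholder for the mkfs call shown above
        except exception.ProcessExecutionError:
            log_fmt = "Could not format '%s'."     # logged as-is, never translated
            exc_fmt = _("Could not format '%s'.")  # still translated for API users
            log_and_raise(log_fmt, exc_fmt, self.device_path)

    # Roughly equivalent to:
    #     LOG.exception("Could not format '%s'.", self.device_path)
    #     raise exception.GuestError(original_message=(
    #         _("Could not format '%s'.") % self.device_path +
    #         _("\nExc: %s") % traceback.format_exc()))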
diff --git a/trove/hacking/checks.py b/trove/hacking/checks.py
index 0ca2f883..26d30ec9 100644
--- a/trove/hacking/checks.py
+++ b/trove/hacking/checks.py
@@ -14,30 +14,19 @@ import re
import pep8
-_all_log_levels = {
- 'critical': '_',
- 'error': '_',
- 'exception': '_',
- 'info': '_',
- 'reserved': '_',
- 'warning': '_',
-}
-_all_hints = set(_all_log_levels.values())
+_all_log_levels = (
+ 'critical',
+ 'debug',
+ 'error',
+ 'exception',
+ 'info',
+ 'reserved',
+ 'warning',
+)
-
-def _regex_for_level(level, hint):
- return r".*LOG\.%(level)s\(\s*((%(wrong_hints)s)\(|'|\")" % {
- 'level': level,
- 'wrong_hints': '|'.join(_all_hints - set([hint])),
- }
-
-
-_log_translation_hint = re.compile(
- '|'.join('(?:%s)' % _regex_for_level(level, hint)
- for level, hint in _all_log_levels.items()))
-
-_log_string_interpolation = re.compile(
- r".*LOG\.(error|warning|info|critical|exception|debug)\([^,]*%[^,]*[,)]")
+_translated_log = re.compile(
+ r".*LOG\.(%(levels)s)\(\s*_\(\s*('|\")" % {
+ 'levels': '|'.join(_all_log_levels)})
def _translation_is_not_expected(filename):
@@ -45,44 +34,6 @@ def _translation_is_not_expected(filename):
return any(pat in filename for pat in ["/tests/"])
-def validate_log_translations(logical_line, physical_line, filename):
- """T101 - Log messages require translation hints.
- :param logical_line: The logical line to check.
- :param physical_line: The physical line to check.
- :param filename: The file name where the logical line exists.
- :returns: None if the logical line passes the check, otherwise a tuple
- is yielded that contains the offending index in logical line and a
- message describe the check validation failure.
- """
- if _translation_is_not_expected(filename):
- return
-
- if pep8.noqa(physical_line):
- return
-
- msg = "T101: Untranslated Log message."
- if _log_translation_hint.match(logical_line):
- yield (0, msg)
-
-
-def no_translate_debug_logs(logical_line, filename):
- """T102 - Don't translate debug level logs.
- Check for 'LOG.debug(_(' and 'LOG.debug(_Lx('
- As per our translation policy,
- https://wiki.openstack.org/wiki/LoggingStandards#Log_Translation
- we shouldn't translate debug level logs.
- * This check assumes that 'LOG' is a logger.
- :param logical_line: The logical line to check.
- :param filename: The file name where the logical line exists.
- :returns: None if the logical line passes the check, otherwise a tuple
- is yielded that contains the offending index in logical line and a
- message describe the check validation failure.
- """
- for hint in _all_hints:
- if logical_line.startswith("LOG.debug(%s(" % hint):
- yield(0, "T102 Don't translate debug level logs")
-
-
def check_raised_localized_exceptions(logical_line, filename):
"""T103 - Untranslated exception message.
:param logical_line: The logical line to check.
@@ -115,6 +66,27 @@ def check_no_basestring(logical_line):
yield(0, msg)
+def no_translate_logs(logical_line, physical_line, filename):
+ """T105 - Log messages shouldn't be translated from the
+ Pike release.
+ :param logical_line: The logical line to check.
+ :param physical_line: The physical line to check.
+ :param filename: The file name where the logical line exists.
+ :returns: None if the logical line passes the check, otherwise a tuple
+ is yielded that contains the offending index in the logical line and a
+ message describing the check validation failure.
+ """
+ if _translation_is_not_expected(filename):
+ return
+
+ if pep8.noqa(physical_line):
+ return
+
+ msg = "T105: Log message shouldn't be translated."
+ if _translated_log.match(logical_line):
+ yield (0, msg)
+
+
asse_raises_regexp = re.compile(r"assertRaisesRegexp\(")
@@ -130,8 +102,7 @@ def assert_raises_regexp(logical_line):
def factory(register):
- register(validate_log_translations)
- register(no_translate_debug_logs)
register(check_raised_localized_exceptions)
register(check_no_basestring)
+ register(no_translate_logs)
register(assert_raises_regexp)
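To illustrate the new hacking rule (this example is not part of the patch), T105 matches only log calls whose message is wrapped in _(); plain log strings and translated exception messages are unaffected:

    LOG.info(_("Starting MySQL."))    # flagged: "T105: Log message shouldn't be translated."
    LOG.info("Starting MySQL.")       # passes: plain string with lazy interpolation
    raise RuntimeError(_("Could not start MySQL!"))  # passes: exception messages stay translated (T103)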
diff --git a/trove/instance/models.py b/trove/instance/models.py
index de89bd56..dbc9487d 100644
--- a/trove/instance/models.py
+++ b/trove/instance/models.py
@@ -30,7 +30,7 @@ from trove.common import cfg
from trove.common import crypto_utils as cu
from trove.common import exception
from trove.common.glance_remote import create_glance_client
-from trove.common.i18n import _, _LE, _LI, _LW
+from trove.common.i18n import _
import trove.common.instance as tr_instance
from trove.common.notification import StartNotification
from trove.common.remote import create_cinder_client
@@ -84,7 +84,7 @@ def load_server(context, instance_id, server_id, region_name):
try:
server = client.servers.get(server_id)
except nova_exceptions.NotFound:
- LOG.error(_LE("Could not find nova server_id(%s)."), server_id)
+ LOG.error("Could not find nova server_id(%s).", server_id)
raise exception.ComputeInstanceNotFound(instance_id=instance_id,
server_id=server_id)
except nova_exceptions.ClientException as e:
@@ -353,8 +353,8 @@ class SimpleInstance(object):
if self.db_info.server_status in ["ACTIVE", "SHUTDOWN", "DELETED"]:
return InstanceStatus.SHUTDOWN
else:
- LOG.error(_LE("While shutting down instance (%(instance)s): "
- "server had status (%(status)s)."),
+ LOG.error("While shutting down instance (%(instance)s): "
+ "server had status (%(status)s).",
{'instance': self.id,
'status': self.db_info.server_status})
return InstanceStatus.ERROR
@@ -514,7 +514,7 @@ def load_any_instance(context, id, load_server=True):
return load_instance(BuiltInstance, context, id,
needs_server=load_server)
except exception.UnprocessableEntity:
- LOG.warning(_LW("Could not load instance %s."), id)
+ LOG.warning("Could not load instance %s.", id)
return load_instance(FreshInstance, context, id, needs_server=False)
@@ -536,7 +536,7 @@ def load_instance(cls, context, id, needs_server=False,
db_info.server_status = server.status
db_info.addresses = server.addresses
except exception.ComputeInstanceNotFound:
- LOG.error(_LE("Could not load compute instance %s."),
+ LOG.error("Could not load compute instance %s.",
db_info.compute_instance_id)
raise exception.UnprocessableEntity("Instance %s is not ready." %
id)
@@ -637,9 +637,9 @@ class BaseInstance(SimpleInstance):
raise exception.ClusterInstanceOperationNotSupported()
if self.slaves:
- msg = _("Detach replicas before deleting replica source.")
- LOG.warning(msg)
- raise exception.ReplicaSourceDeleteForbidden(msg)
+ LOG.warning("Detach replicas before deleting replica source.")
+ raise exception.ReplicaSourceDeleteForbidden(
+ _("Detach replicas before deleting replica source."))
self.update_db(task_status=InstanceTasks.DELETING,
configuration_id=None)
@@ -710,7 +710,7 @@ class BaseInstance(SimpleInstance):
return self._volume_client
def reset_task_status(self):
- LOG.info(_LI("Resetting task status to NONE on instance %s."),
+ LOG.info("Resetting task status to NONE on instance %s.",
self.id)
self.update_db(task_status=InstanceTasks.NONE)
@@ -758,7 +758,7 @@ class BaseInstance(SimpleInstance):
return files
def reset_status(self):
- LOG.info(_LI("Resetting the status to ERROR on instance %s."),
+ LOG.info("Resetting the status to ERROR on instance %s.",
self.id)
self.reset_task_status()
@@ -977,8 +977,8 @@ class Instance(BuiltInstance):
replica_source_instance.validate_can_perform_action()
except exception.ModelNotFoundError:
LOG.exception(
- _("Cannot create a replica of %(id)s "
- "as that instance could not be found."),
+ "Cannot create a replica of %(id)s "
+ "as that instance could not be found.",
{'id': slave_of_id})
raise exception.NotFound(uuid=slave_of_id)
elif replica_count and replica_count != 1:
@@ -1113,8 +1113,8 @@ class Instance(BuiltInstance):
def resize_flavor(self, new_flavor_id):
self.validate_can_perform_action()
- LOG.info(_LI("Resizing instance %(instance_id)s flavor to "
- "%(flavor_id)s."),
+ LOG.info("Resizing instance %(instance_id)s flavor to "
+ "%(flavor_id)s.",
{'instance_id': self.id, 'flavor_id': new_flavor_id})
if self.db_info.cluster_id is not None:
raise exception.ClusterInstanceOperationNotSupported()
@@ -1149,7 +1149,7 @@ class Instance(BuiltInstance):
def resize_volume(self, new_size):
def _resize_resources():
self.validate_can_perform_action()
- LOG.info(_LI("Resizing volume of instance %s."), self.id)
+ LOG.info("Resizing volume of instance %s.", self.id)
if self.db_info.cluster_id is not None:
raise exception.ClusterInstanceOperationNotSupported()
old_size = self.volume_size
@@ -1172,7 +1172,7 @@ class Instance(BuiltInstance):
def reboot(self):
self.validate_can_perform_action()
- LOG.info(_LI("Rebooting instance %s."), self.id)
+ LOG.info("Rebooting instance %s.", self.id)
if self.db_info.cluster_id is not None and not self.context.is_admin:
raise exception.ClusterInstanceOperationNotSupported()
self.update_db(task_status=InstanceTasks.REBOOTING)
@@ -1180,7 +1180,7 @@ class Instance(BuiltInstance):
def restart(self):
self.validate_can_perform_action()
- LOG.info(_LI("Restarting datastore on instance %s."), self.id)
+ LOG.info("Restarting datastore on instance %s.", self.id)
if self.db_info.cluster_id is not None and not self.context.is_admin:
raise exception.ClusterInstanceOperationNotSupported()
# Set our local status since Nova might not change it quick enough.
@@ -1194,7 +1194,7 @@ class Instance(BuiltInstance):
def detach_replica(self):
self.validate_can_perform_action()
- LOG.info(_LI("Detaching instance %s from its replication source."),
+ LOG.info("Detaching instance %s from its replication source.",
self.id)
if not self.slave_of_id:
raise exception.BadRequest(_("Instance %s is not a replica.")
@@ -1206,7 +1206,7 @@ class Instance(BuiltInstance):
def promote_to_replica_source(self):
self.validate_can_perform_action()
- LOG.info(_LI("Promoting instance %s to replication source."), self.id)
+ LOG.info("Promoting instance %s to replication source.", self.id)
if not self.slave_of_id:
raise exception.BadRequest(_("Instance %s is not a replica.")
% self.id)
@@ -1221,7 +1221,7 @@ class Instance(BuiltInstance):
def eject_replica_source(self):
self.validate_can_perform_action()
- LOG.info(_LI("Ejecting replica source %s from its replication set."),
+ LOG.info("Ejecting replica source %s from its replication set.",
self.id)
if not self.slaves:
@@ -1244,8 +1244,8 @@ class Instance(BuiltInstance):
def migrate(self, host=None):
self.validate_can_perform_action()
- LOG.info(_LI("Migrating instance id = %(instance_id)s "
- "to host = %(host)s."),
+ LOG.info("Migrating instance id = %(instance_id)s "
+ "to host = %(host)s.",
{'instance_id': self.id, 'host': host})
self.update_db(task_status=InstanceTasks.MIGRATING)
task_api.API(self.context).migrate(self.id, host)
@@ -1270,13 +1270,18 @@ class Instance(BuiltInstance):
# action can be performed
return
- msg = (_("Instance %(instance_id)s is not currently available for an "
- "action to be performed (%(status_type)s status was "
- "%(action_status)s).") % {'instance_id': self.id,
- 'status_type': status_type,
- 'action_status': status})
- LOG.error(msg)
- raise exception.UnprocessableEntity(msg)
+ log_fmt = ("Instance %(instance_id)s is not currently available for "
+ "an action to be performed (%(status_type)s status was "
+ "%(action_status)s).")
+ exc_fmt = _("Instance %(instance_id)s is not currently available for "
+ "an action to be performed (%(status_type)s status was "
+ "%(action_status)s).")
+ msg_content = {
+ 'instance_id': self.id,
+ 'status_type': status_type,
+ 'action_status': status}
+ LOG.error(log_fmt, msg_content)
+ raise exception.UnprocessableEntity(exc_fmt % msg_content)
def _validate_can_perform_assign(self):
"""
@@ -1457,9 +1462,9 @@ def create_server_list_matcher(server_list):
instance_id=instance_id, server_id=server_id)
else:
# Should never happen, but never say never.
- LOG.error(_LE("Server %(server)s for instance %(instance)s was "
- "found twice!"), {'server': server_id,
- 'instance': instance_id})
+ LOG.error("Server %(server)s for instance %(instance)s was "
+ "found twice!", {'server': server_id,
+ 'instance': instance_id})
raise exception.TroveError(uuid=instance_id)
return find_server
@@ -1553,14 +1558,14 @@ class Instances(object):
datastore_status = InstanceServiceStatus.find_by(
instance_id=db.id)
if not datastore_status.status: # This should never happen.
- LOG.error(_LE("Server status could not be read for "
- "instance id(%s)."), db.id)
+ LOG.error("Server status could not be read for "
+ "instance id(%s).", db.id)
continue
LOG.debug("Server api_status(%s).",
datastore_status.status.api_status)
except exception.ModelNotFoundError:
- LOG.error(_LE("Server status could not be read for "
- "instance id(%s)."), db.id)
+ LOG.error("Server status could not be read for "
+ "instance id(%s).", db.id)
continue
ret.append(load_instance(context, db, datastore_status,
server=server))
diff --git a/trove/instance/service.py b/trove/instance/service.py
index 3ebfe35a..9d1f4b57 100644
--- a/trove/instance/service.py
+++ b/trove/instance/service.py
@@ -22,7 +22,6 @@ import trove.common.apischema as apischema
from trove.common import cfg
from trove.common import exception
from trove.common.i18n import _
-from trove.common.i18n import _LI
from trove.common import notification
from trove.common.notification import StartNotification
from trove.common import pagination
@@ -97,8 +96,8 @@ class InstanceController(wsgi.Controller):
if key in _actions:
selected_action = _actions[key]
action_name = key
- LOG.info(_LI("Performing %(action_name)s action against "
- "instance %(instance_id)s for tenant '%(tenant_id)s'"),
+ LOG.info("Performing %(action_name)s action against "
+ "instance %(instance_id)s for tenant '%(tenant_id)s'",
{'action_name': action_name, 'instance_id': id,
'tenant_id': tenant_id})
needs_server = True
@@ -195,7 +194,7 @@ class InstanceController(wsgi.Controller):
def index(self, req, tenant_id):
"""Return all instances."""
- LOG.info(_LI("Listing database instances for tenant '%s'"), tenant_id)
+ LOG.info("Listing database instances for tenant '%s'", tenant_id)
LOG.debug("req : '%s'\n\n", req)
context = req.environ[wsgi.CONTEXT_KEY]
policy.authorize_on_tenant(context, 'instance:index')
@@ -209,7 +208,7 @@ class InstanceController(wsgi.Controller):
def backups(self, req, tenant_id, id):
"""Return all backups for the specified instance."""
- LOG.info(_LI("Listing backups for instance '%s'"),
+ LOG.info("Listing backups for instance '%s'",
id)
LOG.debug("req : '%s'\n\n", req)
context = req.environ[wsgi.CONTEXT_KEY]
@@ -225,8 +224,8 @@ class InstanceController(wsgi.Controller):
def show(self, req, tenant_id, id):
"""Return a single instance."""
- LOG.info(_LI("Showing database instance '%(instance_id)s' for tenant "
- "'%(tenant_id)s'"),
+ LOG.info("Showing database instance '%(instance_id)s' for tenant "
+ "'%(tenant_id)s'",
{'instance_id': id, 'tenant_id': tenant_id})
LOG.debug("req : '%s'\n\n", req)
@@ -239,8 +238,8 @@ class InstanceController(wsgi.Controller):
def delete(self, req, tenant_id, id):
"""Delete a single instance."""
- LOG.info(_LI("Deleting database instance '%(instance_id)s' for tenant "
- "'%(tenant_id)s'"),
+ LOG.info("Deleting database instance '%(instance_id)s' for tenant "
+ "'%(tenant_id)s'",
{'instance_id': id, 'tenant_id': tenant_id})
LOG.debug("req : '%s'\n\n", req)
context = req.environ[wsgi.CONTEXT_KEY]
@@ -264,7 +263,7 @@ class InstanceController(wsgi.Controller):
def create(self, req, body, tenant_id):
# TODO(hub-cap): turn this into middleware
- LOG.info(_LI("Creating a database instance for tenant '%s'"),
+ LOG.info("Creating a database instance for tenant '%s'",
tenant_id)
LOG.debug("req : '%s'\n\n", strutils.mask_password(req))
LOG.debug("body : '%s'\n\n", strutils.mask_password(body))
@@ -404,8 +403,8 @@ class InstanceController(wsgi.Controller):
def update(self, req, id, body, tenant_id):
"""Updates the instance to attach/detach configuration."""
- LOG.info(_LI("Updating database instance '%(instance_id)s' for tenant "
- "'%(tenant_id)s'"),
+ LOG.info("Updating database instance '%(instance_id)s' for tenant "
+ "'%(tenant_id)s'",
{'instance_id': id, 'tenant_id': tenant_id})
LOG.debug("req: %s", req)
LOG.debug("body: %s", body)
@@ -424,7 +423,7 @@ class InstanceController(wsgi.Controller):
"""
Updates the instance to set or unset one or more attributes.
"""
- LOG.info(_LI("Editing instance for tenant id %s."), tenant_id)
+ LOG.info("Editing instance for tenant id %s.", tenant_id)
LOG.debug("req: %s", strutils.mask_password(req))
LOG.debug("body: %s", strutils.mask_password(body))
context = req.environ[wsgi.CONTEXT_KEY]
@@ -451,7 +450,7 @@ class InstanceController(wsgi.Controller):
"""
Returns the default configuration template applied to the instance.
"""
- LOG.info(_LI("Getting default configuration for instance %s"), id)
+ LOG.info("Getting default configuration for instance %s", id)
context = req.environ[wsgi.CONTEXT_KEY]
instance = models.Instance.load(context, id)
self.authorize_instance_action(context, 'configuration', instance)
@@ -477,7 +476,7 @@ class InstanceController(wsgi.Controller):
def guest_log_action(self, req, body, tenant_id, id):
"""Processes a guest log."""
- LOG.info(_("Processing log for tenant %s"), tenant_id)
+ LOG.info("Processing log for tenant %s", tenant_id)
context = req.environ[wsgi.CONTEXT_KEY]
instance = models.Instance.load(context, id)
if not instance:
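All of the instance controller changes above follow one mechanical pattern: the translation marker is dropped from the log format string, the interpolation arguments keep being passed to the logger (so formatting stays lazy), and only strings that can surface to an API user keep _(). A minimal sketch of the resulting shape, using an illustrative show_instance helper and a plain dict standing in for the model layer:

    from oslo_log import log as logging

    from trove.common import exception
    from trove.common.i18n import _

    LOG = logging.getLogger(__name__)


    def show_instance(instances, instance_id, tenant_id):
        # Log messages are plain strings now; the mapping is still handed to
        # the logger, so the string is only built when INFO is enabled.
        LOG.info("Showing database instance '%(instance_id)s' for tenant "
                 "'%(tenant_id)s'",
                 {'instance_id': instance_id, 'tenant_id': tenant_id})
        instance = instances.get(instance_id)
        if instance is None:
            # Messages that may be returned to an end user keep the _()
            # marker, as before.
            raise exception.TroveError(
                _("Instance %s could not be found.") % instance_id)
        return instance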
diff --git a/trove/module/models.py b/trove/module/models.py
index 80df8cb7..b9962aa6 100644
--- a/trove/module/models.py
+++ b/trove/module/models.py
@@ -161,7 +161,7 @@ class Module(object):
datastore_version, auto_apply, visible, live_update,
priority_apply, apply_order, full_access):
if module_type.lower() not in Modules.VALID_MODULE_TYPES:
- LOG.error(_("Valid module types: %s"), Modules.VALID_MODULE_TYPES)
+ LOG.error("Valid module types: %s", Modules.VALID_MODULE_TYPES)
raise exception.ModuleTypeNotFound(module_type=module_type)
Module.validate_action(
context, 'create', tenant_id, auto_apply, visible, priority_apply,
diff --git a/trove/module/service.py b/trove/module/service.py
index fab0a957..e9145d03 100644
--- a/trove/module/service.py
+++ b/trove/module/service.py
@@ -64,7 +64,7 @@ class ModuleController(wsgi.Controller):
return wsgi.Result(view.data(), 200)
def show(self, req, tenant_id, id):
- LOG.info(_("Showing module %s."), id)
+ LOG.info("Showing module %s.", id)
context = req.environ[wsgi.CONTEXT_KEY]
module = models.Module.load(context, id)
@@ -78,7 +78,7 @@ class ModuleController(wsgi.Controller):
def create(self, req, body, tenant_id):
name = body['module']['name']
- LOG.info(_("Creating module '%s'"), name)
+ LOG.info("Creating module '%s'", name)
context = req.environ[wsgi.CONTEXT_KEY]
policy.authorize_on_tenant(context, 'module:create')
@@ -106,7 +106,7 @@ class ModuleController(wsgi.Controller):
return wsgi.Result(view_data.data(), 200)
def delete(self, req, tenant_id, id):
- LOG.info(_("Deleting module %s."), id)
+ LOG.info("Deleting module %s.", id)
context = req.environ[wsgi.CONTEXT_KEY]
module = models.Module.load(context, id)
@@ -115,7 +115,7 @@ class ModuleController(wsgi.Controller):
return wsgi.Result(None, 200)
def update(self, req, body, tenant_id, id):
- LOG.info(_("Updating module %s."), id)
+ LOG.info("Updating module %s.", id)
context = req.environ[wsgi.CONTEXT_KEY]
module = models.Module.load(context, id)
@@ -173,7 +173,7 @@ class ModuleController(wsgi.Controller):
return wsgi.Result(view_data.data(), 200)
def instances(self, req, tenant_id, id):
- LOG.info(_("Getting instances for module %s."), id)
+ LOG.info("Getting instances for module %s.", id)
context = req.environ[wsgi.CONTEXT_KEY]
@@ -206,7 +206,7 @@ class ModuleController(wsgi.Controller):
return wsgi.Result(result_list, 200)
def reapply(self, req, body, tenant_id, id):
- LOG.info(_("Reapplying module %s to all instances."), id)
+ LOG.info("Reapplying module %s to all instances.", id)
context = req.environ[wsgi.CONTEXT_KEY]
md5 = None
diff --git a/trove/network/neutron.py b/trove/network/neutron.py
index 3a5f59dc..1846f126 100644
--- a/trove/network/neutron.py
+++ b/trove/network/neutron.py
@@ -18,7 +18,6 @@ from neutronclient.common import exceptions as neutron_exceptions
from oslo_log import log as logging
from trove.common import exception
-from trove.common.i18n import _
from trove.common import remote
from trove.network import base
@@ -52,7 +51,7 @@ class NeutronDriver(base.NetworkDriver):
try:
return self.client.show_security_group(security_group=group_id)
except neutron_exceptions.NeutronClientException as e:
- LOG.exception(_('Failed to get remote security group'))
+ LOG.exception('Failed to get remote security group')
raise exception.TroveError(str(e))
def create_security_group(self, name, description):
@@ -64,14 +63,14 @@ class NeutronDriver(base.NetworkDriver):
sec_group.get('security_group', sec_group))
except neutron_exceptions.NeutronClientException as e:
- LOG.exception(_('Failed to create remote security group'))
+ LOG.exception('Failed to create remote security group')
raise exception.SecurityGroupCreationError(str(e))
def delete_security_group(self, sec_group_id):
try:
self.client.delete_security_group(security_group=sec_group_id)
except neutron_exceptions.NeutronClientException as e:
- LOG.exception(_('Failed to delete remote security group'))
+ LOG.exception('Failed to delete remote security group')
raise exception.SecurityGroupDeletionError(str(e))
def add_security_group_rule(self, sec_group_id, protocol,
@@ -96,10 +95,10 @@ class NeutronDriver(base.NetworkDriver):
except neutron_exceptions.NeutronClientException as e:
# ignore error if rule already exists
if e.status_code == 409:
- LOG.exception(_("Security group rule already exists"))
+ LOG.exception("Security group rule already exists")
else:
- LOG.exception(_('Failed to add rule to remote security '
- 'group'))
+ LOG.exception('Failed to add rule to remote security '
+ 'group')
raise exception.SecurityGroupRuleCreationError(str(e))
def delete_security_group_rule(self, sec_group_rule_id):
@@ -108,7 +107,7 @@ class NeutronDriver(base.NetworkDriver):
security_group_rule=sec_group_rule_id)
except neutron_exceptions.NeutronClientException as e:
- LOG.exception(_('Failed to delete rule to remote security group'))
+ LOG.exception('Failed to delete rule to remote security group')
raise exception.SecurityGroupRuleDeletionError(str(e))
def _convert_to_nova_security_group_format(self, security_group):
diff --git a/trove/network/nova.py b/trove/network/nova.py
index c45e9481..a66a8be4 100644
--- a/trove/network/nova.py
+++ b/trove/network/nova.py
@@ -18,7 +18,6 @@ from novaclient import exceptions as nova_exceptions
from oslo_log import log as logging
from trove.common import exception
-from trove.common.i18n import _
from trove.common import remote
from trove.network import base
@@ -39,7 +38,7 @@ class NovaNetwork(base.NetworkDriver):
try:
return self.client.security_groups.get(group_id)
except nova_exceptions.ClientException as e:
- LOG.exception(_('Failed to get remote security group'))
+ LOG.exception('Failed to get remote security group')
raise exception.TroveError(str(e))
def create_security_group(self, name, description):
@@ -48,14 +47,14 @@ class NovaNetwork(base.NetworkDriver):
name=name, description=description)
return sec_group
except nova_exceptions.ClientException as e:
- LOG.exception(_('Failed to create remote security group'))
+ LOG.exception('Failed to create remote security group')
raise exception.SecurityGroupCreationError(str(e))
def delete_security_group(self, sec_group_id):
try:
self.client.security_groups.delete(sec_group_id)
except nova_exceptions.ClientException as e:
- LOG.exception(_('Failed to delete remote security group'))
+ LOG.exception('Failed to delete remote security group')
raise exception.SecurityGroupDeletionError(str(e))
def add_security_group_rule(self, sec_group_id, protocol,
@@ -70,7 +69,7 @@ class NovaNetwork(base.NetworkDriver):
return sec_group_rule
except nova_exceptions.ClientException as e:
- LOG.exception(_('Failed to add rule to remote security group'))
+ LOG.exception('Failed to add rule to remote security group')
raise exception.SecurityGroupRuleCreationError(str(e))
def delete_security_group_rule(self, sec_group_rule_id):
@@ -78,5 +77,5 @@ class NovaNetwork(base.NetworkDriver):
self.client.security_group_rules.delete(sec_group_rule_id)
except nova_exceptions.ClientException as e:
- LOG.exception(_('Failed to delete rule to remote security group'))
+ LOG.exception('Failed to delete rule to remote security group')
raise exception.SecurityGroupRuleDeletionError(str(e))
diff --git a/trove/quota/quota.py b/trove/quota/quota.py
index ee33f886..9e1f27c5 100644
--- a/trove/quota/quota.py
+++ b/trove/quota/quota.py
@@ -21,7 +21,6 @@ from oslo_utils import importutils
import six
from trove.common import exception
-from trove.common.i18n import _
from trove.quota.models import Quota
from trove.quota.models import QuotaUsage
from trove.quota.models import Reservation
@@ -325,8 +324,8 @@ class QuotaEngine(object):
try:
self._driver.commit(reservations)
except Exception:
- LOG.exception(_("Failed to commit reservations "
- "%(reservations)s"), {'reservations': reservations})
+ LOG.exception("Failed to commit reservations "
+ "%(reservations)s", {'reservations': reservations})
def rollback(self, reservations):
"""Roll back reservations.
@@ -338,8 +337,8 @@ class QuotaEngine(object):
try:
self._driver.rollback(reservations)
except Exception:
- LOG.exception(_("Failed to roll back reservations "
- "%(reservations)s"), {'reservations': reservations})
+ LOG.exception("Failed to roll back reservations "
+ "%(reservations)s", {'reservations': reservations})
@property
def resources(self):
diff --git a/trove/taskmanager/manager.py b/trove/taskmanager/manager.py
index 4846bb26..e600e7ad 100644
--- a/trove/taskmanager/manager.py
+++ b/trove/taskmanager/manager.py
@@ -123,24 +123,30 @@ class Manager(periodic_task.PeriodicTasks):
replica.detach_replica(old_master, for_failover=True)
replica.attach_replica(master_candidate)
except exception.TroveError as ex:
- msg = (_("Unable to migrate replica %(slave)s from "
- "old replica source %(old_master)s to "
- "new source %(new_master)s on promote.") %
- {"slave": replica.id,
- "old_master": old_master.id,
- "new_master": master_candidate.id})
- LOG.exception(msg)
+ log_fmt = ("Unable to migrate replica %(slave)s from "
+ "old replica source %(old_master)s to "
+ "new source %(new_master)s on promote.")
+ exc_fmt = _("Unable to migrate replica %(slave)s from "
+ "old replica source %(old_master)s to "
+ "new source %(new_master)s on promote.")
+ msg_content = {
+ "slave": replica.id,
+ "old_master": old_master.id,
+ "new_master": master_candidate.id}
+ LOG.exception(log_fmt, msg_content)
exception_replicas.append(replica)
- error_messages += "%s (%s)\n" % (msg, ex)
+ error_messages += "%s (%s)\n" % (
+ exc_fmt % msg_content, ex)
try:
old_master.demote_replication_master()
except Exception as ex:
- msg = (_("Exception demoting old replica source %s.") %
- old_master.id)
- LOG.exception(msg)
+ log_fmt = "Exception demoting old replica source %s."
+ exc_fmt = _("Exception demoting old replica source %s.")
+ LOG.exception(log_fmt, old_master.id)
exception_replicas.append(old_master)
- error_messages += "%s (%s)\n" % (msg, ex)
+ error_messages += "%s (%s)\n" % (
+ exc_fmt % old_master.id, ex)
self._set_task_status([old_master] + replica_models,
InstanceTasks.NONE)
@@ -213,15 +219,20 @@ class Manager(periodic_task.PeriodicTasks):
replica.detach_replica(old_master, for_failover=True)
replica.attach_replica(master_candidate)
except exception.TroveError as ex:
- msg = (_("Unable to migrate replica %(slave)s from "
- "old replica source %(old_master)s to "
- "new source %(new_master)s on eject.") %
- {"slave": replica.id,
- "old_master": old_master.id,
- "new_master": master_candidate.id})
- LOG.exception(msg)
+ log_fmt = ("Unable to migrate replica %(slave)s from "
+ "old replica source %(old_master)s to "
+ "new source %(new_master)s on eject.")
+ exc_fmt = _("Unable to migrate replica %(slave)s from "
+ "old replica source %(old_master)s to "
+ "new source %(new_master)s on eject.")
+ msg_content = {
+ "slave": replica.id,
+ "old_master": old_master.id,
+ "new_master": master_candidate.id}
+ LOG.exception(log_fmt, msg_content)
exception_replicas.append(replica)
- error_messages += "%s (%s)\n" % (msg, ex)
+ error_messages += "%s (%s)\n" % (
+ exc_fmt % msg_content, ex)
self._set_task_status([old_master] + replica_models,
InstanceTasks.NONE)
@@ -321,8 +332,8 @@ class Manager(periodic_task.PeriodicTasks):
replicas.append(instance_tasks)
except Exception:
# if it's the first replica, then we shouldn't continue
- LOG.exception(_(
- "Could not create replica %(num)d of %(count)d."),
+ LOG.exception(
+ "Could not create replica %(num)d of %(count)d.",
{'num': replica_number, 'count': len(ids)})
if replica_number == 1:
raise
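The promote/eject handlers above need the same message twice: once for the log, which is now untranslated and formatted lazily by the logger, and once for the error text that is accumulated and eventually returned to the user, which stays translated and is formatted eagerly. A simplified sketch of that split over a list of replica objects (the migrate_replicas wrapper itself is illustrative):

    from oslo_log import log as logging

    from trove.common import exception
    from trove.common.i18n import _

    LOG = logging.getLogger(__name__)


    def migrate_replicas(replicas, old_master, master_candidate):
        error_messages = ""
        failed = []
        for replica in replicas:
            try:
                replica.detach_replica(old_master, for_failover=True)
                replica.attach_replica(master_candidate)
            except exception.TroveError as ex:
                # Same content, two format strings: a plain one for the log
                # and a translated one for the user-visible error text.
                log_fmt = ("Unable to migrate replica %(slave)s from "
                           "old replica source %(old_master)s to "
                           "new source %(new_master)s on promote.")
                exc_fmt = _("Unable to migrate replica %(slave)s from "
                            "old replica source %(old_master)s to "
                            "new source %(new_master)s on promote.")
                content = {"slave": replica.id,
                           "old_master": old_master.id,
                           "new_master": master_candidate.id}
                LOG.exception(log_fmt, content)   # lazy formatting
                failed.append(replica)
                error_messages += "%s (%s)\n" % (exc_fmt % content, ex)
        return failed, error_messages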
diff --git a/trove/taskmanager/models.py b/trove/taskmanager/models.py
index 5d449ad1..7ce8c424 100755
--- a/trove/taskmanager/models.py
+++ b/trove/taskmanager/models.py
@@ -94,7 +94,7 @@ class NotifyMixin(object):
datastore_manager_id = id_map[datastore_manager]
else:
datastore_manager_id = cfg.UNKNOWN_SERVICE_ID
- LOG.error(_("Datastore ID for Manager (%s) is not configured"),
+ LOG.error("Datastore ID for Manager (%s) is not configured",
datastore_manager)
return datastore_manager_id
@@ -280,14 +280,14 @@ class ClusterTasks(Cluster):
sleep_time=CONF.usage_sleep_time,
time_out=CONF.usage_timeout)
except PollTimeOut:
- LOG.exception(_("Timed out while waiting for all instances "
- "to become %s."), expected_status)
+ LOG.exception("Timed out while waiting for all instances "
+ "to become %s.", expected_status)
self.update_statuses_on_failure(cluster_id, shard_id)
return False
failed_ids = _instance_ids_with_failures(instance_ids)
if failed_ids:
- LOG.error(_("Some instances failed: %s"), failed_ids)
+ LOG.error("Some instances failed: %s", failed_ids)
self.update_statuses_on_failure(cluster_id, shard_id)
return False
@@ -310,7 +310,7 @@ class ClusterTasks(Cluster):
sleep_time=2,
time_out=CONF.cluster_delete_time_out)
except PollTimeOut:
- LOG.error(_("timeout for instances to be marked as deleted."))
+ LOG.error("timeout for instances to be marked as deleted.")
return
LOG.debug("setting cluster %s as deleted.", cluster_id)
@@ -350,10 +350,10 @@ class ClusterTasks(Cluster):
except Timeout as t:
if t is not timeout:
raise # not my timeout
- LOG.exception(_("Timeout for restarting cluster."))
+ LOG.exception("Timeout for restarting cluster.")
raise
except Exception:
- LOG.exception(_("Error restarting cluster."), cluster_id)
+            LOG.exception("Error restarting cluster %s.", cluster_id)
raise
finally:
context.notification = cluster_notification
@@ -392,11 +392,11 @@ class ClusterTasks(Cluster):
except Timeout as t:
if t is not timeout:
raise # not my timeout
- LOG.exception(_("Timeout for upgrading cluster."))
+ LOG.exception("Timeout for upgrading cluster.")
self.update_statuses_on_failure(
cluster_id, status=InstanceTasks.UPGRADING_ERROR)
except Exception:
- LOG.exception(_("Error upgrading cluster %s."), cluster_id)
+ LOG.exception("Error upgrading cluster %s.", cluster_id)
self.update_statuses_on_failure(
cluster_id, status=InstanceTasks.UPGRADING_ERROR)
finally:
@@ -417,11 +417,11 @@ class FreshInstanceTasks(FreshInstance, NotifyMixin, ConfigurationMixin):
volume_client = create_cinder_client(self.context)
volume = volume_client.volumes.get(self.volume_id)
if volume.status == "available":
- LOG.info(_("Deleting volume %(v)s for instance: %(i)s."),
+ LOG.info("Deleting volume %(v)s for instance: %(i)s.",
{'v': self.volume_id, 'i': self.id})
volume.delete()
except Exception:
- LOG.exception(_("Error deleting volume of instance %(id)s."),
+ LOG.exception("Error deleting volume of instance %(id)s.",
{'id': self.db_info.id})
LOG.debug("End _delete_resource for instance %s", self.id)
@@ -436,19 +436,19 @@ class FreshInstanceTasks(FreshInstance, NotifyMixin, ConfigurationMixin):
utils.poll_until(self._service_is_active,
sleep_time=CONF.usage_sleep_time,
time_out=timeout)
- LOG.info(_("Created instance %s successfully."), self.id)
+ LOG.info("Created instance %s successfully.", self.id)
TroveInstanceCreate(instance=self,
instance_size=flavor['ram']).notify()
except PollTimeOut as ex:
- LOG.error(_("Failed to create instance %s. "
- "Timeout waiting for instance to become active. "
- "No usage create-event was sent."), self.id)
+ LOG.error("Failed to create instance %s. "
+ "Timeout waiting for instance to become active. "
+ "No usage create-event was sent.", self.id)
self.update_statuses_on_time_out()
error_message = "%s" % ex
error_details = traceback.format_exc()
except Exception as ex:
- LOG.exception(_("Failed to send usage create-event for "
- "instance %s."), self.id)
+ LOG.exception("Failed to send usage create-event for "
+ "instance %s.", self.id)
error_message = "%s" % ex
error_details = traceback.format_exc()
finally:
@@ -466,17 +466,17 @@ class FreshInstanceTasks(FreshInstance, NotifyMixin, ConfigurationMixin):
# FreshInstanceTasks.wait_for_instance is called after
# create_instance to ensure that the proper usage event gets sent
- LOG.info(_("Creating instance %s."), self.id)
+ LOG.info("Creating instance %s.", self.id)
security_groups = None
if CONF.trove_security_groups_support:
try:
security_groups = self._create_secgroup(datastore_manager)
except Exception as e:
- msg = (_("Error creating security group for instance: %s") %
- self.id)
+ log_fmt = "Error creating security group for instance: %s"
+ exc_fmt = _("Error creating security group for instance: %s")
err = inst_models.InstanceTasks.BUILDING_ERROR_SEC_GROUP
- self._log_and_raise(e, msg, err)
+ self._log_and_raise(e, log_fmt, exc_fmt, self.id, err)
else:
LOG.debug("Successfully created security group for "
"instance: %s", self.id)
@@ -537,9 +537,10 @@ class FreshInstanceTasks(FreshInstance, NotifyMixin, ConfigurationMixin):
try:
self._create_dns_entry()
except Exception as e:
- msg = _("Error creating DNS entry for instance: %s") % self.id
+ log_fmt = "Error creating DNS entry for instance: %s"
+ exc_fmt = _("Error creating DNS entry for instance: %s")
err = inst_models.InstanceTasks.BUILDING_ERROR_DNS
- self._log_and_raise(e, msg, err)
+ self._log_and_raise(e, log_fmt, exc_fmt, self.id, err)
def attach_replication_slave(self, snapshot, flavor):
LOG.debug("Calling attach_replication_slave for %s.", self.id)
@@ -548,10 +549,10 @@ class FreshInstanceTasks(FreshInstance, NotifyMixin, ConfigurationMixin):
self.guest.attach_replication_slave(snapshot,
replica_config.config_contents)
except GuestError as e:
- msg = (_("Error attaching instance %s "
- "as replica.") % self.id)
+ log_fmt = "Error attaching instance %s as replica."
+ exc_fmt = _("Error attaching instance %s as replica.")
err = inst_models.InstanceTasks.BUILDING_ERROR_REPLICA
- self._log_and_raise(e, msg, err)
+ self._log_and_raise(e, log_fmt, exc_fmt, self.id, err)
def get_replication_master_snapshot(self, context, slave_of_id, flavor,
backup_id=None, replica_number=1):
@@ -589,10 +590,12 @@ class FreshInstanceTasks(FreshInstance, NotifyMixin, ConfigurationMixin):
db_info = DBBackup.create(**snapshot_info)
replica_backup_id = db_info.id
except InvalidModelError:
- msg = (_("Unable to create replication snapshot record "
- "for instance: %s") % self.id)
- LOG.exception(msg)
- raise BackupCreationError(msg)
+ log_fmt = ("Unable to create replication snapshot record "
+ "for instance: %s")
+ exc_fmt = _("Unable to create replication snapshot record "
+ "for instance: %s")
+ LOG.exception(log_fmt, self.id)
+ raise BackupCreationError(exc_fmt % self.id)
if backup_id:
# Look up the parent backup info or fail early if not
# found or if the user does not have access to the parent.
@@ -622,10 +625,16 @@ class FreshInstanceTasks(FreshInstance, NotifyMixin, ConfigurationMixin):
})
return snapshot
except Exception as e_create:
- msg_create = (
- _("Error creating replication snapshot from "
- "instance %(source)s for new replica %(replica)s.") %
- {'source': slave_of_id, 'replica': self.id})
+ create_log_fmt = (
+ "Error creating replication snapshot from "
+ "instance %(source)s for new replica %(replica)s.")
+ create_exc_fmt = (
+ "Error creating replication snapshot from "
+ "instance %(source)s for new replica %(replica)s.")
+ create_fmt_content = {
+ 'source': slave_of_id,
+ 'replica': self.id
+ }
err = inst_models.InstanceTasks.BUILDING_ERROR_REPLICA
# if the delete of the 'bad' backup fails, it'll mask the
# create exception, so we trap it here
@@ -634,20 +643,24 @@ class FreshInstanceTasks(FreshInstance, NotifyMixin, ConfigurationMixin):
if replica_number == 1 and backup_required:
Backup.delete(context, replica_backup_id)
except Exception as e_delete:
- LOG.error(msg_create)
+ LOG.error(create_log_fmt, create_fmt_content)
# Make sure we log any unexpected errors from the create
if not isinstance(e_create, TroveError):
LOG.exception(e_create)
- msg_delete = (
- _("An error occurred while deleting a bad "
- "replication snapshot from instance %(source)s.") %
- {'source': slave_of_id})
+ delete_log_fmt = (
+ "An error occurred while deleting a bad "
+ "replication snapshot from instance %(source)s.")
+ delete_exc_fmt = _(
+ "An error occurred while deleting a bad "
+ "replication snapshot from instance %(source)s.")
# we've already logged the create exception, so we'll raise
# the delete (otherwise the create will be logged twice)
- self._log_and_raise(e_delete, msg_delete, err)
+ self._log_and_raise(e_delete, delete_log_fmt, delete_exc_fmt,
+ {'source': slave_of_id}, err)
# the delete worked, so just log the original problem with create
- self._log_and_raise(e_create, msg_create, err)
+ self._log_and_raise(e_create, create_log_fmt, create_exc_fmt,
+ create_fmt_content, err)
def report_root_enabled(self):
mysql_models.RootHistory.create(self.context, self.id, 'root')
@@ -660,8 +673,8 @@ class FreshInstanceTasks(FreshInstance, NotifyMixin, ConfigurationMixin):
service.set_status(ServiceStatuses.
FAILED_TIMEOUT_GUESTAGENT)
service.save()
- LOG.error(_("Service status: %(status)s\n"
- "Service error description: %(desc)s"),
+ LOG.error("Service status: %(status)s\n"
+ "Service error description: %(desc)s",
{'status': ServiceStatuses.
FAILED_TIMEOUT_GUESTAGENT.api_status,
'desc': ServiceStatuses.
@@ -671,8 +684,8 @@ class FreshInstanceTasks(FreshInstance, NotifyMixin, ConfigurationMixin):
db_info.set_task_status(InstanceTasks.
BUILDING_ERROR_TIMEOUT_GA)
db_info.save()
- LOG.error(_("Trove instance status: %(action)s\n"
- "Trove instance status description: %(text)s"),
+ LOG.error("Trove instance status: %(action)s\n"
+ "Trove instance status description: %(text)s",
{'action': InstanceTasks.
BUILDING_ERROR_TIMEOUT_GA.action,
'text': InstanceTasks.
@@ -750,11 +763,11 @@ class FreshInstanceTasks(FreshInstance, NotifyMixin, ConfigurationMixin):
# Record the server ID and volume ID in case something goes wrong.
self.update_db(compute_instance_id=server.id, volume_id=volume_id)
except Exception as e:
- msg = _("Error creating server and volume for "
- "instance %s") % self.id
+ log_fmt = "Error creating server and volume for instance %s"
+ exc_fmt = _("Error creating server and volume for instance %s")
LOG.debug("End _create_server_volume for id: %s", self.id)
err = inst_models.InstanceTasks.BUILDING_ERROR_SERVER
- self._log_and_raise(e, msg, err)
+ self._log_and_raise(e, log_fmt, exc_fmt, self.id, err)
device_path = self.device_path
mount_point = CONF.get(datastore_manager).mount_point
@@ -794,9 +807,10 @@ class FreshInstanceTasks(FreshInstance, NotifyMixin, ConfigurationMixin):
# Save server ID.
self.update_db(compute_instance_id=server_id)
except Exception as e:
- msg = _("Failed to create server for instance %s") % self.id
+ log_fmt = "Failed to create server for instance %s"
+ exc_fmt = _("Failed to create server for instance %s")
err = inst_models.InstanceTasks.BUILDING_ERROR_SERVER
- self._log_and_raise(e, msg, err)
+ self._log_and_raise(e, log_fmt, exc_fmt, self.id, err)
LOG.debug("End _create_server_volume_individually for id: %s",
self.id)
return volume_info
@@ -813,9 +827,10 @@ class FreshInstanceTasks(FreshInstance, NotifyMixin, ConfigurationMixin):
volume_info = self._create_volume(
volume_size, volume_type, datastore_manager)
except Exception as e:
- msg = _("Failed to create volume for instance %s") % self.id
+ log_fmt = "Failed to create volume for instance %s"
+ exc_fmt = _("Failed to create volume for instance %s")
err = inst_models.InstanceTasks.BUILDING_ERROR_VOLUME
- self._log_and_raise(e, msg, err)
+ self._log_and_raise(e, log_fmt, exc_fmt, self.id, err)
else:
LOG.debug("device_path = %(path)s\n"
"mount_point = %(point)s",
@@ -831,14 +846,19 @@ class FreshInstanceTasks(FreshInstance, NotifyMixin, ConfigurationMixin):
}
return volume_info
- def _log_and_raise(self, exc, message, task_status):
- LOG.error(_("%(message)s\n%(exc)s\n%(trace)s"),
- {"message": message,
+    # We remove all translations for log messages except those used when
+    # raising exceptions. And we cannot use _(xxxx) instead of _("xxxx")
+    # because of the H701 pep8 check. So we pass the log format, exception
+    # format, and format content in and do the translation only if needed.
+ def _log_and_raise(self, exc, log_fmt, exc_fmt,
+ fmt_content, task_status):
+ LOG.error("%(message)s\n%(exc)s\n%(trace)s",
+ {"message": log_fmt % fmt_content,
"exc": exc,
"trace": traceback.format_exc()})
self.update_db(task_status=task_status)
exc_message = '\n%s' % exc if exc else ''
- full_message = "%s%s" % (message, exc_message)
+ full_message = "%s%s" % (exc_fmt % fmt_content, exc_message)
raise TroveError(message=full_message)
def _create_volume(self, volume_size, volume_type, datastore_manager):
@@ -961,9 +981,9 @@ class FreshInstanceTasks(FreshInstance, NotifyMixin, ConfigurationMixin):
return False
elif (server.addresses == {} and
server.status == InstanceStatus.ERROR):
- LOG.error(_("Failed to create DNS entry for instance "
- "%(instance)s. Server status was "
- "%(status)s)."),
+ LOG.error("Failed to create DNS entry for instance "
+ "%(instance)s. Server status was "
+ "%(status)s).",
{'instance': self.id, 'status': server.status})
raise TroveError(status=server.status)
@@ -1047,12 +1067,12 @@ class BuiltInstanceTasks(BuiltInstance, NotifyMixin, ConfigurationMixin):
"any resources.", self.id)
self.guest.stop_db()
except Exception:
- LOG.exception(_("Error stopping the datastore before attempting "
- "to delete instance id %s."), self.id)
+ LOG.exception("Error stopping the datastore before attempting "
+ "to delete instance id %s.", self.id)
try:
self.server.delete()
except Exception as ex:
- LOG.exception(_("Error during delete compute server %s"),
+ LOG.exception("Error during delete compute server %s",
self.server.id)
try:
dns_support = CONF.trove_dns_support
@@ -1061,12 +1081,12 @@ class BuiltInstanceTasks(BuiltInstance, NotifyMixin, ConfigurationMixin):
dns_api = create_dns_client(self.context)
dns_api.delete_instance_entry(instance_id=self.db_info.id)
except Exception as ex:
- LOG.exception(_("Error during dns entry of instance %(id)s: "
- "%(ex)s"), {'id': self.db_info.id, 'ex': ex})
+ LOG.exception("Error during dns entry of instance %(id)s: "
+ "%(ex)s", {'id': self.db_info.id, 'ex': ex})
try:
srv_grp.ServerGroup.delete(self.context, self.server_group)
except Exception:
- LOG.exception(_("Error during delete server group for %s"),
+ LOG.exception("Error during delete server group for %s",
self.id)
# Poll until the server is gone.
@@ -1075,8 +1095,8 @@ class BuiltInstanceTasks(BuiltInstance, NotifyMixin, ConfigurationMixin):
server = self.nova_client.servers.get(server_id)
if not self.server_status_matches(['SHUTDOWN', 'ACTIVE'],
server=server):
- LOG.error(_("Server %(server_id)s entered ERROR status "
- "when deleting instance %(instance_id)s!"),
+ LOG.error("Server %(server_id)s entered ERROR status "
+ "when deleting instance %(instance_id)s!",
{'server_id': server.id, 'instance_id': self.id})
return False
except nova_exceptions.NotFound:
@@ -1086,8 +1106,8 @@ class BuiltInstanceTasks(BuiltInstance, NotifyMixin, ConfigurationMixin):
utils.poll_until(server_is_finished, sleep_time=2,
time_out=CONF.server_delete_time_out)
except PollTimeOut:
- LOG.exception(_("Failed to delete instance %(instance_id)s: "
- "Timeout deleting compute server %(server_id)s"),
+ LOG.exception("Failed to delete instance %(instance_id)s: "
+ "Timeout deleting compute server %(server_id)s",
{'instance_id': self.id, 'server_id': server_id})
# If volume has been resized it must be manually removed in cinder
@@ -1097,11 +1117,11 @@ class BuiltInstanceTasks(BuiltInstance, NotifyMixin, ConfigurationMixin):
self.region_name)
volume = volume_client.volumes.get(self.volume_id)
if volume.status == "available":
- LOG.info(_("Deleting volume %(v)s for instance: %(i)s."),
+ LOG.info("Deleting volume %(v)s for instance: %(i)s.",
{'v': self.volume_id, 'i': self.id})
volume.delete()
except Exception:
- LOG.exception(_("Error deleting volume of instance %(id)s."),
+ LOG.exception("Error deleting volume of instance %(id)s.",
{'id': self.db_info.id})
TroveInstanceDelete(instance=self,
@@ -1116,30 +1136,30 @@ class BuiltInstanceTasks(BuiltInstance, NotifyMixin, ConfigurationMixin):
status.upper() for status in expected_status)
def resize_volume(self, new_size):
- LOG.info(_("Resizing volume for instance %(instance_id)s from "
- "%(old_size)s GB to %(new_size)s GB."),
+ LOG.info("Resizing volume for instance %(instance_id)s from "
+ "%(old_size)s GB to %(new_size)s GB.",
{'instance_id': self.id, 'old_size': self.volume_size,
'new_size': new_size})
action = ResizeVolumeAction(self, self.volume_size, new_size)
action.execute()
- LOG.info(_("Resized volume for instance %s successfully."), self.id)
+ LOG.info("Resized volume for instance %s successfully.", self.id)
def resize_flavor(self, old_flavor, new_flavor):
- LOG.info(_("Resizing instance %(instance_id)s from flavor "
- "%(old_flavor)s to %(new_flavor)s."),
+ LOG.info("Resizing instance %(instance_id)s from flavor "
+ "%(old_flavor)s to %(new_flavor)s.",
{'instance_id': self.id, 'old_flavor': old_flavor['id'],
'new_flavor': new_flavor['id']})
action = ResizeAction(self, old_flavor, new_flavor)
action.execute()
- LOG.info(_("Resized instance %s successfully."), self.id)
+ LOG.info("Resized instance %s successfully.", self.id)
def migrate(self, host):
- LOG.info(_("Initiating migration to host %s."), host)
+ LOG.info("Initiating migration to host %s.", host)
action = MigrateAction(self, host)
action.execute()
def create_backup(self, backup_info):
- LOG.info(_("Initiating backup for instance %s."), self.id)
+ LOG.info("Initiating backup for instance %s.", self.id)
self.guest.create_backup(backup_info)
def backup_required_for_replication(self):
@@ -1158,7 +1178,7 @@ class BuiltInstanceTasks(BuiltInstance, NotifyMixin, ConfigurationMixin):
LOG.debug("Got replication snapshot from guest successfully.")
return result
except Exception:
- LOG.exception(_("Failed to get replication snapshot from %s."),
+ LOG.exception("Failed to get replication snapshot from %s.",
self.id)
raise
@@ -1172,7 +1192,7 @@ class BuiltInstanceTasks(BuiltInstance, NotifyMixin, ConfigurationMixin):
self.update_db(slave_of_id=None)
self.slave_list = None
except (GuestError, GuestTimeout):
- LOG.exception(_("Failed to detach replica %s."), self.id)
+ LOG.exception("Failed to detach replica %s.", self.id)
raise
finally:
if not for_failover:
@@ -1188,7 +1208,7 @@ class BuiltInstanceTasks(BuiltInstance, NotifyMixin, ConfigurationMixin):
self.update_db(slave_of_id=master.id)
self.slave_list = None
except (GuestError, GuestTimeout):
- LOG.exception(_("Failed to attach replica %s."), self.id)
+ LOG.exception("Failed to attach replica %s.", self.id)
raise
def make_read_only(self, read_only):
@@ -1270,13 +1290,13 @@ class BuiltInstanceTasks(BuiltInstance, NotifyMixin, ConfigurationMixin):
self.datastore_status_matches(
rd_instance.ServiceStatuses.CRASHED)):
# We will bail if db did not get stopped or is blocked
- LOG.error(_("Cannot reboot instance. DB status is %s."),
+ LOG.error("Cannot reboot instance. DB status is %s.",
self.datastore_status.status)
return
LOG.debug("The guest service status is %s.",
self.datastore_status.status)
- LOG.info(_("Rebooting instance %s."), self.id)
+ LOG.info("Rebooting instance %s.", self.id)
self.server.reboot()
# Poll nova until instance is active
reboot_time_out = CONF.reboot_time_out
@@ -1293,41 +1313,41 @@ class BuiltInstanceTasks(BuiltInstance, NotifyMixin, ConfigurationMixin):
# Set the status to PAUSED. The guest agent will reset the status
# when the reboot completes and MySQL is running.
self.set_datastore_status_to_paused()
- LOG.info(_("Rebooted instance %s successfully."), self.id)
+ LOG.info("Rebooted instance %s successfully.", self.id)
except Exception as e:
- LOG.error(_("Failed to reboot instance %(id)s: %(e)s"),
+ LOG.error("Failed to reboot instance %(id)s: %(e)s",
{'id': self.id, 'e': str(e)})
finally:
LOG.debug("Rebooting FINALLY %s", self.id)
self.reset_task_status()
def restart(self):
- LOG.info(_("Initiating datastore restart on instance %s."), self.id)
+ LOG.info("Initiating datastore restart on instance %s.", self.id)
try:
self.guest.restart()
except GuestError:
- LOG.error(_("Failed to initiate datastore restart on instance "
- "%s."), self.id)
+ LOG.error("Failed to initiate datastore restart on instance "
+ "%s.", self.id)
finally:
self.reset_task_status()
def guest_log_list(self):
- LOG.info(_("Retrieving guest log list for instance %s."), self.id)
+ LOG.info("Retrieving guest log list for instance %s.", self.id)
try:
return self.guest.guest_log_list()
except GuestError:
- LOG.error(_("Failed to retrieve guest log list for instance "
- "%s."), self.id)
+ LOG.error("Failed to retrieve guest log list for instance "
+ "%s.", self.id)
finally:
self.reset_task_status()
def guest_log_action(self, log_name, enable, disable, publish, discard):
- LOG.info(_("Processing guest log for instance %s."), self.id)
+ LOG.info("Processing guest log for instance %s.", self.id)
try:
return self.guest.guest_log_action(log_name, enable, disable,
publish, discard)
except GuestError:
- LOG.error(_("Failed to process guest log for instance %s."),
+ LOG.error("Failed to process guest log for instance %s.",
self.id)
finally:
self.reset_task_status()
@@ -1460,7 +1480,7 @@ class BackupTasks(object):
@classmethod
def delete_backup(cls, context, backup_id):
"""Delete backup from swift."""
- LOG.info(_("Deleting backup %s."), backup_id)
+ LOG.info("Deleting backup %s.", backup_id)
backup = bkup_models.Backup.get_by_id(context, backup_id)
try:
filename = backup.filename
@@ -1473,15 +1493,15 @@ class BackupTasks(object):
# Backup already deleted in swift
backup.delete()
else:
- LOG.exception(_("Error occurred when deleting from swift. "
- "Details: %s"), e)
+ LOG.exception("Error occurred when deleting from swift. "
+ "Details: %s", e)
backup.state = bkup_models.BackupState.DELETE_FAILED
backup.save()
raise TroveError(_("Failed to delete swift object for backup "
"%s.") % backup_id)
else:
backup.delete()
- LOG.info(_("Deleted backup %s successfully."), backup_id)
+ LOG.info("Deleted backup %s successfully.", backup_id)
class ModuleTasks(object):
@@ -1490,7 +1510,7 @@ class ModuleTasks(object):
def reapply_module(cls, context, module_id, md5, include_clustered,
batch_size, batch_delay, force):
"""Reapply module."""
- LOG.info(_("Reapplying module %s."), module_id)
+ LOG.info("Reapplying module %s.", module_id)
batch_size = batch_size or CONF.module_reapply_max_batch_size
batch_delay = batch_delay or CONF.module_reapply_min_batch_delay
@@ -1528,7 +1548,7 @@ class ModuleTasks(object):
context, instance_id, modules)
reapply_count += 1
except exception.ModuleInvalid as ex:
- LOG.info(_("Skipping: %s"), ex)
+ LOG.info("Skipping: %s", ex)
skipped_count += 1
# Sleep if we've fired off too many in a row.
@@ -1549,8 +1569,8 @@ class ModuleTasks(object):
LOG.debug("Instance '%s' does not match "
"criteria, skipping reapply.", instance_id)
skipped_count += 1
- LOG.info(_("Reapplied module to %(num)d instances "
- "(skipped %(skip)d)."),
+ LOG.info("Reapplied module to %(num)d instances "
+ "(skipped %(skip)d).",
{'num': reapply_count, 'skip': skipped_count})
@@ -1571,39 +1591,39 @@ class ResizeVolumeAction(object):
return self.instance.device_path
def _fail(self, orig_func):
- LOG.exception(_("%(func)s encountered an error when "
- "attempting to resize the volume for "
- "instance %(id)s. Setting service "
- "status to failed."), {'func': orig_func.__name__,
- 'id': self.instance.id})
+ LOG.exception("%(func)s encountered an error when "
+ "attempting to resize the volume for "
+ "instance %(id)s. Setting service "
+ "status to failed.", {'func': orig_func.__name__,
+ 'id': self.instance.id})
service = InstanceServiceStatus.find_by(instance_id=self.instance.id)
service.set_status(ServiceStatuses.FAILED)
service.save()
def _recover_restart(self, orig_func):
- LOG.exception(_("%(func)s encountered an error when attempting to "
- "resize the volume for instance %(id)s. Trying to "
- "recover by restarting the "
- "guest."), {'func': orig_func.__name__,
- 'id': self.instance.id})
+ LOG.exception("%(func)s encountered an error when attempting to "
+ "resize the volume for instance %(id)s. Trying to "
+ "recover by restarting the "
+ "guest.", {'func': orig_func.__name__,
+ 'id': self.instance.id})
self.instance.restart()
def _recover_mount_restart(self, orig_func):
- LOG.exception(_("%(func)s encountered an error when attempting to "
- "resize the volume for instance %(id)s. Trying to "
- "recover by mounting the volume and then restarting "
- "the guest."), {'func': orig_func.__name__,
- 'id': self.instance.id})
+ LOG.exception("%(func)s encountered an error when attempting to "
+ "resize the volume for instance %(id)s. Trying to "
+ "recover by mounting the volume and then restarting "
+ "the guest.", {'func': orig_func.__name__,
+ 'id': self.instance.id})
self._mount_volume()
self.instance.restart()
def _recover_full(self, orig_func):
- LOG.exception(_("%(func)s encountered an error when attempting to "
- "resize the volume for instance %(id)s. Trying to "
- "recover by attaching and"
- " mounting the volume and then restarting the "
- "guest."), {'func': orig_func.__name__,
- 'id': self.instance.id})
+ LOG.exception("%(func)s encountered an error when attempting to "
+ "resize the volume for instance %(id)s. Trying to "
+ "recover by attaching and"
+ " mounting the volume and then restarting the "
+ "guest.", {'func': orig_func.__name__,
+ 'id': self.instance.id})
self._attach_volume()
self._mount_volume()
self.instance.restart()
@@ -1720,10 +1740,10 @@ class ResizeVolumeAction(object):
self.instance.update_db(volume_size=self.new_size)
except PollTimeOut:
- LOG.exception(_("Timeout trying to extend the volume %(vol_id)s "
- "for instance %(id)s"), {
- 'vol_id': self.instance.volume_id,
- 'id': self.instance.id})
+ LOG.exception("Timeout trying to extend the volume %(vol_id)s "
+ "for instance %(id)s",
+ {'vol_id': self.instance.volume_id,
+ 'id': self.instance.id})
volume = self.instance.volume_client.volumes.get(
self.instance.volume_id)
if volume.status == 'extending':
@@ -1733,10 +1753,10 @@ class ResizeVolumeAction(object):
self._recover_full(self._verify_extend)
raise
except Exception:
- LOG.exception(_("Error encountered trying to verify extend for "
- "the volume %(vol_id)s for instance %(id)s"), {
- 'vol_id': self.instance.volume_id,
- 'id': self.instance.id})
+ LOG.exception("Error encountered trying to verify extend for "
+ "the volume %(vol_id)s for instance %(id)s",
+ {'vol_id': self.instance.volume_id,
+ 'id': self.instance.id})
self._recover_full(self._verify_extend)
raise
@@ -1903,21 +1923,21 @@ class ResizeActionBase(object):
LOG.debug("Confirming nova action")
self._confirm_nova_action()
except Exception:
- LOG.exception(_("Exception during nova action."))
+ LOG.exception("Exception during nova action.")
if need_to_revert:
- LOG.error(_("Reverting action for instance %s"),
+ LOG.error("Reverting action for instance %s",
self.instance.id)
self._revert_nova_action()
self._wait_for_revert_nova_action()
if self.instance.server_status_matches(['ACTIVE']):
- LOG.error(_("Restarting datastore."))
+ LOG.error("Restarting datastore.")
self.instance.guest.restart()
else:
- LOG.error(_("Cannot restart datastore because "
- "Nova server status is not ACTIVE"))
+ LOG.error("Cannot restart datastore because "
+ "Nova server status is not ACTIVE")
- LOG.error(_("Error resizing instance %s."), self.instance.id)
+ LOG.error("Error resizing instance %s.", self.instance.id)
raise
LOG.debug("Recording success")
@@ -1978,7 +1998,7 @@ class ResizeAction(ResizeActionBase):
config = {'config_contents': config.config_contents}
self.instance.guest.reset_configuration(config)
except GuestTimeout:
- LOG.exception(_("Error sending reset_configuration call."))
+ LOG.exception("Error sending reset_configuration call.")
LOG.debug("Reverting resize.")
super(ResizeAction, self)._revert_nova_action()
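As the comment added to _log_and_raise explains, a pre-formatted message can no longer serve both purposes, and hacking forbids translating a non-literal, so callers now pass the plain log format, the translated exception format and the interpolation content separately. A condensed, self-contained sketch of the new helper as a standalone function (update_db is stubbed out here and the task status value is illustrative):

    import traceback

    from oslo_log import log as logging

    from trove.common.exception import TroveError
    from trove.common.i18n import _

    LOG = logging.getLogger(__name__)


    def log_and_raise(exc, log_fmt, exc_fmt, fmt_content, task_status,
                      update_db=lambda **kwargs: None):
        # Log untranslated, with the original exception and traceback.
        LOG.error("%(message)s\n%(exc)s\n%(trace)s",
                  {"message": log_fmt % fmt_content,
                   "exc": exc,
                   "trace": traceback.format_exc()})
        update_db(task_status=task_status)
        exc_message = '\n%s' % exc if exc else ''
        # Raise with the translated text, formatted only now.
        raise TroveError(message="%s%s" % (exc_fmt % fmt_content, exc_message))


    def create_volume_or_raise(create_volume, instance_id):
        try:
            return create_volume(instance_id)
        except Exception as e:
            log_fmt = "Failed to create volume for instance %s"
            exc_fmt = _("Failed to create volume for instance %s")
            log_and_raise(e, log_fmt, exc_fmt, instance_id,
                          task_status="BUILDING_ERROR_VOLUME")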
diff --git a/trove/tests/fakes/nova.py b/trove/tests/fakes/nova.py
index 79a57f77..46f7f2b5 100644
--- a/trove/tests/fakes/nova.py
+++ b/trove/tests/fakes/nova.py
@@ -17,7 +17,6 @@ from novaclient import exceptions as nova_exceptions
from oslo_log import log as logging
from trove.common.exception import PollTimeOut
-from trove.common.i18n import _
from trove.common import instance as rd_instance
from trove.tests.fakes.common import authorize
@@ -277,7 +276,7 @@ class FakeServers(object):
while volume.status == "BUILD":
eventlet.sleep(0.1)
if volume.status != "available":
- LOG.info(_("volume status = %s"), volume.status)
+ LOG.info("volume status = %s", volume.status)
raise nova_exceptions.ClientException("Volume was bad!")
mapping = "%s::%s:%s" % (volume.id, volume.size, 1)
block_device_mapping = {'vdb': mapping}
@@ -329,7 +328,7 @@ class FakeServers(object):
def get(self, id):
if id not in self.db:
- LOG.error(_("Couldn't find server id %(id)s, collection=%(db)s"),
+ LOG.error("Couldn't find server id %(id)s, collection=%(db)s",
{'id': id, 'db': self.db})
raise nova_exceptions.NotFound(404, "Not found")
else:
@@ -349,7 +348,7 @@ class FakeServers(object):
def schedule_delete(self, id, time_from_now):
def delete_server():
- LOG.info(_("Simulated event ended, deleting server %s."), id)
+ LOG.info("Simulated event ended, deleting server %s.", id)
del self.db[id]
eventlet.spawn_after(time_from_now, delete_server)
@@ -491,7 +490,7 @@ class FakeVolumes(object):
def get(self, id):
if id not in self.db:
- LOG.error(_("Couldn't find volume id %(id)s, collection=%(db)s"),
+ LOG.error("Couldn't find volume id %(id)s, collection=%(db)s",
{'id': id, 'db': self.db})
raise nova_exceptions.NotFound(404, "Not found")
else:
diff --git a/trove/tests/unittests/hacking/test_check.py b/trove/tests/unittests/hacking/test_check.py
index 61262818..d2874b44 100644
--- a/trove/tests/unittests/hacking/test_check.py
+++ b/trove/tests/unittests/hacking/test_check.py
@@ -34,47 +34,36 @@ class HackingTestCase(trove_testtools.TestCase):
self.assertIsNone(tc.factory(check_callable))
def test_log_translations(self):
- expected_marks = {
- 'error': '_',
- 'info': '_',
- 'warning': '_',
- 'critical': '_',
- 'exception': '_',
- }
- logs = expected_marks.keys()
- debug = "LOG.debug('OK')"
- self.assertEqual(
- 0, len(list(tc.validate_log_translations(debug, debug, 'f'))))
- for log in logs:
- bad = 'LOG.%s("Bad")' % log
+ all_log_levels = (
+ 'critical',
+ 'debug',
+ 'error',
+ 'exception',
+ 'info',
+ 'reserved',
+ 'warning',
+ )
+ for level in all_log_levels:
+ bad = 'LOG.%s(_("Bad"))' % level
self.assertEqual(
- 1, len(list(tc.validate_log_translations(bad, bad, 'f'))))
- ok = 'LOG.%s(_("OK"))' % log
+ 1, len(list(tc.no_translate_logs(bad, bad, 'f'))))
+ bad = "LOG.%s(_('Bad'))" % level
self.assertEqual(
- 0, len(list(tc.validate_log_translations(ok, ok, 'f'))))
- ok = "LOG.%s('OK') # noqa" % log
+ 1, len(list(tc.no_translate_logs(bad, bad, 'f'))))
+ ok = 'LOG.%s("OK")' % level
self.assertEqual(
- 0, len(list(tc.validate_log_translations(ok, ok, 'f'))))
- ok = "LOG.%s(variable)" % log
+ 0, len(list(tc.no_translate_logs(ok, ok, 'f'))))
+ ok = "LOG.%s(_('OK')) # noqa" % level
self.assertEqual(
- 0, len(list(tc.validate_log_translations(ok, ok, 'f'))))
- # Do not do validations in tests
- ok = 'LOG.%s("OK - unit tests")' % log
+ 0, len(list(tc.no_translate_logs(ok, ok, 'f'))))
+ ok = "LOG.%s(variable)" % level
self.assertEqual(
- 0, len(list(tc.validate_log_translations(ok, ok,
- 'f/tests/f'))))
-
- for mark in tc._all_hints:
- stmt = "LOG.%s(%s('test'))" % (log, mark)
- self.assertEqual(
- 0 if expected_marks[log] == mark else 1,
- len(list(tc.validate_log_translations(stmt, stmt, 'f'))))
-
- def test_no_translate_debug_logs(self):
- for hint in tc._all_hints:
- bad = "LOG.debug(%s('bad'))" % hint
+ 0, len(list(tc.no_translate_logs(ok, ok, 'f'))))
+ # Do not do validations in tests
+ ok = 'LOG.%s(_("OK - unit tests"))' % level
self.assertEqual(
- 1, len(list(tc.no_translate_debug_logs(bad, 'f'))))
+ 0, len(list(tc.no_translate_logs(ok, ok,
+ 'f/tests/f'))))
def test_check_localized_exception_messages(self):
f = tc.check_raised_localized_exceptions
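The rewritten test exercises a single no_translate_logs check that replaces both validate_log_translations and no_translate_debug_logs. The check itself lives in trove/hacking/checks.py and is not part of this diff, so the following is only an approximation of its shape, inferred from what the test expects: flag LOG.<level>(_(...)) for every level, honour a trailing "# noqa", and skip test code.

    import re

    # Guess at the rule's shape only; the real regex most likely enumerates
    # the log methods explicitly rather than using \w+.
    _translated_log = re.compile(r"(.)*LOG\.\w+\(\s*_\(")


    def no_translate_logs(logical_line, physical_line, filename):
        if "/tests/" in filename:
            return
        if physical_line.strip().endswith("# noqa"):
            return
        if _translated_log.match(logical_line):
            yield (0, "Log messages should not be translated!")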
diff --git a/trove/tests/util/utils.py b/trove/tests/util/utils.py
index ade0ff95..43a0dcf0 100644
--- a/trove/tests/util/utils.py
+++ b/trove/tests/util/utils.py
@@ -19,8 +19,6 @@ import time
from functools import wraps
from oslo_log import log as logging
-from trove.common.i18n import _
-
LOG = logging.getLogger(__name__)
@@ -47,9 +45,9 @@ def retry(expected_exception_cls, retries=3, delay_fun=lambda n: 3 * n):
except expected_exception_cls:
remaining_attempts -= 1
delay = delay_fun(retries - remaining_attempts)
- LOG.exception(_(
+ LOG.exception(
"Retrying in %(delay)d seconds "
- "(remaining attempts: %(remaining)d)...") %
+ "(remaining attempts: %(remaining)d)..." %
{'delay': delay, 'remaining': remaining_attempts})
time.sleep(delay)
return f(*args, **kwargs)