-rw-r--r--  releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po  244
-rw-r--r--  trove/common/clients.py                                         4
-rw-r--r--  trove/common/notification.py                                    2
-rw-r--r--  trove/common/wsgi.py                                            2
-rw-r--r--  trove/extensions/common/models.py                               2
-rw-r--r--  trove/taskmanager/manager.py                                    2
-rwxr-xr-x  trove/taskmanager/models.py                                     4
-rw-r--r--  trove/tests/unittests/common/test_context.py                   12
-rw-r--r--  trove/tests/unittests/common/test_wsgi.py                       2
-rw-r--r--  trove/tests/unittests/extensions/common/test_service.py        12
-rw-r--r--  trove/tests/unittests/taskmanager/test_models.py               10
11 files changed, 268 insertions, 28 deletions
diff --git a/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po b/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po
index 91b83c41..027222aa 100644
--- a/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po
+++ b/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po
@@ -4,11 +4,11 @@ msgid ""
msgstr ""
"Project-Id-Version: trove\n"
"Report-Msgid-Bugs-To: \n"
-"POT-Creation-Date: 2022-08-15 01:26+0000\n"
+"POT-Creation-Date: 2022-09-26 08:29+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
-"PO-Revision-Date: 2022-08-14 12:43+0000\n"
+"PO-Revision-Date: 2022-10-12 01:21+0000\n"
"Last-Translator: Andi Chandler <andi@gowling.com>\n"
"Language-Team: English (United Kingdom)\n"
"Language: en_GB\n"
@@ -42,6 +42,15 @@ msgstr "15.0.0-13"
msgid "16.0.0"
msgstr "16.0.0"
+msgid "16.0.0-6"
+msgstr "16.0.0-6"
+
+msgid "17.0.0-5"
+msgstr "17.0.0-5"
+
+msgid "18.0.0.0rc1"
+msgstr "18.0.0.0rc1"
+
msgid "5.0.0"
msgstr "5.0.0"
@@ -529,6 +538,13 @@ msgstr ""
"is not set."
msgid ""
+"Fix guest-agent.conf is not generated in trove guest vm. `Stroy 2010231 "
+"<https://storyboard.openstack.org/#!/story/2010231>`__"
+msgstr ""
+"Fix guest-agent.conf is not generated in Trove guest VM. `Stroy 2010231 "
+"<https://storyboard.openstack.org/#!/story/2010231>`__"
+
+msgid ""
"Fix race condition in cluster-show that returned erroneous not found error. "
"Bug 1643002"
msgstr ""
@@ -735,9 +751,127 @@ msgstr ""
"Improved mountpoint detection by running it as root. This prevents guests "
"that have undiscoverable mount points from failing to unmount."
+msgid ""
+"In Mitaka release, support was added for full offline backup and restore "
+"using the default circular logging. In this release, the name of the "
+"strategy for offline backup and restore was changed from DB2Backup to "
+"DB2OfflineBackup. Hence, to enable offline backups, we should set "
+"backup_strategy=DB2OfflineBackup and for online backups, "
+"backup_strategy=DB2OnlineBackup. The property backup_namespace and "
+"restore_namespace will be the same for both types of backup and restore."
+msgstr ""
+"In the Mitaka release, support was added for full offline backup and restore "
+"using the default circular logging. In this release, the name of the "
+"strategy for offline backup and restore was changed from DB2Backup to "
+"DB2OfflineBackup. Hence, to enable offline backups, we should set "
+"backup_strategy=DB2OfflineBackup and for online backups, "
+"backup_strategy=DB2OnlineBackup. The property backup_namespace and "
+"restore_namespace will be the same for both types of backup and restore."
+
+msgid ""
+"In multi-region deployment with geo-replicated Swift, the user can restore a "
+"backup in one region by manually specifying the original backup data "
+"location created in another region."
+msgstr ""
+"In a multi-region deployment with geo-replicated Swift, the user can restore "
+"a backup in one region by manually specifying the original backup data "
+"location created in another region."
+
+msgid ""
+"Increased agent_call_high_timeout config setting to 10 minutes. This "
+"configures the length of time that the taskmanager will wait for an "
+"asynchronous guest agent call to complete."
+msgstr ""
+"Increased agent_call_high_timeout config setting to 10 minutes. This "
+"configures the length of time that the taskmanager will wait for an "
+"asynchronous guest agent call to complete."
+
+msgid "Make 'default_password_length' per-datastore-property. Bug 1572230"
+msgstr "Make 'default_password_length' per-datastore-property. Bug 1572230"
+
+msgid ""
+"Make 'long query time' manageable via configuration groups (see bug "
+"1542485). Deprecate the global 'guest_log_long_query_time' option in "
+"preference of datastore-specific configurations. MySQL long_query_time "
+"Percona long_query_time Percona XtraDB Cluster long_query_time MariaDB "
+"long_query_time PostgreSQL log_min_duration_statement"
+msgstr ""
+"Make 'long query time' manageable via configuration groups (see bug "
+"1542485). Deprecate the global 'guest_log_long_query_time' option in "
+"preference of datastore-specific configurations. MySQL long_query_time "
+"Percona long_query_time Percona XtraDB Cluster long_query_time MariaDB "
+"long_query_time PostgreSQL log_min_duration_statement"
+
+msgid ""
+"Make guestagent reuse Cassandra connections to eliminate resource leaks. Bug "
+"1566946."
+msgstr ""
+"Make guestagent reuse Cassandra connections to eliminate resource leaks. Bug "
+"1566946."
+
+msgid ""
+"MariaDB allows an server to be a master and a slave simutaneously, so when "
+"migrating masters, if the old master is reactivated before attaching the "
+"other replicas to the new master, new unexpected GTIDs may be created on the "
+"old master and synced to some of the other replicas by chance, as the other "
+"replicas are still connecting to the old one by the time. After that these "
+"diverged slave will fail changing to the new master. This will be fixed by "
+"first attaching the other replicas to the new master, and then dealing with "
+"old master. Fixes #1754539"
+msgstr ""
+"MariaDB allows a server to be a master and a slave simultaneously, so when "
+"migrating masters, if the old master is reactivated before attaching the "
+"other replicas to the new master, new unexpected GTIDs may be created on the "
+"old master and synced to some of the other replicas by chance, as the other "
+"replicas are still connecting to the old one by the time. After that these "
+"diverged slaves will fail to change to the new master. This will be fixed by "
+"first attaching the other replicas to the new master, and then dealing with "
+"the old master. Fixes #1754539"
+
+msgid ""
+"MariaDB historically leveraged the mysql manager for guest support including "
+"the configuration groups implementation. With MariaDB now having its own "
+"manager class that inherits from Mysql, it needs to have validation_rules "
+"and a ConfigParser setup. Bug 1532256"
+msgstr ""
+"MariaDB historically leveraged the MySQL manager for guest support including "
+"the configuration groups implementation. With MariaDB now having its own "
+"manager class that inherits from MySQL, it needs to have validation_rules "
+"and a ConfigParser setup. Bug 1532256"
+
msgid "Mitaka Series Release Notes"
msgstr "Mitaka Series Release Notes"
+msgid ""
+"Module list/show now returns boolean values as True/False instead of 1/0. "
+"Bug 1656398"
+msgstr ""
+"Module list/show now returns boolean values as True/False instead of 1/0. "
+"Bug 1656398"
+
+msgid ""
+"Modules can now be applied in a consistent order, based on the new "
+"'priority_apply' and 'apply_order' attributes when creating them. Blueprint "
+"module-management-ordering"
+msgstr ""
+"Modules can now be applied in a consistent order, based on the new "
+"'priority_apply' and 'apply_order' attributes when creating them. Blueprint "
+"module-management-ordering"
+
+msgid ""
+"Mongo cluster grow operations were not creating instances with the provided "
+"az and nic values. These should be used if the caller provided them."
+msgstr ""
+"Mongo cluster grow operations were not creating instances with the provided "
+"az and nic values. These should be used if the caller provided them."
+
+msgid ""
+"Most of the options related to backup and restore are removed, e.g. "
+"backup_namespace, restore_namespace, backup_incremental_strategy"
+msgstr ""
+"Most of the options related to backup and restore are removed, e.g. "
+"backup_namespace, restore_namespace, backup_incremental_strategy"
+
msgid "Newton Series Release Notes"
msgstr "Newton Series Release Notes"
@@ -753,12 +887,59 @@ msgstr "Prelude"
msgid "Queens Series Release Notes"
msgstr "Queens Series Release Notes"
+msgid "Refactor the datastore guest manager code."
+msgstr "Refactor the datastore guest manager code."
+
+msgid ""
+"Remove support of creating volume from Nova. The former configuration "
+"\"use_nova_server_volume\" is not used any more, for creating volumes, "
+"cinderclient will be always used. Fixes bug #1673408."
+msgstr ""
+"Remove support for creating volume from Nova. The former configuration "
+"\"use_nova_server_volume\" is not used anymore, for creating volumes, "
+"cinderclient will be always used. Fixes bug #1673408."
+
+msgid "Remove unused 'override.config.template' files. Bug 1575852"
+msgstr "Remove unused 'override.config.template' files. Bug 1575852"
+
+msgid ""
+"Replace the deprecated 'myisam-recover' option with its newer counterpart "
+"'myisam-recover-options'."
+msgstr ""
+"Replace the deprecated 'myisam-recover' option with its newer counterpart "
+"'myisam-recover-options'."
+
msgid "Rocky Series Release Notes"
msgstr "Rocky Series Release Notes"
+msgid "Security Issues"
+msgstr "Security Issues"
+
msgid "Stein Series Release Notes"
msgstr "Stein Series Release Notes"
+msgid ""
+"Support for the new 'reapply' command. This allows a given module to be "
+"reapplied to all instances that it had previously been applied to. Bug "
+"1554903"
+msgstr ""
+"Support for the new 'reapply' command. This allows a given module to be "
+"reapplied to all instances that it had previously been applied to. Bug "
+"1554903"
+
+msgid "Support for upgrading Redis cluster."
+msgstr "Support for upgrading Redis cluster."
+
+msgid "Support for upgrading Redis instance."
+msgstr "Support for upgrading Redis instance."
+
+msgid ""
+"Support has been added for Cassandra backup and resture using the Nodetool "
+"utility."
+msgstr ""
+"Support has been added for Cassandra backup and restore using the Nodetool "
+"utility."
+
msgid "Support has been added for CouchDB Backup and Restore."
msgstr "Support has been added for CouchDB Backup and Restore."
@@ -804,6 +985,37 @@ msgstr ""
"removed in W release."
msgid ""
+"The --incremental flag for backup-create will add the ability to create "
+"incremental backup based on last full or incremental backup. If no full or "
+"incremental backup exists a new full backup will be created."
+msgstr ""
+"The --incremental flag for backup-create will add the ability to create "
+"incremental backup based on last full or incremental backup. If no full or "
+"incremental backup exists a new full backup will be created."
+
+msgid ""
+"The adds support for pxc to grow a cluster. * api and taskmanager support "
+"for shrinking a cluster * validate that the networks given are the same for "
+"each instance in the cluster. * make sure to add the existing networks on an "
+"instance in the cluster. * add new Error task for grow and shrink. * nova "
+"client version configuration changed to a string option rather than an int "
+"option because the nova microversions change nova api output. This was "
+"needed for the network interfaces on existing instances. * testing for grow "
+"and shrink cluster"
+msgstr ""
+"This adds support for pxc to grow a cluster. * API and taskmanager support "
+"for shrinking a cluster * validate that the networks given are the same for "
+"each instance in the cluster. * make sure to add the existing networks on an "
+"instance in the cluster. * add new Error task for grow and shrink. * Nova "
+"client version configuration changed to a string option rather than an int "
+"option because the Nova microversions change Nova API output. This was "
+"needed for the network interfaces on existing instances. * testing for grow "
+"and shrink cluster"
+
+msgid "The admin user is able to get backups of a specific project."
+msgstr "The admin user is able to get backups of a specific project."
+
+msgid ""
"The cloud admin is able to apply a security group to management port(with "
"purpose of communicating with control plane and other management tasks) of "
"the Trove instance, by setting the ``management_security_groups`` config "
@@ -833,6 +1045,24 @@ msgstr ""
"container inside the Trove instance."
msgid ""
+"The default value of ``[oslo_policy] policy_file`` config option has been "
+"changed from ``policy.json`` to ``policy.yaml``. Operators who are utilizing "
+"customized or previously generated static policy JSON files (which are not "
+"needed by default), should generate new policy files or convert them in YAML "
+"format. Use the `oslopolicy-convert-json-to-yaml <https://docs.openstack.org/"
+"oslo.policy/latest/cli/oslopolicy-convert-json-to-yaml.html>`_ tool to "
+"convert a JSON to YAML formatted policy file in backward compatible way."
+msgstr ""
+"The default value of ``[oslo_policy] policy_file`` config option has been "
+"changed from ``policy.json`` to ``policy.yaml``. Operators who are utilizing "
+"customized or previously generated static policy JSON files (which are not "
+"needed by default), should generate new policy files or convert them into "
+"YAML format. Use the `oslopolicy-convert-json-to-yaml <https://docs."
+"openstack.org/oslo.policy/latest/cli/oslopolicy-convert-json-to-yaml.html>`_ "
+"tool to convert a JSON to YAML formatted policy file in a backward "
+"compatible way."
+
+msgid ""
"The default value of the trove guest agent config option ``[postgresql] "
"backup_docker_image`` is changed to ``openstacktrove/db-backup-"
"postgresql:1.1.1``. There is nothing to do if the option is not configured "
@@ -878,6 +1108,13 @@ msgstr ""
"before the upgrade."
msgid ""
+"The minimum version of oslo.concurrency required has been changed from 3.5.0 "
+"to 3.7.1"
+msgstr ""
+"The minimum version of oslo.concurrency required has been changed from 3.5.0 "
+"to 3.7.1"
+
+msgid ""
"The module-instances command now returns a paginated list of instances. A --"
"count_only flag was added to the command to return a summary of the applied "
"instances based on the MD5 of the module (this is most useful for "
@@ -905,6 +1142,9 @@ msgstr ""
"The reset-status command will set the task and status of an instance to "
"ERROR after which it can be deleted."
+msgid "The support of Bionic has been removed."
+msgstr "The support of Bionic has been removed."
+
msgid ""
"The user can create backup strategy to define the configurations for "
"creating backups, e.g. the swift container to store the backup data. Users "
diff --git a/trove/common/clients.py b/trove/common/clients.py
index af24f15e..40a5136f 100644
--- a/trove/common/clients.py
+++ b/trove/common/clients.py
@@ -102,7 +102,7 @@ def nova_client(context, region_name=None, password=None):
)
client = Client(CONF.nova_client_version,
- username=context.user,
+ username=context.user_id,
password=password,
endpoint_override=url,
project_id=context.project_id,
@@ -141,7 +141,7 @@ def cinder_client(context, region_name=None):
endpoint_type=CONF.cinder_endpoint_type
)
- client = CinderClient.Client(context.user, context.auth_token,
+ client = CinderClient.Client(context.user_id, context.auth_token,
project_id=context.project_id,
auth_url=CONF.service_credentials.auth_url,
insecure=CONF.cinder_api_insecure)
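
# Minimal usage sketch (not part of the patch): after this change the client
# factories in trove/common/clients.py read the user from context.user_id
# instead of context.user. The IDs, token and region name below are
# illustrative values, assuming a configured Trove service environment.
from trove.common import clients
from trove.common.context import TroveContext

ctx = TroveContext(user_id="trove-service-user-id",
                   project_id="service-project-id",
                   auth_token="keystone-token")

# Both factories now pass ctx.user_id to the underlying
# python-novaclient / python-cinderclient constructors shown above.
nova = clients.nova_client(ctx, region_name="RegionOne")
cinder = clients.cinder_client(ctx, region_name="RegionOne")
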
diff --git a/trove/common/notification.py b/trove/common/notification.py
index a050e0a7..65d1894a 100644
--- a/trove/common/notification.py
+++ b/trove/common/notification.py
@@ -115,7 +115,7 @@ class TroveBaseTraits(object):
'state_description': instance.status.lower(),
'state': instance.status.lower(),
'tenant_id': instance.tenant_id,
- 'user_id': instance.context.user,
+ 'user_id': instance.context.user_id,
})
self.payload.update(kwargs)
diff --git a/trove/common/wsgi.py b/trove/common/wsgi.py
index 72fcfc1f..5231811d 100644
--- a/trove/common/wsgi.py
+++ b/trove/common/wsgi.py
@@ -549,7 +549,7 @@ class ContextMiddleware(base_wsgi.Middleware):
limits = self._extract_limits(request.params)
context = rd_context.TroveContext(auth_token=auth_token,
project_id=tenant_id,
- user=user_id,
+ user_id=user_id,
is_admin=is_admin,
limit=limits.get('limit'),
marker=limits.get('marker'),
diff --git a/trove/extensions/common/models.py b/trove/extensions/common/models.py
index 6b5ede6e..0ea791d0 100644
--- a/trove/extensions/common/models.py
+++ b/trove/extensions/common/models.py
@@ -139,5 +139,5 @@ class RootHistory(object):
history = cls.load(context, instance_id)
if history is not None:
return history
- history = RootHistory(instance_id, context.user)
+ history = RootHistory(instance_id, context.user_id)
return history.save()
diff --git a/trove/taskmanager/manager.py b/trove/taskmanager/manager.py
index a7a16493..bb6a5da7 100644
--- a/trove/taskmanager/manager.py
+++ b/trove/taskmanager/manager.py
@@ -44,7 +44,7 @@ class Manager(periodic_task.PeriodicTasks):
def __init__(self):
super(Manager, self).__init__(CONF)
self.admin_context = TroveContext(
- user=CONF.service_credentials.username,
+ user_id=CONF.service_credentials.username,
project_id=CONF.service_credentials.project_id,
user_domain_name=CONF.service_credentials.user_domain_name)
if CONF.exists_notification_transformer:
diff --git a/trove/taskmanager/models.py b/trove/taskmanager/models.py
index 58dd8f2b..fcfa2827 100755
--- a/trove/taskmanager/models.py
+++ b/trove/taskmanager/models.py
@@ -116,7 +116,7 @@ class NotifyMixin(object):
'state_description': self.status,
'state': self.status,
'tenant_id': self.tenant_id,
- 'user_id': self.context.user,
+ 'user_id': self.context.user_id,
}
if CONF.get(self.datastore_version.manager).volume_support:
@@ -980,7 +980,7 @@ class FreshInstanceTasks(FreshInstance, NotifyMixin, ConfigurationMixin):
nics, files={}, scheduler_hints=None):
userdata = self.prepare_userdata(datastore_manager)
metadata = {'trove_project_id': self.tenant_id,
- 'trove_user_id': self.context.user,
+ 'trove_user_id': self.context.user_id,
'trove_instance_id': self.id}
bdmap_v2 = block_device_mapping_v2
config_drive = CONF.use_nova_server_config_drive
diff --git a/trove/tests/unittests/common/test_context.py b/trove/tests/unittests/common/test_context.py
index a5f2f3a2..16df1f6b 100644
--- a/trove/tests/unittests/common/test_context.py
+++ b/trove/tests/unittests/common/test_context.py
@@ -24,7 +24,7 @@ from trove.tests.unittests import trove_testtools
class TestTroveContext(trove_testtools.TestCase):
def test_create_with_extended_args(self):
expected_service_catalog = {'key': 'value'}
- ctx = context.TroveContext(user="test_user_id",
+ ctx = context.TroveContext(user_id="test_user_id",
request_id="test_req_id",
limit="500",
marker="x",
@@ -34,23 +34,23 @@ class TestTroveContext(trove_testtools.TestCase):
self.assertThat(ctx.service_catalog, Equals(expected_service_catalog))
def test_create(self):
- ctx = context.TroveContext(user='test_user_id',
+ ctx = context.TroveContext(user_id='test_user_id',
request_id='test_req_id')
- self.assertThat(ctx.user, Equals('test_user_id'))
+ self.assertThat(ctx.user_id, Equals('test_user_id'))
self.assertThat(ctx.request_id, Equals('test_req_id'))
self.assertThat(ctx.limit, Is(None))
self.assertThat(ctx.marker, Is(None))
self.assertThat(ctx.service_catalog, Is(None))
def test_to_dict(self):
- ctx = context.TroveContext(user='test_user_id',
+ ctx = context.TroveContext(user_id='test_user_id',
request_id='test_req_id')
ctx_dict = ctx.to_dict()
self.assertThat(ctx_dict.get('user'), Equals('test_user_id'))
self.assertThat(ctx_dict.get('request_id'), Equals('test_req_id'))
def test_to_dict_with_notification(self):
- ctx = context.TroveContext(user='test_user_id',
+ ctx = context.TroveContext(user_id='test_user_id',
project_id='the_tenant',
request_id='test_req_id')
ctx.notification = DBaaSInstanceCreate(ctx,
@@ -70,7 +70,7 @@ class TestTroveContext(trove_testtools.TestCase):
'request_id': 'test_req_id',
'project_id': 'abc',
'blah_blah': 'blah blah'})
- self.assertThat(ctx.user, Equals('test_user_id'))
+ self.assertThat(ctx.user_id, Equals('test_user_id'))
self.assertThat(ctx.request_id, Equals('test_req_id'))
self.assertThat(ctx.project_id, Equals('abc'))
self.assertThat(ctx.limit, Is(None))
diff --git a/trove/tests/unittests/common/test_wsgi.py b/trove/tests/unittests/common/test_wsgi.py
index 324078da..9297fed6 100644
--- a/trove/tests/unittests/common/test_wsgi.py
+++ b/trove/tests/unittests/common/test_wsgi.py
@@ -42,7 +42,7 @@ class TestWsgi(trove_testtools.TestCase):
# assertions
ctx = req.environ[wsgi.CONTEXT_KEY]
self.assertThat(ctx, Not(Is(None)))
- self.assertThat(ctx.user, Equals(user_id))
+ self.assertThat(ctx.user_id, Equals(user_id))
self.assertThat(ctx.auth_token, Equals(token))
self.assertEqual(0, len(ctx.service_catalog))
diff --git a/trove/tests/unittests/extensions/common/test_service.py b/trove/tests/unittests/extensions/common/test_service.py
index ecb61f21..2eddc2db 100644
--- a/trove/tests/unittests/extensions/common/test_service.py
+++ b/trove/tests/unittests/extensions/common/test_service.py
@@ -61,8 +61,8 @@ class TestDefaultRootController(trove_testtools.TestCase):
def test_root_create(self, root_create):
user = Mock()
context = Mock()
- context.user = Mock()
- context.user.__getitem__ = Mock(return_value=user)
+ context.user_id = Mock()
+ context.user_id.__getitem__ = Mock(return_value=user)
req = Mock()
req.environ = Mock()
req.environ.__getitem__ = Mock(return_value=context)
@@ -376,8 +376,8 @@ class TestClusterRootController(trove_testtools.TestCase):
@patch.object(models.ClusterRoot, "create")
def test_instance_root_create(self, mock_cluster_root_create):
user = Mock()
- self.context.user = Mock()
- self.context.user.__getitem__ = Mock(return_value=user)
+ self.context.user_id = Mock()
+ self.context.user_id.__getitem__ = Mock(return_value=user)
req = Mock()
req.environ = {'trove.context': self.context}
password = Mock()
@@ -393,8 +393,8 @@ class TestClusterRootController(trove_testtools.TestCase):
@patch.object(models.ClusterRoot, "create")
def test_instance_root_create_no_body(self, mock_cluster_root_create):
user = Mock()
- self.context.user = Mock()
- self.context.user.__getitem__ = Mock(return_value=user)
+ self.context.user_id = Mock()
+ self.context.user_id.__getitem__ = Mock(return_value=user)
req = Mock()
req.environ = {'trove.context': self.context}
password = None
diff --git a/trove/tests/unittests/taskmanager/test_models.py b/trove/tests/unittests/taskmanager/test_models.py
index 3eb81b8c..4eff71a3 100644
--- a/trove/tests/unittests/taskmanager/test_models.py
+++ b/trove/tests/unittests/taskmanager/test_models.py
@@ -221,7 +221,7 @@ class BaseFreshInstanceTasksTest(trove_testtools.TestCase):
self.freshinstancetasks = taskmanager_models.FreshInstanceTasks(
None, MagicMock(), None, None)
self.freshinstancetasks.context = trove.common.context.TroveContext(
- user='test_user')
+ user_id='test_user')
def tearDown(self):
super(BaseFreshInstanceTasksTest, self).tearDown()
@@ -1195,16 +1195,16 @@ class RootReportTest(trove_testtools.TestCase):
def test_report_root_first_time(self):
context = Mock()
- context.user = utils.generate_uuid()
+ context.user_id = utils.generate_uuid()
report = mysql_models.RootHistory.create(
context, utils.generate_uuid())
self.assertIsNotNone(report)
def test_report_root_double_create(self):
context = Mock()
- context.user = utils.generate_uuid()
+ context.user_id = utils.generate_uuid()
id = utils.generate_uuid()
- history = mysql_models.RootHistory(id, context.user).save()
+ history = mysql_models.RootHistory(id, context.user_id).save()
with patch.object(mysql_models.RootHistory, 'load',
Mock(return_value=history)):
report = mysql_models.RootHistory.create(context, id)
@@ -1219,7 +1219,7 @@ class ClusterRootTest(trove_testtools.TestCase):
@patch.object(common_models.Root, "create")
def test_cluster_root_create(self, root_create, root_history_create):
context = Mock()
- context.user = utils.generate_uuid()
+ context.user_id = utils.generate_uuid()
id = utils.generate_uuid()
password = "rootpassword"
cluster_instances = [utils.generate_uuid(), utils.generate_uuid()]
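
# Summary sketch (assumed, mirroring the updated unit tests above):
# TroveContext is now constructed with user_id= and exposes .user_id,
# while to_dict() still carries the value under the legacy 'user' key,
# as the unchanged assertion in test_to_dict shows.
from trove.common import context

ctx = context.TroveContext(user_id="test_user_id",
                           project_id="the_tenant",
                           request_id="test_req_id")

assert ctx.user_id == "test_user_id"                  # renamed attribute
assert ctx.to_dict().get("user") == "test_user_id"    # legacy key kept in to_dict()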