author     melanie witt <melwittt@gmail.com>    2019-02-20 02:51:36 +0000
committer  melanie witt <melwittt@gmail.com>    2019-05-23 18:01:58 +0000
commit     8354f42e20da952a6ab5612363afb77386886df0 (patch)
tree       1c2afb263fd31567319bc494f7af9d28c29da3e1 /nova/tests/unit/test_quota.py
parent     54408d0ce4a74395cd585d5f24b0c143b8618623 (diff)
Count instances from mappings and cores/ram from placement
This counts instance mappings to get quota usage for instances and adds
calls to placement to get quota usage for cores and ram. During an
upgrade, if any un-migrated instance mappings are found (with NULL
user_id or NULL queued_for_delete fields), we will fall back to the
legacy counting method (sketched below).
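For reference, the selection logic these changes introduce (and that the
tests in the diff below exercise) looks roughly like the following
minimal, self-contained sketch. The helper bodies are stubbed here, and
the real nova.quota module additionally caches the migration-check
result per project, as the last test below verifies:

    import logging

    LOG = logging.getLogger(__name__)


    def _user_id_queued_for_delete_populated(context, project_id):
        # Stub: the real helper checks the API database for instance
        # mappings with NULL user_id or NULL queued_for_delete fields.
        return True


    def _instances_cores_ram_count_api_db_placement(context, project_id,
                                                    user_id=None):
        # Stub: the real helper counts instances from instance mappings
        # in the API database and cores/ram from placement usages.
        return {'project': {'instances': 0, 'cores': 0, 'ram': 0}}


    def _instances_cores_ram_count_legacy(context, project_id,
                                          user_id=None):
        # Stub: the real helper counts instances, cores, and ram from
        # the cell databases.
        return {'project': {'instances': 0, 'cores': 0, 'ram': 0}}


    def _instances_cores_ram_count(context, project_id, user_id=None,
                                   count_usage_from_placement=False):
        # The keyword argument stands in for the real
        # CONF.quota.count_usage_from_placement option.
        if count_usage_from_placement:
            if _user_id_queued_for_delete_populated(context, project_id):
                # All mappings are migrated: count instances from the
                # API DB and cores/ram from placement.
                return _instances_cores_ram_count_api_db_placement(
                    context, project_id, user_id=user_id)
            # Un-migrated instance mappings were found; warn (only when
            # counting from placement has been opted into) and fall back.
            LOG.warning('Falling back to the legacy quota counting '
                        'method')
        return _instances_cores_ram_count_legacy(context, project_id,
                                                 user_id=user_id)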
Counting quota usage from placement is opt-in via the
[quota]count_usage_from_placement configuration option (see the example
following this list) because:
* Though beneficial for multi-cell deployments to be resilient to
down cells, the vast majority of deployments are single cell, will
not realize any down-cell resiliency benefit, and may prefer to keep
legacy quota usage counting.
* Usage for resizes will reflect resources being held on both the
source and destination until the resize is confirmed or reverted.
Operators may not want to enable counting from placement if this
behavior change is problematic for them.
* Placement does not yet support the ability to partition resource
providers from multiple Nova deployments, so environments that are
sharing a single placement deployment would see usage that
aggregates all Nova deployments together. Such environments should
not enable counting from placement.
* Usage for unscheduled instances in ERROR state will not reflect
resource consumption for cores and ram because the instance has no
placement allocations.
* Usage for instances in SHELVED_OFFLOADED state will not reflect
resource consumption for cores and ram because the instance has no
placement allocations. Note that because of this, a request to
unshelve a server may be rejected if the user does not have enough
quota available to support the cores and ram needed by the server to
be unshelved.
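To opt in, set the option in nova.conf under the [quota] group; the
option is off by default:

    [quota]
    count_usage_from_placement = True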
Part of blueprint count-quota-usage-from-placement
Change-Id: Ie22b0acb5824a41da327abdcf9848d02fc9a92f5
Diffstat (limited to 'nova/tests/unit/test_quota.py')
-rw-r--r--  nova/tests/unit/test_quota.py  151
1 file changed, 151 insertions, 0 deletions
diff --git a/nova/tests/unit/test_quota.py b/nova/tests/unit/test_quota.py
index bcb4838c76..30824e1ba8 100644
--- a/nova/tests/unit/test_quota.py
+++ b/nova/tests/unit/test_quota.py
@@ -14,6 +14,7 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
+import ddt
 import mock
 from oslo_db.sqlalchemy import enginefacade
 from six.moves import range
@@ -1961,3 +1962,153 @@ class NoopQuotaDriverTestCase(test.TestCase):
                                                   quota.QUOTAS._resources,
                                                   'test_project')
         self.assertEqual(self.expected_settable_quotas, result)
+
+
+@ddt.ddt
+class QuotaCountTestCase(test.NoDBTestCase):
+    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
+                'get_usages_counts_for_quota')
+    def test_cores_ram_count_placement(self, mock_get_usages):
+        usages = quota._cores_ram_count_placement(
+            mock.sentinel.context, mock.sentinel.project_id,
+            user_id=mock.sentinel.user_id)
+        mock_get_usages.assert_called_once_with(
+            mock.sentinel.context, mock.sentinel.project_id,
+            user_id=mock.sentinel.user_id)
+        self.assertEqual(mock_get_usages.return_value, usages)
+
+    @mock.patch('nova.objects.InstanceMappingList.get_counts')
+    @mock.patch('nova.quota._cores_ram_count_placement')
+    def test_instances_cores_ram_count_api_db_placement(
+            self, mock_placement_count, mock_get_im_count):
+        # Fake response from placement with project and user usages of cores
+        # and ram.
+        mock_placement_count.return_value = {'project': {'cores': 2, 'ram': 4},
+                                             'user': {'cores': 1, 'ram': 2}}
+        # Fake count of instances based on instance mappings in the API DB.
+        mock_get_im_count.return_value = {'project': {'instances': 2},
+                                          'user': {'instances': 1}}
+
+        counts = quota._instances_cores_ram_count_api_db_placement(
+            mock.sentinel.context, mock.sentinel.project_id,
+            user_id=mock.sentinel.user_id)
+
+        mock_get_im_count.assert_called_once_with(
+            mock.sentinel.context, mock.sentinel.project_id,
+            user_id=mock.sentinel.user_id)
+        mock_placement_count.assert_called_once_with(
+            mock.sentinel.context, mock.sentinel.project_id,
+            user_id=mock.sentinel.user_id)
+        expected = {'project': {'instances': 2, 'cores': 2, 'ram': 4},
+                    'user': {'instances': 1, 'cores': 1, 'ram': 2}}
+        self.assertDictEqual(expected, counts)
+
+    @ddt.data((True, True),
+              (True, False),
+              (False, True),
+              (False, False))
+    @ddt.unpack
+    @mock.patch('nova.quota.LOG.warning')
+    @mock.patch('nova.quota._user_id_queued_for_delete_populated')
+    @mock.patch('nova.quota._instances_cores_ram_count_legacy')
+    @mock.patch('nova.quota._instances_cores_ram_count_api_db_placement')
+    def test_instances_cores_ram_count(self, quota_from_placement,
+                                       uid_qfd_populated,
+                                       mock_api_db_placement_count,
+                                       mock_legacy_count,
+                                       mock_uid_qfd_populated, mock_warn_log):
+        # Check that all the combinations of
+        # [quota]count_usage_from_placement (True/False) and
+        # user_id_queued_for_delete_populated (True/False) do the right things.
+
+        # Fake count of instances, cores, and ram.
+        expected = {'project': {'instances': 2, 'cores': 2, 'ram': 4},
+                    'user': {'instances': 1, 'cores': 1, 'ram': 2}}
+        mock_api_db_placement_count.return_value = expected
+        mock_legacy_count.return_value = expected
+        # user_id and queued_for_delete populated/migrated (True/False)
+        mock_uid_qfd_populated.return_value = uid_qfd_populated
+        # Counting quota usage from placement enabled (True/False)
+        self.flags(count_usage_from_placement=quota_from_placement,
+                   group='quota')
+
+        counts = quota._instances_cores_ram_count(
+            mock.sentinel.context, mock.sentinel.project_id,
+            user_id=mock.sentinel.user_id)
+
+        if quota_from_placement and uid_qfd_populated:
+            # If we are counting quota usage from placement and user_id and
+            # queued_for_delete data has all been migrated, we should count
+            # instances from the API DB using instance mappings and count
+            # cores and ram from placement.
+            mock_api_db_placement_count.assert_called_once_with(
+                mock.sentinel.context, mock.sentinel.project_id,
+                user_id=mock.sentinel.user_id)
+            # We should not have called the legacy counting method.
+            mock_legacy_count.assert_not_called()
+            # We should not have logged a warn message saying we were falling
+            # back to the legacy counting method.
+            mock_warn_log.assert_not_called()
+        else:
+            # If counting quota usage from placement is not enabled or if
+            # user_id or queued_for_delete data has not all been migrated yet,
+            # we should use the legacy counting method.
+            mock_legacy_count.assert_called_once_with(
+                mock.sentinel.context, mock.sentinel.project_id,
+                user_id=mock.sentinel.user_id)
+            # We should have logged a warn message saying we were falling back
+            # to the legacy counting method.
+            if quota_from_placement:
+                # We only log the message if someone has opted in to counting
+                # from placement.
+                mock_warn_log.assert_called_once()
+            # We should not have called the API DB and placement counting
+            # method.
+            mock_api_db_placement_count.assert_not_called()
+
+        self.assertDictEqual(expected, counts)
+
+    @mock.patch('nova.quota._user_id_queued_for_delete_populated')
+    @mock.patch('nova.quota._instances_cores_ram_count_legacy')
+    @mock.patch('nova.quota._instances_cores_ram_count_api_db_placement')
+    def test_user_id_queued_for_delete_populated_cache_by_project(
+            self, mock_api_db_placement_count, mock_legacy_count,
+            mock_uid_qfd_populated):
+        # We need quota usage from placement enabled to test this. For legacy
+        # counting, the cache is not used.
+        self.flags(count_usage_from_placement=True, group='quota')
+        # Fake count of instances, cores, and ram.
+        fake_counts = {'project': {'instances': 2, 'cores': 2, 'ram': 4},
+                       'user': {'instances': 1, 'cores': 1, 'ram': 2}}
+        mock_api_db_placement_count.return_value = fake_counts
+        mock_legacy_count.return_value = fake_counts
+
+        # First, check the case where user_id and queued_for_delete are found
+        # not to be migrated.
+        mock_uid_qfd_populated.return_value = False
+        quota._instances_cores_ram_count(mock.sentinel.context,
+                                         mock.sentinel.project_id,
+                                         user_id=mock.sentinel.user_id)
+        mock_uid_qfd_populated.assert_called_once()
+        # The second call should check for unmigrated records again, since the
+        # project was found not to be completely migrated last time.
+        quota._instances_cores_ram_count(mock.sentinel.context,
+                                         mock.sentinel.project_id,
+                                         user_id=mock.sentinel.user_id)
+        self.assertEqual(2, mock_uid_qfd_populated.call_count)
+
+        # Now check the case where the data migration was found to be complete.
+        mock_uid_qfd_populated.reset_mock()
+        mock_uid_qfd_populated.return_value = True
+        # The first call will check whether there are any unmigrated records.
+        quota._instances_cores_ram_count(mock.sentinel.context,
+                                         mock.sentinel.project_id,
+                                         user_id=mock.sentinel.user_id)
+        mock_uid_qfd_populated.assert_called_once()
+        # Second call should skip the check for user_id and queued_for_delete
+        # migrated because the result was cached.
+        mock_uid_qfd_populated.reset_mock()
+        quota._instances_cores_ram_count(mock.sentinel.context,
+                                         mock.sentinel.project_id,
+                                         user_id=mock.sentinel.user_id)
+        mock_uid_qfd_populated.assert_not_called()
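With nova's usual tox/stestr setup (the environment name here is
illustrative), the new test case can be run on its own:

    tox -e py36 -- nova.tests.unit.test_quota.QuotaCountTestCase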