author     Yikun Jiang <yikunkero@gmail.com>       2018-09-15 11:31:41 +0800
committer  Matt Riedemann <mriedem.os@gmail.com>   2018-11-30 15:32:06 -0500
commit     08f3ae960623c94bdd997cacb3e81f04b4bbba69 (patch)
tree       5803a053e6d1def021f630a849d322639d31c31a /nova
parent     212eff600ab6f016ed696a8d4bd884d572db8990 (diff)
Use new ``initial_xxx_allocation_ratio`` CONF
This patch adds new ``initial_xxx_allocation_ratio`` CONF options and
modifies the resource tracker's initial compute node creation to use these
values.

During the update_available_resource periodic task, the allocation ratios
reported to inventory for VCPU, MEMORY_MB and DISK_GB will be based on:

* If CONF.*_allocation_ratio is set, use it. This overrides everything,
  including externally set allocation ratios via the placement API.
* If reporting inventory for the first time, the
  CONF.initial_*_allocation_ratio value is used.
* For everything else, the inventory reported remains unchanged, which
  allows operators to set the allocation ratios on the inventory records
  in placement directly without worrying about nova-compute overwriting
  those changes.

As a result, several TODOs are removed from the virt drivers that
implement the update_provider_tree interface, as is a TODO in the
resource tracker about unsetting allocation ratios to get back to the
initial values.

Change-Id: I14a310b20bd9892e7b34464e6baad49bf5928ece
blueprint: initial-allocation-ratios
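The precedence above can be condensed into a minimal Python sketch (an
illustrative helper only, not part of the patch; the real logic lives in
ResourceTracker._copy_resources and ComputeDriver._get_allocation_ratios
in the diff below):

    def pick_allocation_ratio(conf_ratio, initial_conf_ratio, inv_ratio):
        # CONF.*_allocation_ratio wins over everything, including ratios
        # set externally via the placement API (None and 0.0 both mean
        # "unset").
        if conf_ratio:
            return conf_ratio
        # First inventory report: seed from the new
        # CONF.initial_*_allocation_ratio option.
        if inv_ratio is None:
            return initial_conf_ratio
        # Otherwise preserve whatever is already in placement.
        return inv_ratio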
Diffstat (limited to 'nova')
-rw-r--r--  nova/compute/resource_tracker.py                          25
-rw-r--r--  nova/conf/compute.py                                      63
-rw-r--r--  nova/tests/functional/compute/test_resource_tracker.py   104
-rw-r--r--  nova/tests/functional/integrated_helpers.py               11
-rw-r--r--  nova/tests/unit/compute/test_resource_tracker.py           6
-rw-r--r--  nova/tests/unit/virt/powervm/test_driver.py                7
-rw-r--r--  nova/tests/unit/virt/test_virt_drivers.py                 50
-rw-r--r--  nova/virt/driver.py                                       36
-rw-r--r--  nova/virt/fake.py                                         17
-rw-r--r--  nova/virt/libvirt/driver.py                               16
-rw-r--r--  nova/virt/powervm/driver.py                               19
11 files changed, 309 insertions, 45 deletions
diff --git a/nova/compute/resource_tracker.py b/nova/compute/resource_tracker.py
index 5c21f60a6b..7d7ddc3be8 100644
--- a/nova/compute/resource_tracker.py
+++ b/nova/compute/resource_tracker.py
@@ -583,7 +583,7 @@ class ResourceTracker(object):
# to be initialized with resource values.
cn = objects.ComputeNode(context)
cn.host = self.host
- self._copy_resources(cn, resources)
+ self._copy_resources(cn, resources, initial=True)
self.compute_nodes[nodename] = cn
cn.create()
LOG.info('Compute node record created for '
@@ -605,7 +605,7 @@ class ResourceTracker(object):
dev_pools_obj = self.pci_tracker.stats.to_device_pools_obj()
compute_node.pci_device_pools = dev_pools_obj
- def _copy_resources(self, compute_node, resources):
+ def _copy_resources(self, compute_node, resources, initial=False):
"""Copy resource values to supplied compute_node."""
nodename = resources['hypervisor_hostname']
stats = self.stats[nodename]
@@ -629,20 +629,17 @@ class ResourceTracker(object):
# resetting the ComputeNode fields to None because that will make
# the _resource_change method think something changed when really it
# didn't.
- # TODO(mriedem): Will this break any scenarios where an operator is
- # trying to *reset* the allocation ratios by changing config from
- # non-None back to None? Maybe we should only do this if the fields on
- # the ComputeNode object are not already set. For example, let's say
- # the cpu_allocation_ratio config was 1.0 and then the operator wants
- # to get back to the default (16.0 via the facade), and to do that they
- # change the config back to None (or just unset the config option).
- # Should we support that or treat these config options as "sticky" in
- # that once you start setting them, you can't go back to the implied
- # defaults by unsetting or resetting to None? Sort of like how
- # per-tenant quota is sticky once you change it in the API.
+ # NOTE(yikun): CONF.initial_(cpu|ram|disk)_allocation_ratio is used
+ # when we initialize the compute node object, i.e.
+ # ComputeNode.(cpu|ram|disk)_allocation_ratio is set to
+ # CONF.initial_(cpu|ram|disk)_allocation_ratio when the initial flag
+ # is True.
for res in ('cpu', 'disk', 'ram'):
attr = '%s_allocation_ratio' % res
- conf_alloc_ratio = getattr(self, attr)
+ if initial:
+ conf_alloc_ratio = getattr(CONF, 'initial_%s' % attr)
+ else:
+ conf_alloc_ratio = getattr(self, attr)
# NOTE(yikun): In Stein version, we change the default value of
# (cpu|ram|disk)_allocation_ratio from 0.0 to None, but we still
# should allow 0.0 to keep compatibility, and this 0.0 condition
diff --git a/nova/conf/compute.py b/nova/conf/compute.py
index 1258f82e08..1fe1d97168 100644
--- a/nova/conf/compute.py
+++ b/nova/conf/compute.py
@@ -399,11 +399,16 @@ NOTE: If this option is set to something *other than* ``None`` or ``0.0``, the
allocation ratio will be overwritten by the value of this option, otherwise,
the allocation ratio will not change. Once set to a non-default value, it is
not possible to "unset" the config to get back to the default behavior. If you
-want to reset back to the default, explicitly specify 16.0.
+want to reset back to the initial value, explicitly set it to the value of
+``initial_cpu_allocation_ratio``.
Possible values:
* Any valid positive integer or float value
+
+Related options:
+
+* ``initial_cpu_allocation_ratio``
"""),
cfg.FloatOpt('ram_allocation_ratio',
default=None,
@@ -425,11 +430,16 @@ NOTE: If this option is set to something *other than* ``None`` or ``0.0``, the
allocation ratio will be overwritten by the value of this option, otherwise,
the allocation ratio will not change. Once set to a non-default value, it is
not possible to "unset" the config to get back to the default behavior. If you
-want to reset back to the default, explicitly specify 1.5.
+want to reset back to the initial value, explicitly set it to the value of
+``initial_ram_allocation_ratio``.
Possible values:
* Any valid positive integer or float value
+
+Related options:
+
+* ``initial_ram_allocation_ratio``
"""),
cfg.FloatOpt('disk_allocation_ratio',
default=None,
@@ -455,11 +465,58 @@ NOTE: If this option is set to something *other than* ``None`` or ``0.0``, the
allocation ratio will be overwritten by the value of this option, otherwise,
the allocation ratio will not change. Once set to a non-default value, it is
not possible to "unset" the config to get back to the default behavior. If you
-want to reset back to the default, explicitly specify 1.0.
+want to reset back to the initial value, explicitly set it to the value of
+``initial_disk_allocation_ratio``.
Possible values:
* Any valid positive integer or float value
+
+Related options:
+
+* ``initial_disk_allocation_ratio``
+"""),
+ cfg.FloatOpt('initial_cpu_allocation_ratio',
+ default=16.0,
+ min=0.0,
+ help="""
+This option helps you specify the initial virtual CPU to physical CPU
+allocation ratio.
+
+This is only used when initially creating the ``compute_nodes`` table record
+for a given nova-compute service.
+
+Related options:
+
+* ``cpu_allocation_ratio``
+"""),
+ cfg.FloatOpt('initial_ram_allocation_ratio',
+ default=1.5,
+ min=0.0,
+ help="""
+This option helps you specify the initial virtual RAM to physical RAM
+allocation ratio.
+
+This is only used when initially creating the ``compute_nodes`` table record
+for a given nova-compute service.
+
+Related options:
+
+* ``ram_allocation_ratio``
+"""),
+ cfg.FloatOpt('initial_disk_allocation_ratio',
+ default=1.0,
+ min=0.0,
+ help="""
+This option helps you specify the initial virtual disk to physical disk
+allocation ratio.
+
+This is only used when initially creating the ``compute_nodes`` table record
+for a given nova-compute service.
+
+Related options:
+
+* ``disk_allocation_ratio``
""")
]
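For illustration, a standalone oslo.config sketch (abbreviated option
definitions, not nova code) showing how the override and initial options
resolve when nothing is set in nova.conf:

    from oslo_config import cfg

    opts = [
        cfg.FloatOpt('cpu_allocation_ratio', default=None),
        cfg.FloatOpt('initial_cpu_allocation_ratio', default=16.0, min=0.0),
    ]
    conf = cfg.ConfigOpts()
    conf.register_opts(opts)
    conf([])  # no CLI args and no config files

    # The override stays unset until an operator sets it, while the
    # initial option carries the value used to seed a brand new
    # compute_nodes record.
    assert conf.cpu_allocation_ratio is None
    assert conf.initial_cpu_allocation_ratio == 16.0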
diff --git a/nova/tests/functional/compute/test_resource_tracker.py b/nova/tests/functional/compute/test_resource_tracker.py
index c4ef50d743..0cb7b8280b 100644
--- a/nova/tests/functional/compute/test_resource_tracker.py
+++ b/nova/tests/functional/compute/test_resource_tracker.py
@@ -362,3 +362,107 @@ class TestUpdateComputeNodeReservedAndAllocationRatio(
# The reserved host values should also come from config.
self._assert_reserved_inventory(inventories)
+
+ def test_allocation_ratio_create_with_initial_allocation_ratio(self):
+ # The xxx_allocation_ratio options are None by default, and we use
+ # 16.1/1.6/1.1 since the initial_(cpu|ram|disk)_allocation_ratio
+ # options default to 16.0/1.5/1.0.
+ self.flags(initial_cpu_allocation_ratio=16.1)
+ self.flags(initial_ram_allocation_ratio=1.6)
+ self.flags(initial_disk_allocation_ratio=1.1)
+ # Start a compute service which should create a corresponding resource
+ # provider in the placement service.
+ self._start_compute('fake-host')
+ # Assert the compute node resource provider exists in placement with
+ # the reserved and initial allocation ratio values from config.
+ rp_uuid = self._get_provider_uuid_by_host('fake-host')
+ inventories = self._get_provider_inventory(rp_uuid)
+ ctxt = context.get_admin_context()
+ # Note that the CellDatabases fixture usage means we don't need to
+ # target the context to cell1 even though the compute_nodes table is
+ # in the cell1 database.
+ cn = objects.ComputeNode.get_by_uuid(ctxt, rp_uuid)
+ ratios = {
+ 'VCPU': cn.cpu_allocation_ratio,
+ 'MEMORY_MB': cn.ram_allocation_ratio,
+ 'DISK_GB': cn.disk_allocation_ratio
+ }
+ initial_ratio_conf = {
+ 'VCPU': CONF.initial_cpu_allocation_ratio,
+ 'MEMORY_MB': CONF.initial_ram_allocation_ratio,
+ 'DISK_GB': CONF.initial_disk_allocation_ratio
+ }
+ for rc, ratio in ratios.items():
+ self.assertIn(rc, inventories)
+ self.assertIn('allocation_ratio', inventories[rc])
+ # Check the allocation_ratio values come from the new
+ # CONF.initial_xxx_allocation_ratio
+ self.assertEqual(initial_ratio_conf[rc], ratio,
+ 'Unexpected allocation ratio for %s' % rc)
+ # Check the initial allocation ratio is reflected in the inventories
+ self.assertEqual(ratio, inventories[rc]['allocation_ratio'],
+ 'Unexpected allocation ratio for %s' % rc)
+
+ def test_allocation_ratio_overwritten_from_config(self):
+ # NOTE(yikun): This test case includes the following steps:
+ # 1. Overwrite the allocation_ratio via the placement API directly -
+ # run the RT.update_available_resource periodic and assert the
+ # allocation ratios are not overwritten from config.
+ #
+ # 2. Set the CONF.*_allocation_ratio, run the periodic, and assert
+ # that the config overwrites what was set via the placement API.
+ compute_service = self._start_compute('fake-host')
+ rp_uuid = self._get_provider_uuid_by_host('fake-host')
+ ctxt = context.get_admin_context()
+
+ rt = compute_service.manager._get_resource_tracker()
+
+ inv = self.placement_api.get(
+ '/resource_providers/%s/inventories' % rp_uuid).body
+ ratios = {'VCPU': 16.1, 'MEMORY_MB': 1.6, 'DISK_GB': 1.1}
+
+ for rc, ratio in ratios.items():
+ inv['inventories'][rc]['allocation_ratio'] = ratio
+
+ # Overwrite the allocation_ratio via the placement API directly
+ self._update_inventory(rp_uuid, inv)
+ inv = self._get_provider_inventory(rp_uuid)
+ # Check the inventories are updated with the new ratios
+ for rc, ratio in ratios.items():
+ self.assertIn(rc, inv)
+ self.assertIn('allocation_ratio', inv[rc])
+ self.assertEqual(ratio, inv[rc]['allocation_ratio'],
+ 'Unexpected allocation ratio for %s' % rc)
+
+ # Make sure xxx_allocation_ratio is None by default
+ self.assertIsNone(CONF.cpu_allocation_ratio)
+ self.assertIsNone(CONF.ram_allocation_ratio)
+ self.assertIsNone(CONF.disk_allocation_ratio)
+ # run the RT.update_available_resource periodic
+ rt.update_available_resource(ctxt, 'fake-host')
+ # assert the allocation ratios are not overwritten from config
+ inv = self._get_provider_inventory(rp_uuid)
+ for rc, ratio in ratios.items():
+ self.assertIn(rc, inv)
+ self.assertIn('allocation_ratio', inv[rc])
+ self.assertEqual(ratio, inv[rc]['allocation_ratio'],
+ 'Unexpected allocation ratio for %s' % rc)
+
+ # set the CONF.*_allocation_ratio
+ self.flags(cpu_allocation_ratio=15.9)
+ self.flags(ram_allocation_ratio=1.4)
+ self.flags(disk_allocation_ratio=0.9)
+
+ # run the RT.update_available_resource periodic
+ rt.update_available_resource(ctxt, 'fake-host')
+ inv = self._get_provider_inventory(rp_uuid)
+ ratios = {
+ 'VCPU': CONF.cpu_allocation_ratio,
+ 'MEMORY_MB': CONF.ram_allocation_ratio,
+ 'DISK_GB': CONF.disk_allocation_ratio
+ }
+ # assert that the config overwrites what was set via the placement API.
+ for rc, ratio in ratios.items():
+ self.assertIn(rc, inv)
+ self.assertIn('allocation_ratio', inv[rc])
+ self.assertEqual(ratio, inv[rc]['allocation_ratio'],
+ 'Unexpected allocation ratio for %s' % rc)
diff --git a/nova/tests/functional/integrated_helpers.py b/nova/tests/functional/integrated_helpers.py
index 333b3b014f..e5f02d23ee 100644
--- a/nova/tests/functional/integrated_helpers.py
+++ b/nova/tests/functional/integrated_helpers.py
@@ -488,6 +488,17 @@ class ProviderUsageBaseTestCase(test.TestCase, InstanceHelperMixin):
url= ('/resource_providers/%s/inventories' % rp_uuid),
version='1.15', body=inv_body).body
+ def _update_inventory(self, rp_uuid, inv_body):
+ """This will update the inventory for a given resource provider.
+
+ :param rp_uuid: UUID of the resource provider to update
+ :param inv_body: inventory to set on the provider
+ :returns: APIResponse object with the results
+ """
+ return self.placement_api.put(
+ url= ('/resource_providers/%s/inventories' % rp_uuid),
+ body=inv_body).body
+
def _get_resource_provider_by_uuid(self, rp_uuid):
return self.placement_api.get(
'/resource_providers/%s' % rp_uuid, version='1.15').body
diff --git a/nova/tests/unit/compute/test_resource_tracker.py b/nova/tests/unit/compute/test_resource_tracker.py
index a4e4d0b5ab..08a88c7d4e 100644
--- a/nova/tests/unit/compute/test_resource_tracker.py
+++ b/nova/tests/unit/compute/test_resource_tracker.py
@@ -1238,9 +1238,9 @@ class TestInitComputeNode(BaseTestCase):
# NOTE(sbauza): ResourceTracker adds host field
host=_HOSTNAME,
# NOTE(sbauza): ResourceTracker adds CONF allocation ratios
- ram_allocation_ratio=1.0,
- cpu_allocation_ratio=1.0,
- disk_allocation_ratio=1.0,
+ ram_allocation_ratio=CONF.initial_ram_allocation_ratio,
+ cpu_allocation_ratio=CONF.initial_cpu_allocation_ratio,
+ disk_allocation_ratio=CONF.initial_disk_allocation_ratio,
stats={'failed_builds': 0},
pci_device_pools=objects.PciDevicePoolList(objects=[]),
uuid=uuids.compute_node_uuid
diff --git a/nova/tests/unit/virt/powervm/test_driver.py b/nova/tests/unit/virt/powervm/test_driver.py
index 9f3cd9d8c7..08ae97b1e7 100644
--- a/nova/tests/unit/virt/powervm/test_driver.py
+++ b/nova/tests/unit/virt/powervm/test_driver.py
@@ -217,7 +217,7 @@ class TestPowerVMDriver(test.NoDBTestCase):
def test_update_provider_tree_complex_ptree(self):
# Overrides inventory already on the provider; leaves other providers
# and aggregates/traits alone.
- with self._update_provider_tree() as (ptree, _):
+ with self._update_provider_tree() as (ptree, exp_inv):
ptree.update_inventory('compute_host', {
# these should get blown away
'VCPU': {
@@ -238,6 +238,11 @@ class TestPowerVMDriver(test.NoDBTestCase):
'for': 'ssp'})
ptree.update_aggregates('ssp', [uuids.ss_agg])
ptree.new_child('sriov', 'compute_host', uuid=uuids.sriov)
+ # Since CONF.cpu_allocation_ratio is not set and this is not the
+ # initial update_provider_tree call (where
+ # CONF.initial_cpu_allocation_ratio would be used), the existing
+ # allocation ratio value from the tree is used.
+ exp_inv['VCPU']['allocation_ratio'] = 1.0
# Make sure the compute's agg and traits were left alone
cndata = ptree.data('compute_host')
diff --git a/nova/tests/unit/virt/test_virt_drivers.py b/nova/tests/unit/virt/test_virt_drivers.py
index d03370909e..f275250e0f 100644
--- a/nova/tests/unit/virt/test_virt_drivers.py
+++ b/nova/tests/unit/virt/test_virt_drivers.py
@@ -28,10 +28,12 @@ from oslo_utils import timeutils
import six
from nova.compute import manager
+from nova import conf
from nova.console import type as ctype
from nova import context
from nova import exception
from nova import objects
+from nova import rc_fields
from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.unit import fake_block_device
@@ -46,6 +48,7 @@ from nova.virt import libvirt
from nova.virt.libvirt import imagebackend
LOG = logging.getLogger(__name__)
+CONF = conf.CONF
def catch_notimplementederror(f):
@@ -809,6 +812,53 @@ class _VirtDriverTestCase(_FakeDriverBackendTestCase):
self.assertEqual(instance.host,
self.connection.network_binding_host_id(self.ctxt, instance))
+ def test_get_allocation_ratio(self):
+ inv = {}
+ self.flags(cpu_allocation_ratio=16.1)
+ self.flags(ram_allocation_ratio=1.6)
+ self.flags(disk_allocation_ratio=1.1)
+ expected_ratios = {
+ rc_fields.ResourceClass.VCPU: CONF.cpu_allocation_ratio,
+ rc_fields.ResourceClass.MEMORY_MB: CONF.ram_allocation_ratio,
+ rc_fields.ResourceClass.DISK_GB: CONF.disk_allocation_ratio
+ }
+ # If conf is set, return conf
+ self.assertEqual(expected_ratios,
+ self.connection._get_allocation_ratios(inv))
+
+ self.flags(cpu_allocation_ratio=None)
+ self.flags(ram_allocation_ratio=None)
+ self.flags(disk_allocation_ratio=None)
+ self.flags(initial_cpu_allocation_ratio=15.9)
+ self.flags(initial_ram_allocation_ratio=1.4)
+ self.flags(initial_disk_allocation_ratio=0.9)
+ expected_ratios = {
+ rc_fields.ResourceClass.VCPU:
+ CONF.initial_cpu_allocation_ratio,
+ rc_fields.ResourceClass.MEMORY_MB:
+ CONF.initial_ram_allocation_ratio,
+ rc_fields.ResourceClass.DISK_GB:
+ CONF.initial_disk_allocation_ratio
+ }
+ # If conf is unset and inv doesn't exist, return the initial conf
+ self.assertEqual(expected_ratios,
+ self.connection._get_allocation_ratios(inv))
+
+ inv = {rc_fields.ResourceClass.VCPU: {'allocation_ratio': 3.0},
+ rc_fields.ResourceClass.MEMORY_MB: {'allocation_ratio': 3.1},
+ rc_fields.ResourceClass.DISK_GB: {'allocation_ratio': 3.2}}
+ expected_ratios = {
+ rc_fields.ResourceClass.VCPU:
+ inv[rc_fields.ResourceClass.VCPU]['allocation_ratio'],
+ rc_fields.ResourceClass.MEMORY_MB:
+ inv[rc_fields.ResourceClass.MEMORY_MB]['allocation_ratio'],
+ rc_fields.ResourceClass.DISK_GB:
+ inv[rc_fields.ResourceClass.DISK_GB]['allocation_ratio']
+ }
+ # If conf is unset and inv exists, return inv
+ self.assertEqual(expected_ratios,
+ self.connection._get_allocation_ratios(inv))
+
class AbstractDriverTestCase(_VirtDriverTestCase, test.TestCase):
def setUp(self):
diff --git a/nova/virt/driver.py b/nova/virt/driver.py
index 59340cbdaf..1fd33b0748 100644
--- a/nova/virt/driver.py
+++ b/nova/virt/driver.py
@@ -28,6 +28,7 @@ import six
import nova.conf
from nova.i18n import _
+from nova import rc_fields
from nova.virt import event as virtevent
CONF = nova.conf.CONF
@@ -862,6 +863,41 @@ class ComputeDriver(object):
import nova.compute.utils as compute_utils # avoid circular import
return compute_utils.convert_mb_to_ceil_gb(CONF.reserved_host_disk_mb)
+ @staticmethod
+ def _get_allocation_ratios(inventory):
+ """Get the cpu/ram/disk allocation ratios for the given inventory.
+
+ This utility method is used to get the inventory allocation ratios
+ for VCPU, MEMORY_MB and DISK_GB resource classes based on the following
+ precedence:
+
+ * Use ``[DEFAULT]/*_allocation_ratio`` if set - this overrides
+ everything including externally set allocation ratios on the
+ inventory via the placement API
+ * Use ``[DEFAULT]/initial_*_allocation_ratio`` if a value does not
+ exist for a given resource class in the ``inventory`` dict
+ * Use what is already in the ``inventory`` dict for the allocation
+ ratio if the above conditions are false
+
+ :param inventory: dict, keyed by resource class, of inventory
+ information.
+ :returns: A dict, keyed by resource class, of allocation ratios
+ """
+ keys = {'cpu': rc_fields.ResourceClass.VCPU,
+ 'ram': rc_fields.ResourceClass.MEMORY_MB,
+ 'disk': rc_fields.ResourceClass.DISK_GB}
+ result = {}
+ for res, rc in keys.items():
+ attr = '%s_allocation_ratio' % res
+ conf_ratio = getattr(CONF, attr)
+ if conf_ratio:
+ result[rc] = conf_ratio
+ elif rc not in inventory:
+ result[rc] = getattr(CONF, 'initial_%s' % attr)
+ else:
+ result[rc] = inventory[rc]['allocation_ratio']
+ return result
+
def update_provider_tree(self, provider_tree, nodename, allocations=None):
"""Update a ProviderTree object with current resource provider and
inventory information.
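A minimal usage sketch of the new helper (assumes a nova environment with
default config values; the rc_fields.ResourceClass constants are plain
strings such as 'VCPU'):

    from nova.virt.driver import ComputeDriver

    # No CONF.*_allocation_ratio override and no existing inventory
    # record: the initial_* defaults (16.0/1.5/1.0) are returned.
    ratios = ComputeDriver._get_allocation_ratios({})
    assert ratios['VCPU'] == 16.0

    # With the override still unset, a ratio already present in the
    # inventory dict is preserved rather than overwritten.
    inv = {'VCPU': {'allocation_ratio': 3.0}}
    assert ComputeDriver._get_allocation_ratios(inv)['VCPU'] == 3.0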
diff --git a/nova/virt/fake.py b/nova/virt/fake.py
index 26a06eabdb..b4e222217d 100644
--- a/nova/virt/fake.py
+++ b/nova/virt/fake.py
@@ -41,6 +41,7 @@ from nova import exception
from nova.objects import diagnostics as diagnostics_obj
from nova.objects import fields as obj_fields
from nova.objects import migrate_data
+from nova import rc_fields
from nova.virt import driver
from nova.virt import hardware
from nova.virt import virtapi
@@ -500,17 +501,19 @@ class FakeDriver(driver.ComputeDriver):
return host_status
def update_provider_tree(self, provider_tree, nodename, allocations=None):
- # TODO(mriedem): The allocation_ratio config usage will change with
- # blueprint initial-allocation-ratios. For now, the allocation ratio
- # config values all default to 0.0 and the ComputeNode provides a
- # facade for giving the real defaults, so we have to mimic that here.
+ # NOTE(yikun): If the inv record does not exist, the allocation_ratio
+ # will use the CONF.xxx_allocation_ratio value if xxx_allocation_ratio
+ # is set, and fall back to the initial_xxx_allocation_ratio
+ # otherwise.
+ inv = provider_tree.data(nodename).inventory
+ ratios = self._get_allocation_ratios(inv)
inventory = {
'VCPU': {
'total': self.vcpus,
'min_unit': 1,
'max_unit': self.vcpus,
'step_size': 1,
- 'allocation_ratio': CONF.cpu_allocation_ratio or 16.0,
+ 'allocation_ratio': ratios[rc_fields.ResourceClass.VCPU],
'reserved': CONF.reserved_host_cpus,
},
'MEMORY_MB': {
@@ -518,7 +521,7 @@ class FakeDriver(driver.ComputeDriver):
'min_unit': 1,
'max_unit': self.memory_mb,
'step_size': 1,
- 'allocation_ratio': CONF.ram_allocation_ratio or 1.5,
+ 'allocation_ratio': ratios[rc_fields.ResourceClass.MEMORY_MB],
'reserved': CONF.reserved_host_memory_mb,
},
'DISK_GB': {
@@ -526,7 +529,7 @@ class FakeDriver(driver.ComputeDriver):
'min_unit': 1,
'max_unit': self.local_gb,
'step_size': 1,
- 'allocation_ratio': CONF.disk_allocation_ratio or 1.0,
+ 'allocation_ratio': ratios[rc_fields.ResourceClass.DISK_GB],
'reserved': self._get_reserved_host_disk_gb_from_config(),
},
}
diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py
index 67fb6f53e5..276d33f4ac 100644
--- a/nova/virt/libvirt/driver.py
+++ b/nova/virt/libvirt/driver.py
@@ -6457,17 +6457,19 @@ class LibvirtDriver(driver.ComputeDriver):
# TODO(sbauza): Use traits to make a better world.
vgpus = self._get_vgpu_total()
- # TODO(mriedem): The allocation_ratio config usage will change with
- # blueprint initial-allocation-ratios. For now, the allocation ratio
- # config values all default to 0.0 and the ComputeNode provides a
- # facade for giving the real defaults, so we have to mimic that here.
+ # NOTE(yikun): If the inv record does not exist, the allocation_ratio
+ # will use the CONF.xxx_allocation_ratio value if xxx_allocation_ratio
+ # is set, and fall back to the initial_xxx_allocation_ratio
+ # otherwise.
+ inv = provider_tree.data(nodename).inventory
+ ratios = self._get_allocation_ratios(inv)
result = {
rc_fields.ResourceClass.VCPU: {
'total': vcpus,
'min_unit': 1,
'max_unit': vcpus,
'step_size': 1,
- 'allocation_ratio': CONF.cpu_allocation_ratio or 16.0,
+ 'allocation_ratio': ratios[rc_fields.ResourceClass.VCPU],
'reserved': CONF.reserved_host_cpus,
},
rc_fields.ResourceClass.MEMORY_MB: {
@@ -6475,7 +6477,7 @@ class LibvirtDriver(driver.ComputeDriver):
'min_unit': 1,
'max_unit': memory_mb,
'step_size': 1,
- 'allocation_ratio': CONF.ram_allocation_ratio or 1.5,
+ 'allocation_ratio': ratios[rc_fields.ResourceClass.MEMORY_MB],
'reserved': CONF.reserved_host_memory_mb,
},
}
@@ -6492,7 +6494,7 @@ class LibvirtDriver(driver.ComputeDriver):
'min_unit': 1,
'max_unit': disk_gb,
'step_size': 1,
- 'allocation_ratio': CONF.disk_allocation_ratio or 1.0,
+ 'allocation_ratio': ratios[rc_fields.ResourceClass.DISK_GB],
'reserved': self._get_reserved_host_disk_gb_from_config(),
}
diff --git a/nova/virt/powervm/driver.py b/nova/virt/powervm/driver.py
index a153257ee7..21a6a979ab 100644
--- a/nova/virt/powervm/driver.py
+++ b/nova/virt/powervm/driver.py
@@ -191,35 +191,34 @@ class PowerVMDriver(driver.ComputeDriver):
# update_available_resource flow.
data = self._get_available_resource()
- # TODO(efried): Fix these to reflect something like reality
- # For now, duplicate the logic the resource tracker uses via
- # update_compute_node when get_inventory/update_provider_tree is not
- # implemented.
- cpu_alloc_ratio = CONF.cpu_allocation_ratio or 16.0
+ # NOTE(yikun): If the inv record does not exist, the allocation_ratio
+ # will use the CONF.xxx_allocation_ratio value if xxx_allocation_ratio
+ # is set, and fall back to the initial_xxx_allocation_ratio
+ # otherwise.
+ inv = provider_tree.data(nodename).inventory
+ ratios = self._get_allocation_ratios(inv)
cpu_reserved = CONF.reserved_host_cpus
- mem_alloc_ratio = CONF.ram_allocation_ratio or 1.5
mem_reserved = CONF.reserved_host_memory_mb
- disk_alloc_ratio = CONF.disk_allocation_ratio or 1.0
disk_reserved = self._get_reserved_host_disk_gb_from_config()
inventory = {
rc_fields.ResourceClass.VCPU: {
'total': data['vcpus'],
'max_unit': data['vcpus'],
- 'allocation_ratio': cpu_alloc_ratio,
+ 'allocation_ratio': ratios[rc_fields.ResourceClass.VCPU],
'reserved': cpu_reserved,
},
rc_fields.ResourceClass.MEMORY_MB: {
'total': data['memory_mb'],
'max_unit': data['memory_mb'],
- 'allocation_ratio': mem_alloc_ratio,
+ 'allocation_ratio': ratios[rc_fields.ResourceClass.MEMORY_MB],
'reserved': mem_reserved,
},
rc_fields.ResourceClass.DISK_GB: {
# TODO(efried): Proper DISK_GB sharing when SSP driver in play
'total': int(data['local_gb']),
'max_unit': int(data['local_gb']),
- 'allocation_ratio': disk_alloc_ratio,
+ 'allocation_ratio': ratios[rc_fields.ResourceClass.DISK_GB],
'reserved': disk_reserved,
},
}