author    Stephen Finucane <stephenfin@redhat.com>  2021-03-03 11:50:52 +0000
committer Stephen Finucane <stephenfin@redhat.com>  2021-03-29 12:24:15 +0100
commit    1bf45c47205057801129dc20153de0a98d9c4e08 (patch)
tree      1fbad1b69ab8f2f8ecee50869fdafabb7da5c152 /nova/scheduler
parent    1de6e960af829142c0c6e606ef10d06a44d2cae1 (diff)
download  nova-1bf45c47205057801129dc20153de0a98d9c4e08.tar.gz
Remove (almost) all references to 'instance_type'
This continues on from I81fec10535034f3a81d46713a6eda813f90561cf and
removes all other references to 'instance_type' where it's possible to
do so. The only things left are DB columns, o.vo fields, some
unversioned objects, and RPC API methods. If we want to remove these,
we can, but it's a lot more work.

Change-Id: I264d6df1809d7283415e69a66a9153829b8df537
Signed-off-by: Stephen Finucane <stephenfin@redhat.com>
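For context on the idiom change that recurs in the diff below: besides the rename, the attribute-presence check moves from the explicit o.vo helper obj_attr_is_set() to the 'in' operator, which the dict-compat mixin on objects.Flavor implements in terms of that same helper. A minimal sketch of the equivalence, assuming a nova environment with objects registered:

    from nova import objects

    objects.register_all()
    flavor = objects.Flavor(name='m1.small')

    # Old style, as removed by this patch:
    old_check = flavor.obj_attr_is_set('extra_specs')

    # New style, as introduced by this patch; 'in' delegates to
    # obj_attr_is_set() via the object's dict-compat __contains__:
    new_check = 'extra_specs' in flavor

    assert old_check == new_check  # both False until extra_specs is set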
Diffstat (limited to 'nova/scheduler')
-rw-r--r--  nova/scheduler/filters/aggregate_instance_extra_specs.py  27
-rw-r--r--  nova/scheduler/filters/compute_capabilities_filter.py     16
-rw-r--r--  nova/scheduler/filters/type_filter.py                     10
-rw-r--r--  nova/scheduler/utils.py                                   36
4 files changed, 45 insertions(+), 44 deletions(-)
diff --git a/nova/scheduler/filters/aggregate_instance_extra_specs.py b/nova/scheduler/filters/aggregate_instance_extra_specs.py
index 58471ba375..68017b1d38 100644
--- a/nova/scheduler/filters/aggregate_instance_extra_specs.py
+++ b/nova/scheduler/filters/aggregate_instance_extra_specs.py
@@ -28,7 +28,7 @@ _SCOPE = 'aggregate_instance_extra_specs'
class AggregateInstanceExtraSpecsFilter(filters.BaseHostFilter):
- """AggregateInstanceExtraSpecsFilter works with InstanceType records."""
+ """AggregateInstanceExtraSpecsFilter works with flavor records."""
# Aggregate data and instance type does not change within a request
run_filter_once_per_request = True
@@ -36,21 +36,20 @@ class AggregateInstanceExtraSpecsFilter(filters.BaseHostFilter):
RUN_ON_REBUILD = False
def host_passes(self, host_state, spec_obj):
- """Return a list of hosts that can create instance_type
+ """Return a list of hosts that can create flavor.
Check that the extra specs associated with the instance type match
the metadata provided by aggregates. If not present return False.
"""
- instance_type = spec_obj.flavor
+ flavor = spec_obj.flavor
# If 'extra_specs' is not present or extra_specs are empty then we
# need not proceed further
- if (not instance_type.obj_attr_is_set('extra_specs') or
- not instance_type.extra_specs):
+ if 'extra_specs' not in flavor or not flavor.extra_specs:
return True
metadata = utils.aggregate_metadata_get_by_host(host_state)
- for key, req in instance_type.extra_specs.items():
+ for key, req in flavor.extra_specs.items():
# Either not scope format, or aggregate_instance_extra_specs scope
scope = key.split(':', 1)
if len(scope) > 1:
@@ -62,18 +61,20 @@ class AggregateInstanceExtraSpecsFilter(filters.BaseHostFilter):
aggregate_vals = metadata.get(key, None)
if not aggregate_vals:
LOG.debug(
- "%(host_state)s fails instance_type extra_specs "
- "requirements. Extra_spec %(key)s is not in aggregate.",
+ "%(host_state)s fails flavor extra_specs requirements. "
+ "Extra_spec %(key)s is not in aggregate.",
{'host_state': host_state, 'key': key})
return False
for aggregate_val in aggregate_vals:
if extra_specs_ops.match(aggregate_val, req):
break
else:
- LOG.debug("%(host_state)s fails instance_type extra_specs "
- "requirements. '%(aggregate_vals)s' do not "
- "match '%(req)s'",
- {'host_state': host_state, 'req': req,
- 'aggregate_vals': aggregate_vals})
+ LOG.debug(
+ "%(host_state)s fails flavor extra_specs requirements. "
+ "'%(aggregate_vals)s' do not match '%(req)s'",
+ {
+ 'host_state': host_state, 'req': req,
+ 'aggregate_vals': aggregate_vals,
+ })
return False
return True
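The scope handling in the hunk above is easy to misread, so here is a standalone sketch of what host_passes now does. Plain string equality stands in for extra_specs_ops.match(), which additionally understands operators such as '>=', '<in>' and '<or>'; the function name and data are illustrative:

    _SCOPE = 'aggregate_instance_extra_specs'

    def flavor_matches_aggregate(extra_specs, aggregate_metadata):
        """Simplified AggregateInstanceExtraSpecsFilter.host_passes."""
        for key, req in extra_specs.items():
            scope = key.split(':', 1)
            if len(scope) > 1:
                if scope[0] != _SCOPE:
                    # Key belongs to another namespace (e.g. 'hw:'); skip it.
                    continue
                key = scope[1]
            aggregate_vals = aggregate_metadata.get(key)
            if not aggregate_vals:
                return False  # required key absent from aggregate metadata
            if not any(val == req for val in aggregate_vals):
                return False  # no aggregate value satisfies the spec
        return True

    # flavor_matches_aggregate({'ssd': 'true'}, {'ssd': {'true'}})  -> True
    # flavor_matches_aggregate({'ssd': 'true'}, {})                 -> False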
diff --git a/nova/scheduler/filters/compute_capabilities_filter.py b/nova/scheduler/filters/compute_capabilities_filter.py
index 8d2e06cbd9..9818867618 100644
--- a/nova/scheduler/filters/compute_capabilities_filter.py
+++ b/nova/scheduler/filters/compute_capabilities_filter.py
@@ -65,14 +65,14 @@ class ComputeCapabilitiesFilter(filters.BaseHostFilter):
return None
return cap
- def _satisfies_extra_specs(self, host_state, instance_type):
+ def _satisfies_extra_specs(self, host_state, flavor):
"""Check that the host_state provided by the compute service
satisfies the extra specs associated with the instance type.
"""
- if 'extra_specs' not in instance_type:
+ if 'extra_specs' not in flavor:
return True
- for key, req in instance_type.extra_specs.items():
+ for key, req in flavor.extra_specs.items():
# Either not scope format, or in capabilities scope
scope = key.split(':')
# If key does not have a namespace, the scope's size is 1, check
@@ -106,10 +106,10 @@ class ComputeCapabilitiesFilter(filters.BaseHostFilter):
return True
def host_passes(self, host_state, spec_obj):
- """Return a list of hosts that can create instance_type."""
- instance_type = spec_obj.flavor
- if not self._satisfies_extra_specs(host_state, instance_type):
- LOG.debug("%(host_state)s fails instance_type extra_specs "
- "requirements", {'host_state': host_state})
+ """Return a list of hosts that can create flavor."""
+ if not self._satisfies_extra_specs(host_state, spec_obj.flavor):
+ LOG.debug(
+ "%(host_state)s fails flavor extra_specs requirements",
+ {'host_state': host_state})
return False
return True
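The 'capabilities' scope referenced in the hunk above resolves namespaced extra spec keys against nested host data. An illustrative (not verbatim) sketch of that lookup, assuming capabilities are exposed as nested dicts:

    def get_capability(host_caps, key):
        """Resolve e.g. 'capabilities:cpu_info:features' against host data."""
        parts = key.split(':')
        if parts and parts[0] == 'capabilities':
            parts = parts[1:]  # strip the optional namespace prefix
        cap = host_caps
        for part in parts:
            if not isinstance(cap, dict):
                return None
            cap = cap.get(part)
            if cap is None:
                return None
        return cap

    # get_capability({'cpu_info': {'features': ['avx2']}},
    #                'capabilities:cpu_info:features')  -> ['avx2']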
diff --git a/nova/scheduler/filters/type_filter.py b/nova/scheduler/filters/type_filter.py
index 5b386cf83f..f60392a138 100644
--- a/nova/scheduler/filters/type_filter.py
+++ b/nova/scheduler/filters/type_filter.py
@@ -19,9 +19,9 @@ from nova.scheduler.filters import utils
class AggregateTypeAffinityFilter(filters.BaseHostFilter):
- """AggregateTypeAffinityFilter limits instance_type by aggregate
+ """AggregateTypeAffinityFilter limits flavors by aggregate
- return True if no instance_type key is set or if the aggregate metadata
+ return True if no flavor key is set or if the aggregate metadata
key 'instance_type' has the instance_type name as a value
"""
@@ -31,13 +31,11 @@ class AggregateTypeAffinityFilter(filters.BaseHostFilter):
RUN_ON_REBUILD = False
def host_passes(self, host_state, spec_obj):
- instance_type = spec_obj.flavor
-
+ # TODO(stephenfin): Add support for 'flavor' key
aggregate_vals = utils.aggregate_values_from_key(
host_state, 'instance_type')
for val in aggregate_vals:
- if (instance_type.name in
- [x.strip() for x in val.split(',')]):
+ if spec_obj.flavor.name in [x.strip() for x in val.split(',')]:
return True
return not aggregate_vals
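Since this filter's contract lives only in its docstring, a small self-contained mirror of the check may help; flavor_name and the metadata values are illustrative:

    def type_affinity_passes(flavor_name, aggregate_vals):
        """Mirror of AggregateTypeAffinityFilter.host_passes: pass if any
        aggregate 'instance_type' value lists the flavor name, or if no
        aggregate sets the key at all."""
        for val in aggregate_vals:
            if flavor_name in [x.strip() for x in val.split(',')]:
                return True
        return not aggregate_vals

    # type_affinity_passes('m1.small', {'m1.small, m1.large'})  -> True
    # type_affinity_passes('m1.tiny', set())                    -> True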
diff --git a/nova/scheduler/utils.py b/nova/scheduler/utils.py
index b71c209d13..d3d13ce0e4 100644
--- a/nova/scheduler/utils.py
+++ b/nova/scheduler/utils.py
@@ -534,7 +534,7 @@ class ResourceRequest(object):
list(str(rg) for rg in list(self._rg_by_id.values()))))
-def build_request_spec(image, instances, instance_type=None):
+def build_request_spec(image, instances, flavor=None):
"""Build a request_spec (ahem, not a RequestSpec) for the scheduler.
The request_spec assumes that all instances to be scheduled are the same
@@ -543,21 +543,21 @@ def build_request_spec(image, instances, instance_type=None):
:param image: optional primitive image meta dict
:param instances: list of instances; objects will be converted to
primitives
- :param instance_type: optional flavor; objects will be converted to
+ :param flavor: optional flavor; objects will be converted to
primitives
:return: dict with the following keys::
'image': the image dict passed in or {}
'instance_properties': primitive version of the first instance passed
- 'instance_type': primitive version of the instance_type or None
+ 'instance_type': primitive version of the flavor or None
'num_instances': the number of instances passed in
"""
instance = instances[0]
- if instance_type is None:
+ if flavor is None:
if isinstance(instance, obj_instance.Instance):
- instance_type = instance.get_flavor()
+ flavor = instance.get_flavor()
else:
- instance_type = flavors.extract_flavor(instance)
+ flavor = flavors.extract_flavor(instance)
if isinstance(instance, obj_instance.Instance):
instance = obj_base.obj_to_primitive(instance)
@@ -565,25 +565,26 @@ def build_request_spec(image, instances, instance_type=None):
# to detach our metadata blob because we modify it below.
instance['system_metadata'] = dict(instance.get('system_metadata', {}))
- if isinstance(instance_type, objects.Flavor):
- instance_type = obj_base.obj_to_primitive(instance_type)
+ if isinstance(flavor, objects.Flavor):
+ flavor = obj_base.obj_to_primitive(flavor)
# NOTE(danms): Replicate this old behavior because the
# scheduler RPC interface technically expects it to be
# there. Remove this when we bump the scheduler RPC API to
# v5.0
try:
- flavors.save_flavor_info(instance.get('system_metadata', {}),
- instance_type)
+ flavors.save_flavor_info(
+ instance.get('system_metadata', {}), flavor)
except KeyError:
# If the flavor isn't complete (which is legit with a
# flavor object, just don't put it in the request spec
pass
request_spec = {
- 'image': image or {},
- 'instance_properties': instance,
- 'instance_type': instance_type,
- 'num_instances': len(instances)}
+ 'image': image or {},
+ 'instance_properties': instance,
+ 'instance_type': flavor,
+ 'num_instances': len(instances),
+ }
# NOTE(mriedem): obj_to_primitive above does not serialize everything
# in an object, like datetime fields, so we need to still call to_primitive
# to recursively serialize the items in the request_spec dict.
@@ -898,11 +899,12 @@ def set_vm_state_and_notify(context, instance_uuid, service, method, updates,
context, method, instance_uuid, request_spec, vm_state, ex)
-def build_filter_properties(scheduler_hints, forced_host,
- forced_node, instance_type):
+def build_filter_properties(
+ scheduler_hints, forced_host, forced_node, flavor,
+):
"""Build the filter_properties dict from data in the boot request."""
filter_properties = dict(scheduler_hints=scheduler_hints)
- filter_properties['instance_type'] = instance_type
+ filter_properties['instance_type'] = flavor
# TODO(alaski): It doesn't seem necessary that these are conditionally
# added. Let's just add empty lists if not forced_host/node.
if forced_host:
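A usage sketch of the two renamed signatures, assuming 'instance' is an objects.Instance and 'flavor' an objects.Flavor (both placeholders). Note that the returned dicts keep the legacy 'instance_type' key for RPC compatibility even though the parameter is now named 'flavor':

    from nova.scheduler import utils as scheduler_utils

    # 'instance' and 'flavor' are assumed to exist in the caller's scope.
    request_spec = scheduler_utils.build_request_spec(
        image={}, instances=[instance], flavor=flavor)
    assert set(request_spec) == {'image', 'instance_properties',
                                 'instance_type', 'num_instances'}

    filter_properties = scheduler_utils.build_filter_properties(
        scheduler_hints={}, forced_host=None, forced_node=None,
        flavor=flavor)
    assert filter_properties['instance_type'] is flavor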