-rw-r--r--  .zuul.yaml | 6
-rw-r--r--  doc/api_samples/os-evacuate/v2.95/server-evacuate-find-host-req.json | 1
-rw-r--r--  doc/api_samples/os-evacuate/v2.95/server-evacuate-req.json | 3
-rw-r--r--  doc/source/admin/compute-node-identification.rst | 83
-rw-r--r--  doc/source/admin/cpu-topologies.rst | 91
-rw-r--r--  doc/source/admin/index.rst | 1
-rw-r--r--  doc/source/admin/live-migration-usage.rst | 2
-rw-r--r--  doc/source/admin/remote-console-access.rst | 10
-rw-r--r--  doc/source/admin/upgrades.rst | 20
-rw-r--r--  doc/source/cli/nova-compute.rst | 2
-rw-r--r--  doc/source/contributor/ptl-guide.rst | 73
-rw-r--r--  mypy-files.txt | 4
-rw-r--r--  nova/api/openstack/compute/rest_api_version_history.rst | 12
-rw-r--r--  nova/compute/manager.py | 60
-rw-r--r--  nova/compute/resource_tracker.py | 8
-rw-r--r--  nova/compute/rpcapi.py | 2
-rw-r--r--  nova/conductor/manager.py | 4
-rw-r--r--  nova/conf/libvirt.py | 18
-rw-r--r--  nova/conf/spice.py | 53
-rw-r--r--  nova/exception.py | 6
-rw-r--r--  nova/filesystem.py | 59
-rw-r--r--  nova/network/neutron.py | 21
-rw-r--r--  nova/objects/compute_node.py | 8
-rw-r--r--  nova/objects/service.py | 23
-rw-r--r--  nova/pci/request.py | 4
-rw-r--r--  nova/pci/stats.py | 2
-rw-r--r--  nova/pci/whitelist.py | 2
-rw-r--r--  nova/scheduler/client/report.py | 2
-rw-r--r--  nova/scheduler/filters/__init__.py | 41
-rw-r--r--  nova/scheduler/filters/numa_topology_filter.py | 40
-rw-r--r--  nova/scheduler/filters/pci_passthrough_filter.py | 33
-rw-r--r--  nova/scheduler/manager.py | 35
-rw-r--r--  nova/scheduler/utils.py | 11
-rw-r--r--  nova/test.py | 16
-rw-r--r--  nova/tests/fixtures/__init__.py | 2
-rw-r--r--  nova/tests/fixtures/filesystem.py | 81
-rw-r--r--  nova/tests/fixtures/nova.py | 4
-rw-r--r--  nova/tests/functional/libvirt/test_pci_sriov_servers.py | 41
-rw-r--r--  nova/tests/functional/libvirt/test_power_manage.py | 270
-rw-r--r--  nova/tests/functional/test_servers.py | 38
-rw-r--r--  nova/tests/functional/test_service.py | 85
-rw-r--r--  nova/tests/unit/compute/test_compute_mgr.py | 79
-rw-r--r--  nova/tests/unit/compute/test_resource_tracker.py | 14
-rw-r--r--  nova/tests/unit/compute/test_shelve.py | 6
-rw-r--r--  nova/tests/unit/objects/test_compute_node.py | 9
-rw-r--r--  nova/tests/unit/scheduler/test_manager.py | 36
-rw-r--r--  nova/tests/unit/test_filesystem.py | 52
-rw-r--r--  nova/tests/unit/virt/libvirt/cpu/__init__.py | 0
-rw-r--r--  nova/tests/unit/virt/libvirt/cpu/test_api.py | 194
-rw-r--r--  nova/tests/unit/virt/libvirt/cpu/test_core.py | 122
-rw-r--r--  nova/tests/unit/virt/libvirt/test_config.py | 26
-rw-r--r--  nova/tests/unit/virt/libvirt/test_driver.py | 95
-rw-r--r--  nova/tests/unit/virt/libvirt/test_host.py | 6
-rw-r--r--  nova/tests/unit/virt/test_hardware.py | 21
-rw-r--r--  nova/utils.py | 47
-rw-r--r--  nova/virt/driver.py | 6
-rw-r--r--  nova/virt/fake.py | 27
-rw-r--r--  nova/virt/hardware.py | 18
-rw-r--r--  nova/virt/ironic/driver.py | 8
-rw-r--r--  nova/virt/libvirt/config.py | 24
-rw-r--r--  nova/virt/libvirt/cpu/__init__.py | 0
-rw-r--r--  nova/virt/libvirt/cpu/api.py | 157
-rw-r--r--  nova/virt/libvirt/cpu/core.py | 78
-rw-r--r--  nova/virt/libvirt/driver.py | 43
-rw-r--r--  nova/virt/libvirt/event.py | 7
-rw-r--r--  nova/virt/libvirt/host.py | 8
-rw-r--r--  nova/virt/libvirt/utils.py | 6
-rw-r--r--  releasenotes/notes/add-spice-compression-support-e41676f445544e8d.yaml | 23
-rw-r--r--  releasenotes/notes/antelope-prelude-4a99907b00e739f8.yaml | 51
-rw-r--r--  releasenotes/notes/bp-libvirt-cpu-state-mgmt-fbc9c1f9f473003c.yaml | 18
-rw-r--r--  releasenotes/notes/port-binding-removed-shelved-offloaded-f1772a64be007b24.yaml | 6
-rw-r--r--  releasenotes/source/2023.1.rst | 6
-rw-r--r--  releasenotes/source/index.rst | 1
-rw-r--r--  releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po | 89
-rw-r--r--  tox.ini | 2
75 files changed, 2324 insertions, 238 deletions
diff --git a/.zuul.yaml b/.zuul.yaml
index 25d6cc6819..abe4d2fa4a 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -419,6 +419,7 @@
# Added in Yoga.
NOVNC_FROM_PACKAGE: False
NOVA_USE_UNIFIED_LIMITS: True
+ MYSQL_REDUCE_MEMORY: True
devstack_services:
# Disable OVN services
br-ex-tcpdump: false
@@ -608,6 +609,7 @@
GLANCE_USE_IMPORT_WORKFLOW: True
DEVSTACK_PARALLEL: True
GLANCE_LIMIT_IMAGE_SIZE_TOTAL: 2048
+ MYSQL_REDUCE_MEMORY: True
# NOTE(danms): This job is pretty heavy as it is, so we disable some
# services that are not relevant to the nova-glance-ceph scenario
# that this job is intended to validate.
@@ -755,7 +757,7 @@
irrelevant-files: *policies-irrelevant-files
- tempest-integrated-compute-enforce-scope-new-defaults:
irrelevant-files: *policies-irrelevant-files
- - grenade-skip-level:
+ - grenade-skip-level-always:
irrelevant-files: *policies-irrelevant-files
- nova-grenade-multinode:
irrelevant-files: *policies-irrelevant-files
@@ -792,6 +794,8 @@
irrelevant-files: *policies-irrelevant-files
- tempest-integrated-compute-enforce-scope-new-defaults:
irrelevant-files: *policies-irrelevant-files
+ - grenade-skip-level-always:
+ irrelevant-files: *policies-irrelevant-files
- nova-grenade-multinode:
irrelevant-files: *policies-irrelevant-files
- tempest-ipv6-only:
diff --git a/doc/api_samples/os-evacuate/v2.95/server-evacuate-find-host-req.json b/doc/api_samples/os-evacuate/v2.95/server-evacuate-find-host-req.json
index ae9fb0a67b..8ad929226e 100644
--- a/doc/api_samples/os-evacuate/v2.95/server-evacuate-find-host-req.json
+++ b/doc/api_samples/os-evacuate/v2.95/server-evacuate-find-host-req.json
@@ -1,5 +1,4 @@
{
"evacuate": {
- "targetState": "stopped"
}
}
diff --git a/doc/api_samples/os-evacuate/v2.95/server-evacuate-req.json b/doc/api_samples/os-evacuate/v2.95/server-evacuate-req.json
index a9f809c830..d192892cdc 100644
--- a/doc/api_samples/os-evacuate/v2.95/server-evacuate-req.json
+++ b/doc/api_samples/os-evacuate/v2.95/server-evacuate-req.json
@@ -1,6 +1,5 @@
{
"evacuate": {
- "host": "testHost",
- "targetState": "stopped"
+ "host": "testHost"
}
}
diff --git a/doc/source/admin/compute-node-identification.rst b/doc/source/admin/compute-node-identification.rst
new file mode 100644
index 0000000000..31d4802d0b
--- /dev/null
+++ b/doc/source/admin/compute-node-identification.rst
@@ -0,0 +1,83 @@
+===========================
+Compute Node Identification
+===========================
+
+Nova requires that compute nodes maintain a constant and consistent identity
+during their lifecycle. With the exception of the ironic driver, starting in
+the 2023.1 release, this is achieved by use of a file, persisted on disk,
+containing the node's unique identifier. Prior to 2023.1, a combination of
+the compute node's hostname and the :oslo.config:option:`host` value in the
+configuration file were used.
+
+The 2023.1 and later compute node identification file must remain unchanged
+during the lifecycle of the compute node. Changing the value or removing the
+file will result in a failure to start and may require advanced techniques
+for recovery. The file is read once at ``nova-compute`` startup, at which point
+it is validated for formatting and the corresponding node is located or
+created in the database.
+
+.. note::
+
+   Even after 2023.1, the compute node's hostname may not be changed after
+   the initial registration with the controller nodes; it is simply no longer
+   used as the primary method for identification.
+
+The behavior of ``nova-compute`` is different when using the ironic driver,
+as the (UUID-based) identity and mapping of compute nodes to compute manager
+service hosts is dynamic. In that case, no single node identity is maintained
+by the compute host and thus no identity file is read or written. None of the
+sections below therefore apply to hosts with
+:oslo.config:option:`compute_driver` set to ``ironic``.
+
+Self-provisioning of the node identity
+--------------------------------------
+
+By default, ``nova-compute`` will automatically generate and write a UUID to
+disk the first time it starts up, and will use that going forward as its
+stable identity. Using the :oslo.config:option:`state_path`
+(which is ``/var/lib/nova`` on most systems), a ``compute_id`` file will be
+created with a generated UUID.
+
+Since this file (and its parent directory) is writable by nova, it may be
+desirable to move this to one of the other locations that nova looks for the
+identification file.
+
+Deployment provisioning of the node identity
+--------------------------------------------
+
+In addition to the location mentioned above, nova will also search the parent
+directories of any config file in use (either the defaults or provided on
+the command line) for a ``compute_id`` file. Thus, a deployment tool may, on
+most systems, pre-provision the node's UUID by writing one to
+``/etc/nova/compute_id``.
+
+The contents of the file should be a single UUID in canonical textual
+representation with no additional whitespace or other characters. The following
+should work on most Linux systems:
+
+.. code-block:: shell
+
+ $ uuidgen > /etc/nova/compute_id
+
+.. note::
+
+ **Do not** execute the above command blindly in every run of a deployment
+ tool, as that will result in overwriting the ``compute_id`` file each time,
+ which *will* prevent nova from working properly.
+
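+For illustration, an idempotent guard in a deployment tool could look like
+this, the test-before-write being the important part:
+
+.. code-block:: shell
+
+    # Only generate an identity if none has been provisioned yet
+    $ [ -f /etc/nova/compute_id ] || uuidgen > /etc/nova/compute_id
+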
+Upgrading from pre-2023.1
+-------------------------
+
+Before release 2023.1, ``nova-compute`` only used the hostname (combined with
+:oslo.config:option:`host`, if set) to identify its compute node objects in
+the database. When upgrading from a prior release, the compute node will
+perform a one-time migration of the hostname-matched compute node UUID to the
+``compute_id`` file in the :oslo.config:option:`state_path` location.
+
+.. note::
+
+ It is imperative that you allow the above migration to run and complete on
+ compute nodes that are being upgraded. Skipping this step by
+ pre-provisioning a ``compute_id`` file before the upgrade will **not** work
+ and will be equivalent to changing the compute node UUID after it has
+ already been created once.
diff --git a/doc/source/admin/cpu-topologies.rst b/doc/source/admin/cpu-topologies.rst
index 9770639c3a..082c88f655 100644
--- a/doc/source/admin/cpu-topologies.rst
+++ b/doc/source/admin/cpu-topologies.rst
@@ -730,6 +730,97 @@ CPU policy, meanwhile, will consume ``VCPU`` inventory.
.. _configure-hyperv-numa:
+Configuring CPU power management for dedicated cores
+----------------------------------------------------
+
+.. versionchanged:: 27.0.0
+
+ This feature was only introduced by the 2023.1 Antelope release
+
+.. important::
+
+ The functionality described below is currently only supported by the
+ libvirt/KVM driver.
+
+For power saving reasons, operators can decide to turn down the power usage of
+CPU cores whether they are in use or not. For obvious reasons, Nova only allows
+changing the power consumption of dedicated CPU cores, not shared ones.
+Accordingly, usage of this feature relies on reading the
+:oslo.config:option:`compute.cpu_dedicated_set` config option to know which CPU
+cores to handle.
+The main action to enable the power management of dedicated cores is to set the
+:oslo.config:option:`libvirt.cpu_power_management` config option to ``True``.
+
+By default, if this option is enabled, Nova will look up the dedicated cores
+and power them down at compute service startup. Then, when an instance is
+assigned a dedicated core, that core is powered up right before the libvirt
+guest starts. Conversely, once an instance is stopped, migrated or deleted,
+the corresponding dedicated core is powered down.
+
+There are two distinct strategies for powering cores up or down:
+
+- the default is to offline the CPU core and online it when needed.
+- an alternative strategy is to use two distinct CPU governors for the up state
+ and the down state.
+
+The strategy can be chosen using the
+:oslo.config:option:`libvirt.cpu_power_management_strategy` config option.
+``cpu_state`` selects the online/offline strategy, while ``governor`` selects
+the alternative strategy.
+We default to turning off the cores as it provides the best power savings,
+while the governors can also be managed by tools outside of Nova, such as
+tuned. That being said, we also provide a way to automatically change the
+governors on the fly, as explained below.
+
+If the strategy is set to ``governor``, a couple of config options are provided
+to define which exact CPU governor to use for each of the up and down states:
+
+- :oslo.config:option:`libvirt.cpu_power_governor_low` will define the governor
+ to use for the powerdown state (defaults to ``powersave``)
+- :oslo.config:option:`libvirt.cpu_power_governor_high` will define the
+ governor to use for the powerup state (defaults to ``performance``)
+
+.. important::
+   It is the responsibility of the operator to ensure that the governors
+   defined by the configuration options are supported by the kernel of the
+   OS running the compute service.
+
+   As a side note, we recommend the ``schedutil`` governor as an alternative
+   for the high-power state (if the kernel supports it), as the CPU frequency
+   is dynamically set based on CPU task states. Other governors may be worth
+   testing, including ``conservative`` and ``ondemand``, which consume quite a
+   bit more power than ``schedutil`` but are more efficient than
+   ``performance``. See `Linux kernel docs`_ for further explanations.
+
+.. _`Linux kernel docs`: https://www.kernel.org/doc/Documentation/cpu-freq/governors.txt
+
+As an example, the relevant part of a ``nova.conf`` configuration would look
+like::
+
+ [compute]
+ cpu_dedicated_set=2-17
+
+ [libvirt]
+ cpu_power_management=True
+ cpu_power_management_strategy=cpu_state
+
+.. warning::
+
+   CPU core #0 has a special meaning in most recent Linux kernels. Using it
+   for CPU pinning is always highly discouraged, and you should make sure it
+   is never power managed, or you may be in for surprises if Nova turns it
+   off!
+
+One last important note: you may decide to change the CPU management strategy
+during the compute lifecycle, or you may already be managing the CPU states by
+other means (the sysfs commands shown below let you inspect the current
+state). To ensure that Nova can correctly manage CPU performance, we added a
+couple of checks at startup that refuse to start the nova-compute service if
+the following rules aren't enforced:
+
+- if the operator opts for the ``cpu_state`` strategy, then all dedicated CPU
+  governors *MUST* be identical.
+- if the operator opts for the ``governor`` strategy, then all dedicated CPU
+  cores *MUST* be online.
+
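+For illustration, assuming the sysfs layout of recent Linux kernels (the same
+paths Nova relies on), you can inspect the current state of a dedicated core,
+here core #2, as follows:
+
+.. code-block:: shell
+
+    # 1 means the core is online, 0 means it is offline
+    $ cat /sys/devices/system/cpu/cpu2/online
+    # the governor currently set for this core
+    $ cat /sys/devices/system/cpu/cpu2/cpufreq/scaling_governor
+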
Configuring Hyper-V compute nodes for instance NUMA policies
------------------------------------------------------------
diff --git a/doc/source/admin/index.rst b/doc/source/admin/index.rst
index 93b4e6a554..8cb5bf7156 100644
--- a/doc/source/admin/index.rst
+++ b/doc/source/admin/index.rst
@@ -206,6 +206,7 @@ instance for these kind of workloads.
secure-boot
sev
managing-resource-providers
+ compute-node-identification
resource-limits
cpu-models
libvirt-misc
diff --git a/doc/source/admin/live-migration-usage.rst b/doc/source/admin/live-migration-usage.rst
index 783ab5e27c..32c67c2b0a 100644
--- a/doc/source/admin/live-migration-usage.rst
+++ b/doc/source/admin/live-migration-usage.rst
@@ -102,7 +102,7 @@ Manual selection of the destination host
.. code-block:: console
- $ openstack server migrate d1df1b5a-70c4-4fed-98b7-423362f2c47c --live HostC
+ $ openstack server migrate d1df1b5a-70c4-4fed-98b7-423362f2c47c --live-migration --host HostC
#. Confirm that the instance has been migrated successfully:
diff --git a/doc/source/admin/remote-console-access.rst b/doc/source/admin/remote-console-access.rst
index 015c6522d0..9b28646d27 100644
--- a/doc/source/admin/remote-console-access.rst
+++ b/doc/source/admin/remote-console-access.rst
@@ -366,6 +366,16 @@ Replace ``IP_ADDRESS`` with the IP address from which the proxy is accessible
by the outside world. For example, this may be the management interface IP
address of the controller or the VIP.
+Optionally, the :program:`nova-compute` service supports the following
+additional options to configure compression settings (algorithms and modes)
+for SPICE consoles.
+
+- :oslo.config:option:`spice.image_compression`
+- :oslo.config:option:`spice.jpeg_compression`
+- :oslo.config:option:`spice.zlib_compression`
+- :oslo.config:option:`spice.playback_compression`
+- :oslo.config:option:`spice.streaming_mode`
+
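+As an example, the following ``nova.conf`` snippet enables lossless image
+compression and disables video stream detection; the values are purely
+illustrative and any of the documented choices may be used:
+
+.. code-block:: ini
+
+    [spice]
+    image_compression = auto_lz
+    jpeg_compression = auto
+    zlib_compression = auto
+    playback_compression = true
+    streaming_mode = off
+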
Serial
------
diff --git a/doc/source/admin/upgrades.rst b/doc/source/admin/upgrades.rst
index 00a714970b..61fd0cf258 100644
--- a/doc/source/admin/upgrades.rst
+++ b/doc/source/admin/upgrades.rst
@@ -41,21 +41,27 @@ Rolling upgrade process
To reduce downtime, the compute services can be upgraded in a rolling fashion.
It means upgrading a few services at a time. This results in a condition where
both old (N) and new (N+1) nova-compute services co-exist for a certain time
-period. Note that, there is no upgrade of the hypervisor here, this is just
+period (or even N with N+2 upgraded nova-compute services, see below).
+Note that there is no upgrade of the hypervisor here; this is just
upgrading the nova services. If reduced downtime is not a concern (or lower
complexity is desired), all services may be taken down and restarted at the
same time.
.. important::
- Nova does not currently support the coexistence of N and N+2 or greater
- :program:`nova-compute` or :program:`nova-conductor` services in the same
- deployment. The `nova-conductor`` service will fail to start when a
- ``nova-compute`` service that is older than the previous release (N-2 or
- greater) is detected. Similarly, in a :doc:`deployment with multiple cells
+ As of OpenStack 2023.1 (Antelope), Nova supports the coexistence of N and
+ N-2 (Yoga) :program:`nova-compute` or :program:`nova-conductor` services in
+   the same deployment. The ``nova-conductor`` service will fail to start when
+ a ``nova-compute`` service that is older than the support envelope is
+ detected. This varies by release and the support envelope will be explained
+ in the release notes. Similarly, in a :doc:`deployment with multiple cells
</admin/cells>`, neither the super conductor service nor any per-cell
conductor service will start if any other conductor service in the
- deployment is older than the previous release.
+ deployment is older than the N-2 release.
+
+ Releases older than 2023.1 will only support rolling upgrades for a single
+ release difference between :program:`nova-compute` and
+ :program:`nova-conductor` services.
#. Before maintenance window:
diff --git a/doc/source/cli/nova-compute.rst b/doc/source/cli/nova-compute.rst
index f190949efa..1346dab92e 100644
--- a/doc/source/cli/nova-compute.rst
+++ b/doc/source/cli/nova-compute.rst
@@ -41,6 +41,8 @@ Files
* ``/etc/nova/policy.d/``
* ``/etc/nova/rootwrap.conf``
* ``/etc/nova/rootwrap.d/``
+* ``/etc/nova/compute_id``
+* ``/var/lib/nova/compute_id``
See Also
========
diff --git a/doc/source/contributor/ptl-guide.rst b/doc/source/contributor/ptl-guide.rst
index 813f1bc83e..b530b100bc 100644
--- a/doc/source/contributor/ptl-guide.rst
+++ b/doc/source/contributor/ptl-guide.rst
@@ -29,7 +29,11 @@ New PTL
* Get acquainted with the release schedule
- * Example: https://wiki.openstack.org/wiki/Nova/Stein_Release_Schedule
+ * Example: https://releases.openstack.org/antelope/schedule.html
+
+ * Also, note that we usually create a specific wiki page for each cycle like
+ https://wiki.openstack.org/wiki/Nova/2023.1_Release_Schedule but it's
+ preferred to use the main release schedule above.
Project Team Gathering
----------------------
@@ -37,30 +41,34 @@ Project Team Gathering
* Create PTG planning etherpad, retrospective etherpad and alert about it in
nova meeting and dev mailing list
- * Example: https://etherpad.openstack.org/p/nova-ptg-stein
+ * Example: https://etherpad.opendev.org/p/nova-antelope-ptg
* Run sessions at the PTG
-* Have a priorities discussion at the PTG
+* Do a retro of the previous cycle
- * Example: https://etherpad.openstack.org/p/nova-ptg-stein-priorities
+* Agree on the agenda for this release, including but not limited to:
-* Sign up for group photo at the PTG (if applicable)
+ * Number of review days, for either specs or implementation
+ * Define the Spec approval and Feature freeze dates
+ * Modify the release schedule if needed by adding the new dates.
+    As an example: https://review.opendev.org/c/openstack/releases/+/877094
+
+* Discuss the implications of the current release being `SLURP or non-SLURP`__
-* Open review runways for the cycle
+.. __: https://governance.openstack.org/tc/resolutions/20220210-release-cadence-adjustment.html
+
+* Sign up for group photo at the PTG (if applicable)
- * Example: https://etherpad.openstack.org/p/nova-runways-stein
After PTG
---------
* Send PTG session summaries to the dev mailing list
-* Make sure the cycle priorities spec gets reviewed and merged
-
- * Example: https://specs.openstack.org/openstack/nova-specs/priorities/stein-priorities.html
+* Add `RFE bugs`__ if you have action items that are simple to do but without
+  an owner yet.
-* Run the count-blueprints script daily to gather data for the cycle burndown chart
+.. __: https://bugs.launchpad.net/nova/+bugs?field.tag=rfe
A few weeks before milestone 1
------------------------------
@@ -70,12 +78,13 @@ A few weeks before milestone 1
* Periodically check the series goals others have proposed in the “Set series
goals” link:
- * Example: https://blueprints.launchpad.net/nova/stein/+setgoals
+ * Example: https://blueprints.launchpad.net/nova/antelope/+setgoals
Milestone 1
-----------
-* Do milestone release of nova and python-novaclient (in launchpad only)
+* Do milestone release of nova and python-novaclient (in launchpad only, can be
+ optional)
* This is launchpad bookkeeping only. With the latest release team changes,
projects no longer do milestone releases. See: https://releases.openstack.org/reference/release_models.html#cycle-with-milestones-legacy
@@ -87,6 +96,8 @@ Milestone 1
the minor version to leave room for future stable branch releases
* os-vif
+ * placement
+ * os-traits / os-resource-classes
* Release stable branches of nova
@@ -117,28 +128,26 @@ Summit
* Prepare the on-boarding session materials. Enlist help of others
+* Prepare the operator meet-and-greet session. Enlist help of others
+
A few weeks before milestone 2
------------------------------
* Plan a spec review day (optional)
-* Periodically check the series goals others have proposed in the “Set series
- goals” link:
-
- * Example: https://blueprints.launchpad.net/nova/stein/+setgoals
-
Milestone 2
-----------
-* Spec freeze
+* Spec freeze (if agreed)
-* Release nova and python-novaclient
+* Release nova and python-novaclient (if new features were merged)
* Release other libraries as needed
* Stable branch releases of nova
* For nova, set the launchpad milestone release as “released” with the date
+ (can be optional)
Shortly after spec freeze
-------------------------
@@ -146,7 +155,7 @@ Shortly after spec freeze
* Create a blueprint status etherpad to help track, especially non-priority
blueprint work, to help things get done by Feature Freeze (FF). Example:
- * https://etherpad.openstack.org/p/nova-stein-blueprint-status
+ * https://etherpad.opendev.org/p/nova-antelope-blueprint-status
* Create or review a patch to add the next release’s specs directory so people
can propose specs for next release after spec freeze for current release
@@ -155,13 +164,15 @@ Non-client library release freeze
---------------------------------
* Final release for os-vif
+* Final release for os-traits
+* Final release for os-resource-classes
Milestone 3
-----------
* Feature freeze day
-* Client library freeze, release python-novaclient
+* Client library freeze, release python-novaclient and osc-placement
* Close out all blueprints, including “catch all” blueprints like mox,
versioned notifications
@@ -170,7 +181,7 @@ Milestone 3
* For nova, set the launchpad milestone release as “released” with the date
-* Write the `cycle highlights
+* Start writing the `cycle highlights
<https://docs.openstack.org/project-team-guide/release-management.html#cycle-highlights>`__
Week following milestone 3
@@ -199,7 +210,7 @@ A few weeks before RC
* Make a RC1 todos etherpad and tag bugs as ``<release>-rc-potential`` and keep
track of them, example:
- * https://etherpad.openstack.org/p/nova-stein-rc-potential
+ * https://etherpad.opendev.org/p/nova-antelope-rc-potential
* Go through the bug list and identify any rc-potential bugs and tag them
@@ -242,7 +253,7 @@ RC
* Example: https://review.opendev.org/644412
-* Write the cycle-highlights in marketing-friendly sentences and propose to the
+* Push the cycle-highlights in marketing-friendly sentences and propose to the
openstack/releases repo. Usually based on reno prelude but made more readable
and friendly
@@ -257,11 +268,13 @@ Immediately after RC
* https://wiki.openstack.org/wiki/Nova/ReleaseChecklist
- * Drop old RPC compat code (if there was a RPC major version bump)
+ * Drop old RPC compat code (if there was a RPC major version bump and if
+ agreed on at the PTG)
* Example: https://review.opendev.org/543580
- * Bump the oldest supported compute service version
+ * Bump the oldest supported compute service version (if master branch is now
+ on a non-SLURP version)
* https://review.opendev.org/#/c/738482/
@@ -275,7 +288,9 @@ Immediately after RC
* Set the previous to last series status to “supported”
-* Repeat launchpad steps ^ for python-novaclient
+* Repeat launchpad steps ^ for python-novaclient (optional)
+
+* Repeat launchpad steps ^ for placement
* Register milestones in launchpad for the new cycle based on the new cycle
release schedule
@@ -293,7 +308,7 @@ Immediately after RC
* Create new release wiki:
- * Example: https://wiki.openstack.org/wiki/Nova/Train_Release_Schedule
+ * Example: https://wiki.openstack.org/wiki/Nova/2023.1_Release_Schedule
* Update the contributor guide for the new cycle
diff --git a/mypy-files.txt b/mypy-files.txt
index 5a3b9ab339..391ed58d87 100644
--- a/mypy-files.txt
+++ b/mypy-files.txt
@@ -1,6 +1,7 @@
nova/compute/manager.py
nova/compute/pci_placement_translator.py
nova/crypto.py
+nova/filesystem.py
nova/limit/local.py
nova/limit/placement.py
nova/network/neutron.py
@@ -13,6 +14,9 @@ nova/virt/driver.py
nova/virt/hardware.py
nova/virt/libvirt/machine_type_utils.py
nova/virt/libvirt/__init__.py
+nova/virt/libvirt/cpu/__init__.py
+nova/virt/libvirt/cpu/api.py
+nova/virt/libvirt/cpu/core.py
nova/virt/libvirt/driver.py
nova/virt/libvirt/event.py
nova/virt/libvirt/guest.py
diff --git a/nova/api/openstack/compute/rest_api_version_history.rst b/nova/api/openstack/compute/rest_api_version_history.rst
index b34510be5c..c7a2777d3a 100644
--- a/nova/api/openstack/compute/rest_api_version_history.rst
+++ b/nova/api/openstack/compute/rest_api_version_history.rst
@@ -1231,17 +1231,19 @@ image similar to the result of rebuilding an ephemeral disk.
2.94
----------------------
+----
The ``hostname`` parameter to the ``POST /servers`` (create server), ``PUT
/servers/{id}`` (update server) and ``POST /servers/{server_id}/action
(rebuild)`` (rebuild server) APIs is now allowed to be a Fully Qualified Domain
Name (FQDN).
+.. _microversion 2.95:
-2.95
----------------------
+2.95 (Maximum in 2023.1 Antelope)
+---------------------------------
Any evacuated instances will be now stopped at destination. This
-requires minimun compute version 27.0.0 (antelope 2023.1). Operators
-can still use previous microversion for older behavior.
+requires minimum nova release 27.0.0 (OpenStack release 2023.1
+Antelope). Operators can still use a previous microversion for the older
+behavior.
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 0bf6fdfec5..5ea71827fc 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -1473,7 +1473,7 @@ class ComputeManager(manager.Manager):
node.
"""
try:
- node_uuids = self.driver.get_available_node_uuids()
+ node_ids = self.driver.get_nodenames_by_uuid()
except exception.VirtDriverNotReady:
LOG.warning(
"Virt driver is not ready. If this is the first time this "
@@ -1481,7 +1481,8 @@ class ComputeManager(manager.Manager):
"this warning.")
return {}
- nodes = objects.ComputeNodeList.get_all_by_uuids(context, node_uuids)
+ nodes = objects.ComputeNodeList.get_all_by_uuids(context,
+ list(node_ids.keys()))
if not nodes:
# NOTE(danms): This should only happen if the compute_id is
# pre-provisioned on a host that has never started.
@@ -1489,9 +1490,18 @@ class ComputeManager(manager.Manager):
'database. If this is the first time this service is '
'starting on this host, then you can ignore this '
'warning.',
- node_uuids, self.host)
+ list(node_ids.keys()), self.host)
return {}
+ for node in nodes:
+ if node.hypervisor_hostname != node_ids.get(node.uuid):
+ raise exception.InvalidConfiguration(
+ ('My compute node %s has hypervisor_hostname %s '
+ 'but virt driver reports it should be %s. Possible '
+ 'rename detected, refusing to start!') % (
+ node.uuid, node.hypervisor_hostname,
+ node_ids.get(node.uuid)))
+
return {n.uuid: n for n in nodes}
def _ensure_existing_node_identity(self, service_ref):
@@ -1499,15 +1509,21 @@ class ComputeManager(manager.Manager):
to write our node identity uuid (if not already done) based on
nodes assigned to us in the database.
"""
- if service_ref.version >= service_obj.NODE_IDENTITY_VERSION:
- # Already new enough, nothing to do here
- return
-
if 'ironic' in CONF.compute_driver.lower():
# We do not persist a single local node identity for
# ironic
return
+ if service_ref.version >= service_obj.NODE_IDENTITY_VERSION:
+ # Already new enough, nothing to do here, but make sure that we
+ # have a UUID file already, as this is not our first time starting.
+ if nova.virt.node.read_local_node_uuid() is None:
+ raise exception.InvalidConfiguration(
+ ('No local node identity found, but this is not our '
+ 'first startup on this host. Refusing to start after '
+ 'potentially having lost that state!'))
+ return
+
if nova.virt.node.read_local_node_uuid():
# We already have a local node identity, no migration needed
return
@@ -1594,12 +1610,6 @@ class ComputeManager(manager.Manager):
# NOTE(gibi): At this point the compute_nodes of the resource tracker
# has not been populated yet so we cannot rely on the resource tracker
# here.
- # NOTE(gibi): If ironic and vcenter virt driver slow start time
- # becomes problematic here then we should consider adding a config
- # option or a driver flag to tell us if we should thread
- # _destroy_evacuated_instances and
- # _error_out_instances_whose_build_was_interrupted out in the
- # background on startup
context = nova.context.get_admin_context()
nodes_by_uuid = self._get_nodes(context)
@@ -1617,6 +1627,12 @@ class ComputeManager(manager.Manager):
self._validate_pinning_configuration(instances)
self._validate_vtpm_configuration(instances)
+ # NOTE(gibi): If ironic and vcenter virt driver slow start time
+ # becomes problematic here then we should consider adding a config
+ # option or a driver flag to tell us if we should thread
+ # _destroy_evacuated_instances and
+ # _error_out_instances_whose_build_was_interrupted out in the
+ # background on startup
try:
# checking that instance was not already evacuated to other host
evacuated_instances = self._destroy_evacuated_instances(
@@ -6858,6 +6874,9 @@ class ComputeManager(manager.Manager):
current_power_state = self._get_power_state(instance)
network_info = self.network_api.get_instance_nw_info(context, instance)
+ ports_id = [vif['id'] for vif in network_info]
+ self.network_api.unbind_ports(context, ports_id, detach=False)
+
block_device_info = self._get_instance_block_device_info(context,
instance,
bdms=bdms)
@@ -8897,7 +8916,8 @@ class ComputeManager(manager.Manager):
# in order to be able to track and abort it in the future.
self._waiting_live_migrations[instance.uuid] = (None, None)
try:
- future = self._live_migration_executor.submit(
+ future = nova.utils.pass_context(
+ self._live_migration_executor.submit,
self._do_live_migration, context, dest, instance,
block_migration, migration, migrate_data)
self._waiting_live_migrations[instance.uuid] = (migration, future)
@@ -10181,7 +10201,9 @@ class ComputeManager(manager.Manager):
else:
LOG.debug('Triggering sync for uuid %s', uuid)
self._syncs_in_progress[uuid] = True
- self._sync_power_pool.spawn_n(_sync, db_instance)
+ nova.utils.pass_context(self._sync_power_pool.spawn_n,
+ _sync,
+ db_instance)
def _query_driver_power_state_and_sync(self, context, db_instance):
if db_instance.task_state is not None:
@@ -10464,6 +10486,14 @@ class ComputeManager(manager.Manager):
LOG.exception(
"Error updating PCI resources for node %(node)s.",
{'node': nodename})
+ except exception.InvalidConfiguration as e:
+ if startup:
+ # If this happens during startup, we need to let it raise to
+ # abort our service startup.
+ raise
+ else:
+ LOG.error("Error updating resources for node %s: %s",
+ nodename, e)
except Exception:
LOG.exception("Error updating resources for node %(node)s.",
{'node': nodename})
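The hunks above replace direct ``submit``/``spawn_n`` calls with
``nova.utils.pass_context``. Its body lives in ``nova/utils.py`` (changed by
this commit but not shown here); conceptually it captures the caller's request
context and re-installs it in the spawned worker, along the lines of this
sketch (an assumption, not the actual implementation)::

    from oslo_context import context as common_context

    def pass_context(runner, func, *args, **kwargs):
        # Capture the request context active in the calling thread.
        ctxt = common_context.get_current()

        def wrapped(*a, **kw):
            # Re-install the context in the worker so that logging and
            # RPC keep the original request metadata.
            if ctxt is not None:
                ctxt.update_store()
            return func(*a, **kw)

        # runner is e.g. executor.submit or pool.spawn_n
        return runner(wrapped, *args, **kwargs)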
diff --git a/nova/compute/resource_tracker.py b/nova/compute/resource_tracker.py
index 70c56fd2e3..3f911f3708 100644
--- a/nova/compute/resource_tracker.py
+++ b/nova/compute/resource_tracker.py
@@ -728,7 +728,13 @@ class ResourceTracker(object):
cn = objects.ComputeNode(context)
cn.host = self.host
self._copy_resources(cn, resources, initial=True)
- cn.create()
+ try:
+ cn.create()
+ except exception.DuplicateRecord:
+ raise exception.InvalidConfiguration(
+ 'Duplicate compute node record found for host %s node %s' % (
+ cn.host, cn.hypervisor_hostname))
+
# Only map the ComputeNode into compute_nodes if create() was OK
# because if create() fails, on the next run through here nodename
# would be in compute_nodes and we won't try to create again (because
diff --git a/nova/compute/rpcapi.py b/nova/compute/rpcapi.py
index 4a97a90807..efc06300db 100644
--- a/nova/compute/rpcapi.py
+++ b/nova/compute/rpcapi.py
@@ -425,7 +425,7 @@ class ComputeAPI(object):
'xena': '6.0',
'yoga': '6.0',
'zed': '6.1',
- 'antilope': '6.2',
+ 'antelope': '6.2',
}
@property
diff --git a/nova/conductor/manager.py b/nova/conductor/manager.py
index 8177519331..4b34b8339c 100644
--- a/nova/conductor/manager.py
+++ b/nova/conductor/manager.py
@@ -2096,8 +2096,8 @@ class ComputeTaskManager:
skipped_host(target_ctxt, host, image_ids)
continue
- fetch_pool.spawn_n(wrap_cache_images, target_ctxt, host,
- image_ids)
+ utils.pass_context(fetch_pool.spawn_n, wrap_cache_images,
+ target_ctxt, host, image_ids)
# Wait until all those things finish
fetch_pool.waitall()
diff --git a/nova/conf/libvirt.py b/nova/conf/libvirt.py
index 16a3f63090..204fe5c4b8 100644
--- a/nova/conf/libvirt.py
+++ b/nova/conf/libvirt.py
@@ -1478,6 +1478,23 @@ Related options:
"""),
]
+libvirt_cpu_mgmt_opts = [
+ cfg.BoolOpt('cpu_power_management',
+ default=False,
+ help='Use libvirt to manage CPU cores performance.'),
+ cfg.StrOpt('cpu_power_management_strategy',
+ choices=['cpu_state', 'governor'],
+ default='cpu_state',
+ help='Tuning strategy to reduce CPU power consumption when '
+ 'unused'),
+ cfg.StrOpt('cpu_power_governor_low',
+ default='powersave',
+ help='Governor to use in order '
+ 'to reduce CPU power consumption'),
+ cfg.StrOpt('cpu_power_governor_high',
+ default='performance',
+ help='Governor to use in order to have best CPU performance'),
+]
ALL_OPTS = list(itertools.chain(
libvirt_general_opts,
@@ -1499,6 +1516,7 @@ ALL_OPTS = list(itertools.chain(
libvirt_volume_nvmeof_opts,
libvirt_pmem_opts,
libvirt_vtpm_opts,
+ libvirt_cpu_mgmt_opts,
))
diff --git a/nova/conf/spice.py b/nova/conf/spice.py
index 59ed4e80a0..e5854946f1 100644
--- a/nova/conf/spice.py
+++ b/nova/conf/spice.py
@@ -85,6 +85,59 @@ Agent. With the Spice agent installed the following features are enabled:
needing to click inside the console or press keys to release it. The
performance of mouse movement is also improved.
"""),
+ cfg.StrOpt('image_compression',
+ advanced=True,
+ choices=[
+ ('auto_glz', 'enable image compression mode to choose between glz '
+ 'and quic algorithm, based on image properties'),
+ ('auto_lz', 'enable image compression mode to choose between lz '
+ 'and quic algorithm, based on image properties'),
+ ('quic', 'enable image compression based on the SFALIC algorithm'),
+ ('glz', 'enable image compression using lz with history based '
+ 'global dictionary'),
+ ('lz', 'enable image compression with the Lempel-Ziv algorithm'),
+ ('off', 'disable image compression')
+ ],
+ help="""
+Configure the SPICE image compression (lossless).
+"""),
+ cfg.StrOpt('jpeg_compression',
+ advanced=True,
+ choices=[
+ ('auto', 'enable JPEG image compression automatically'),
+ ('never', 'disable JPEG image compression'),
+ ('always', 'enable JPEG image compression')
+ ],
+ help="""
+Configure the SPICE wan image compression (lossy for slow links).
+"""),
+ cfg.StrOpt('zlib_compression',
+ advanced=True,
+ choices=[
+ ('auto', 'enable zlib image compression automatically'),
+ ('never', 'disable zlib image compression'),
+ ('always', 'enable zlib image compression')
+ ],
+ help="""
+Configure the SPICE wan image compression (lossless for slow links).
+"""),
+ cfg.BoolOpt('playback_compression',
+ advanced=True,
+ help="""
+Enable the SPICE audio stream compression (using celt).
+"""),
+ cfg.StrOpt('streaming_mode',
+ advanced=True,
+ choices=[
+ ('filter', 'SPICE server adds additional filters to decide if '
+ 'video streaming should be activated'),
+ ('all', 'any fast-refreshing window can be encoded into a video '
+ 'stream'),
+ ('off', 'no video detection and (lossy) compression is performed')
+ ],
+ help="""
+Configure the SPICE video stream detection and (lossy) compression.
+"""),
cfg.URIOpt('html5proxy_base_url',
default='http://127.0.0.1:6082/spice_auto.html',
help="""
diff --git a/nova/exception.py b/nova/exception.py
index 20c112b628..0c0ffa85a1 100644
--- a/nova/exception.py
+++ b/nova/exception.py
@@ -2512,8 +2512,12 @@ class InvalidNodeConfiguration(NovaException):
msg_fmt = _('Invalid node identity configuration: %(reason)s')
+class DuplicateRecord(NovaException):
+ msg_fmt = _('Unable to create duplicate record for %(target)s')
+
+
class NotSupportedComputeForEvacuateV295(NotSupported):
- msg_fmt = _("Starting to microversion 2.95, evacuate API will stop "
+ msg_fmt = _("Starting with microversion 2.95, evacuate API will stop "
"instance on destination. To evacuate before upgrades are "
"complete please use an older microversion. Required version "
"for compute %(expected), current version %(currently)s")
diff --git a/nova/filesystem.py b/nova/filesystem.py
new file mode 100644
index 0000000000..5394d2d835
--- /dev/null
+++ b/nova/filesystem.py
@@ -0,0 +1,59 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Functions to address filesystem calls, particularly sysfs."""
+
+import os
+
+from oslo_log import log as logging
+
+from nova import exception
+
+LOG = logging.getLogger(__name__)
+
+
+SYS = '/sys'
+
+
+# NOTE(bauzas): this method is deliberately not wrapped in a privsep entrypoint
+def read_sys(path: str) -> str:
+ """Reads the content of a file in the sys filesystem.
+
+ :param path: relative or absolute. If relative, will be prefixed by /sys.
+ :returns: contents of that file.
+ :raises: nova.exception.FileNotFound if we can't read that file.
+ """
+ try:
+ # The path can be absolute with a /sys prefix but that's fine.
+ with open(os.path.join(SYS, path), mode='r') as data:
+ return data.read()
+ except (OSError, ValueError) as exc:
+ raise exception.FileNotFound(file_path=path) from exc
+
+
+# NOTE(bauzas): this method is deliberately not wrapped in a privsep entrypoint
+# In order to correctly use it, you need to decorate the caller with a specific
+# privsep entrypoint.
+def write_sys(path: str, data: str) -> None:
+ """Writes the content of a file in the sys filesystem with data.
+
+ :param path: relative or absolute. If relative, will be prefixed by /sys.
+ :param data: the data to write.
+ :raises: nova.exception.FileNotFound if we can't write that file.
+ """
+ try:
+ # The path can be absolute with a /sys prefix but that's fine.
+ with open(os.path.join(SYS, path), mode='w') as fd:
+ fd.write(data)
+ except (OSError, ValueError) as exc:
+ raise exception.FileNotFound(file_path=path) from exc
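A brief usage illustration of these helpers (the sysfs paths are real, the
snippet itself is only an example)::

    from nova import filesystem

    # Relative paths are resolved under /sys
    present = filesystem.read_sys('devices/system/cpu/present').strip()

    # Writing requires privileges; per the notes above, callers are
    # expected to wrap this in an appropriate privsep entrypoint.
    filesystem.write_sys(
        'devices/system/cpu/cpu2/cpufreq/scaling_governor',
        data='performance')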
diff --git a/nova/network/neutron.py b/nova/network/neutron.py
index 27e7d06455..6c9f19f010 100644
--- a/nova/network/neutron.py
+++ b/nova/network/neutron.py
@@ -612,10 +612,22 @@ class API:
raise exception.ExternalNetworkAttachForbidden(
network_uuid=net['id'])
+ def unbind_ports(self, context, ports, detach=True):
+ """Unbind and detach the given ports by clearing their
+ device_owner and dns_name.
+ The device_id will also be cleaned if detach=True.
+
+ :param context: The request context.
+        :param ports: list of port IDs.
+        :param detach: if True, also clear device_id and device_owner.
+ """
+ neutron = get_client(context)
+ self._unbind_ports(context, ports, neutron, detach=detach)
+
def _unbind_ports(self, context, ports,
- neutron, port_client=None):
- """Unbind the given ports by clearing their device_id,
+ neutron, port_client=None, detach=True):
+ """Unbind and detach the given ports by clearing their
device_owner and dns_name.
+ The device_id will also be cleaned if detach=True.
:param context: The request context.
:param ports: list of port IDs.
@@ -638,11 +650,12 @@ class API:
port_req_body: ty.Dict[str, ty.Any] = {
'port': {
- 'device_id': '',
- 'device_owner': '',
constants.BINDING_HOST_ID: None,
}
}
+ if detach:
+ port_req_body['port']['device_id'] = ''
+ port_req_body['port']['device_owner'] = ''
try:
port = self._show_port(
context, port_id, neutron_client=neutron,
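The shelve-offload hunk in ``nova/compute/manager.py`` above is the consumer
of this new public wrapper: it clears only the binding host while keeping
device ownership, so the port stays attached to the offloaded instance. A
minimal illustration (``ctxt`` and the port ID are placeholders)::

    from nova.network import neutron

    network_api = neutron.API()
    # detach=False: clear the binding host but keep device_id and
    # device_owner so the port can be bound again on unshelve.
    network_api.unbind_ports(ctxt, ['PORT_UUID'], detach=False)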
diff --git a/nova/objects/compute_node.py b/nova/objects/compute_node.py
index 528cfc0776..dfc1b2ae28 100644
--- a/nova/objects/compute_node.py
+++ b/nova/objects/compute_node.py
@@ -12,6 +12,7 @@
# License for the specific language governing permissions and limitations
# under the License.
+from oslo_db import exception as db_exc
from oslo_serialization import jsonutils
from oslo_utils import uuidutils
from oslo_utils import versionutils
@@ -339,7 +340,12 @@ class ComputeNode(base.NovaPersistentObject, base.NovaObject):
self._convert_supported_instances_to_db_format(updates)
self._convert_pci_stats_to_db_format(updates)
- db_compute = db.compute_node_create(self._context, updates)
+ try:
+ db_compute = db.compute_node_create(self._context, updates)
+ except db_exc.DBDuplicateEntry:
+ target = 'compute node %s:%s' % (updates['hypervisor_hostname'],
+ updates['uuid'])
+ raise exception.DuplicateRecord(target=target)
self._from_db_object(self._context, self, db_compute)
@base.remotable
diff --git a/nova/objects/service.py b/nova/objects/service.py
index 0ed443ef17..1a4629cc84 100644
--- a/nova/objects/service.py
+++ b/nova/objects/service.py
@@ -237,15 +237,30 @@ SERVICE_VERSION_HISTORY = (
# local node identity for single-node systems.
NODE_IDENTITY_VERSION = 65
-# This is used to raise an error at service startup if older than N-1 computes
-# are detected. Update this at the beginning of every release cycle to point to
-# the smallest service version that was added in N-1.
-OLDEST_SUPPORTED_SERVICE_VERSION = 'Yoga'
+# This is used to raise an error at service startup if older than supported
+# computes are detected.
+# NOTE(sbauza): Please modify it this way:
+# * At the beginning of a non-SLURP release (eg. 2023.2 Bobcat) (or just after
+# the previous SLURP release RC1, like 2023.1 Antelope), please bump
+# OLDEST_SUPPORTED_SERVICE_VERSION to the previous SLURP release (in that
+# example, Antelope)
+# * At the beginning of a SLURP release (eg. 2024.1 C) (or just after the
+# previous non-SLURP release RC1, like 2023.2 Bobcat), please keep the
+# OLDEST_SUPPORTED_SERVICE_VERSION value using the previous SLURP release
+# (in that example, Antelope)
+# * At the end of any release (SLURP or non-SLURP), please modify
+#   SERVICE_VERSION_ALIASES to add a key/value pair, the key being the
+#   release name and the value being the latest service version that the
+#   release supports (for example, before Bobcat RC1, please add
+#   'Bobcat': XX where XX is the latest service version that was added)
+OLDEST_SUPPORTED_SERVICE_VERSION = 'Antelope'
SERVICE_VERSION_ALIASES = {
'Victoria': 52,
'Wallaby': 54,
'Xena': 57,
'Yoga': 61,
+ 'Zed': 64,
+ 'Antelope': 66,
}
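For illustration, the startup gate that these constants feed boils down to a
comparison like the following sketch (names simplified; the actual check
lives elsewhere in ``nova/objects/service.py``)::

    OLDEST_SUPPORTED_SERVICE_VERSION = 'Antelope'
    SERVICE_VERSION_ALIASES = {'Yoga': 61, 'Zed': 64, 'Antelope': 66}

    def is_supported(found_version):
        # A service reporting a version below the oldest supported alias
        # (here 66, i.e. Antelope) makes the conductor refuse to start.
        oldest = SERVICE_VERSION_ALIASES[OLDEST_SUPPORTED_SERVICE_VERSION]
        return found_version >= oldest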
diff --git a/nova/pci/request.py b/nova/pci/request.py
index 27ada6c045..8ae2385549 100644
--- a/nova/pci/request.py
+++ b/nova/pci/request.py
@@ -168,7 +168,7 @@ def _get_alias_from_config() -> Alias:
def _translate_alias_to_requests(
- alias_spec: str, affinity_policy: str = None,
+ alias_spec: str, affinity_policy: ty.Optional[str] = None,
) -> ty.List['objects.InstancePCIRequest']:
"""Generate complete pci requests from pci aliases in extra_spec."""
pci_aliases = _get_alias_from_config()
@@ -255,7 +255,7 @@ def get_instance_pci_request_from_vif(
def get_pci_requests_from_flavor(
- flavor: 'objects.Flavor', affinity_policy: str = None,
+ flavor: 'objects.Flavor', affinity_policy: ty.Optional[str] = None,
) -> 'objects.InstancePCIRequests':
"""Validate and return PCI requests.
diff --git a/nova/pci/stats.py b/nova/pci/stats.py
index 5c5f7c669c..c6e4844b34 100644
--- a/nova/pci/stats.py
+++ b/nova/pci/stats.py
@@ -82,7 +82,7 @@ class PciDeviceStats(object):
self,
numa_topology: 'objects.NUMATopology',
stats: 'objects.PCIDevicePoolList' = None,
- dev_filter: whitelist.Whitelist = None,
+ dev_filter: ty.Optional[whitelist.Whitelist] = None,
) -> None:
self.numa_topology = numa_topology
self.pools = (
diff --git a/nova/pci/whitelist.py b/nova/pci/whitelist.py
index 8862a0ef4f..152cc29ca6 100644
--- a/nova/pci/whitelist.py
+++ b/nova/pci/whitelist.py
@@ -33,7 +33,7 @@ class Whitelist(object):
assignable.
"""
- def __init__(self, whitelist_spec: str = None) -> None:
+ def __init__(self, whitelist_spec: ty.Optional[str] = None) -> None:
"""White list constructor
For example, the following json string specifies that devices whose
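The ``ty.Optional`` changes in ``nova/pci/request.py``, ``nova/pci/stats.py``,
``nova/pci/whitelist.py`` and ``nova/scheduler/client/report.py`` all address
the same typing issue: under mypy's ``no_implicit_optional`` behavior (the
default in recent mypy releases), a parameter defaulting to ``None`` must be
explicitly Optional::

    import typing as ty

    # Implicit Optional; rejected by mypy with no_implicit_optional:
    def fetch(spec: str = None) -> None: ...

    # Explicit Optional; accepted:
    def fetch_ok(spec: ty.Optional[str] = None) -> None: ...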
diff --git a/nova/scheduler/client/report.py b/nova/scheduler/client/report.py
index 1242752be1..7c14f3d7ef 100644
--- a/nova/scheduler/client/report.py
+++ b/nova/scheduler/client/report.py
@@ -1047,7 +1047,7 @@ class SchedulerReportClient(object):
context: nova_context.RequestContext,
rp_uuid: str,
traits: ty.Iterable[str],
- generation: int = None
+ generation: ty.Optional[int] = None
):
"""Replace a provider's traits with those specified.
diff --git a/nova/scheduler/filters/__init__.py b/nova/scheduler/filters/__init__.py
index 74e24b7bc3..785a13279e 100644
--- a/nova/scheduler/filters/__init__.py
+++ b/nova/scheduler/filters/__init__.py
@@ -16,8 +16,12 @@
"""
Scheduler host filters
"""
+from oslo_log import log as logging
+
from nova import filters
+LOG = logging.getLogger(__name__)
+
class BaseHostFilter(filters.BaseFilter):
"""Base class for host filters."""
@@ -53,6 +57,43 @@ class BaseHostFilter(filters.BaseFilter):
raise NotImplementedError()
+class CandidateFilterMixin:
+    """Mixin that helps implement a filter that needs to filter hosts by
+    Placement allocation candidates.
+ """
+
+ def filter_candidates(self, host_state, filter_func):
+        """Check the still viable allocation candidates with filter_func and
+        keep only those that pass it.
+
+ :param host_state: HostState object holding the list of still viable
+ allocation candidates
+        :param filter_func: A callable that takes an allocation candidate and
+            returns a truthy value if the candidate passes the filter, or a
+            falsy value if it does not.
+        :returns: the list of accepted candidates.
+ """
+ good_candidates = []
+ for candidate in host_state.allocation_candidates:
+ LOG.debug(
+ f'{self.__class__.__name__} tries allocation candidate: '
+ f'{candidate}',
+ )
+ if filter_func(candidate):
+ LOG.debug(
+ f'{self.__class__.__name__} accepted allocation '
+ f'candidate: {candidate}',
+ )
+ good_candidates.append(candidate)
+ else:
+ LOG.debug(
+ f'{self.__class__.__name__} rejected allocation '
+ f'candidate: {candidate}',
+ )
+
+ host_state.allocation_candidates = good_candidates
+ return good_candidates
+
+
class HostFilterHandler(filters.BaseFilterHandler):
def __init__(self):
super(HostFilterHandler, self).__init__(BaseHostFilter)
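The two filter rewrites below follow the same pattern; a new candidate-aware
filter only needs to supply the predicate. A hypothetical example
(``my_check`` is a placeholder)::

    class MyCandidateFilter(BaseHostFilter, CandidateFilterMixin):
        """Keep hosts with at least one acceptable allocation candidate."""

        def host_passes(self, host_state, spec_obj):
            good_candidates = self.filter_candidates(
                host_state,
                # A real filter inspects the candidate, typically its
                # ['mappings'], to decide whether the host still fits.
                lambda candidate: my_check(candidate['mappings']),
            )
            return bool(good_candidates)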
diff --git a/nova/scheduler/filters/numa_topology_filter.py b/nova/scheduler/filters/numa_topology_filter.py
index 7ec9ca5648..ae50db90e5 100644
--- a/nova/scheduler/filters/numa_topology_filter.py
+++ b/nova/scheduler/filters/numa_topology_filter.py
@@ -20,7 +20,10 @@ from nova.virt import hardware
LOG = logging.getLogger(__name__)
-class NUMATopologyFilter(filters.BaseHostFilter):
+class NUMATopologyFilter(
+ filters.BaseHostFilter,
+ filters.CandidateFilterMixin,
+):
"""Filter on requested NUMA topology."""
# NOTE(sean-k-mooney): In change I0322d872bdff68936033a6f5a54e8296a6fb343
@@ -97,34 +100,19 @@ class NUMATopologyFilter(filters.BaseHostFilter):
if network_metadata:
limits.network_metadata = network_metadata
- good_candidates = []
- for candidate in host_state.allocation_candidates:
- LOG.debug(
- 'NUMATopologyFilter tries allocation candidate: %s, %s',
- candidate, requested_topology
- )
- instance_topology = (hardware.numa_fit_instance_to_host(
- host_topology, requested_topology,
+ good_candidates = self.filter_candidates(
+ host_state,
+ lambda candidate: hardware.numa_fit_instance_to_host(
+ host_topology,
+ requested_topology,
limits=limits,
pci_requests=pci_requests,
pci_stats=host_state.pci_stats,
- provider_mapping=candidate['mappings'],
- ))
- if instance_topology:
- LOG.debug(
- 'NUMATopologyFilter accepted allocation candidate: %s',
- candidate
- )
- good_candidates.append(candidate)
- else:
- LOG.debug(
- 'NUMATopologyFilter rejected allocation candidate: %s',
- candidate
- )
-
- host_state.allocation_candidates = good_candidates
-
- if not host_state.allocation_candidates:
+ provider_mapping=candidate["mappings"],
+ ),
+ )
+
+ if not good_candidates:
LOG.debug("%(host)s, %(node)s fails NUMA topology "
"requirements. The instance does not fit on this "
"host.", {'host': host_state.host,
diff --git a/nova/scheduler/filters/pci_passthrough_filter.py b/nova/scheduler/filters/pci_passthrough_filter.py
index 36f0b5901c..992879072a 100644
--- a/nova/scheduler/filters/pci_passthrough_filter.py
+++ b/nova/scheduler/filters/pci_passthrough_filter.py
@@ -20,7 +20,10 @@ from nova.scheduler import filters
LOG = logging.getLogger(__name__)
-class PciPassthroughFilter(filters.BaseHostFilter):
+class PciPassthroughFilter(
+ filters.BaseHostFilter,
+ filters.CandidateFilterMixin,
+):
"""Pci Passthrough Filter based on PCI request
Filter that schedules instances on a host if the host has devices
@@ -54,28 +57,12 @@ class PciPassthroughFilter(filters.BaseHostFilter):
{'host_state': host_state, 'requests': pci_requests})
return False
- good_candidates = []
- for candidate in host_state.allocation_candidates:
- LOG.debug(
- 'PciPassthroughFilter tries allocation candidate: %s',
- candidate
- )
- if host_state.pci_stats.support_requests(
- pci_requests.requests,
- provider_mapping=candidate['mappings']
- ):
- LOG.debug(
- 'PciPassthroughFilter accepted allocation candidate: %s',
- candidate
- )
- good_candidates.append(candidate)
- else:
- LOG.debug(
- 'PciPassthroughFilter rejected allocation candidate: %s',
- candidate
- )
-
- host_state.allocation_candidates = good_candidates
+ good_candidates = self.filter_candidates(
+ host_state,
+ lambda candidate: host_state.pci_stats.support_requests(
+ pci_requests.requests, provider_mapping=candidate["mappings"]
+ ),
+ )
if not good_candidates:
LOG.debug("%(host_state)s doesn't have the required PCI devices"
diff --git a/nova/scheduler/manager.py b/nova/scheduler/manager.py
index 11581c4f2d..620519d403 100644
--- a/nova/scheduler/manager.py
+++ b/nova/scheduler/manager.py
@@ -23,6 +23,7 @@ import collections
import copy
import random
+from keystoneauth1 import exceptions as ks_exc
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_serialization import jsonutils
@@ -67,10 +68,42 @@ class SchedulerManager(manager.Manager):
self.host_manager = host_manager.HostManager()
self.servicegroup_api = servicegroup.API()
self.notifier = rpc.get_notifier('scheduler')
- self.placement_client = report.report_client_singleton()
+ self._placement_client = None
+
+ try:
+ # Test our placement client during initialization
+ self.placement_client
+ except (ks_exc.EndpointNotFound,
+ ks_exc.DiscoveryFailure,
+ ks_exc.RequestTimeout,
+ ks_exc.GatewayTimeout,
+ ks_exc.ConnectFailure) as e:
+ # Non-fatal, likely transient (although not definitely);
+ # continue startup but log the warning so that when things
+ # fail later, it will be clear why we can not do certain
+ # things.
+ LOG.warning('Unable to initialize placement client (%s); '
+ 'Continuing with startup, but scheduling '
+ 'will not be possible.', e)
+ except (ks_exc.MissingAuthPlugin,
+ ks_exc.Unauthorized) as e:
+ # This is almost definitely fatal mis-configuration. The
+ # Unauthorized error might be transient, but it is
+ # probably reasonable to consider it fatal.
+ LOG.error('Fatal error initializing placement client; '
+ 'config is incorrect or incomplete: %s', e)
+ raise
+ except Exception as e:
+ # Unknown/unexpected errors here are fatal
+ LOG.error('Fatal error initializing placement client: %s', e)
+ raise
super().__init__(service_name='scheduler', *args, **kwargs)
+ @property
+ def placement_client(self):
+ return report.report_client_singleton()
+
@periodic_task.periodic_task(
spacing=CONF.scheduler.discover_hosts_in_cells_interval,
run_immediately=True)
diff --git a/nova/scheduler/utils.py b/nova/scheduler/utils.py
index c7e6ffed97..02c44093bd 100644
--- a/nova/scheduler/utils.py
+++ b/nova/scheduler/utils.py
@@ -1080,6 +1080,17 @@ _SUPPORTS_SOFT_AFFINITY = None
_SUPPORTS_SOFT_ANTI_AFFINITY = None
+def reset_globals():
+ global _SUPPORTS_AFFINITY
+ _SUPPORTS_AFFINITY = None
+ global _SUPPORTS_ANTI_AFFINITY
+ _SUPPORTS_ANTI_AFFINITY = None
+ global _SUPPORTS_SOFT_AFFINITY
+ _SUPPORTS_SOFT_AFFINITY = None
+ global _SUPPORTS_SOFT_ANTI_AFFINITY
+ _SUPPORTS_SOFT_ANTI_AFFINITY = None
+
+
def _get_group_details(context, instance_uuid, user_group_hosts=None):
"""Provide group_hosts and group_policies sets related to instances if
those instances are belonging to a group and if corresponding filters are
diff --git a/nova/test.py b/nova/test.py
index 562bd2516e..e37967b06d 100644
--- a/nova/test.py
+++ b/nova/test.py
@@ -62,6 +62,7 @@ from nova import objects
from nova.objects import base as objects_base
from nova import quota
from nova.scheduler.client import report
+from nova.scheduler import utils as scheduler_utils
from nova.tests import fixtures as nova_fixtures
from nova.tests.unit import matchers
from nova import utils
@@ -171,6 +172,12 @@ class TestCase(base.BaseTestCase):
# base class when USES_DB is True.
NUMBER_OF_CELLS = 1
+ # The stable compute id stuff is intentionally singleton-ish, which makes
+ # it a nightmare for testing multiple host/node combinations in tests like
+ # we do. So, mock it out by default, unless the test is specifically
+ # designed to handle it.
+ STUB_COMPUTE_ID = True
+
def setUp(self):
"""Run before each test method to initialize test environment."""
# Ensure BaseTestCase's ConfigureLogging fixture is disabled since
@@ -301,7 +308,14 @@ class TestCase(base.BaseTestCase):
# Reset our local node uuid cache (and avoid writing to the
# local filesystem when we generate a new one).
- self.useFixture(nova_fixtures.ComputeNodeIdFixture())
+ if self.STUB_COMPUTE_ID:
+ self.useFixture(nova_fixtures.ComputeNodeIdFixture())
+
+ # Reset globals indicating affinity filter support. Some tests may set
+ # self.flags(enabled_filters=...) which could make the affinity filter
+ # support globals get set to a non-default configuration which affects
+ # all other tests.
+ scheduler_utils.reset_globals()
def _setup_cells(self):
"""Setup a normal cellsv2 environment.
diff --git a/nova/tests/fixtures/__init__.py b/nova/tests/fixtures/__init__.py
index df254608fd..9ff4a2a601 100644
--- a/nova/tests/fixtures/__init__.py
+++ b/nova/tests/fixtures/__init__.py
@@ -16,6 +16,8 @@ from .cast_as_call import CastAsCallFixture # noqa: F401
from .cinder import CinderFixture # noqa: F401
from .conf import ConfFixture # noqa: F401, F403
from .cyborg import CyborgFixture # noqa: F401
+from .filesystem import SysFileSystemFixture # noqa: F401
+from .filesystem import TempFileSystemFixture # noqa: F401
from .glance import GlanceFixture # noqa: F401
from .libvirt import LibvirtFixture # noqa: F401
from .libvirt_imagebackend import LibvirtImageBackendFixture # noqa: F401
diff --git a/nova/tests/fixtures/filesystem.py b/nova/tests/fixtures/filesystem.py
new file mode 100644
index 0000000000..932d42fe27
--- /dev/null
+++ b/nova/tests/fixtures/filesystem.py
@@ -0,0 +1,81 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+import shutil
+import tempfile
+from unittest import mock
+
+import fixtures
+
+from nova import filesystem
+from nova.virt.libvirt.cpu import core
+
+
+SYS = 'sys'
+
+
+class TempFileSystemFixture(fixtures.Fixture):
+ """Creates a fake / filesystem"""
+
+ def _setUp(self):
+ self.temp_dir = tempfile.TemporaryDirectory(prefix='fake_fs')
+ # NOTE(sbauza): Disk I/O errors may raise an exception here, as we
+ # don't ignore them. If that causes a problem in our CI jobs, the
+ # recommended solution is to use shutil.rmtree instead of cleanup()
+ # with its ignore_errors parameter set to True (or wait until the
+ # minimum Python version is 3.10, where TemporaryDirectory provides
+ # an ignore_cleanup_errors parameter).
+ self.addCleanup(self.temp_dir.cleanup)
+
+
+class SysFileSystemFixture(TempFileSystemFixture):
+ """Creates a fake /sys filesystem"""
+
+ def __init__(self, cpus_supported=None):
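+ # Default to exposing 10 fake CPUs unless told otherwise.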
+ self.cpus_supported = cpus_supported or 10
+
+ def _setUp(self):
+ super()._setUp()
+ self.sys_path = os.path.join(self.temp_dir.name, SYS)
+ self.addCleanup(shutil.rmtree, self.sys_path, ignore_errors=True)
+
+ sys_patcher = mock.patch(
+ 'nova.filesystem.SYS',
+ new_callable=mock.PropertyMock(return_value=self.sys_path))
+ self.sys_mock = sys_patcher.start()
+ self.addCleanup(sys_patcher.stop)
+
+ avail_path_patcher = mock.patch(
+ 'nova.virt.libvirt.cpu.core.AVAILABLE_PATH',
+ new_callable=mock.PropertyMock(
+ return_value=os.path.join(self.sys_path,
+ 'devices/system/cpu/present')))
+ self.avail_path_mock = avail_path_patcher.start()
+ self.addCleanup(avail_path_patcher.stop)
+
+ cpu_path_patcher = mock.patch(
+ 'nova.virt.libvirt.cpu.core.CPU_PATH_TEMPLATE',
+ new_callable=mock.PropertyMock(
+ return_value=os.path.join(self.sys_path,
+ 'devices/system/cpu/cpu%(core)s')))
+ self.cpu_path_mock = cpu_path_patcher.start()
+ self.addCleanup(cpu_path_patcher.stop)
+
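+ # Create a minimal cpufreq tree for each fake CPU, with every core
+ # defaulting to the 'powersave' governor.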
+ for cpu_nr in range(self.cpus_supported):
+ cpu_dir = os.path.join(self.cpu_path_mock % {'core': cpu_nr})
+ os.makedirs(os.path.join(cpu_dir, 'cpufreq'))
+ filesystem.write_sys(
+ os.path.join(cpu_dir, 'cpufreq/scaling_governor'),
+ data='powersave')
+ filesystem.write_sys(core.AVAILABLE_PATH,
+ f'0-{self.cpus_supported - 1}')
diff --git a/nova/tests/fixtures/nova.py b/nova/tests/fixtures/nova.py
index 76dc63755d..5fd893e7dc 100644
--- a/nova/tests/fixtures/nova.py
+++ b/nova/tests/fixtures/nova.py
@@ -1863,3 +1863,7 @@ class ComputeNodeIdFixture(fixtures.Fixture):
self.useFixture(fixtures.MockPatch(
'nova.virt.node.write_local_node_uuid',
lambda uuid: None))
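+ # Also stub the startup node-identity check; tests that need the
+ # real behavior set STUB_COMPUTE_ID = False and opt out of this
+ # fixture entirely.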
+ self.useFixture(fixtures.MockPatch(
+ 'nova.compute.manager.ComputeManager.'
+ '_ensure_existing_node_identity',
+ mock.DEFAULT))
diff --git a/nova/tests/functional/libvirt/test_pci_sriov_servers.py b/nova/tests/functional/libvirt/test_pci_sriov_servers.py
index 6b8b254af9..098a0e857b 100644
--- a/nova/tests/functional/libvirt/test_pci_sriov_servers.py
+++ b/nova/tests/functional/libvirt/test_pci_sriov_servers.py
@@ -1549,7 +1549,11 @@ class VDPAServersTest(_PCIServersWithMigrationTestBase):
'not supported for instance with vDPA ports',
ex.response.text)
+ # NOTE(sbauza): Now we're past the Antelope release, we no longer need
+ # to support this old service version, so disable the FFU check for it.
def test_attach_interface_service_version_61(self):
+ self.flags(disable_compute_service_check_for_ffu=True,
+ group='workarounds')
with mock.patch(
"nova.objects.service.get_minimum_version_all_cells",
return_value=61
@@ -1578,7 +1582,11 @@ class VDPAServersTest(_PCIServersWithMigrationTestBase):
self.assertEqual(hostname, port['binding:host_id'])
self.assertEqual(server['id'], port['device_id'])
+ # NOTE(sbauza): Now we're past the Antelope release, we no longer need
+ # to support this old service version, so disable the FFU check for it.
def test_detach_interface_service_version_61(self):
+ self.flags(disable_compute_service_check_for_ffu=True,
+ group='workarounds')
with mock.patch(
"nova.objects.service.get_minimum_version_all_cells",
return_value=61
@@ -1612,10 +1620,7 @@ class VDPAServersTest(_PCIServersWithMigrationTestBase):
# to any host but should still be owned by the vm
port = self.neutron.show_port(vdpa_port['id'])['port']
self.assertEqual(server['id'], port['device_id'])
- # FIXME(sean-k-mooney): we should be unbinding the port from
- # the host when we shelve offload but we don't today.
- # This is unrelated to vdpa port and is a general issue.
- self.assertEqual(hostname, port['binding:host_id'])
+ self.assertIsNone(port['binding:host_id'])
self.assertIn('binding:profile', port)
self.assertIsNone(server['OS-EXT-SRV-ATTR:hypervisor_hostname'])
self.assertIsNone(server['OS-EXT-SRV-ATTR:host'])
@@ -1637,9 +1642,7 @@ class VDPAServersTest(_PCIServersWithMigrationTestBase):
self.assertPCIDeviceCounts(hostname, total=num_pci, free=num_pci)
self.assertIsNone(server['OS-EXT-SRV-ATTR:hypervisor_hostname'])
port = self.neutron.show_port(vdpa_port['id'])['port']
- # FIXME(sean-k-mooney): shelve offload should unbind the port
- # self.assertEqual('', port['binding:host_id'])
- self.assertEqual(hostname, port['binding:host_id'])
+ self.assertIsNone(port['binding:host_id'])
server = self._unshelve_server(server)
self.assertPCIDeviceCounts(hostname, total=num_pci, free=num_pci - 2)
@@ -1670,9 +1673,7 @@ class VDPAServersTest(_PCIServersWithMigrationTestBase):
self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci)
self.assertIsNone(server['OS-EXT-SRV-ATTR:hypervisor_hostname'])
port = self.neutron.show_port(vdpa_port['id'])['port']
- # FIXME(sean-k-mooney): shelve should unbind the port
- # self.assertEqual('', port['binding:host_id'])
- self.assertEqual(source, port['binding:host_id'])
+ self.assertIsNone(port['binding:host_id'])
# force the unshelve to the other host
self.api.put_service(
@@ -1871,7 +1872,11 @@ class VDPAServersTest(_PCIServersWithMigrationTestBase):
self.assertEqual(
dest, server['OS-EXT-SRV-ATTR:hypervisor_hostname'])
+ # NOTE(sbauza): Now we're past the Antelope release, we no longer need
+ # to support this old service version, so disable the FFU check for it.
def test_suspend_and_resume_service_version_62(self):
+ self.flags(disable_compute_service_check_for_ffu=True,
+ group='workarounds')
with mock.patch(
"nova.objects.service.get_minimum_version_all_cells",
return_value=62
@@ -1890,7 +1895,11 @@ class VDPAServersTest(_PCIServersWithMigrationTestBase):
self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci - 2)
self.assertEqual('ACTIVE', server['status'])
+ # NOTE(sbauza): Now we're past the Antelope release, we no longer need
+ # to support this old service version, so disable the FFU check for it.
def test_live_migrate_service_version_62(self):
+ self.flags(disable_compute_service_check_for_ffu=True,
+ group='workarounds')
with mock.patch(
"nova.objects.service.get_minimum_version_all_cells",
return_value=62
@@ -3926,17 +3935,7 @@ class RemoteManagedServersTest(_PCIServersWithMigrationTestBase):
port = self.neutron.show_port(uuids.dpu_tunnel_port)['port']
self.assertIn('binding:profile', port)
- self.assertEqual(
- {
- 'pci_vendor_info': '15b3:101e',
- 'pci_slot': '0000:82:00.4',
- 'physical_network': None,
- 'pf_mac_address': '52:54:00:1e:59:02',
- 'vf_num': 3,
- 'card_serial_number': 'MT0000X00002',
- },
- port['binding:profile'],
- )
+ self.assertEqual({}, port['binding:profile'])
def test_suspend(self):
self.start_compute()
diff --git a/nova/tests/functional/libvirt/test_power_manage.py b/nova/tests/functional/libvirt/test_power_manage.py
new file mode 100644
index 0000000000..9f80446bd6
--- /dev/null
+++ b/nova/tests/functional/libvirt/test_power_manage.py
@@ -0,0 +1,270 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from unittest import mock
+
+import fixtures
+
+from nova import context as nova_context
+from nova import exception
+from nova import objects
+from nova.tests import fixtures as nova_fixtures
+from nova.tests.fixtures import libvirt as fakelibvirt
+from nova.tests.functional.libvirt import base
+from nova.virt import hardware
+from nova.virt.libvirt.cpu import api as cpu_api
+
+
+class PowerManagementTestsBase(base.ServersTestBase):
+
+ ADDITIONAL_FILTERS = ['NUMATopologyFilter']
+
+ ADMIN_API = True
+
+ def setUp(self):
+ super(PowerManagementTestsBase, self).setUp()
+
+ self.ctxt = nova_context.get_admin_context()
+
+ # Mock the 'NUMATopologyFilter' filter, as most tests need to inspect
+ # this
+ host_manager = self.scheduler.manager.host_manager
+ numa_filter_class = host_manager.filter_cls_map['NUMATopologyFilter']
+ host_pass_mock = mock.Mock(wraps=numa_filter_class().host_passes)
+ _p = mock.patch('nova.scheduler.filters'
+ '.numa_topology_filter.NUMATopologyFilter.host_passes',
+ side_effect=host_pass_mock)
+ self.mock_filter = _p.start()
+ self.addCleanup(_p.stop)
+
+ # for the sake of resizing, we need to patch the two methods below
+ self.useFixture(fixtures.MockPatch(
+ 'nova.virt.libvirt.LibvirtDriver._get_instance_disk_info',
+ return_value=[]))
+ self.useFixture(fixtures.MockPatch('os.rename'))
+
+ self.useFixture(nova_fixtures.PrivsepFixture())
+
+ # Defining the main flavor for 4 vCPUs all pinned
+ self.extra_spec = {
+ 'hw:cpu_policy': 'dedicated',
+ 'hw:cpu_thread_policy': 'prefer',
+ }
+ self.pcpu_flavor_id = self._create_flavor(
+ vcpu=4, extra_spec=self.extra_spec)
+
+ def _assert_server_cpus_state(self, server, expected='online'):
+ inst = objects.Instance.get_by_uuid(self.ctxt, server['id'])
+ if not inst.numa_topology:
+ self.fail('Instance should have a NUMA topology in order to know '
+ 'its physical CPUs')
+ instance_pcpus = inst.numa_topology.cpu_pinning
+ self._assert_cpu_set_state(instance_pcpus, expected=expected)
+ return instance_pcpus
+
+ def _assert_cpu_set_state(self, cpu_set, expected='online'):
+ for i in cpu_set:
+ core = cpu_api.Core(i)
+ if expected == 'online':
+ self.assertTrue(core.online, f'{i} is not online')
+ elif expected == 'offline':
+ self.assertFalse(core.online, f'{i} is online')
+ elif expected == 'powersave':
+ self.assertEqual('powersave', core.governor)
+ elif expected == 'performance':
+ self.assertEqual('performance', core.governor)
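+ else:
+ # Guard against typos in the expected state name.
+ self.fail(f'Unknown expected state: {expected}')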
+
+
+class PowerManagementTests(PowerManagementTestsBase):
+ """Test suite for a single host with 9 dedicated cores and 1 used for OS"""
+
+ def setUp(self):
+ super(PowerManagementTests, self).setUp()
+
+ self.useFixture(nova_fixtures.SysFileSystemFixture())
+
+ # Defining the CPUs to be pinned.
+ self.flags(cpu_dedicated_set='1-9', cpu_shared_set=None,
+ group='compute')
+ self.flags(vcpu_pin_set=None)
+ self.flags(cpu_power_management=True, group='libvirt')
+
+ self.flags(allow_resize_to_same_host=True)
+ self.host_info = fakelibvirt.HostInfo(cpu_nodes=1, cpu_sockets=1,
+ cpu_cores=5, cpu_threads=2)
+ self.compute1 = self.start_compute(host_info=self.host_info,
+ hostname='compute1')
+
+ # All dedicated cores are shut down at startup; let's check.
+ cpu_dedicated_set = hardware.get_cpu_dedicated_set()
+ self._assert_cpu_set_state(cpu_dedicated_set, expected='offline')
+
+ def test_hardstop_compute_service_if_wrong_opt(self):
+ self.flags(cpu_dedicated_set=None, cpu_shared_set=None,
+ group='compute')
+ self.flags(vcpu_pin_set=None)
+ self.flags(cpu_power_management=True, group='libvirt')
+ self.assertRaises(exception.InvalidConfiguration,
+ self.start_compute, host_info=self.host_info,
+ hostname='compute2')
+
+ def test_create_server(self):
+ server = self._create_server(
+ flavor_id=self.pcpu_flavor_id,
+ expected_state='ACTIVE')
+ # Let's verify that the pinned CPUs are now online
+ self._assert_server_cpus_state(server, expected='online')
+
+ # Verify that the unused CPUs are still offline
+ inst = objects.Instance.get_by_uuid(self.ctxt, server['id'])
+ instance_pcpus = inst.numa_topology.cpu_pinning
+ cpu_dedicated_set = hardware.get_cpu_dedicated_set()
+ unused_cpus = cpu_dedicated_set - instance_pcpus
+ self._assert_cpu_set_state(unused_cpus, expected='offline')
+
+ def test_stop_start_server(self):
+ server = self._create_server(
+ flavor_id=self.pcpu_flavor_id,
+ expected_state='ACTIVE')
+
+ server = self._stop_server(server)
+ # Let's verify that the pinned CPUs are now stopped...
+ self._assert_server_cpus_state(server, expected='offline')
+
+ server = self._start_server(server)
+ # ...and now, they should be back.
+ self._assert_server_cpus_state(server, expected='online')
+
+ def test_resize(self):
+ server = self._create_server(
+ flavor_id=self.pcpu_flavor_id,
+ expected_state='ACTIVE')
+ server_pcpus = self._assert_server_cpus_state(server,
+ expected='online')
+
+ new_flavor_id = self._create_flavor(
+ vcpu=5, extra_spec=self.extra_spec)
+ self._resize_server(server, new_flavor_id)
+ server2_pcpus = self._assert_server_cpus_state(server,
+ expected='online')
+ # Even if the resize is not confirmed yet, the original guest is now
+ # destroyed, so its cores are now offline.
+ self._assert_cpu_set_state(server_pcpus, expected='offline')
+
+ # let's revert the resize
+ self._revert_resize(server)
+ # So now the original CPUs are online again, while the cores picked
+ # for the resize are back offline.
+ self._assert_cpu_set_state(server_pcpus, expected='online')
+ self._assert_cpu_set_state(server2_pcpus, expected='offline')
+
+ def test_changing_strategy_fails(self):
+ # As a reminder, all dedicated cores were shut down at startup.
+ # Now we change the strategy and then restart the service.
+ self.flags(cpu_power_management_strategy='governor', group='libvirt')
+ # This is not possible, as we would be left with offline CPUs.
+ self.assertRaises(exception.InvalidConfiguration,
+ self.restart_compute_service, hostname='compute1')
+
+
+class PowerManagementTestsGovernor(PowerManagementTestsBase):
+ """Test suite for speific governor usage (same 10-core host)"""
+
+ def setUp(self):
+ super(PowerManagementTestsGovernor, self).setUp()
+
+ self.useFixture(nova_fixtures.SysFileSystemFixture())
+
+ # Defining the CPUs to be pinned.
+ self.flags(cpu_dedicated_set='1-9', cpu_shared_set=None,
+ group='compute')
+ self.flags(vcpu_pin_set=None)
+ self.flags(cpu_power_management=True, group='libvirt')
+ self.flags(cpu_power_management_strategy='governor', group='libvirt')
+
+ self.flags(allow_resize_to_same_host=True)
+ self.host_info = fakelibvirt.HostInfo(cpu_nodes=1, cpu_sockets=1,
+ cpu_cores=5, cpu_threads=2)
+ self.compute1 = self.start_compute(host_info=self.host_info,
+ hostname='compute1')
+
+ def test_create(self):
+ cpu_dedicated_set = hardware.get_cpu_dedicated_set()
+ # With the governor strategy, cores are still online but run with a
+ # powersave governor.
+ self._assert_cpu_set_state(cpu_dedicated_set, expected='powersave')
+
+ # Now, start an instance
+ server = self._create_server(
+ flavor_id=self.pcpu_flavor_id,
+ expected_state='ACTIVE')
+ # When pinned cores are in use, their governor switches to performance
+ self._assert_server_cpus_state(server, expected='performance')
+
+ def test_changing_strategy_fails(self):
+ # Arbitrarily set one core's governor to performance
+ cpu_api.Core(1).set_high_governor()
+ # and then forget about it while changing the strategy.
+ self.flags(cpu_power_management_strategy='cpu_state', group='libvirt')
+ # This time it isn't acceptable: some cores would keep the performance
+ # governor while Nova would only online/offline them.
+ self.assertRaises(exception.InvalidConfiguration,
+ self.restart_compute_service, hostname='compute1')
+
+
+class PowerManagementMixedInstances(PowerManagementTestsBase):
+ """Test suite for a single host with 6 dedicated cores, 3 shared and one
+ OS-restricted.
+ """
+
+ def setUp(self):
+ super(PowerManagementMixedInstances, self).setUp()
+
+ self.useFixture(nova_fixtures.SysFileSystemFixture())
+
+ # Defining 6 CPUs to be dedicated, not all of them contiguous.
+ self.flags(cpu_dedicated_set='1-3,5-7', cpu_shared_set='4,8-9',
+ group='compute')
+ self.flags(vcpu_pin_set=None)
+ self.flags(cpu_power_management=True, group='libvirt')
+
+ self.host_info = fakelibvirt.HostInfo(cpu_nodes=1, cpu_sockets=1,
+ cpu_cores=5, cpu_threads=2)
+ self.compute1 = self.start_compute(host_info=self.host_info,
+ hostname='compute1')
+
+ # Make sure only the 6 dedicated cores are offline now
+ cpu_dedicated_set = hardware.get_cpu_dedicated_set()
+ self._assert_cpu_set_state(cpu_dedicated_set, expected='offline')
+
+ # cores 4 and 8-9 should be online
+ self._assert_cpu_set_state({4, 8, 9}, expected='online')
+
+ def test_standard_server_works_and_passes(self):
+
+ std_flavor_id = self._create_flavor(vcpu=2)
+ self._create_server(flavor_id=std_flavor_id, expected_state='ACTIVE')
+
+ # Since this is an instance with floating vCPUs on the shared set, we
+ # can only look up the host CPUs and see they haven't changed state.
+ cpu_dedicated_set = hardware.get_cpu_dedicated_set()
+ self._assert_cpu_set_state(cpu_dedicated_set, expected='offline')
+ self._assert_cpu_set_state({4, 8, 9}, expected='online')
+
+ # We can now try to boot an instance with pinned CPUs to test the mix
+ pinned_server = self._create_server(
+ flavor_id=self.pcpu_flavor_id,
+ expected_state='ACTIVE')
+ # We'll see that its CPUs are now online
+ self._assert_server_cpus_state(pinned_server, expected='online')
+ # but it doesn't change the shared set
+ self._assert_cpu_set_state({4, 8, 9}, expected='online')
diff --git a/nova/tests/functional/test_servers.py b/nova/tests/functional/test_servers.py
index 43208aa812..5887c99081 100644
--- a/nova/tests/functional/test_servers.py
+++ b/nova/tests/functional/test_servers.py
@@ -6526,3 +6526,41 @@ class PortAndFlavorAccelsServerCreateTest(AcceleratorServerBase):
binding_profile = neutronapi.get_binding_profile(updated_port)
self.assertNotIn('arq_uuid', binding_profile)
self.assertNotIn('pci_slot', binding_profile)
+
+
+class PortBindingShelvedServerTest(integrated_helpers._IntegratedTestBase):
+ """Tests for servers with ports."""
+
+ compute_driver = 'fake.SmallFakeDriver'
+
+ def setUp(self):
+ super(PortBindingShelvedServerTest, self).setUp()
+ self.flavor_id = self._create_flavor(
+ disk=10, ephemeral=20, swap=5 * 1024)
+
+ def test_shelve_offload_with_port(self):
+ # Do not wait before offloading
+ self.flags(shelved_offload_time=0)
+
+ server = self._create_server(
+ flavor_id=self.flavor_id,
+ networks=[{'port': self.neutron.port_1['id']}])
+
+ port = self.neutron.show_port(self.neutron.port_1['id'])['port']
+
+ # Assert that the port is actually associated with the instance
+ self.assertEqual(port['device_id'], server['id'])
+ self.assertEqual(port['binding:host_id'], 'compute')
+ self.assertEqual(port['binding:status'], 'ACTIVE')
+
+ # Do shelve
+ server = self._shelve_server(server, 'SHELVED_OFFLOADED')
+
+ # Retrieve the updated port
+ port = self.neutron.show_port(self.neutron.port_1['id'])['port']
+
+ # Assert that the port is still associated with the instance
+ # but that the binding is no longer on the compute host
+ self.assertEqual(port['device_id'], server['id'])
+ self.assertIsNone(port['binding:host_id'])
+ self.assertNotIn('binding:status', port)
diff --git a/nova/tests/functional/test_service.py b/nova/tests/functional/test_service.py
index 65b41594bd..21e9a519ee 100644
--- a/nova/tests/functional/test_service.py
+++ b/nova/tests/functional/test_service.py
@@ -10,8 +10,12 @@
# License for the specific language governing permissions and limitations
# under the License.
+import functools
from unittest import mock
+import fixtures
+from oslo_utils.fixture import uuidsentinel as uuids
+
from nova import context as nova_context
from nova import exception
from nova.objects import service
@@ -19,6 +23,7 @@ from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.functional import fixtures as func_fixtures
from nova.tests.functional import integrated_helpers
+from nova.virt import node
class ServiceTestCase(test.TestCase,
@@ -137,3 +142,83 @@ class TestOldComputeCheck(
return_value=old_version):
self.assertRaises(
exception.TooOldComputeService, self._start_compute, 'host1')
+
+
+class TestComputeStartupChecks(test.TestCase):
+ STUB_COMPUTE_ID = False
+
+ def setUp(self):
+ super().setUp()
+ self.useFixture(nova_fixtures.RealPolicyFixture())
+ self.useFixture(nova_fixtures.NeutronFixture(self))
+ self.useFixture(nova_fixtures.GlanceFixture(self))
+ self.useFixture(func_fixtures.PlacementFixture())
+
+ self._local_uuid = str(uuids.node)
+
+ self.useFixture(fixtures.MockPatch(
+ 'nova.virt.node.get_local_node_uuid',
+ functools.partial(self.local_uuid, True)))
+ self.useFixture(fixtures.MockPatch(
+ 'nova.virt.node.read_local_node_uuid',
+ self.local_uuid))
+ self.useFixture(fixtures.MockPatch(
+ 'nova.virt.node.write_local_node_uuid',
+ mock.DEFAULT))
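+ # FakeDriverWithoutFakeNodes uses the real local node identity
+ # mechanism (mocked above) instead of hardcoded sentinel UUIDs.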
+ self.flags(compute_driver='fake.FakeDriverWithoutFakeNodes')
+
+ def local_uuid(self, get=False):
+ if get and not self._local_uuid:
+ # Simulate the get_local_node_uuid behavior of calling write once
+ self._local_uuid = str(uuids.node)
+ node.write_local_node_uuid(self._local_uuid)
+ return self._local_uuid
+
+ def test_compute_node_identity_greenfield(self):
+ # Level-set test case to show that starting and restarting without
+ # hitting any error cases works as expected.
+
+ # Start with no local compute_id
+ self._local_uuid = None
+ self.start_service('compute')
+
+ # Start should have generated and written a compute id
+ node.write_local_node_uuid.assert_called_once_with(str(uuids.node))
+
+ # Starting again should succeed and not cause another write
+ self.start_service('compute')
+ node.write_local_node_uuid.assert_called_once_with(str(uuids.node))
+
+ def test_compute_node_identity_deleted(self):
+ self.start_service('compute')
+
+ # Simulate the compute_id file being deleted
+ self._local_uuid = None
+
+ # Should refuse to start because it's not our first time and the file
+ # being missing is a hard error.
+ exc = self.assertRaises(exception.InvalidConfiguration,
+ self.start_service, 'compute')
+ self.assertIn('lost that state', str(exc))
+
+ def test_compute_node_hostname_changed(self):
+ # Start our compute once to create the node record
+ self.start_service('compute')
+
+ # Starting with a different hostname should trigger the abort
+ exc = self.assertRaises(exception.InvalidConfiguration,
+ self.start_service, 'compute', host='other')
+ self.assertIn('hypervisor_hostname', str(exc))
+
+ def test_compute_node_uuid_changed(self):
+ # Start our compute once to create the node record
+ self.start_service('compute')
+
+ # Simulate a changed local compute_id file
+ self._local_uuid = str(uuids.othernode)
+
+ # We should fail to create the compute node record again, but with a
+ # useful error message about why.
+ exc = self.assertRaises(exception.InvalidConfiguration,
+ self.start_service, 'compute')
+ self.assertIn('Duplicate compute node record', str(exc))
diff --git a/nova/tests/unit/compute/test_compute_mgr.py b/nova/tests/unit/compute/test_compute_mgr.py
index c923b0e967..1c69cd8f1c 100644
--- a/nova/tests/unit/compute/test_compute_mgr.py
+++ b/nova/tests/unit/compute/test_compute_mgr.py
@@ -91,6 +91,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
# os-brick>=5.1 now uses external file system locks instead of internal
# locks so we need to set up locking
REQUIRES_LOCKING = True
+ STUB_COMPUTE_ID = False
def setUp(self):
super(ComputeManagerUnitTestCase, self).setUp()
@@ -1162,24 +1163,48 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
mock_get_nodes.return_value.keys())
@mock.patch.object(objects.ComputeNodeList, 'get_all_by_uuids')
- @mock.patch.object(fake_driver.FakeDriver, 'get_available_nodes')
+ @mock.patch.object(fake_driver.FakeDriver, 'get_nodenames_by_uuid')
def test_get_nodes(self, mock_driver_get_nodes, mock_get_by_uuid):
- mock_driver_get_nodes.return_value = ['fake_node1', 'fake_node2']
+ mock_driver_get_nodes.return_value = {uuids.node_fake_node1: 'host',
+ uuids.node_fake_node2: 'host'}
# NOTE(danms): The fake driver, by default, uses
- # uuidsentinel.$node_name, so we can predict the uuids it will
+ # uuidsentinel.node_$node_name, so we can predict the uuids it will
# return here.
- cn1 = objects.ComputeNode(uuid=uuids.fake_node1)
- cn2 = objects.ComputeNode(uuid=uuids.fake_node2)
+ cn1 = objects.ComputeNode(uuid=uuids.node_fake_node1,
+ hypervisor_hostname='host')
+ cn2 = objects.ComputeNode(uuid=uuids.node_fake_node2,
+ hypervisor_hostname='host')
mock_get_by_uuid.return_value = [cn1, cn2]
nodes = self.compute._get_nodes(self.context)
- self.assertEqual({uuids.fake_node1: cn1, uuids.fake_node2: cn2}, nodes)
+ self.assertEqual({uuids.node_fake_node1: cn1,
+ uuids.node_fake_node2: cn2}, nodes)
mock_driver_get_nodes.assert_called_once_with()
mock_get_by_uuid.assert_called_once_with(self.context,
- [uuids.fake_node1,
- uuids.fake_node2])
+ [uuids.node_fake_node1,
+ uuids.node_fake_node2])
+
+ @mock.patch.object(objects.ComputeNodeList, 'get_all_by_uuids')
+ @mock.patch.object(fake_driver.FakeDriver, 'get_nodenames_by_uuid')
+ def test_get_nodes_mismatch(self, mock_driver_get_nodes, mock_get_by_uuid):
+ # Virt driver reports a (hypervisor_) hostname of 'host1'
+ mock_driver_get_nodes.return_value = {uuids.node_fake_node1: 'host1',
+ uuids.node_fake_node2: 'host1'}
+
+ # The database records for our compute nodes (by UUID) show a
+ # hypervisor_hostname of 'host2'
+ cn1 = objects.ComputeNode(uuid=uuids.node_fake_node1,
+ hypervisor_hostname='host2')
+ cn2 = objects.ComputeNode(uuid=uuids.node_fake_node2,
+ hypervisor_hostname='host2')
+ mock_get_by_uuid.return_value = [cn1, cn2]
+
+ # Possible hostname (as reported by the virt driver) rename,
+ # which should abort our startup
+ self.assertRaises(exception.InvalidConfiguration,
+ self.compute._get_nodes, self.context)
@mock.patch.object(manager.LOG, 'warning')
@mock.patch.object(
@@ -1202,11 +1227,11 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
@mock.patch.object(manager.LOG, 'warning')
@mock.patch.object(objects.ComputeNodeList, 'get_all_by_uuids')
- @mock.patch.object(fake_driver.FakeDriver, 'get_available_node_uuids')
+ @mock.patch.object(fake_driver.FakeDriver, 'get_nodenames_by_uuid')
def test_get_nodes_node_not_found(
self, mock_driver_get_nodes, mock_get_all_by_uuids,
mock_log_warning):
- mock_driver_get_nodes.return_value = ['fake-node1']
+ mock_driver_get_nodes.return_value = {uuids.node_1: 'fake-node1'}
mock_get_all_by_uuids.return_value = []
nodes = self.compute._get_nodes(self.context)
@@ -1215,11 +1240,11 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
mock_driver_get_nodes.assert_called_once_with()
mock_get_all_by_uuids.assert_called_once_with(self.context,
- ['fake-node1'])
+ [uuids.node_1])
mock_log_warning.assert_called_once_with(
"Compute nodes %s for host %s were not found in the database. "
"If this is the first time this service is starting on this host, "
- "then you can ignore this warning.", ['fake-node1'], 'fake-mini')
+ "then you can ignore this warning.", [uuids.node_1], 'fake-mini')
def test_init_host_disk_devices_configuration_failure(self):
self.flags(max_disk_devices_to_attach=0, group='compute')
@@ -6337,13 +6362,15 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
'two-image': 'existing'}, r)
@mock.patch.object(virt_node, 'write_local_node_uuid')
- def test_ensure_node_uuid_not_needed_version(self, mock_node):
+ @mock.patch.object(virt_node, 'read_local_node_uuid')
+ def test_ensure_node_uuid_not_needed_version(self, mock_read, mock_write):
# Make sure an up-to-date service bypasses the persistence
service_ref = service_obj.Service()
self.assertEqual(service_obj.SERVICE_VERSION, service_ref.version)
- mock_node.assert_not_called()
+ mock_read.return_value = 'not none'
+ mock_write.assert_not_called()
self.compute._ensure_existing_node_identity(service_ref)
- mock_node.assert_not_called()
+ mock_write.assert_not_called()
@mock.patch.object(virt_node, 'write_local_node_uuid')
def test_ensure_node_uuid_not_needed_ironic(self, mock_node):
@@ -6428,6 +6455,20 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
mock_get_cn.assert_called_once_with(mock.ANY, self.compute.host)
mock_write_node.assert_called_once_with(str(uuids.compute))
+ @mock.patch.object(virt_node, 'read_local_node_uuid')
+ def test_ensure_node_uuid_missing_file_ironic(self, mock_read):
+ mock_service = mock.MagicMock(
+ version=service_obj.NODE_IDENTITY_VERSION)
+ mock_read.return_value = None
+ self.assertRaises(exception.InvalidConfiguration,
+ self.compute._ensure_existing_node_identity,
+ mock_service)
+ mock_read.assert_called_once_with()
+
+ # Now make sure that ironic causes this exact configuration to pass
+ self.flags(compute_driver='ironic')
+ self.compute._ensure_existing_node_identity(mock_service)
+
def test_ensure_node_uuid_called_by_init_host(self):
# test_init_host() above ensures that we do not call
# _ensure_existing_node_identity() in the service_ref=None case.
@@ -9607,9 +9648,15 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
self.assertEqual(driver_console.get_connection_info.return_value,
console)
+ @mock.patch('nova.utils.pass_context')
@mock.patch('nova.compute.manager.ComputeManager.'
'_do_live_migration')
- def _test_max_concurrent_live(self, mock_lm):
+ def _test_max_concurrent_live(self, mock_lm, mock_pass_context):
+ # pass_context wraps the function, which doesn't work with a mock,
+ # so we simply mock it too
+ def _mock_pass_context(runner, func, *args, **kwargs):
+ return runner(func, *args, **kwargs)
+ mock_pass_context.side_effect = _mock_pass_context
@mock.patch('nova.objects.Migration.save')
def _do_it(mock_mig_save):
diff --git a/nova/tests/unit/compute/test_resource_tracker.py b/nova/tests/unit/compute/test_resource_tracker.py
index dfea323a9a..cd36b8987f 100644
--- a/nova/tests/unit/compute/test_resource_tracker.py
+++ b/nova/tests/unit/compute/test_resource_tracker.py
@@ -1552,6 +1552,20 @@ class TestInitComputeNode(BaseTestCase):
self.assertEqual('fake-host', node.host)
mock_update.assert_called()
+ @mock.patch.object(resource_tracker.ResourceTracker,
+ '_get_compute_node',
+ return_value=None)
+ @mock.patch('nova.objects.compute_node.ComputeNode.create')
+ def test_create_failed_conflict(self, mock_create, mock_getcn):
+ self._setup_rt()
+ resources = {'hypervisor_hostname': 'node1',
+ 'uuid': uuids.node1}
+ mock_create.side_effect = exc.DuplicateRecord(target='foo')
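+ # A duplicate compute node record at create time is treated as a
+ # fatal misconfiguration rather than being retried.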
+ self.assertRaises(exc.InvalidConfiguration,
+ self.rt._init_compute_node,
+ mock.MagicMock,
+ resources)
+
@ddt.ddt
class TestUpdateComputeNode(BaseTestCase):
diff --git a/nova/tests/unit/compute/test_shelve.py b/nova/tests/unit/compute/test_shelve.py
index f95a722ced..0a1e3f54fc 100644
--- a/nova/tests/unit/compute/test_shelve.py
+++ b/nova/tests/unit/compute/test_shelve.py
@@ -209,6 +209,7 @@ class ShelveComputeManagerTestCase(test_compute.BaseTestCase):
instance = self._shelve_offload(clean_shutdown=False)
mock_power_off.assert_called_once_with(instance, 0, 0)
+ @mock.patch.object(neutron_api.API, 'unbind_ports')
@mock.patch.object(compute_utils, 'EventReporter')
@mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid')
@mock.patch.object(nova.compute.manager.ComputeManager,
@@ -225,7 +226,7 @@ class ShelveComputeManagerTestCase(test_compute.BaseTestCase):
def _shelve_offload(self, mock_notify, mock_notify_instance_usage,
mock_get_power_state, mock_update_resource_tracker,
mock_delete_alloc, mock_terminate, mock_get_bdms,
- mock_event, clean_shutdown=True):
+ mock_event, mock_unbind_ports, clean_shutdown=True):
host = 'fake-mini'
instance = self._create_fake_instance_obj(params={'host': host})
instance.task_state = task_states.SHELVING
@@ -278,6 +279,9 @@ class ShelveComputeManagerTestCase(test_compute.BaseTestCase):
instance.uuid,
graceful_exit=False)
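+ # Shelve offload must also unbind the instance ports from the host.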
+ mock_unbind_ports.assert_called_once_with(
+ self.context, mock.ANY, detach=False)
+
return instance
@mock.patch('nova.compute.utils.'
diff --git a/nova/tests/unit/objects/test_compute_node.py b/nova/tests/unit/objects/test_compute_node.py
index 63b070c543..84c4e87785 100644
--- a/nova/tests/unit/objects/test_compute_node.py
+++ b/nova/tests/unit/objects/test_compute_node.py
@@ -16,6 +16,7 @@ import copy
from unittest import mock
import netaddr
+from oslo_db import exception as db_exc
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel
from oslo_utils import timeutils
@@ -341,6 +342,14 @@ class _TestComputeNodeObject(object):
'uuid': uuidsentinel.fake_compute_node}
mock_create.assert_called_once_with(self.context, param_dict)
+ @mock.patch('nova.db.main.api.compute_node_create')
+ def test_create_duplicate(self, mock_create):
+ mock_create.side_effect = db_exc.DBDuplicateEntry
+ compute = compute_node.ComputeNode(context=self.context)
+ compute.service_id = 456
+ compute.hypervisor_hostname = 'node1'
+ self.assertRaises(exception.DuplicateRecord, compute.create)
+
@mock.patch.object(db, 'compute_node_update')
@mock.patch(
'nova.db.main.api.compute_node_get', return_value=fake_compute_node)
diff --git a/nova/tests/unit/scheduler/test_manager.py b/nova/tests/unit/scheduler/test_manager.py
index e7866069b3..e992fe6034 100644
--- a/nova/tests/unit/scheduler/test_manager.py
+++ b/nova/tests/unit/scheduler/test_manager.py
@@ -19,6 +19,7 @@ Tests For Scheduler
from unittest import mock
+from keystoneauth1 import exceptions as ks_exc
import oslo_messaging as messaging
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel as uuids
@@ -1688,6 +1689,41 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
mock_log_warning.assert_not_called()
mock_log_debug.assert_called_once_with(msg)
+ @mock.patch('nova.scheduler.client.report.report_client_singleton')
+ @mock.patch.object(manager, 'LOG')
+ @mock.patch('nova.scheduler.host_manager.HostManager')
+ @mock.patch('nova.servicegroup.API')
+ @mock.patch('nova.rpc.get_notifier')
+ def test_init_lazy_placement_client(self, mock_rpc, mock_sg, mock_hm,
+ mock_log, mock_report):
+ # Simulate keystone or placement being offline at startup
+ mock_report.side_effect = ks_exc.RequestTimeout
+ mgr = manager.SchedulerManager()
+ mock_report.assert_called_once_with()
+ self.assertTrue(mock_log.warning.called)
+
+ # Make sure we're raising the actual error to subsequent callers
+ self.assertRaises(ks_exc.RequestTimeout, lambda: mgr.placement_client)
+
+ # Simulate recovery of the keystone or placement service
+ mock_report.reset_mock(side_effect=True)
+ mgr.placement_client
+ mock_report.assert_called_once_with()
+
+ @mock.patch('nova.scheduler.client.report.report_client_singleton')
+ @mock.patch('nova.scheduler.host_manager.HostManager')
+ @mock.patch('nova.servicegroup.API')
+ @mock.patch('nova.rpc.get_notifier')
+ def test_init_lazy_placement_client_failures(self, mock_rpc, mock_sg,
+ mock_hm, mock_report):
+ # Certain keystoneclient exceptions are fatal
+ mock_report.side_effect = ks_exc.Unauthorized
+ self.assertRaises(ks_exc.Unauthorized, manager.SchedulerManager)
+
+ # Anything else is fatal
+ mock_report.side_effect = test.TestingException
+ self.assertRaises(test.TestingException, manager.SchedulerManager)
+
class SchedulerManagerAllocationCandidateTestCase(test.NoDBTestCase):
diff --git a/nova/tests/unit/test_filesystem.py b/nova/tests/unit/test_filesystem.py
new file mode 100644
index 0000000000..85f16157ee
--- /dev/null
+++ b/nova/tests/unit/test_filesystem.py
@@ -0,0 +1,52 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+from unittest import mock
+
+from nova import exception
+from nova import filesystem
+from nova import test
+
+
+class TestFSCommon(test.NoDBTestCase):
+
+ def test_read_sys(self):
+ open_mock = mock.mock_open(read_data='bar')
+ with mock.patch('builtins.open', open_mock) as m_open:
+ self.assertEqual('bar', filesystem.read_sys('foo'))
+ expected_path = os.path.join(filesystem.SYS, 'foo')
+ m_open.assert_called_once_with(expected_path, mode='r')
+
+ def test_read_sys_error(self):
+ with mock.patch('builtins.open',
+ side_effect=OSError('error')) as m_open:
+ self.assertRaises(exception.FileNotFound,
+ filesystem.read_sys, 'foo')
+ expected_path = os.path.join(filesystem.SYS, 'foo')
+ m_open.assert_called_once_with(expected_path, mode='r')
+
+ def test_write_sys(self):
+ open_mock = mock.mock_open()
+ with mock.patch('builtins.open', open_mock) as m_open:
+ self.assertIsNone(filesystem.write_sys('foo', 'bar'))
+ expected_path = os.path.join(filesystem.SYS, 'foo')
+ m_open.assert_called_once_with(expected_path, mode='w')
+ open_mock().write.assert_called_once_with('bar')
+
+ def test_write_sys_error(self):
+ with mock.patch('builtins.open',
+ side_effect=OSError('fake_error')) as m_open:
+ self.assertRaises(exception.FileNotFound,
+ filesystem.write_sys, 'foo', 'bar')
+ expected_path = os.path.join(filesystem.SYS, 'foo')
+ m_open.assert_called_once_with(expected_path, mode='w')
diff --git a/nova/tests/unit/virt/libvirt/cpu/__init__.py b/nova/tests/unit/virt/libvirt/cpu/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/nova/tests/unit/virt/libvirt/cpu/__init__.py
diff --git a/nova/tests/unit/virt/libvirt/cpu/test_api.py b/nova/tests/unit/virt/libvirt/cpu/test_api.py
new file mode 100644
index 0000000000..b5bcb762f3
--- /dev/null
+++ b/nova/tests/unit/virt/libvirt/cpu/test_api.py
@@ -0,0 +1,194 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from unittest import mock
+
+from nova import exception
+from nova import objects
+from nova import test
+from nova.virt.libvirt.cpu import api
+from nova.virt.libvirt.cpu import core
+
+
+class TestAPI(test.NoDBTestCase):
+
+ def setUp(self):
+ super(TestAPI, self).setUp()
+ self.core_1 = api.Core(1)
+
+ # Create a fake instance with two pinned CPUs but only one is on the
+ # dedicated set
+ numa_topology = objects.InstanceNUMATopology(cells=[
+ objects.InstanceNUMACell(cpu_pinning_raw={'0': '0', '2': '2'}),
+ ])
+ self.fake_inst = objects.Instance(numa_topology=numa_topology)
+
+ @mock.patch.object(core, 'get_online')
+ def test_online(self, mock_get_online):
+ mock_get_online.return_value = True
+ self.assertTrue(self.core_1.online)
+ mock_get_online.assert_called_once_with(self.core_1.ident)
+
+ @mock.patch.object(core, 'set_online')
+ def test_set_online(self, mock_set_online):
+ self.core_1.online = True
+ mock_set_online.assert_called_once_with(self.core_1.ident)
+
+ @mock.patch.object(core, 'set_offline')
+ def test_set_offline(self, mock_set_offline):
+ self.core_1.online = False
+ mock_set_offline.assert_called_once_with(self.core_1.ident)
+
+ def test_hash(self):
+ self.assertEqual(hash(self.core_1.ident), hash(self.core_1))
+
+ @mock.patch.object(core, 'get_governor')
+ def test_governor(self, mock_get_governor):
+ mock_get_governor.return_value = 'fake_governor'
+ self.assertEqual('fake_governor', self.core_1.governor)
+ mock_get_governor.assert_called_once_with(self.core_1.ident)
+
+ @mock.patch.object(core, 'set_governor')
+ def test_set_governor_low(self, mock_set_governor):
+ self.flags(cpu_power_governor_low='fake_low_gov', group='libvirt')
+ self.core_1.set_low_governor()
+ mock_set_governor.assert_called_once_with(self.core_1.ident,
+ 'fake_low_gov')
+
+ @mock.patch.object(core, 'set_governor')
+ def test_set_governor_high(self, mock_set_governor):
+ self.flags(cpu_power_governor_high='fake_high_gov', group='libvirt')
+ self.core_1.set_high_governor()
+ mock_set_governor.assert_called_once_with(self.core_1.ident,
+ 'fake_high_gov')
+
+ @mock.patch.object(core, 'set_online')
+ def test_power_up_online(self, mock_online):
+ self.flags(cpu_power_management=True, group='libvirt')
+ self.flags(cpu_dedicated_set='0-1', group='compute')
+
+ api.power_up(self.fake_inst)
+ # only core #0 can be set as core #2 is not on the dedicated set
+ # As a reminder, setting core(i).online = True calls set_online(i)
+ mock_online.assert_called_once_with(0)
+
+ @mock.patch.object(core, 'set_governor')
+ def test_power_up_governor(self, mock_set_governor):
+ self.flags(cpu_power_management=True, group='libvirt')
+ self.flags(cpu_power_management_strategy='governor', group='libvirt')
+ self.flags(cpu_dedicated_set='0-1', group='compute')
+
+ api.power_up(self.fake_inst)
+ # only core #0 can be set as core #2 is not on the dedicated set
+ # As a reminder, core(i).set_high_governor calls set_governor(i)
+ mock_set_governor.assert_called_once_with(0, 'performance')
+
+ @mock.patch.object(core, 'set_online')
+ def test_power_up_skipped(self, mock_online):
+ self.flags(cpu_power_management=False, group='libvirt')
+ api.power_up(self.fake_inst)
+ mock_online.assert_not_called()
+
+ @mock.patch.object(core, 'set_online')
+ def test_power_up_skipped_if_standard_instance(self, mock_online):
+ self.flags(cpu_power_management=True, group='libvirt')
+ api.power_up(objects.Instance(numa_topology=None))
+ mock_online.assert_not_called()
+
+ @mock.patch.object(core, 'set_offline')
+ def test_power_down_offline(self, mock_offline):
+ self.flags(cpu_power_management=True, group='libvirt')
+ self.flags(cpu_dedicated_set='0-1', group='compute')
+
+ api.power_down(self.fake_inst)
+ # only core #0 can be set as core #2 is not on the dedicated set
+ # As a reminder, setting core(i).online = False calls set_offline(i)
+ mock_offline.assert_called_once_with(0)
+
+ @mock.patch.object(core, 'set_governor')
+ def test_power_down_governor(self, mock_set_governor):
+ self.flags(cpu_power_management=True, group='libvirt')
+ self.flags(cpu_power_management_strategy='governor', group='libvirt')
+ self.flags(cpu_dedicated_set='0-1', group='compute')
+
+ api.power_down(self.fake_inst)
+ # only core #0 can be set as core #2 is not on the dedicated set
+ # As a reminder, core(i).set_low_governor calls set_governor(i)
+ mock_set_governor.assert_called_once_with(0, 'powersave')
+
+ @mock.patch.object(core, 'set_offline')
+ def test_power_down_skipped(self, mock_offline):
+ self.flags(cpu_power_management=False, group='libvirt')
+ api.power_down(self.fake_inst)
+ mock_offline.assert_not_called()
+
+ @mock.patch.object(core, 'set_offline')
+ def test_power_down_skipped_if_standard_instance(self, mock_offline):
+ self.flags(cpu_power_management=True, group='libvirt')
+ api.power_down(objects.Instance(numa_topology=None))
+ mock_offline.assert_not_called()
+
+ @mock.patch.object(core, 'set_offline')
+ def test_power_down_all_dedicated_cpus_offline(self, mock_offline):
+ self.flags(cpu_power_management=True, group='libvirt')
+ self.flags(cpu_dedicated_set='0-1', group='compute')
+
+ api.power_down_all_dedicated_cpus()
+ # All dedicated CPUs are turned offline
+ mock_offline.assert_has_calls([mock.call(0), mock.call(1)])
+
+ @mock.patch.object(core, 'set_governor')
+ def test_power_down_all_dedicated_cpus_governor(self, mock_set_governor):
+ self.flags(cpu_power_management=True, group='libvirt')
+ self.flags(cpu_power_management_strategy='governor', group='libvirt')
+ self.flags(cpu_dedicated_set='0-1', group='compute')
+
+ api.power_down_all_dedicated_cpus()
+ # All dedicated CPUs are set to the powersave governor
+ mock_set_governor.assert_has_calls([mock.call(0, 'powersave'),
+ mock.call(1, 'powersave')])
+
+ @mock.patch.object(core, 'set_offline')
+ def test_power_down_all_dedicated_cpus_skipped(self, mock_offline):
+ self.flags(cpu_power_management=False, group='libvirt')
+ api.power_down_all_dedicated_cpus()
+ mock_offline.assert_not_called()
+
+ def test_power_down_all_dedicated_cpus_wrong_config(self):
+ self.flags(cpu_power_management=True, group='libvirt')
+ self.flags(cpu_dedicated_set=None, group='compute')
+ self.assertRaises(exception.InvalidConfiguration,
+ api.power_down_all_dedicated_cpus)
+
+ @mock.patch.object(core, 'get_governor')
+ @mock.patch.object(core, 'get_online')
+ def test_validate_all_dedicated_cpus_for_governor(self, mock_get_online,
+ mock_get_governor):
+ self.flags(cpu_power_management=True, group='libvirt')
+ self.flags(cpu_dedicated_set='0-1', group='compute')
+ self.flags(cpu_power_management_strategy='governor', group='libvirt')
+ mock_get_governor.return_value = 'performance'
+ mock_get_online.side_effect = (True, False)
+ self.assertRaises(exception.InvalidConfiguration,
+ api.validate_all_dedicated_cpus)
+
+ @mock.patch.object(core, 'get_governor')
+ @mock.patch.object(core, 'get_online')
+ def test_validate_all_dedicated_cpus_for_cpu_state(self, mock_get_online,
+ mock_get_governor):
+ self.flags(cpu_power_management=True, group='libvirt')
+ self.flags(cpu_dedicated_set='0-1', group='compute')
+ self.flags(cpu_power_management_strategy='cpu_state', group='libvirt')
+ mock_get_online.return_value = True
+ mock_get_governor.side_effect = ('powersave', 'performance')
+ self.assertRaises(exception.InvalidConfiguration,
+ api.validate_all_dedicated_cpus)
diff --git a/nova/tests/unit/virt/libvirt/cpu/test_core.py b/nova/tests/unit/virt/libvirt/cpu/test_core.py
new file mode 100644
index 0000000000..a3cba00d3b
--- /dev/null
+++ b/nova/tests/unit/virt/libvirt/cpu/test_core.py
@@ -0,0 +1,122 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from unittest import mock
+
+from nova import exception
+from nova import test
+from nova.tests import fixtures
+from nova.virt.libvirt.cpu import core
+
+
+class TestCore(test.NoDBTestCase):
+
+ @mock.patch.object(core.filesystem, 'read_sys')
+ @mock.patch.object(core.hardware, 'parse_cpu_spec')
+ def test_get_available_cores(self, mock_parse_cpu_spec, mock_read_sys):
+ mock_read_sys.return_value = '1-2'
+ mock_parse_cpu_spec.return_value = set([1, 2])
+ self.assertEqual(set([1, 2]), core.get_available_cores())
+ mock_read_sys.assert_called_once_with(core.AVAILABLE_PATH)
+ mock_parse_cpu_spec.assert_called_once_with('1-2')
+
+ @mock.patch.object(core.filesystem, 'read_sys')
+ @mock.patch.object(core.hardware, 'parse_cpu_spec')
+ def test_get_available_cores_none(
+ self, mock_parse_cpu_spec, mock_read_sys):
+ mock_read_sys.return_value = ''
+ self.assertEqual(set(), core.get_available_cores())
+ mock_parse_cpu_spec.assert_not_called()
+
+ @mock.patch.object(core, 'get_available_cores')
+ def test_exists(self, mock_get_available_cores):
+ mock_get_available_cores.return_value = set([1])
+ self.assertTrue(core.exists(1))
+ mock_get_available_cores.assert_called_once_with()
+ self.assertFalse(core.exists(2))
+
+ @mock.patch.object(
+ core, 'CPU_PATH_TEMPLATE',
+ new_callable=mock.PropertyMock(return_value='/sys/blah%(core)s'))
+ @mock.patch.object(core, 'exists')
+ def test_gen_cpu_path(self, mock_exists, mock_cpu_path):
+ mock_exists.return_value = True
+ self.assertEqual('/sys/blah1', core.gen_cpu_path(1))
+ mock_exists.assert_called_once_with(1)
+
+ @mock.patch.object(core, 'exists')
+ def test_gen_cpu_path_raises(self, mock_exists):
+ mock_exists.return_value = False
+ self.assertRaises(ValueError, core.gen_cpu_path, 1)
+ self.assertIn('Unable to access CPU: 1', self.stdlog.logger.output)
+
+
+class TestCoreHelpers(test.NoDBTestCase):
+
+ def setUp(self):
+ super(TestCoreHelpers, self).setUp()
+ self.useFixture(fixtures.PrivsepFixture())
+ _p1 = mock.patch.object(core, 'exists', return_value=True)
+ self.mock_exists = _p1.start()
+ self.addCleanup(_p1.stop)
+
+ _p2 = mock.patch.object(core, 'gen_cpu_path',
+ side_effect=lambda x: '/fakesys/blah%s' % x)
+ self.mock_gen_cpu_path = _p2.start()
+ self.addCleanup(_p2.stop)
+
+ @mock.patch.object(core.filesystem, 'read_sys')
+ def test_get_online(self, mock_read_sys):
+ mock_read_sys.return_value = '1'
+ self.assertTrue(core.get_online(1))
+ mock_read_sys.assert_called_once_with('/fakesys/blah1/online')
+
+ @mock.patch.object(core.filesystem, 'read_sys')
+ def test_get_online_not_exists(self, mock_read_sys):
+ mock_read_sys.side_effect = exception.FileNotFound(file_path='foo')
+ self.assertTrue(core.get_online(1))
+ mock_read_sys.assert_called_once_with('/fakesys/blah1/online')
+
+ @mock.patch.object(core.filesystem, 'write_sys')
+ @mock.patch.object(core, 'get_online')
+ def test_set_online(self, mock_get_online, mock_write_sys):
+ mock_get_online.return_value = True
+ self.assertTrue(core.set_online(1))
+ mock_write_sys.assert_called_once_with('/fakesys/blah1/online',
+ data='1')
+ mock_get_online.assert_called_once_with(1)
+
+ @mock.patch.object(core.filesystem, 'write_sys')
+ @mock.patch.object(core, 'get_online')
+ def test_set_offline(self, mock_get_online, mock_write_sys):
+ mock_get_online.return_value = False
+ self.assertTrue(core.set_offline(1))
+ mock_write_sys.assert_called_once_with('/fakesys/blah1/online',
+ data='0')
+ mock_get_online.assert_called_once_with(1)
+
+ @mock.patch.object(core.filesystem, 'read_sys')
+ def test_get_governor(self, mock_read_sys):
+ mock_read_sys.return_value = 'fake_gov'
+ self.assertEqual('fake_gov', core.get_governor(1))
+ mock_read_sys.assert_called_once_with(
+ '/fakesys/blah1/cpufreq/scaling_governor')
+
+ @mock.patch.object(core, 'get_governor')
+ @mock.patch.object(core.filesystem, 'write_sys')
+ def test_set_governor(self, mock_write_sys, mock_get_governor):
+ mock_get_governor.return_value = 'fake_gov'
+ self.assertEqual('fake_gov',
+ core.set_governor(1, 'fake_gov'))
+ mock_write_sys.assert_called_once_with(
+ '/fakesys/blah1/cpufreq/scaling_governor', data='fake_gov')
+ mock_get_governor.assert_called_once_with(1)
diff --git a/nova/tests/unit/virt/libvirt/test_config.py b/nova/tests/unit/virt/libvirt/test_config.py
index 8f840e8859..3d0b5ae685 100644
--- a/nova/tests/unit/virt/libvirt/test_config.py
+++ b/nova/tests/unit/virt/libvirt/test_config.py
@@ -1537,7 +1537,7 @@ class LibvirtConfigGuestInputTest(LibvirtConfigBaseTest):
class LibvirtConfigGuestGraphicsTest(LibvirtConfigBaseTest):
- def test_config_graphics(self):
+ def test_config_graphics_vnc(self):
obj = config.LibvirtConfigGuestGraphics()
obj.type = "vnc"
obj.autoport = True
@@ -1549,6 +1549,30 @@ class LibvirtConfigGuestGraphicsTest(LibvirtConfigBaseTest):
<graphics type="vnc" autoport="yes" keymap="en_US" listen="127.0.0.1"/>
""")
+ def test_config_graphics_spice(self):
+ obj = config.LibvirtConfigGuestGraphics()
+ obj.type = "spice"
+ obj.autoport = False
+ obj.keymap = "en_US"
+ obj.listen = "127.0.0.1"
+
+ obj.image_compression = "auto_glz"
+ obj.jpeg_compression = "auto"
+ obj.zlib_compression = "always"
+ obj.playback_compression = True
+ obj.streaming_mode = "filter"
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <graphics type="spice" autoport="no" keymap="en_US" listen="127.0.0.1">
+ <image compression="auto_glz"/>
+ <jpeg compression="auto"/>
+ <zlib compression="always"/>
+ <playback compression="on"/>
+ <streaming mode="filter"/>
+ </graphics>
+ """)
+
class LibvirtConfigGuestHostdev(LibvirtConfigBaseTest):
diff --git a/nova/tests/unit/virt/libvirt/test_driver.py b/nova/tests/unit/virt/libvirt/test_driver.py
index e9b7a2133e..2b58c7df8b 100644
--- a/nova/tests/unit/virt/libvirt/test_driver.py
+++ b/nova/tests/unit/virt/libvirt/test_driver.py
@@ -3402,7 +3402,8 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertEqual(
"Memory encryption requested by hw:mem_encryption extra spec in "
"m1.fake flavor but image fake_image doesn't have "
- "'hw_firmware_type' property set to 'uefi'", str(exc))
+ "'hw_firmware_type' property set to 'uefi' or volume-backed "
+ "instance was requested", str(exc))
def test_sev_enabled_host_extra_spec_no_machine_type(self):
exc = self.assertRaises(exception.InvalidMachineType,
@@ -5839,6 +5840,11 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertEqual(cfg.devices[3].type, 'vnc')
self.assertEqual(cfg.devices[3].listen, '10.0.0.1')
self.assertIsNone(cfg.devices[3].keymap)
+ self.assertIsNone(cfg.devices[3].image_compression)
+ self.assertIsNone(cfg.devices[3].jpeg_compression)
+ self.assertIsNone(cfg.devices[3].zlib_compression)
+ self.assertIsNone(cfg.devices[3].playback_compression)
+ self.assertIsNone(cfg.devices[3].streaming_mode)
def test_get_guest_config_with_vnc_and_tablet(self):
self.flags(enabled=True, group='vnc')
@@ -5869,6 +5875,11 @@ class LibvirtConnTestCase(test.NoDBTestCase,
vconfig.LibvirtConfigMemoryBalloon)
self.assertEqual(cfg.devices[3].type, 'vnc')
+ self.assertIsNone(cfg.devices[3].image_compression)
+ self.assertIsNone(cfg.devices[3].jpeg_compression)
+ self.assertIsNone(cfg.devices[3].zlib_compression)
+ self.assertIsNone(cfg.devices[3].playback_compression)
+ self.assertIsNone(cfg.devices[3].streaming_mode)
self.assertEqual(cfg.devices[5].type, 'tablet')
def test_get_guest_config_with_spice_and_tablet(self):
@@ -5905,6 +5916,11 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertEqual(cfg.devices[3].type, 'spice')
self.assertEqual(cfg.devices[3].listen, '10.0.0.1')
self.assertIsNone(cfg.devices[3].keymap)
+ self.assertIsNone(cfg.devices[3].image_compression)
+ self.assertIsNone(cfg.devices[3].jpeg_compression)
+ self.assertIsNone(cfg.devices[3].zlib_compression)
+ self.assertIsNone(cfg.devices[3].playback_compression)
+ self.assertIsNone(cfg.devices[3].streaming_mode)
self.assertEqual(cfg.devices[5].type, 'tablet')
@mock.patch.object(host.Host, "_check_machine_type", new=mock.Mock())
@@ -5964,8 +5980,57 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertEqual(cfg.devices[3].target_name, "com.redhat.spice.0")
self.assertEqual(cfg.devices[3].type, 'spicevmc')
self.assertEqual(cfg.devices[4].type, "spice")
+ self.assertIsNone(cfg.devices[4].image_compression)
+ self.assertIsNone(cfg.devices[4].jpeg_compression)
+ self.assertIsNone(cfg.devices[4].zlib_compression)
+ self.assertIsNone(cfg.devices[4].playback_compression)
+ self.assertIsNone(cfg.devices[4].streaming_mode)
self.assertEqual(cfg.devices[5].type, video_type)
+ def test_get_guest_config_with_spice_compression(self):
+ self.flags(enabled=False, group='vnc')
+ self.flags(virt_type='kvm', group='libvirt')
+ self.flags(enabled=True,
+ agent_enabled=False,
+ image_compression='auto_lz',
+ jpeg_compression='never',
+ zlib_compression='always',
+ playback_compression=False,
+ streaming_mode='all',
+ server_listen='10.0.0.1',
+ group='spice')
+ self.flags(pointer_model='usbtablet')
+
+ cfg = self._get_guest_config_with_graphics()
+
+ self.assertEqual(len(cfg.devices), 9)
+ self.assertIsInstance(cfg.devices[0],
+ vconfig.LibvirtConfigGuestDisk)
+ self.assertIsInstance(cfg.devices[1],
+ vconfig.LibvirtConfigGuestDisk)
+ self.assertIsInstance(cfg.devices[2],
+ vconfig.LibvirtConfigGuestSerial)
+ self.assertIsInstance(cfg.devices[3],
+ vconfig.LibvirtConfigGuestGraphics)
+ self.assertIsInstance(cfg.devices[4],
+ vconfig.LibvirtConfigGuestVideo)
+ self.assertIsInstance(cfg.devices[5],
+ vconfig.LibvirtConfigGuestInput)
+ self.assertIsInstance(cfg.devices[6],
+ vconfig.LibvirtConfigGuestRng)
+ self.assertIsInstance(cfg.devices[7],
+ vconfig.LibvirtConfigGuestUSBHostController)
+ self.assertIsInstance(cfg.devices[8],
+ vconfig.LibvirtConfigMemoryBalloon)
+
+ self.assertEqual(cfg.devices[3].type, 'spice')
+ self.assertEqual(cfg.devices[3].listen, '10.0.0.1')
+ self.assertEqual(cfg.devices[3].image_compression, 'auto_lz')
+ self.assertEqual(cfg.devices[3].jpeg_compression, 'never')
+ self.assertEqual(cfg.devices[3].zlib_compression, 'always')
+ self.assertFalse(cfg.devices[3].playback_compression)
+ self.assertEqual(cfg.devices[3].streaming_mode, 'all')
+
@mock.patch.object(host.Host, 'get_guest')
@mock.patch.object(libvirt_driver.LibvirtDriver,
'_get_serial_ports_from_guest')
@@ -9190,6 +9255,34 @@ class LibvirtConnTestCase(test.NoDBTestCase,
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.assertRaises(exception.Invalid, drvr._get_pcpu_available)
+ @mock.patch('nova.virt.libvirt.host.Host.get_available_cpus',
+ return_value=set([0, 1, 2, 3]))
+ def test_get_pcpu_available_for_power_mgmt(self, get_available_cpus):
+ """Test what happens when the '[compute] cpu_dedicated_set' config
        option is set and power management is enabled.
+ """
+ self.flags(vcpu_pin_set=None)
+ self.flags(cpu_dedicated_set='2-3', cpu_shared_set=None,
+ group='compute')
+ self.flags(cpu_power_management=True, group='libvirt')
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ pcpus = drvr._get_pcpu_available()
+ self.assertEqual(set([2, 3]), pcpus)
+
+ @mock.patch('nova.virt.libvirt.host.Host.get_available_cpus',
+ return_value=set([4, 5]))
+ def test_get_pcpu_available__cpu_dedicated_set_invalid_for_pm(self,
+ get_available_cpus):
+ """Test what happens when the '[compute] cpu_dedicated_set' config
        option is set but is invalid when power management is enabled.
+ """
+ self.flags(vcpu_pin_set=None)
+ self.flags(cpu_dedicated_set='4-6', cpu_shared_set=None,
+ group='compute')
+ self.flags(cpu_power_management=True, group='libvirt')
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ self.assertRaises(exception.Invalid, drvr._get_pcpu_available)
+
@mock.patch('nova.virt.libvirt.host.Host.get_online_cpus',
return_value=set([0, 1, 2, 3]))
def test_get_vcpu_available(self, get_online_cpus):
diff --git a/nova/tests/unit/virt/libvirt/test_host.py b/nova/tests/unit/virt/libvirt/test_host.py
index 3afd6c139d..631b10d81a 100644
--- a/nova/tests/unit/virt/libvirt/test_host.py
+++ b/nova/tests/unit/virt/libvirt/test_host.py
@@ -1052,6 +1052,12 @@ Active: 8381604 kB
'iowait': 6121490000000},
stats)
+ @mock.patch.object(fakelibvirt.virConnect, "getCPUMap")
+ def test_get_available_cpus(self, mock_map):
+ mock_map.return_value = (4, [True, True, False, False], None)
+ result = self.host.get_available_cpus()
+ self.assertEqual(result, {0, 1, 2, 3})
+
@mock.patch.object(fakelibvirt.virConnect, "defineXML")
def test_write_instance_config(self, mock_defineXML):
fake_dom_xml = """
diff --git a/nova/tests/unit/virt/test_hardware.py b/nova/tests/unit/virt/test_hardware.py
index 016c478f8c..753ee41550 100644
--- a/nova/tests/unit/virt/test_hardware.py
+++ b/nova/tests/unit/virt/test_hardware.py
@@ -5364,7 +5364,7 @@ class MemEncryptionRequestedWithoutUEFITestCase(
expected_error = (
"Memory encryption requested by %(requesters)s but image "
"%(image_name)s doesn't have 'hw_firmware_type' property "
- "set to 'uefi'"
+ "set to 'uefi' or volume-backed instance was requested"
)
def _test_encrypted_memory_support_no_uefi(self, enc_extra_spec,
@@ -5491,6 +5491,25 @@ class MemEncryptionRequiredTestCase(test.NoDBTestCase):
(self.flavor_name, self.image_id)
)
+ def test_encrypted_memory_support_flavor_for_volume(self):
+ extra_specs = {'hw:mem_encryption': True}
+
+ flavor = objects.Flavor(name=self.flavor_name,
+ extra_specs=extra_specs)
+        # The following image_meta is typical for a root Cinder volume.
+ image_meta = objects.ImageMeta.from_dict({
+ 'min_disk': 0,
+ 'min_ram': 0,
+ 'properties': {},
+ 'size': 0,
+ 'status': 'active'})
+        # Confirm that exception.FlavorImageConflict is raised when a
+        # flavor with the hw:mem_encryption flag is used to create a
+        # volume-backed instance.
+ self.assertRaises(exception.FlavorImageConflict,
+ hw.get_mem_encryption_constraint, flavor,
+ image_meta)
+
class PCINUMAAffinityPolicyTest(test.NoDBTestCase):
diff --git a/nova/utils.py b/nova/utils.py
index 664056a09f..b5d45c58b5 100644
--- a/nova/utils.py
+++ b/nova/utils.py
@@ -632,15 +632,13 @@ def _serialize_profile_info():
return trace_info
-def spawn(func, *args, **kwargs):
- """Passthrough method for eventlet.spawn.
-
- This utility exists so that it can be stubbed for testing without
- interfering with the service spawns.
+def pass_context(runner, func, *args, **kwargs):
+ """Generalised passthrough method
- It will also grab the context from the threadlocal store and add it to
- the store on the new thread. This allows for continuity in logging the
- context when using this method to spawn a new thread.
+ It will grab the context from the threadlocal store and add it to
+ the store on the runner. This allows for continuity in logging the
+ context when using this method to spawn a new thread through the
+    runner function.
"""
_context = common_context.get_current()
profiler_info = _serialize_profile_info()
@@ -655,11 +653,11 @@ def spawn(func, *args, **kwargs):
profiler.init(**profiler_info)
return func(*args, **kwargs)
- return eventlet.spawn(context_wrapper, *args, **kwargs)
+ return runner(context_wrapper, *args, **kwargs)
-def spawn_n(func, *args, **kwargs):
- """Passthrough method for eventlet.spawn_n.
+def spawn(func, *args, **kwargs):
+ """Passthrough method for eventlet.spawn.
This utility exists so that it can be stubbed for testing without
interfering with the service spawns.
@@ -668,25 +666,26 @@ def spawn_n(func, *args, **kwargs):
the store on the new thread. This allows for continuity in logging the
context when using this method to spawn a new thread.
"""
- _context = common_context.get_current()
- profiler_info = _serialize_profile_info()
- @functools.wraps(func)
- def context_wrapper(*args, **kwargs):
- # NOTE: If update_store is not called after spawn_n it won't be
- # available for the logger to pull from threadlocal storage.
- if _context is not None:
- _context.update_store()
- if profiler_info and profiler:
- profiler.init(**profiler_info)
- func(*args, **kwargs)
+ return pass_context(eventlet.spawn, func, *args, **kwargs)
+
+
+def spawn_n(func, *args, **kwargs):
+ """Passthrough method for eventlet.spawn_n.
+
+ This utility exists so that it can be stubbed for testing without
+ interfering with the service spawns.
- eventlet.spawn_n(context_wrapper, *args, **kwargs)
+ It will also grab the context from the threadlocal store and add it to
+ the store on the new thread. This allows for continuity in logging the
+ context when using this method to spawn a new thread.
+ """
+ pass_context(eventlet.spawn_n, func, *args, **kwargs)
def tpool_execute(func, *args, **kwargs):
"""Run func in a native thread"""
- tpool.execute(func, *args, **kwargs)
+ return pass_context(tpool.execute, func, *args, **kwargs)
def is_none_string(val):
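A minimal standalone sketch of the ``pass_context`` pattern introduced above, with ``threading`` standing in for eventlet and a thread-local attribute standing in for the oslo context store (all names here are illustrative, not nova's):

```python
# Standalone sketch of pass_context(): capture the caller's context,
# then re-publish it on whatever thread the runner uses.
import functools
import threading

_store = threading.local()  # stands in for the oslo threadlocal store


def pass_context(runner, func, *args, **kwargs):
    _context = getattr(_store, 'context', None)

    @functools.wraps(func)
    def context_wrapper(*args, **kwargs):
        if _context is not None:
            _store.context = _context  # restore on the new thread
        return func(*args, **kwargs)

    return runner(context_wrapper, *args, **kwargs)


def spawn_thread(target, *args, **kwargs):
    # Plays the role of eventlet.spawn / tpool.execute here.
    t = threading.Thread(target=target, args=args, kwargs=kwargs)
    t.start()
    return t


_store.context = {'request_id': 'req-123'}
pass_context(spawn_thread, lambda: print(_store.context))
```

With this helper in place, ``spawn``, ``spawn_n`` and ``tpool_execute`` each reduce to a one-line delegation that passes its own runner, as the diff above shows.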
diff --git a/nova/virt/driver.py b/nova/virt/driver.py
index b6297bb785..5d42a392d8 100644
--- a/nova/virt/driver.py
+++ b/nova/virt/driver.py
@@ -1596,8 +1596,10 @@ class ComputeDriver(object):
"""
raise NotImplementedError()
- def get_available_node_uuids(self, refresh=False):
- return [nova.virt.node.get_local_node_uuid()]
+ def get_nodenames_by_uuid(self, refresh=False):
+ """Returns a dict of {uuid: nodename} for all managed nodes."""
+ nodename = self.get_available_nodes()[0]
+ return {nova.virt.node.get_local_node_uuid(): nodename}
def node_is_available(self, nodename):
"""Return whether this compute service manages a particular node."""
diff --git a/nova/virt/fake.py b/nova/virt/fake.py
index f86db8fcb2..bf7dc8fc72 100644
--- a/nova/virt/fake.py
+++ b/nova/virt/fake.py
@@ -49,6 +49,7 @@ from nova.objects import migrate_data
from nova.virt import driver
from nova.virt import hardware
from nova.virt.ironic import driver as ironic
+import nova.virt.node
from nova.virt import virtapi
CONF = nova.conf.CONF
@@ -510,7 +511,7 @@ class FakeDriver(driver.ComputeDriver):
# control our node uuids. Make sure we return a unique and
# consistent uuid for each node we are responsible for to
# avoid the persistent local node identity from taking over.
- host_status['uuid'] = str(getattr(uuids, nodename))
+ host_status['uuid'] = str(getattr(uuids, 'node_%s' % nodename))
return host_status
def update_provider_tree(self, provider_tree, nodename, allocations=None):
@@ -653,8 +654,9 @@ class FakeDriver(driver.ComputeDriver):
def get_available_nodes(self, refresh=False):
return self._nodes
- def get_available_node_uuids(self, refresh=False):
- return [str(getattr(uuids, n)) for n in self.get_available_nodes()]
+ def get_nodenames_by_uuid(self, refresh=False):
+ return {str(getattr(uuids, 'node_%s' % n)): n
+ for n in self.get_available_nodes()}
def instance_on_disk(self, instance):
return False
@@ -1129,3 +1131,22 @@ class EphEncryptionDriverPLAIN(MediumFakeDriver):
FakeDriver.capabilities,
supports_ephemeral_encryption=True,
supports_ephemeral_encryption_plain=True)
+
+
+class FakeDriverWithoutFakeNodes(FakeDriver):
+ """FakeDriver that behaves like a real single-node driver.
+
+    This behaves like a real virt driver from the perspective of its
+    nodes, with a stable nodename, and uses the global node identity
+    machinery to provide a stable node UUID.
+ """
+
+ def get_available_resource(self, nodename):
+ resources = super().get_available_resource(nodename)
+ resources['uuid'] = nova.virt.node.get_local_node_uuid()
+ return resources
+
+ def get_nodenames_by_uuid(self, refresh=False):
+ return {
+ nova.virt.node.get_local_node_uuid(): self.get_available_nodes()[0]
+ }
diff --git a/nova/virt/hardware.py b/nova/virt/hardware.py
index 96a7198db2..c8f8bb2481 100644
--- a/nova/virt/hardware.py
+++ b/nova/virt/hardware.py
@@ -1213,10 +1213,13 @@ def _check_for_mem_encryption_requirement_conflicts(
"image %(image_name)s which has hw_mem_encryption property "
"explicitly set to %(image_val)s"
)
+ # image_meta.name is not set if image object represents root
+ # Cinder volume.
+ image_name = (image_meta.name if 'name' in image_meta else None)
data = {
'flavor_name': flavor.name,
'flavor_val': flavor_mem_enc_str,
- 'image_name': image_meta.name,
+ 'image_name': image_name,
'image_val': image_mem_enc,
}
raise exception.FlavorImageConflict(emsg % data)
@@ -1228,10 +1231,15 @@ def _check_mem_encryption_uses_uefi_image(requesters, image_meta):
emsg = _(
"Memory encryption requested by %(requesters)s but image "
- "%(image_name)s doesn't have 'hw_firmware_type' property set to 'uefi'"
+ "%(image_name)s doesn't have 'hw_firmware_type' property set to "
+ "'uefi' or volume-backed instance was requested"
)
+    # image_meta.name is not set if the image object represents a root
+    # Cinder volume; in this case FlavorImageConflict should still be
+    # raised, but image_meta.name can't be extracted.
+ image_name = (image_meta.name if 'name' in image_meta else None)
data = {'requesters': " and ".join(requesters),
- 'image_name': image_meta.name}
+ 'image_name': image_name}
raise exception.FlavorImageConflict(emsg % data)
@@ -1260,12 +1268,14 @@ def _check_mem_encryption_machine_type(image_meta, machine_type=None):
if mach_type is None:
return
+ # image_meta.name is not set if image object represents root Cinder volume.
+ image_name = (image_meta.name if 'name' in image_meta else None)
# Could be something like pc-q35-2.11 if a specific version of the
# machine type is required, so do substring matching.
if 'q35' not in mach_type:
raise exception.InvalidMachineType(
mtype=mach_type,
- image_id=image_meta.id, image_name=image_meta.name,
+ image_id=image_meta.id, image_name=image_name,
reason=_("q35 type is required for SEV to work"))
diff --git a/nova/virt/ironic/driver.py b/nova/virt/ironic/driver.py
index b437b4a959..77fefb81ea 100644
--- a/nova/virt/ironic/driver.py
+++ b/nova/virt/ironic/driver.py
@@ -839,8 +839,12 @@ class IronicDriver(virt_driver.ComputeDriver):
return node_uuids
- def get_available_node_uuids(self, refresh=False):
- return self.get_available_nodes(refresh=refresh)
+ def get_nodenames_by_uuid(self, refresh=False):
+ nodes = self.get_available_nodes(refresh=refresh)
+ # We use the uuid for compute_node.uuid and
+ # compute_node.hypervisor_hostname, so the dict keys and values are
+ # the same.
+ return dict(zip(nodes, nodes))
def update_provider_tree(self, provider_tree, nodename, allocations=None):
"""Update a ProviderTree object with current resource provider and
diff --git a/nova/virt/libvirt/config.py b/nova/virt/libvirt/config.py
index 0db2dc6b67..231283b8dd 100644
--- a/nova/virt/libvirt/config.py
+++ b/nova/virt/libvirt/config.py
@@ -2047,6 +2047,12 @@ class LibvirtConfigGuestGraphics(LibvirtConfigGuestDevice):
self.keymap = None
self.listen = None
+ self.image_compression = None
+ self.jpeg_compression = None
+ self.zlib_compression = None
+ self.playback_compression = None
+ self.streaming_mode = None
+
def format_dom(self):
dev = super(LibvirtConfigGuestGraphics, self).format_dom()
@@ -2057,6 +2063,24 @@ class LibvirtConfigGuestGraphics(LibvirtConfigGuestDevice):
if self.listen:
dev.set("listen", self.listen)
+ if self.type == "spice":
+ if self.image_compression is not None:
+ dev.append(etree.Element(
+ 'image', compression=self.image_compression))
+ if self.jpeg_compression is not None:
+ dev.append(etree.Element(
+ 'jpeg', compression=self.jpeg_compression))
+ if self.zlib_compression is not None:
+ dev.append(etree.Element(
+ 'zlib', compression=self.zlib_compression))
+ if self.playback_compression is not None:
+ dev.append(etree.Element(
+ 'playback', compression=self.get_on_off_str(
+ self.playback_compression)))
+ if self.streaming_mode is not None:
+ dev.append(etree.Element(
+ 'streaming', mode=self.streaming_mode))
+
return dev
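A rough sketch of the guest XML shape these new branches produce, built with lxml directly; the values mirror the new unit test above, and it assumes ``get_on_off_str`` maps ``False`` to ``'off'``:

```python
# Sketch of the <graphics type="spice"> element emitted by the new
# format_dom() branches, using lxml directly.
from lxml import etree

dev = etree.Element('graphics', type='spice', listen='10.0.0.1')
dev.append(etree.Element('image', compression='auto_lz'))
dev.append(etree.Element('jpeg', compression='never'))
dev.append(etree.Element('zlib', compression='always'))
dev.append(etree.Element('playback', compression='off'))
dev.append(etree.Element('streaming', mode='all'))

print(etree.tostring(dev, pretty_print=True).decode())
```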
diff --git a/nova/virt/libvirt/cpu/__init__.py b/nova/virt/libvirt/cpu/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/nova/virt/libvirt/cpu/__init__.py
diff --git a/nova/virt/libvirt/cpu/api.py b/nova/virt/libvirt/cpu/api.py
new file mode 100644
index 0000000000..1c17458d6b
--- /dev/null
+++ b/nova/virt/libvirt/cpu/api.py
@@ -0,0 +1,157 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from dataclasses import dataclass
+
+from oslo_log import log as logging
+
+import nova.conf
+from nova import exception
+from nova.i18n import _
+from nova import objects
+from nova.virt import hardware
+from nova.virt.libvirt.cpu import core
+
+LOG = logging.getLogger(__name__)
+
+CONF = nova.conf.CONF
+
+
+@dataclass
+class Core:
+ """Class to model a CPU core as reported by sysfs.
+
+    It may be a physical CPU core or a hardware thread on a shared CPU
+    core, depending on whether the system supports SMT.
+ """
+
+ # NOTE(sbauza): ident is a mandatory field.
+ # The CPU core id/number
+ ident: int
+
+ @property
+ def online(self) -> bool:
+ return core.get_online(self.ident)
+
+ @online.setter
+ def online(self, state: bool) -> None:
+ if state:
+ core.set_online(self.ident)
+ else:
+ core.set_offline(self.ident)
+
+ def __hash__(self):
+ return hash(self.ident)
+
+ def __eq__(self, other):
+ return self.ident == other.ident
+
+ def __str__(self):
+ return str(self.ident)
+
+ @property
+ def governor(self) -> str:
+ return core.get_governor(self.ident)
+
+ def set_high_governor(self) -> None:
+ core.set_governor(self.ident, CONF.libvirt.cpu_power_governor_high)
+
+ def set_low_governor(self) -> None:
+ core.set_governor(self.ident, CONF.libvirt.cpu_power_governor_low)
+
+
+def power_up(instance: objects.Instance) -> None:
+ if not CONF.libvirt.cpu_power_management:
+ return
+ if instance.numa_topology is None:
+ return
+
+ cpu_dedicated_set = hardware.get_cpu_dedicated_set() or set()
+ pcpus = instance.numa_topology.cpu_pinning
+ powered_up = set()
+ for pcpu in pcpus:
+ if pcpu in cpu_dedicated_set:
+ pcpu = Core(pcpu)
+ if CONF.libvirt.cpu_power_management_strategy == 'cpu_state':
+ pcpu.online = True
+ else:
+ pcpu.set_high_governor()
+ powered_up.add(str(pcpu))
+ LOG.debug("Cores powered up : %s", powered_up)
+
+
+def power_down(instance: objects.Instance) -> None:
+ if not CONF.libvirt.cpu_power_management:
+ return
+ if instance.numa_topology is None:
+ return
+
+ cpu_dedicated_set = hardware.get_cpu_dedicated_set() or set()
+ pcpus = instance.numa_topology.cpu_pinning
+ powered_down = set()
+ for pcpu in pcpus:
+ if pcpu in cpu_dedicated_set:
+ pcpu = Core(pcpu)
+ if CONF.libvirt.cpu_power_management_strategy == 'cpu_state':
+ pcpu.online = False
+ else:
+ pcpu.set_low_governor()
+ powered_down.add(str(pcpu))
+ LOG.debug("Cores powered down : %s", powered_down)
+
+
+def power_down_all_dedicated_cpus() -> None:
+ if not CONF.libvirt.cpu_power_management:
+ return
+    if not CONF.compute.cpu_dedicated_set:
+ msg = _("'[compute]/cpu_dedicated_set' is mandatory to be set if "
+ "'[libvirt]/cpu_power_management' is set."
+ "Please provide the CPUs that can be pinned or don't use the "
+ "power management if you only use shared CPUs.")
+ raise exception.InvalidConfiguration(msg)
+
+ cpu_dedicated_set = hardware.get_cpu_dedicated_set() or set()
+ for pcpu in cpu_dedicated_set:
+ pcpu = Core(pcpu)
+ if CONF.libvirt.cpu_power_management_strategy == 'cpu_state':
+ pcpu.online = False
+ else:
+ pcpu.set_low_governor()
+ LOG.debug("Cores powered down : %s", cpu_dedicated_set)
+
+
+def validate_all_dedicated_cpus() -> None:
+ if not CONF.libvirt.cpu_power_management:
+ return
+ cpu_dedicated_set = hardware.get_cpu_dedicated_set() or set()
+ governors = set()
+ cpu_states = set()
+ for pcpu in cpu_dedicated_set:
+ pcpu = Core(pcpu)
+        # we need to collect the governor strategies and the CPU states
+ governors.add(pcpu.governor)
+ cpu_states.add(pcpu.online)
+ if CONF.libvirt.cpu_power_management_strategy == 'cpu_state':
+ # all the cores need to have the same governor strategy
+ if len(governors) > 1:
+ msg = _("All the cores need to have the same governor strategy"
+ "before modifying the CPU states. You can reboot the "
+ "compute node if you prefer.")
+ raise exception.InvalidConfiguration(msg)
+ elif CONF.libvirt.cpu_power_management_strategy == 'governor':
+ # all the cores need to be online
+ if False in cpu_states:
+ msg = _("All the cores need to be online before modifying the "
+ "governor strategy.")
+ raise exception.InvalidConfiguration(msg)
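A minimal runnable sketch of the ``cpu_state`` flow driven by the ``Core`` dataclass above, with the sysfs-backed ``core`` module replaced by a dict so it can run anywhere:

```python
# Runnable sketch of the 'cpu_state' strategy; a dict stands in for the
# sysfs state toggled by the real nova.virt.libvirt.cpu.core module.
from dataclasses import dataclass

_online = {2: True, 3: True}  # pretend sysfs online state


@dataclass
class Core:
    ident: int

    @property
    def online(self) -> bool:
        return _online[self.ident]

    @online.setter
    def online(self, state: bool) -> None:
        _online[self.ident] = state


def power_down(pinned_cpus, cpu_dedicated_set, strategy='cpu_state'):
    # Only cores that are both pinned and dedicated are touched.
    for pcpu in pinned_cpus & cpu_dedicated_set:
        core = Core(pcpu)
        if strategy == 'cpu_state':
            core.online = False
        # the 'governor' strategy would call core.set_low_governor()


power_down({2, 3}, {2, 3})
print(_online)  # {2: False, 3: False}
```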
diff --git a/nova/virt/libvirt/cpu/core.py b/nova/virt/libvirt/cpu/core.py
new file mode 100644
index 0000000000..782f028fee
--- /dev/null
+++ b/nova/virt/libvirt/cpu/core.py
@@ -0,0 +1,78 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+import typing as ty
+
+from oslo_log import log as logging
+
+from nova import exception
+from nova import filesystem
+import nova.privsep
+from nova.virt import hardware
+
+LOG = logging.getLogger(__name__)
+
+AVAILABLE_PATH = '/sys/devices/system/cpu/present'
+
+CPU_PATH_TEMPLATE = '/sys/devices/system/cpu/cpu%(core)s'
+
+
+def get_available_cores() -> ty.Set[int]:
+ cores = filesystem.read_sys(AVAILABLE_PATH)
+ return hardware.parse_cpu_spec(cores) if cores else set()
+
+
+def exists(core: int) -> bool:
+ return core in get_available_cores()
+
+
+def gen_cpu_path(core: int) -> str:
+ if not exists(core):
+ LOG.warning('Unable to access CPU: %s', core)
+        raise ValueError('CPU: %(core)s does not exist' % {'core': core})
+ return CPU_PATH_TEMPLATE % {'core': core}
+
+
+def get_online(core: int) -> bool:
+ try:
+ online = filesystem.read_sys(
+ os.path.join(gen_cpu_path(core), 'online')).strip()
+ except exception.FileNotFound:
+ # The online file may not exist if we haven't written it yet.
+ # By default, this means that the CPU is online.
+ online = '1'
+ return online == '1'
+
+
+@nova.privsep.sys_admin_pctxt.entrypoint
+def set_online(core: int) -> bool:
+ filesystem.write_sys(os.path.join(gen_cpu_path(core), 'online'), data='1')
+ return get_online(core)
+
+
+def set_offline(core: int) -> bool:
+ filesystem.write_sys(os.path.join(gen_cpu_path(core), 'online'), data='0')
+ return not get_online(core)
+
+
+def get_governor(core: int) -> str:
+ return filesystem.read_sys(
+ os.path.join(gen_cpu_path(core), 'cpufreq/scaling_governor')).strip()
+
+
+@nova.privsep.sys_admin_pctxt.entrypoint
+def set_governor(core: int, governor: str) -> str:
+ filesystem.write_sys(
+ os.path.join(gen_cpu_path(core), 'cpufreq/scaling_governor'),
+ data=governor)
+ return get_governor(core)
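For reference, a plain-file sketch of the sysfs read behind ``get_online()``; nova's ``filesystem.read_sys``/``write_sys`` wrap this kind of I/O with privilege separation, which is omitted here:

```python
# Plain-file sketch of the sysfs mechanics used by get_online().
import os

SYS_CPU = '/sys/devices/system/cpu'


def get_online(core: int) -> bool:
    path = os.path.join(SYS_CPU, 'cpu%d' % core, 'online')
    try:
        with open(path) as f:
            return f.read().strip() == '1'
    except FileNotFoundError:
        # cpu0 usually has no 'online' file and is always online, which
        # matches the FileNotFound fallback in core.py above.
        return True


if __name__ == '__main__':
    print(get_online(0))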
diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py
index 4b34759bb7..fe48960296 100644
--- a/nova/virt/libvirt/driver.py
+++ b/nova/virt/libvirt/driver.py
@@ -114,6 +114,7 @@ from nova.virt.image import model as imgmodel
from nova.virt import images
from nova.virt.libvirt import blockinfo
from nova.virt.libvirt import config as vconfig
+from nova.virt.libvirt.cpu import api as libvirt_cpu
from nova.virt.libvirt import designer
from nova.virt.libvirt import event as libvirtevent
from nova.virt.libvirt import guest as libvirt_guest
@@ -817,6 +818,18 @@ class LibvirtDriver(driver.ComputeDriver):
"force_raw_images to True.")
raise exception.InvalidConfiguration(msg)
+        # NOTE(sbauza): We first verify whether the dedicated CPU performance
+        # state was previously modified by Nova. Note that this can raise an
+        # exception if either the governor strategies differ between the
+        # cores or if some cores are offline.
+        libvirt_cpu.validate_all_dedicated_cpus()
+        # NOTE(sbauza): We power down all dedicated CPUs, but if some
+        # instances exist that are pinned to some CPUs, we'll power those
+        # CPUs back up later when rebooting the instance in _init_instance().
+        # Note that this can raise an exception if the config options are
+        # wrongly modified.
+ libvirt_cpu.power_down_all_dedicated_cpus()
+
# TODO(sbauza): Remove this code once mediated devices are persisted
# across reboots.
self._recreate_assigned_mediated_devices()
@@ -1512,6 +1525,8 @@ class LibvirtDriver(driver.ComputeDriver):
# NOTE(GuanQiang): teardown container to avoid resource leak
if CONF.libvirt.virt_type == 'lxc':
self._teardown_container(instance)
+        # The instance is now gone, so we can power down its pinned cores.
+ libvirt_cpu.power_down(instance)
def destroy(self, context, instance, network_info, block_device_info=None,
destroy_disks=True, destroy_secrets=True):
@@ -3164,6 +3179,7 @@ class LibvirtDriver(driver.ComputeDriver):
current_power_state = guest.get_power_state(self._host)
+ libvirt_cpu.power_up(instance)
# TODO(stephenfin): Any reason we couldn't use 'self.resume' here?
guest.launch(pause=current_power_state == power_state.PAUSED)
@@ -7300,6 +7316,11 @@ class LibvirtDriver(driver.ComputeDriver):
graphics = vconfig.LibvirtConfigGuestGraphics()
graphics.type = "spice"
graphics.listen = CONF.spice.server_listen
+ graphics.image_compression = CONF.spice.image_compression
+ graphics.jpeg_compression = CONF.spice.jpeg_compression
+ graphics.zlib_compression = CONF.spice.zlib_compression
+ graphics.playback_compression = CONF.spice.playback_compression
+ graphics.streaming_mode = CONF.spice.streaming_mode
guest.add_device(graphics)
add_video_driver = True
@@ -7615,7 +7636,7 @@ class LibvirtDriver(driver.ComputeDriver):
instance: 'objects.Instance',
power_on: bool = True,
pause: bool = False,
- post_xml_callback: ty.Callable = None,
+ post_xml_callback: ty.Optional[ty.Callable] = None,
) -> libvirt_guest.Guest:
"""Create a Guest from XML.
@@ -7641,6 +7662,7 @@ class LibvirtDriver(driver.ComputeDriver):
post_xml_callback()
if power_on or pause:
+ libvirt_cpu.power_up(instance)
guest.launch(pause=pause)
return guest
@@ -7675,7 +7697,7 @@ class LibvirtDriver(driver.ComputeDriver):
block_device_info: ty.Optional[ty.Dict[str, ty.Any]],
power_on: bool = True,
vifs_already_plugged: bool = False,
- post_xml_callback: ty.Callable = None,
+ post_xml_callback: ty.Optional[ty.Callable] = None,
external_events: ty.Optional[ty.List[ty.Tuple[str, str]]] = None,
cleanup_instance_dir: bool = False,
cleanup_instance_disks: bool = False,
@@ -7745,15 +7767,18 @@ class LibvirtDriver(driver.ComputeDriver):
if not CONF.compute.cpu_dedicated_set:
return set()
- online_cpus = self._host.get_online_cpus()
+ if CONF.libvirt.cpu_power_management:
+ available_cpus = self._host.get_available_cpus()
+ else:
+ available_cpus = self._host.get_online_cpus()
dedicated_cpus = hardware.get_cpu_dedicated_set()
- if not dedicated_cpus.issubset(online_cpus):
+ if not dedicated_cpus.issubset(available_cpus):
msg = _("Invalid '[compute] cpu_dedicated_set' config: one or "
- "more of the configured CPUs is not online. Online "
- "cpuset(s): %(online)s, configured cpuset(s): %(req)s")
+ "more of the configured CPUs is not available. Available "
+ "cpuset(s): %(available)s, configured cpuset(s): %(req)s")
raise exception.Invalid(msg % {
- 'online': sorted(online_cpus),
+ 'available': sorted(available_cpus),
'req': sorted(dedicated_cpus)})
return dedicated_cpus
@@ -11332,8 +11357,8 @@ class LibvirtDriver(driver.ComputeDriver):
def get_available_nodes(self, refresh=False):
return [self._host.get_hostname()]
- def get_available_node_uuids(self, refresh=False):
- return [self._host.get_node_uuid()]
+ def get_nodenames_by_uuid(self, refresh=False):
+ return {self._host.get_node_uuid(): self._host.get_hostname()}
def get_host_cpu_stats(self):
"""Return the current CPU state of the host."""
diff --git a/nova/virt/libvirt/event.py b/nova/virt/libvirt/event.py
index a7d2a3624f..56951dc11c 100644
--- a/nova/virt/libvirt/event.py
+++ b/nova/virt/libvirt/event.py
@@ -9,6 +9,8 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
+import typing as ty
+
from nova.virt import event
@@ -22,7 +24,10 @@ class LibvirtEvent(event.InstanceEvent):
class DeviceEvent(LibvirtEvent):
"""Base class for device related libvirt events"""
- def __init__(self, uuid: str, dev: str, timestamp: float = None):
+ def __init__(self,
+ uuid: str,
+ dev: str,
+ timestamp: ty.Optional[float] = None):
super().__init__(uuid, timestamp)
self.dev = dev
diff --git a/nova/virt/libvirt/host.py b/nova/virt/libvirt/host.py
index b986702401..9658a5791d 100644
--- a/nova/virt/libvirt/host.py
+++ b/nova/virt/libvirt/host.py
@@ -740,6 +740,14 @@ class Host(object):
return doms
+ def get_available_cpus(self):
+ """Get the set of CPUs that exist on the host.
+
+ :returns: set of CPUs, raises libvirtError on error
+ """
+ cpus, cpu_map, online = self.get_connection().getCPUMap()
+        return set(range(cpus))
+
def get_online_cpus(self):
"""Get the set of CPUs that are online on the host
diff --git a/nova/virt/libvirt/utils.py b/nova/virt/libvirt/utils.py
index adb2ec45a1..e1298ee5c8 100644
--- a/nova/virt/libvirt/utils.py
+++ b/nova/virt/libvirt/utils.py
@@ -261,8 +261,8 @@ def copy_image(
dest: str,
host: ty.Optional[str] = None,
receive: bool = False,
- on_execute: ty.Callable = None,
- on_completion: ty.Callable = None,
+ on_execute: ty.Optional[ty.Callable] = None,
+ on_completion: ty.Optional[ty.Callable] = None,
compression: bool = True,
) -> None:
"""Copy a disk image to an existing directory
@@ -639,7 +639,7 @@ def mdev_name2uuid(mdev_name: str) -> str:
return str(uuid.UUID(mdev_uuid))
-def mdev_uuid2name(mdev_uuid: str, parent: str = None) -> str:
+def mdev_uuid2name(mdev_uuid: str, parent: ty.Optional[str] = None) -> str:
"""Convert an mdev uuid (of the form 8-4-4-4-12) and optionally its parent
device to a name (of the form mdev_<uuid_with_underscores>[_<pciid>]).
diff --git a/releasenotes/notes/add-spice-compression-support-e41676f445544e8d.yaml b/releasenotes/notes/add-spice-compression-support-e41676f445544e8d.yaml
new file mode 100644
index 0000000000..b370889171
--- /dev/null
+++ b/releasenotes/notes/add-spice-compression-support-e41676f445544e8d.yaml
@@ -0,0 +1,23 @@
+---
+features:
+ - |
+ The following SPICE-related options are added to the ``spice``
+ configuration group of a Nova configuration:
+
+ - ``image_compression``
+ - ``jpeg_compression``
+ - ``zlib_compression``
+ - ``playback_compression``
+ - ``streaming_mode``
+
+    These configuration options can be used to enable and set the
+    SPICE compression settings for libvirt (QEMU/KVM) provisioned
+    instances. Each configuration option is optional and can be set
+    explicitly to configure the associated SPICE compression setting
+    for libvirt. If none of these configuration options are set, then
+    none of the SPICE compression settings will be configured for
+    libvirt, which corresponds to the behavior before this change. In
+    this case, the built-in defaults from the libvirt backend (e.g.
+    QEMU) are used.
+
+    Note that these options are only taken into account if SPICE
+    support is enabled (and VNC support is disabled).
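A hedged sketch of how these options behave as defaults, modeled with oslo.config directly; the option names come from this release note, but the registration details below are illustrative rather than nova's actual definition:

```python
# Sketch: the five new [spice] options all default to None, so nothing
# is emitted into the guest XML unless the operator sets them.
from oslo_config import cfg

CONF = cfg.ConfigOpts()
CONF.register_opts([
    cfg.StrOpt('image_compression'),
    cfg.StrOpt('jpeg_compression'),
    cfg.StrOpt('zlib_compression'),
    cfg.BoolOpt('playback_compression'),
    cfg.StrOpt('streaming_mode'),
], group='spice')

CONF(args=[], default_config_files=[])
print(CONF.spice.image_compression)  # None -> no <image> element emitted
```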
diff --git a/releasenotes/notes/antelope-prelude-4a99907b00e739f8.yaml b/releasenotes/notes/antelope-prelude-4a99907b00e739f8.yaml
new file mode 100644
index 0000000000..66890684af
--- /dev/null
+++ b/releasenotes/notes/antelope-prelude-4a99907b00e739f8.yaml
@@ -0,0 +1,51 @@
+---
+prelude: |
+ The OpenStack 2023.1 (Nova 27.0.0) release includes many new features and
+ bug fixes. Please be sure to read the upgrade section which describes the
+ required actions to upgrade your cloud from 26.0.0 (Zed) to 27.0.0 (2023.1).
+ As a reminder, OpenStack 2023.1 is our first `Skip-Level-Upgrade Release`__
+  (from now on referred to as a `SLURP release`), where you can
+  rolling-upgrade your compute services from OpenStack Yoga as an
+  experimental feature. The next SLURP release will be 2024.1.
+
+ .. __: https://governance.openstack.org/tc/resolutions/20220210-release-cadence-adjustment.html
+
+ There are a few major changes worth mentioning. This is not an exhaustive
+ list:
+
+ - The latest Compute API microversion supported for 2023.1 is `v2.95`__.
+
+ .. __: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#maximum-in-2023.1
+
+ - `PCI devices can now be scheduled <https://docs.openstack.org/nova/latest/admin/pci-passthrough.html#pci-tracking-in-placement>`_
+    by Nova using the Placement API on an opt-in basis. This helps the
+    nova-scheduler service to better schedule flavors that use PCI
+    (non-Neutron related) resources, generates fewer reschedules if an
+    instance cannot be created on a candidate, and helps the
+    nova-scheduler not to miss valid candidates if the list is too large.
+
+ - Operators can now ask Nova to `manage the power consumption of dedicated
+ CPUs <https://docs.openstack.org/nova/latest/admin/cpu-topologies.html#configuring-cpu-power-management-for-dedicated-cores>`_
+ so as to either offline them or change their governor if they're
+ currently not in use by any instance or if the instance is stopped.
+
+ - Nova will prevent unexpected compute service renames by `persisting a unique
+ compute UUID on local disk <https://docs.openstack.org/nova/latest/admin/compute-node-identification.html>`_.
+ This stored UUID will be considered the source of truth for knowing whether
+    the compute service hostname has been modified. As a reminder,
+    changing a compute hostname is forbidden, particularly when the
+    compute service is currently running instances.
+
+ - `SPICE consoles <https://docs.openstack.org/nova/latest/admin/remote-console-access.html#spice-console>`_
+ can now be configured with compression settings which include choices of the
+ compression algorithm and the compression mode.
+
+ - Fully-Qualified Domain Names are now considered valid for an instance
+ hostname if you use the 2.94 API microversion.
+
+ - By opting into 2.95 API microversion, evacuated instances will remain
+ stopped on the destination host until manually started.
+
+  - Nova APIs now `support new RBAC policies by default <https://docs.openstack.org/nova/latest/configuration/policy.html>`_
+    and scopes. See our `Policy Concepts documentation <https://docs.openstack.org/nova/latest/configuration/policy-concepts.html>`_
+    for further details.
diff --git a/releasenotes/notes/bp-libvirt-cpu-state-mgmt-fbc9c1f9f473003c.yaml b/releasenotes/notes/bp-libvirt-cpu-state-mgmt-fbc9c1f9f473003c.yaml
new file mode 100644
index 0000000000..95422fce67
--- /dev/null
+++ b/releasenotes/notes/bp-libvirt-cpu-state-mgmt-fbc9c1f9f473003c.yaml
@@ -0,0 +1,18 @@
+---
+features:
+ - |
+    It is now possible to configure nova-compute services using the libvirt
+    driver by setting ``[libvirt]cpu_power_management`` to ``True`` in order
+    to let the service power physical CPUs down or up, depending on whether
+    those CPUs are pinned to instances. In order to support this feature, the
+    compute service needs to be configured with ``[compute]cpu_dedicated_set``.
+    If so, all the related CPUs will be powered down until they are used by an
+    instance, at which point the related pinned CPUs will be powered up just
+    before starting the guest. If ``[compute]cpu_dedicated_set`` isn't set,
+    then the compute service will refuse to start.
+    By default the power strategy offlines CPUs when powering down and onlines
+    them when powering up, but another strategy is possible by using
+    ``[libvirt]cpu_power_management_strategy=governor``, which instead
+    modifies the related CPU governor using the
+    ``[libvirt]cpu_power_governor_low`` and ``[libvirt]cpu_power_governor_high``
+    configuration values (respective defaults being ``powersave`` and
+    ``performance``).
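A minimal sketch of the startup guard and strategy selection this note describes; the helper names below are illustrative:

```python
# Illustrative sketch: power management without a dedicated CPU set is
# refused, and the strategy picks between offlining a core and
# lowering its governor.
def validate(cpu_power_management, cpu_dedicated_set):
    if cpu_power_management and not cpu_dedicated_set:
        raise ValueError(
            "'[compute]/cpu_dedicated_set' is mandatory when "
            "'[libvirt]/cpu_power_management' is set.")


def power_down(core, strategy='cpu_state', governor_low='powersave'):
    if strategy == 'cpu_state':
        return 'core %d: offline' % core
    return 'core %d: governor -> %s' % (core, governor_low)


validate(True, {2, 3})            # valid configuration
print(power_down(2))              # core 2: offline
print(power_down(2, 'governor'))  # core 2: governor -> powersave
try:
    validate(True, set())         # service would refuse to start
except ValueError as exc:
    print(exc)
```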
diff --git a/releasenotes/notes/port-binding-removed-shelved-offloaded-f1772a64be007b24.yaml b/releasenotes/notes/port-binding-removed-shelved-offloaded-f1772a64be007b24.yaml
new file mode 100644
index 0000000000..7e2dccbbf4
--- /dev/null
+++ b/releasenotes/notes/port-binding-removed-shelved-offloaded-f1772a64be007b24.yaml
@@ -0,0 +1,6 @@
+---
+fixes:
+ - |
+ [`bug 1983471 <https://bugs.launchpad.net/nova/+bug/1983471>`_]
+ When offloading a shelved instance, the compute will now remove the
+ binding so instance ports will appear as "unbound" in neutron.
diff --git a/releasenotes/source/2023.1.rst b/releasenotes/source/2023.1.rst
new file mode 100644
index 0000000000..d1238479ba
--- /dev/null
+++ b/releasenotes/source/2023.1.rst
@@ -0,0 +1,6 @@
+===========================
+2023.1 Series Release Notes
+===========================
+
+.. release-notes::
+ :branch: stable/2023.1
diff --git a/releasenotes/source/index.rst b/releasenotes/source/index.rst
index 6bff00e25a..ed6f8c2d07 100644
--- a/releasenotes/source/index.rst
+++ b/releasenotes/source/index.rst
@@ -6,6 +6,7 @@ Nova Release Notes
:maxdepth: 1
unreleased
+ 2023.1
zed
yoga
xena
diff --git a/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po b/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po
index d90391af7c..c0bd8bc9a8 100644
--- a/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po
+++ b/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po
@@ -2,15 +2,16 @@
# Andi Chandler <andi@gowling.com>, 2018. #zanata
# Andi Chandler <andi@gowling.com>, 2020. #zanata
# Andi Chandler <andi@gowling.com>, 2022. #zanata
+# Andi Chandler <andi@gowling.com>, 2023. #zanata
msgid ""
msgstr ""
"Project-Id-Version: nova\n"
"Report-Msgid-Bugs-To: \n"
-"POT-Creation-Date: 2022-09-16 12:59+0000\n"
+"POT-Creation-Date: 2023-03-06 19:02+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
-"PO-Revision-Date: 2022-09-15 09:04+0000\n"
+"PO-Revision-Date: 2023-01-26 10:17+0000\n"
"Last-Translator: Andi Chandler <andi@gowling.com>\n"
"Language-Team: English (United Kingdom)\n"
"Language: en_GB\n"
@@ -382,9 +383,6 @@ msgstr "20.5.0"
msgid "20.6.1"
msgstr "20.6.1"
-msgid "20.6.1-29"
-msgstr "20.6.1-29"
-
msgid "204 NoContent on success"
msgstr "204 NoContent on success"
@@ -409,9 +407,6 @@ msgstr "21.2.2"
msgid "21.2.3"
msgstr "21.2.3"
-msgid "21.2.4-12"
-msgstr "21.2.4-12"
-
msgid "22.0.0"
msgstr "22.0.0"
@@ -433,9 +428,6 @@ msgstr "22.3.0"
msgid "22.4.0"
msgstr "22.4.0"
-msgid "22.4.0-6"
-msgstr "22.4.0-6"
-
msgid "23.0.0"
msgstr "23.0.0"
@@ -451,8 +443,8 @@ msgstr "23.2.0"
msgid "23.2.1"
msgstr "23.2.1"
-msgid "23.2.1-13"
-msgstr "23.2.1-13"
+msgid "23.2.2"
+msgstr "23.2.2"
msgid "24.0.0"
msgstr "24.0.0"
@@ -463,8 +455,8 @@ msgstr "24.1.0"
msgid "24.1.1"
msgstr "24.1.1"
-msgid "24.1.1-7"
-msgstr "24.1.1-7"
+msgid "24.2.0"
+msgstr "24.2.0"
msgid "25.0.0"
msgstr "25.0.0"
@@ -472,8 +464,14 @@ msgstr "25.0.0"
msgid "25.0.1"
msgstr "25.0.1"
-msgid "25.0.1-5"
-msgstr "25.0.1-5"
+msgid "25.1.0"
+msgstr "25.1.0"
+
+msgid "26.0.0"
+msgstr "26.0.0"
+
+msgid "26.1.0"
+msgstr "26.1.0"
msgid "400 for unknown param for query param and for request body."
msgstr "400 for unknown param for query param and for request body."
@@ -488,6 +486,24 @@ msgid "409 Conflict if inventory in use or if some other request concurrently"
msgstr "409 Conflict if inventory in use or if some other request concurrently"
msgid ""
+"A ``--dry-run`` option has been added to the ``nova-manage placement "
+"heal_allocations`` CLI which allows running the command to get output "
+"without committing any changes to placement."
+msgstr ""
+"A ``--dry-run`` option has been added to the ``nova-manage placement "
+"heal_allocations`` CLI which allows running the command to get output "
+"without committing any changes to placement."
+
+msgid ""
+"A ``--force`` flag is provided to skip the above checks but caution should "
+"be taken as this could easily lead to the underlying ABI of the instance "
+"changing when moving between machine types."
+msgstr ""
+"A ``--force`` flag is provided to skip the above checks but caution should "
+"be taken as this could easily lead to the underlying ABI of the instance "
+"changing when moving between machine types."
+
+msgid ""
"A ``default_floating_pool`` configuration option has been added in the "
"``[neutron]`` group. The existing ``default_floating_pool`` option in the "
"``[DEFAULT]`` group is retained and should be used by nova-network users. "
@@ -571,6 +587,24 @@ msgid "Stein Series Release Notes"
msgstr "Stein Series Release Notes"
msgid ""
+"The XenServer configuration option 'iqn_prefix' has been removed. It was not "
+"used anywhere and has no effect on any code, so there should be no impact."
+msgstr ""
+"The XenServer configuration option 'iqn_prefix' has been removed. It was not "
+"used anywhere and has no effect on any code, so there should be no impact."
+
+msgid ""
+"The ``api_rate_limit`` configuration option has been removed. The option was "
+"disabled by default back in the Havana release since it's effectively broken "
+"for more than one API worker. It has been removed because the legacy v2 API "
+"code that was using it has also been removed."
+msgstr ""
+"The ``api_rate_limit`` configuration option has been removed. The option was "
+"disabled by default back in the Havana release since it's effectively broken "
+"for more than one API worker. It has been removed because the legacy v2 API "
+"code that was using it has also been removed."
+
+msgid ""
"The ``nova-manage vm list`` command is deprecated and will be removed in the "
"15.0.0 Ocata release. Use the ``nova list`` command from python-novaclient "
"instead."
@@ -580,6 +614,24 @@ msgstr ""
"instead."
msgid ""
+"The default flavors that nova has previously had are no longer created as "
+"part of the first database migration. New deployments will need to create "
+"appropriate flavors before first use."
+msgstr ""
+"The default flavours that Nova previously had are no longer created as part "
+"of the first database migration. New deployments will need to create "
+"appropriate flavours before first use."
+
+msgid ""
+"The network configuration option 'fake_call' has been removed. It hasn't "
+"been used for several cycles, and has no effect on any code, so there should "
+"be no impact."
+msgstr ""
+"The network configuration option 'fake_call' has been removed. It hasn't "
+"been used for several cycles, and has no effect on any code, so there should "
+"be no impact."
+
+msgid ""
"These commands only work with nova-network which is itself deprecated in "
"favor of Neutron."
msgstr ""
@@ -611,6 +663,9 @@ msgstr "Xena Series Release Notes"
msgid "Yoga Series Release Notes"
msgstr "Yoga Series Release Notes"
+msgid "Zed Series Release Notes"
+msgstr "Zed Series Release Notes"
+
msgid "kernels 3.x: 8"
msgstr "kernels 3.x: 8"
diff --git a/tox.ini b/tox.ini
index 097edbe827..77c0d9b9d3 100644
--- a/tox.ini
+++ b/tox.ini
@@ -42,7 +42,7 @@ commands =
env TEST_OSPROFILER=1 stestr run --combine --no-discover 'nova.tests.unit.test_profiler'
stestr slowest
-[testenv:functional{,-py38,-py39,-py310}]
+[testenv:functional{,-py38,-py39,-py310,-py311}]
description =
Run functional tests.
# As nova functional tests import the PlacementFixture from the placement