From 5d7e747d06d85ac64fa349435bf928c900f25ab4 Mon Sep 17 00:00:00 2001 From: Chris Patterson Date: Wed, 1 Mar 2023 17:14:02 -0500 Subject: sources/azure: fix regressions in IMDS behavior (#2041) There are effectively two regressions in the recent IMDS refactor: 1. The metadata check len(imds_md["interface"]) in _check_if_nic_is_primary() is no longer correct as the refactor switched URLs and did not update this call to account for the fact that this metadata now lives under "network". 2. Network metadata was fetched with infinite=True and is now limited to ten retries. This callback had the twist of only allowing up to ten connection errors but otherwise would retry indefinitely. For _check_if_nic_is_primary(): - Drop the interface count check for _check_if_nic_is_primary(), we don't need it anyway. - Fix/update the unit tests mocks that allowed the tests to pass, adding another test to verify max retries for http and connection errors. - Use 300 retries. We do not want to hit a case where we spin forever, but this should be more than enough time for IMDS to respond in the Savable PPS case (~5 minutes). For IMDS: - Consolidate IMDS retry handlers into a new ReadUrlRetryHandler class that supports the options required for each variant of request. - Minor tweaks to log and expand logging checks in unit tests. - Move all unit tests to mocking via mock_requests_session_request and replace mock_readurl fixture with wrapped_readurl to improve consistency between tests. Note that this change drops usage of `retry_on_url_exc` and can probably be removed altogether as it is no longer used AFAICT. 
Signed-off-by: Chris Patterson --- cloudinit/sources/DataSourceAzure.py | 28 +-- cloudinit/sources/azure/imds.py | 131 ++++++++------ tests/unittests/sources/azure/test_imds.py | 282 ++++++++++++++++++++++------- tests/unittests/sources/test_azure.py | 93 +++++----- 4 files changed, 343 insertions(+), 191 deletions(-) diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index b7d3e5a3..87f718bf 100644 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -680,9 +680,9 @@ class DataSourceAzure(sources.DataSource): return crawled_data @azure_ds_telemetry_reporter - def get_metadata_from_imds(self) -> Dict: + def get_metadata_from_imds(self, retries: int = 10) -> Dict: try: - return imds.fetch_metadata_with_api_fallback() + return imds.fetch_metadata_with_api_fallback(retries=retries) except (UrlError, ValueError) as error: report_diagnostic_event( "Ignoring IMDS metadata due to: %s" % error, @@ -979,11 +979,8 @@ class DataSourceAzure(sources.DataSource): raise BrokenAzureDataSource("Shutdown failure for PPS disk.") @azure_ds_telemetry_reporter - def _check_if_nic_is_primary(self, ifname): - """Check if a given interface is the primary nic or not. If it is the - primary nic, then we also get the expected total nic count from IMDS. - IMDS will process the request and send a response only for primary NIC. - """ + def _check_if_nic_is_primary(self, ifname: str) -> bool: + """Check if a given interface is the primary nic or not.""" # For now, only a VM's primary NIC can contact IMDS and WireServer. If # DHCP fails for a NIC, we have no mechanism to determine if the NIC is # primary or secondary. In this case, retry DHCP until successful. @@ -992,18 +989,11 @@ class DataSourceAzure(sources.DataSource): # Primary nic detection will be optimized in the future. The fact that # primary nic is being attached first helps here. Otherwise each nic # could add several seconds of delay. 
- imds_md = self.get_metadata_from_imds() + imds_md = self.get_metadata_from_imds(retries=300) if imds_md: # Only primary NIC will get a response from IMDS. LOG.info("%s is the primary nic", ifname) - - # Set the expected nic count based on the response received. - expected_nic_count = len(imds_md["interface"]) - report_diagnostic_event( - "Expected nic count: %d" % expected_nic_count, - logger_func=LOG.info, - ) - return True, expected_nic_count + return True # If we are not the primary nic, then clean the dhcp context. LOG.warning( @@ -1012,7 +1002,7 @@ class DataSourceAzure(sources.DataSource): ifname, ) self._teardown_ephemeral_networking() - return False, -1 + return False @azure_ds_telemetry_reporter def _wait_for_hot_attached_primary_nic(self, nl_sock): @@ -1055,9 +1045,7 @@ class DataSourceAzure(sources.DataSource): # won't be in primary_nic_found = false state for long. if not primary_nic_found: LOG.info("Checking if %s is the primary nic", ifname) - primary_nic_found, _ = self._check_if_nic_is_primary( - ifname - ) + primary_nic_found = self._check_if_nic_is_primary(ifname) # Exit criteria: check if we've discovered primary nic if primary_nic_found: diff --git a/cloudinit/sources/azure/imds.py b/cloudinit/sources/azure/imds.py index 54fc9a05..5e4046d0 100644 --- a/cloudinit/sources/azure/imds.py +++ b/cloudinit/sources/azure/imds.py @@ -2,7 +2,6 @@ # # This file is part of cloud-init. See LICENSE file for license information. 
-import functools from typing import Dict import requests @@ -10,25 +9,69 @@ import requests from cloudinit import log as logging from cloudinit import util from cloudinit.sources.helpers.azure import report_diagnostic_event -from cloudinit.url_helper import UrlError, readurl, retry_on_url_exc +from cloudinit.url_helper import UrlError, readurl LOG = logging.getLogger(__name__) IMDS_URL = "http://169.254.169.254/metadata" -_readurl_exception_callback = functools.partial( - retry_on_url_exc, - retry_codes=( - 404, # not found (yet) - 410, # gone / unavailable (yet) - 429, # rate-limited/throttled - 500, # server error - ), - retry_instances=( - requests.ConnectionError, - requests.Timeout, - ), -) + +class ReadUrlRetryHandler: + def __init__( + self, + *, + retry_codes=( + 404, # not found (yet) + 410, # gone / unavailable (yet) + 429, # rate-limited/throttled + 500, # server error + ), + max_connection_errors: int = 10, + logging_backoff: float = 1.0, + ) -> None: + self.logging_backoff = logging_backoff + self.max_connection_errors = max_connection_errors + self.retry_codes = retry_codes + self._logging_threshold = 1.0 + self._request_count = 0 + + def exception_callback(self, req_args, exception) -> bool: + self._request_count += 1 + if not isinstance(exception, UrlError): + report_diagnostic_event( + "Polling IMDS failed with unexpected exception: %r" + % (exception), + logger_func=LOG.warning, + ) + return False + + log = True + retry = True + + # Check for connection errors which may occur early boot, but + # are otherwise indicative that we are not connecting with the + # primary NIC. 
+ if isinstance( + exception.cause, (requests.ConnectionError, requests.Timeout) + ): + self.max_connection_errors -= 1 + if self.max_connection_errors < 0: + retry = False + elif exception.code not in self.retry_codes: + retry = False + + if self._request_count >= self._logging_threshold: + self._logging_threshold *= self.logging_backoff + else: + log = False + + if log or not retry: + report_diagnostic_event( + "Polling IMDS failed attempt %d with exception: %r" + % (self._request_count, exception), + logger_func=LOG.info, + ) + return retry def _fetch_url( @@ -38,11 +81,12 @@ def _fetch_url( :raises UrlError: on error fetching metadata. """ + handler = ReadUrlRetryHandler() try: response = readurl( url, - exception_cb=_readurl_exception_callback, + exception_cb=handler.exception_callback, headers={"Metadata": "true"}, infinite=False, log_req_resp=log_response, @@ -61,13 +105,14 @@ def _fetch_url( def _fetch_metadata( url: str, + retries: int = 10, ) -> Dict: """Fetch IMDS metadata. :raises UrlError: on error fetching metadata. :raises ValueError: on error parsing metadata. """ - metadata = _fetch_url(url) + metadata = _fetch_url(url, retries=retries) try: return util.load_json(metadata) @@ -79,7 +124,7 @@ def _fetch_metadata( raise -def fetch_metadata_with_api_fallback() -> Dict: +def fetch_metadata_with_api_fallback(retries: int = 10) -> Dict: """Fetch extended metadata, falling back to non-extended as required. :raises UrlError: on error fetching metadata. 
@@ -87,7 +132,7 @@ def fetch_metadata_with_api_fallback() -> Dict: """ try: url = IMDS_URL + "/instance?api-version=2021-08-01&extended=true" - return _fetch_metadata(url) + return _fetch_metadata(url, retries=retries) except UrlError as error: if error.code == 400: report_diagnostic_event( @@ -95,7 +140,7 @@ def fetch_metadata_with_api_fallback() -> Dict: logger_func=LOG.warning, ) url = IMDS_URL + "/instance?api-version=2019-06-01" - return _fetch_metadata(url) + return _fetch_metadata(url, retries=retries) raise @@ -106,43 +151,17 @@ def fetch_reprovision_data() -> bytes: """ url = IMDS_URL + "/reprovisiondata?api-version=2019-06-01" - logging_threshold = 1 - poll_counter = 0 - - def exception_callback(msg, exception): - nonlocal logging_threshold - nonlocal poll_counter - - poll_counter += 1 - if not isinstance(exception, UrlError): - report_diagnostic_event( - "Polling IMDS failed with unexpected exception: %r" - % (exception), - logger_func=LOG.warning, - ) - return False - - log = True - retry = False - if exception.code in (404, 410): - retry = True - if poll_counter >= logging_threshold: - # Exponential back-off on logging. 
- logging_threshold *= 2 - else: - log = False - - if log: - report_diagnostic_event( - "Polling IMDS failed with exception: %r count: %d" - % (exception, poll_counter), - logger_func=LOG.info, - ) - return retry - + handler = ReadUrlRetryHandler( + logging_backoff=2.0, + max_connection_errors=0, + retry_codes=( + 404, + 410, + ), + ) response = readurl( url, - exception_cb=exception_callback, + exception_cb=handler.exception_callback, headers={"Metadata": "true"}, infinite=True, log_req_resp=False, @@ -150,7 +169,7 @@ def fetch_reprovision_data() -> bytes: ) report_diagnostic_event( - f"Polled IMDS {poll_counter+1} time(s)", + f"Polled IMDS {handler._request_count+1} time(s)", logger_func=LOG.debug, ) return response.contents diff --git a/tests/unittests/sources/azure/test_imds.py b/tests/unittests/sources/azure/test_imds.py index b5a72645..03f66502 100644 --- a/tests/unittests/sources/azure/test_imds.py +++ b/tests/unittests/sources/azure/test_imds.py @@ -3,20 +3,30 @@ import json import logging import math +import re from unittest import mock import pytest import requests from cloudinit.sources.azure import imds -from cloudinit.url_helper import UrlError +from cloudinit.url_helper import UrlError, readurl -MOCKPATH = "cloudinit.sources.azure.imds." +LOG_PATH = "cloudinit.sources.azure.imds" +MOCK_PATH = "cloudinit.sources.azure.imds." 
+ + +class StringMatch: + def __init__(self, regex) -> None: + self.regex = regex + + def __eq__(self, other) -> bool: + return bool(re.match("^" + self.regex + "$", other)) @pytest.fixture -def mock_readurl(): - with mock.patch(MOCKPATH + "readurl", autospec=True) as m: +def wrapped_readurl(): + with mock.patch.object(imds, "readurl", wraps=readurl) as m: yield m @@ -56,54 +66,63 @@ class TestFetchMetadataWithApiFallback: def test_basic( self, caplog, - mock_readurl, + mock_requests_session_request, + wrapped_readurl, ): fake_md = {"foo": {"bar": []}} - mock_readurl.side_effect = [ - mock.Mock(contents=json.dumps(fake_md).encode()), + mock_requests_session_request.side_effect = [ + mock.Mock(content=json.dumps(fake_md)), ] md = imds.fetch_metadata_with_api_fallback() assert md == fake_md - assert mock_readurl.mock_calls == [ + assert wrapped_readurl.mock_calls == [ mock.call( self.default_url, timeout=self.timeout, headers=self.headers, retries=self.retries, - exception_cb=imds._readurl_exception_callback, + exception_cb=mock.ANY, infinite=False, log_req_resp=True, - ), + ) ] - - warnings = [ - x.message for x in caplog.records if x.levelno == logging.WARNING + assert caplog.record_tuples == [ + ( + "cloudinit.url_helper", + logging.DEBUG, + StringMatch(r"\[0/11\] open.*"), + ), + ( + "cloudinit.url_helper", + logging.DEBUG, + StringMatch("Read from.*"), + ), ] - assert warnings == [] def test_basic_fallback( self, caplog, - mock_readurl, + mock_requests_session_request, + wrapped_readurl, ): fake_md = {"foo": {"bar": []}} - mock_readurl.side_effect = [ + mock_requests_session_request.side_effect = [ UrlError("No IMDS version", code=400), - mock.Mock(contents=json.dumps(fake_md).encode()), + mock.Mock(content=json.dumps(fake_md)), ] md = imds.fetch_metadata_with_api_fallback() assert md == fake_md - assert mock_readurl.mock_calls == [ + assert wrapped_readurl.mock_calls == [ mock.call( self.default_url, timeout=self.timeout, headers=self.headers, 
retries=self.retries, - exception_cb=imds._readurl_exception_callback, + exception_cb=mock.ANY, infinite=False, log_req_resp=True, ), @@ -112,18 +131,38 @@ class TestFetchMetadataWithApiFallback: timeout=self.timeout, headers=self.headers, retries=self.retries, - exception_cb=imds._readurl_exception_callback, + exception_cb=mock.ANY, infinite=False, log_req_resp=True, ), ] - warnings = [ - x.message for x in caplog.records if x.levelno == logging.WARNING - ] - assert warnings == [ - "Failed to fetch metadata from IMDS: No IMDS version", - "Falling back to IMDS api-version: 2019-06-01", + assert caplog.record_tuples == [ + ( + "cloudinit.url_helper", + logging.DEBUG, + StringMatch(r"\[0/11\] open.*"), + ), + ( + LOG_PATH, + logging.WARNING, + "Failed to fetch metadata from IMDS: No IMDS version", + ), + ( + LOG_PATH, + logging.WARNING, + "Falling back to IMDS api-version: 2019-06-01", + ), + ( + "cloudinit.url_helper", + logging.DEBUG, + StringMatch(r"\[0/11\] open.*"), + ), + ( + "cloudinit.url_helper", + logging.DEBUG, + StringMatch("Read from.*"), + ), ] @pytest.mark.parametrize( @@ -155,11 +194,36 @@ class TestFetchMetadataWithApiFallback: assert md == fake_md assert len(mock_requests_session_request.mock_calls) == 2 assert mock_url_helper_time_sleep.mock_calls == [mock.call(1)] - - warnings = [ - x.message for x in caplog.records if x.levelno == logging.WARNING + assert caplog.record_tuples == [ + ( + "cloudinit.url_helper", + logging.DEBUG, + StringMatch(r"\[0/11\] open.*"), + ), + ( + LOG_PATH, + logging.INFO, + StringMatch( + "Polling IMDS failed attempt 1 with exception:" + f".*{error!s}.*" + ), + ), + ( + "cloudinit.url_helper", + logging.DEBUG, + StringMatch("Please wait 1 second.*"), + ), + ( + "cloudinit.url_helper", + logging.DEBUG, + StringMatch(r"\[1/11\] open.*"), + ), + ( + "cloudinit.url_helper", + logging.DEBUG, + StringMatch("Read from.*"), + ), ] - assert warnings == [] def test_will_retry_errors_on_fallback( self, @@ -180,13 +244,58 @@ class 
TestFetchMetadataWithApiFallback: assert md == fake_md assert len(mock_requests_session_request.mock_calls) == 3 assert mock_url_helper_time_sleep.mock_calls == [mock.call(1)] - - warnings = [ - x.message for x in caplog.records if x.levelno == logging.WARNING - ] - assert warnings == [ - "Failed to fetch metadata from IMDS: fake error", - "Falling back to IMDS api-version: 2019-06-01", + assert caplog.record_tuples == [ + ( + "cloudinit.url_helper", + logging.DEBUG, + StringMatch(r"\[0/11\] open.*"), + ), + ( + LOG_PATH, + logging.INFO, + StringMatch( + "Polling IMDS failed attempt 1 with exception:" + f".*{error!s}.*" + ), + ), + ( + LOG_PATH, + logging.WARNING, + "Failed to fetch metadata from IMDS: fake error", + ), + ( + LOG_PATH, + logging.WARNING, + "Falling back to IMDS api-version: 2019-06-01", + ), + ( + "cloudinit.url_helper", + logging.DEBUG, + StringMatch(r"\[0/11\] open.*"), + ), + ( + LOG_PATH, + logging.INFO, + StringMatch( + "Polling IMDS failed attempt 1 with exception:" + f".*{error!s}.*" + ), + ), + ( + "cloudinit.url_helper", + logging.DEBUG, + StringMatch("Please wait 1 second.*"), + ), + ( + "cloudinit.url_helper", + logging.DEBUG, + StringMatch(r"\[1/11\] open.*"), + ), + ( + "cloudinit.url_helper", + logging.DEBUG, + StringMatch("Read from.*"), + ), ] @pytest.mark.parametrize( @@ -221,10 +330,24 @@ class TestFetchMetadataWithApiFallback: == [mock.call(1)] * self.retries ) - warnings = [ - x.message for x in caplog.records if x.levelno == logging.WARNING + logs = [x for x in caplog.record_tuples if x[0] == LOG_PATH] + assert logs == [ + ( + LOG_PATH, + logging.INFO, + StringMatch( + f"Polling IMDS failed attempt {i} with exception:" + f".*{error!s}.*" + ), + ) + for i in range(1, 12) + ] + [ + ( + LOG_PATH, + logging.WARNING, + f"Failed to fetch metadata from IMDS: {error!s}", + ) ] - assert warnings == [f"Failed to fetch metadata from IMDS: {error!s}"] @pytest.mark.parametrize( "error", @@ -253,30 +376,47 @@ class 
TestFetchMetadataWithApiFallback: assert len(mock_requests_session_request.mock_calls) == 1 assert mock_url_helper_time_sleep.mock_calls == [] - warnings = [ - x.message for x in caplog.records if x.levelno == logging.WARNING + assert caplog.record_tuples == [ + ( + "cloudinit.url_helper", + logging.DEBUG, + StringMatch(r"\[0/11\] open.*"), + ), + ( + LOG_PATH, + logging.INFO, + StringMatch( + "Polling IMDS failed attempt 1 with exception:" + f".*{error!s}.*" + ), + ), + ( + LOG_PATH, + logging.WARNING, + f"Failed to fetch metadata from IMDS: {error!s}", + ), ] - assert warnings == [f"Failed to fetch metadata from IMDS: {error!s}"] def test_non_json_repsonse( self, caplog, - mock_readurl, + mock_requests_session_request, + wrapped_readurl, ): - mock_readurl.side_effect = [ - mock.Mock(contents=b"bad data"), + mock_requests_session_request.side_effect = [ + mock.Mock(content=b"bad data") ] with pytest.raises(ValueError): imds.fetch_metadata_with_api_fallback() - assert mock_readurl.mock_calls == [ + assert wrapped_readurl.mock_calls == [ mock.call( self.default_url, timeout=self.timeout, headers=self.headers, retries=self.retries, - exception_cb=imds._readurl_exception_callback, + exception_cb=mock.ANY, infinite=False, log_req_resp=True, ), @@ -304,17 +444,18 @@ class TestFetchReprovisionData: def test_basic( self, caplog, - mock_readurl, + mock_requests_session_request, + wrapped_readurl, ): content = b"ovf content" - mock_readurl.side_effect = [ - mock.Mock(contents=content), + mock_requests_session_request.side_effect = [ + mock.Mock(content=content), ] ovf = imds.fetch_reprovision_data() assert ovf == content - assert mock_readurl.mock_calls == [ + assert wrapped_readurl.mock_calls == [ mock.call( self.url, timeout=self.timeout, @@ -327,10 +468,15 @@ class TestFetchReprovisionData: assert caplog.record_tuples == [ ( - "cloudinit.sources.azure.imds", + "cloudinit.url_helper", + logging.DEBUG, + StringMatch(r"Read from.*"), + ), + ( + LOG_PATH, logging.DEBUG, 
"Polled IMDS 1 time(s)", - ) + ), ] @pytest.mark.parametrize( @@ -370,10 +516,10 @@ class TestFetchReprovisionData: ) backoff_logs = [ ( - "cloudinit.sources.azure.imds", + LOG_PATH, logging.INFO, - "Polling IMDS failed with exception: " - f"{wrapped_error!r} count: {i}", + f"Polling IMDS failed attempt {i} with exception: " + f"{wrapped_error!r}", ) for i in range(1, failures + 1) if i == 1 or math.log2(i).is_integer() @@ -382,10 +528,10 @@ class TestFetchReprovisionData: ( "cloudinit.url_helper", logging.DEBUG, - mock.ANY, + StringMatch(r"Read from.*"), ), ( - "cloudinit.sources.azure.imds", + LOG_PATH, logging.DEBUG, f"Polled IMDS {failures+1} time(s)", ), @@ -437,20 +583,20 @@ class TestFetchReprovisionData: backoff_logs = [ ( - "cloudinit.sources.azure.imds", + LOG_PATH, logging.INFO, - "Polling IMDS failed with exception: " - f"{wrapped_error!r} count: {i}", + f"Polling IMDS failed attempt {i} with exception: " + f"{wrapped_error!r}", ) for i in range(1, failures + 1) if i == 1 or math.log2(i).is_integer() ] assert caplog.record_tuples == backoff_logs + [ ( - "cloudinit.sources.azure.imds", + LOG_PATH, logging.INFO, - "Polling IMDS failed with exception: " - f"{exc_info.value!r} count: {failures+1}", + f"Polling IMDS failed attempt {failures+1} with exception: " + f"{exc_info.value!r}", ), ] @@ -483,9 +629,9 @@ class TestFetchReprovisionData: assert caplog.record_tuples == [ ( - "cloudinit.sources.azure.imds", + LOG_PATH, logging.INFO, - "Polling IMDS failed with exception: " - f"{exc_info.value!r} count: 1", + "Polling IMDS failed attempt 1 with exception: " + f"{exc_info.value!r}", ), ] diff --git a/tests/unittests/sources/test_azure.py b/tests/unittests/sources/test_azure.py index b5fe2672..8371d4b8 100644 --- a/tests/unittests/sources/test_azure.py +++ b/tests/unittests/sources/test_azure.py @@ -16,7 +16,6 @@ from cloudinit.net import dhcp from cloudinit.sources import UNSET from cloudinit.sources import DataSourceAzure as dsaz from cloudinit.sources 
import InvalidMetaDataException -from cloudinit.sources.azure import imds from cloudinit.sources.helpers import netlink from cloudinit.util import ( MountFailedError, @@ -299,6 +298,15 @@ def patched_reported_ready_marker_path(azure_ds, patched_markers_dir_path): yield reported_ready_marker +def fake_http_error_for_code(status_code: int): + response_failure = requests.Response() + response_failure.status_code = status_code + return requests.exceptions.HTTPError( + "fake error", + response=response_failure, + ) + + def construct_ovf_env( *, custom_data=None, @@ -2887,36 +2895,38 @@ class TestPreprovisioningHotAttachNics(CiTestCase): "unknown-245": "624c3620", } - # Simulate two NICs by adding the same one twice. - md = { - "interface": [ - IMDS_NETWORK_METADATA["interface"][0], - IMDS_NETWORK_METADATA["interface"][0], - ] - } - - m_req = mock.Mock(content=json.dumps(md)) - m_request.side_effect = [ - requests.Timeout("Fake connection timeout"), - requests.ConnectionError("Fake Network Unreachable"), - m_req, - ] + m_req = mock.Mock(content=json.dumps({"not": "empty"})) + m_request.side_effect = ( + [requests.Timeout("Fake connection timeout")] * 5 + + [requests.ConnectionError("Fake Network Unreachable")] * 5 + + 290 * [fake_http_error_for_code(410)] + + [m_req] + ) m_dhcpv4.return_value.lease = lease - is_primary, expected_nic_count = dsa._check_if_nic_is_primary("eth0") + is_primary = dsa._check_if_nic_is_primary("eth0") self.assertEqual(True, is_primary) - self.assertEqual(2, expected_nic_count) - assert len(m_request.mock_calls) == 3 + assert len(m_request.mock_calls) == 301 - # Re-run tests to verify max retries. + # Re-run tests to verify max http failures. 
+ m_request.reset_mock() + m_request.side_effect = 305 * [fake_http_error_for_code(410)] + + dsa = dsaz.DataSourceAzure({}, distro=distro, paths=self.paths) + + is_primary = dsa._check_if_nic_is_primary("eth1") + self.assertEqual(False, is_primary) + assert len(m_request.mock_calls) == 301 + + # Re-run tests to verify max connection error retries. m_request.reset_mock() m_request.side_effect = [ requests.Timeout("Fake connection timeout") - ] * 6 + [requests.ConnectionError("Fake Network Unreachable")] * 6 + ] * 9 + [requests.ConnectionError("Fake Network Unreachable")] * 9 dsa = dsaz.DataSourceAzure({}, distro=distro, paths=self.paths) - is_primary, expected_nic_count = dsa._check_if_nic_is_primary("eth1") + is_primary = dsa._check_if_nic_is_primary("eth1") self.assertEqual(False, is_primary) assert len(m_request.mock_calls) == 11 @@ -3596,15 +3606,6 @@ class TestEphemeralNetworking: assert azure_ds._ephemeral_dhcp_ctx is None -def fake_http_error_for_code(status_code: int): - response_failure = requests.Response() - response_failure.status_code = status_code - return requests.exceptions.HTTPError( - "fake error", - response=response_failure, - ) - - class TestInstanceId: def test_metadata(self, azure_ds, mock_dmi_read_dmi_data): azure_ds.metadata = {"instance-id": "test-id"} @@ -3709,7 +3710,7 @@ class TestProvisioning: timeout=2, headers={"Metadata": "true"}, retries=10, - exception_cb=imds._readurl_exception_callback, + exception_cb=mock.ANY, infinite=False, log_req_resp=True, ), @@ -3769,7 +3770,7 @@ class TestProvisioning: mock.call( "http://169.254.169.254/metadata/instance?" "api-version=2021-08-01&extended=true", - exception_cb=imds._readurl_exception_callback, + exception_cb=mock.ANY, headers={"Metadata": "true"}, infinite=False, log_req_resp=True, @@ -3788,7 +3789,7 @@ class TestProvisioning: mock.call( "http://169.254.169.254/metadata/instance?" 
"api-version=2021-08-01&extended=true", - exception_cb=imds._readurl_exception_callback, + exception_cb=mock.ANY, headers={"Metadata": "true"}, infinite=False, log_req_resp=True, @@ -3860,9 +3861,7 @@ class TestProvisioning: ) self.mock_readurl.side_effect = [ mock.MagicMock(contents=json.dumps(self.imds_md).encode()), - mock.MagicMock( - contents=json.dumps(self.imds_md["network"]).encode() - ), + mock.MagicMock(contents=json.dumps(self.imds_md).encode()), mock.MagicMock(contents=construct_ovf_env().encode()), mock.MagicMock(contents=json.dumps(self.imds_md).encode()), ] @@ -3874,7 +3873,7 @@ class TestProvisioning: mock.call( "http://169.254.169.254/metadata/instance?" "api-version=2021-08-01&extended=true", - exception_cb=imds._readurl_exception_callback, + exception_cb=mock.ANY, headers={"Metadata": "true"}, infinite=False, log_req_resp=True, @@ -3884,11 +3883,11 @@ class TestProvisioning: mock.call( "http://169.254.169.254/metadata/instance?" "api-version=2021-08-01&extended=true", - exception_cb=imds._readurl_exception_callback, + exception_cb=mock.ANY, headers={"Metadata": "true"}, infinite=False, log_req_resp=True, - retries=10, + retries=300, timeout=2, ), mock.call( @@ -3903,7 +3902,7 @@ class TestProvisioning: mock.call( "http://169.254.169.254/metadata/instance?" "api-version=2021-08-01&extended=true", - exception_cb=imds._readurl_exception_callback, + exception_cb=mock.ANY, headers={"Metadata": "true"}, infinite=False, log_req_resp=True, @@ -4025,7 +4024,7 @@ class TestProvisioning: mock.call( "http://169.254.169.254/metadata/instance?" "api-version=2021-08-01&extended=true", - exception_cb=imds._readurl_exception_callback, + exception_cb=mock.ANY, headers={"Metadata": "true"}, infinite=False, log_req_resp=True, @@ -4035,11 +4034,11 @@ class TestProvisioning: mock.call( "http://169.254.169.254/metadata/instance?" 
"api-version=2021-08-01&extended=true", - exception_cb=imds._readurl_exception_callback, + exception_cb=mock.ANY, headers={"Metadata": "true"}, infinite=False, log_req_resp=True, - retries=10, + retries=300, timeout=2, ), mock.call( @@ -4054,7 +4053,7 @@ class TestProvisioning: mock.call( "http://169.254.169.254/metadata/instance?" "api-version=2021-08-01&extended=true", - exception_cb=imds._readurl_exception_callback, + exception_cb=mock.ANY, headers={"Metadata": "true"}, infinite=False, log_req_resp=True, @@ -4134,7 +4133,7 @@ class TestProvisioning: mock.call( "http://169.254.169.254/metadata/instance?" "api-version=2021-08-01&extended=true", - exception_cb=imds._readurl_exception_callback, + exception_cb=mock.ANY, headers={"Metadata": "true"}, infinite=False, log_req_resp=True, @@ -4153,7 +4152,7 @@ class TestProvisioning: mock.call( "http://169.254.169.254/metadata/instance?" "api-version=2021-08-01&extended=true", - exception_cb=imds._readurl_exception_callback, + exception_cb=mock.ANY, headers={"Metadata": "true"}, infinite=False, log_req_resp=True, @@ -4215,7 +4214,7 @@ class TestProvisioning: mock.call( "http://169.254.169.254/metadata/instance?" "api-version=2021-08-01&extended=true", - exception_cb=imds._readurl_exception_callback, + exception_cb=mock.ANY, headers={"Metadata": "true"}, infinite=False, log_req_resp=True, -- cgit v1.2.1 From faad547c9c4d9b0202827b9404d736e52cf3b8c0 Mon Sep 17 00:00:00 2001 From: Brett Holman Date: Thu, 2 Mar 2023 10:19:37 -0700 Subject: source: Force OpenStack when it is only option (#2045) Running on OpenStack Ironic was broken in 1efa8a0a0, which prevented a system configured to run on only Openstack from actually running this ds. This change also prevents the kernel commandline definition from working. This change was required to prevent unnecessarily probing OpenStack on Ec2, and is therefore still required. This commit reverts an earlier attempt[1][2] to automatically detect OpenStack, due to regression it caused. 
Additionally, this change allows a system that defines a datasource list containing only [OpenStack] or [OpenStack, None] to attempt running on OpenStack, overriding ds_detect(). A datasource list that defines [OpenStack, None] still falls back to DataSourceNone if OpenStack fails to reach the IMDS. This change also lays groundwork for the following future work: 1. Add support for other datasources 2. Also override datasource checking when the kernel command line defines a datasource. This work needs to be done manually to support non-systemd systems. Besides forcing OpenStack to run when it is the only datasource in the datasource list, this commit also: [1] 0220295 (it breaks some use cases) [2] 29faf66 (no longer used) LP: #2008727 --- cloudinit/distros/__init__.py | 31 ------- cloudinit/distros/freebsd.py | 39 +-------- cloudinit/sources/DataSourceOpenStack.py | 39 ++------- cloudinit/sources/__init__.py | 29 ++++++- doc/examples/cloud-config-datasources.txt | 5 -- tests/unittests/distros/test__init__.py | 96 ---------------------- tests/unittests/sources/test_openstack.py | 132 ++++++++++-------------------- tests/unittests/test_ds_identify.py | 7 +- tests/unittests/util.py | 5 -- tools/ds-identify | 7 -- 10 files changed, 78 insertions(+), 312 deletions(-) diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index 940b689e..12fae0ac 100644 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -992,37 +992,6 @@ class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta): **kwargs, ) - @property - def is_virtual(self) -> Optional[bool]: - """Detect if running on a virtual machine or bare metal. - - If the detection fails, it returns None. - """ - if not uses_systemd(): - # For non systemd systems the method should be - # implemented in the distro class. 
- LOG.warning("is_virtual should be implemented on distro class") - return None - - try: - detect_virt_path = subp.which("systemd-detect-virt") - if detect_virt_path: - out, _ = subp.subp( - [detect_virt_path], capture=True, rcs=[0, 1] - ) - - return not out.strip() == "none" - else: - err_msg = "detection binary not found" - except subp.ProcessExecutionError as e: - err_msg = str(e) - - LOG.warning( - "Failed to detect virtualization with systemd-detect-virt: %s", - err_msg, - ) - return None - def _apply_hostname_transformations_to_url(url: str, transformations: list): """ diff --git a/cloudinit/distros/freebsd.py b/cloudinit/distros/freebsd.py index 706d0743..4268abe6 100644 --- a/cloudinit/distros/freebsd.py +++ b/cloudinit/distros/freebsd.py @@ -6,9 +6,7 @@ import os import re -from functools import lru_cache from io import StringIO -from typing import Optional import cloudinit.distros.bsd from cloudinit import log as logging @@ -194,40 +192,5 @@ class Distro(cloudinit.distros.bsd.BSD): freq=PER_INSTANCE, ) - @lru_cache() - def is_container(self) -> bool: - """return whether we're running in a container. - Cached, because it's unlikely to change.""" - jailed, _ = subp.subp(["sysctl", "-n", "security.jail.jailed"]) - if jailed.strip() == "0": - return False - return True - - @lru_cache() - def virtual(self) -> str: - """return the kind of virtualisation system we're running under. - Cached, because it's unlikely to change.""" - if self.is_container(): - return "jail" - # map FreeBSD's kern.vm_guest to systemd-detect-virt, just like we do - # in ds-identify - VM_GUEST_TO_SYSTEMD = { - "hv": "microsoft", - "vbox": "oracle", - "generic": "vm-other", - } - vm, _ = subp.subp(["sysctl", "-n", "kern.vm_guest"]) - vm = vm.strip() - if vm in VM_GUEST_TO_SYSTEMD: - return VM_GUEST_TO_SYSTEMD[vm] - return vm - - @property - def is_virtual(self) -> Optional[bool]: - """Detect if running on a virtual machine or bare metal. 
- This can fail on some platforms, so the signature is Optional[bool] - """ - if self.virtual() == "none": - return False - return True +# vi: ts=4 expandtab diff --git a/cloudinit/sources/DataSourceOpenStack.py b/cloudinit/sources/DataSourceOpenStack.py index 86ed3dd5..6a5012b5 100644 --- a/cloudinit/sources/DataSourceOpenStack.py +++ b/cloudinit/sources/DataSourceOpenStack.py @@ -73,7 +73,7 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource): mstr = "%s [%s,ver=%s]" % (root, self.dsmode, self.version) return mstr - def wait_for_metadata_service(self, max_wait=None, timeout=None): + def wait_for_metadata_service(self): urls = self.ds_cfg.get("metadata_urls", DEF_MD_URLS) filtered = [x for x in urls if util.is_resolvable_url(x)] if set(filtered) != set(urls): @@ -90,23 +90,16 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource): md_urls = [] url2base = {} for url in urls: - # Wait for a specific openstack metadata url md_url = url_helper.combine_url(url, "openstack") md_urls.append(md_url) url2base[md_url] = url url_params = self.get_url_params() - if max_wait is None: - max_wait = url_params.max_wait_seconds - - if timeout is None: - timeout = url_params.timeout_seconds - start_time = time.time() avail_url, _response = url_helper.wait_for_url( urls=md_urls, - max_wait=max_wait, - timeout=timeout, + max_wait=url_params.max_wait_seconds, + timeout=url_params.timeout_seconds, connect_synchronously=False, ) if avail_url: @@ -157,7 +150,6 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource): False when unable to contact metadata service or when metadata format is invalid or disabled. """ - oracle_considered = "Oracle" in self.sys_cfg.get("datasource_list") if self.perform_dhcp_setup: # Setup networking in init-local stage. 
try: @@ -165,14 +157,6 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource): self.fallback_interface, tmp_dir=self.distro.get_tmp_exec_path(), ): - if not self.detect_openstack( - accept_oracle=not oracle_considered - ): - LOG.debug( - "OpenStack datasource not running" - " on OpenStack (dhcp)" - ) - return False results = util.log_time( logfunc=LOG.debug, @@ -183,13 +167,6 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource): util.logexc(LOG, str(e)) return False else: - if not self.detect_openstack(accept_oracle=not oracle_considered): - LOG.debug( - "OpenStack datasource not running" - " on OpenStack (non-dhcp)" - ) - return False - try: results = self._crawl_metadata() except sources.InvalidMetaDataException as e: @@ -268,8 +245,9 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource): raise sources.InvalidMetaDataException(msg) from e return result - def detect_openstack(self, accept_oracle=False): + def ds_detect(self): """Return True when a potential OpenStack platform is detected.""" + accept_oracle = "Oracle" in self.sys_cfg.get("datasource_list") if not util.is_x86(): # Non-Intel cpus don't properly report dmi product names return True @@ -283,13 +261,6 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource): return True elif util.get_proc_env(1).get("product_name") == DMI_PRODUCT_NOVA: return True - # On bare metal hardware, the product name is not set like - # in a virtual OpenStack vm. We check if the system is virtual - # and if the openstack specific metadata service has been found. 
- elif not self.distro.is_virtual and self.wait_for_metadata_service( - max_wait=15, timeout=5 - ): - return True return False diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py index 12430401..565e1754 100644 --- a/cloudinit/sources/__init__.py +++ b/cloudinit/sources/__init__.py @@ -307,6 +307,33 @@ class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta): def __str__(self): return type_utils.obj_name(self) + def ds_detect(self) -> bool: + """Check if running on this datasource""" + return True + + def override_ds_detect(self): + """Override if either: + - only a single datasource defined (nothing to fall back to) + - TODO: commandline argument is used (ci.ds=OpenStack) + """ + return self.sys_cfg.get("datasource_list", []) in ( + [self.dsname], + [self.dsname, "None"], + ) + + def _check_and_get_data(self): + """Overrides runtime datasource detection""" + if self.override_ds_detect(): + LOG.debug( + "Machine is configured to run on single datasource %s.", self + ) + elif self.ds_detect(): + LOG.debug("Machine is running on %s.", self) + else: + LOG.debug("Datasource type %s is not detected.", self) + return False + return self._get_data() + def _get_standardized_metadata(self, instance_data): """Return a dictionary of standardized metadata keys.""" local_hostname = self.get_hostname().hostname @@ -370,7 +397,7 @@ class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta): Minimally, the datasource should return a boolean True on success. 
""" self._dirty_cache = True - return_value = self._get_data() + return_value = self._check_and_get_data() if not return_value: return return_value self.persist_instance_data() diff --git a/doc/examples/cloud-config-datasources.txt b/doc/examples/cloud-config-datasources.txt index 9b5df6b0..43b34418 100644 --- a/doc/examples/cloud-config-datasources.txt +++ b/doc/examples/cloud-config-datasources.txt @@ -16,11 +16,6 @@ datasource: - http://169.254.169.254:80 - http://instance-data:8773 - OpenStack: - # The default list of metadata services to check for OpenStack. - metadata_urls: - - http://169.254.169.254 - MAAS: timeout : 50 max_wait : 120 diff --git a/tests/unittests/distros/test__init__.py b/tests/unittests/distros/test__init__.py index ea017d58..7c5187fd 100644 --- a/tests/unittests/distros/test__init__.py +++ b/tests/unittests/distros/test__init__.py @@ -221,102 +221,6 @@ class TestGenericDistro(helpers.FilesystemMockingTestCase): ["pw", "usermod", "myuser", "-p", "01-Jan-1970"] ) - @mock.patch("cloudinit.distros.uses_systemd") - @mock.patch( - "cloudinit.distros.subp.which", - ) - @mock.patch( - "cloudinit.distros.subp.subp", - ) - def test_virtualization_detected(self, m_subp, m_which, m_uses_systemd): - m_uses_systemd.return_value = True - m_which.return_value = "/usr/bin/systemd-detect-virt" - m_subp.return_value = ("kvm", None) - - cls = distros.fetch("ubuntu") - d = cls("ubuntu", {}, None) - self.assertTrue(d.is_virtual) - - @mock.patch("cloudinit.distros.uses_systemd") - @mock.patch( - "cloudinit.distros.subp.subp", - ) - def test_virtualization_not_detected(self, m_subp, m_uses_systemd): - m_uses_systemd.return_value = True - m_subp.return_value = ("none", None) - - cls = distros.fetch("ubuntu") - d = cls("ubuntu", {}, None) - self.assertFalse(d.is_virtual) - - @mock.patch("cloudinit.distros.uses_systemd") - def test_virtualization_unknown(self, m_uses_systemd): - m_uses_systemd.return_value = True - - from cloudinit.subp import ProcessExecutionError 
- - cls = distros.fetch("ubuntu") - d = cls("ubuntu", {}, None) - with mock.patch( - "cloudinit.distros.subp.which", - return_value=None, - ): - self.assertIsNone( - d.is_virtual, - "Reflect unknown state when detection" - " binary cannot be found", - ) - - with mock.patch( - "cloudinit.distros.subp.subp", - side_effect=ProcessExecutionError(), - ): - self.assertIsNone( - d.is_virtual, "Reflect unknown state on ProcessExecutionError" - ) - - def test_virtualization_on_freebsd(self): - # This test function is a bit unusual: - # We need to first mock away the `ifconfig -a` subp call - # Then, we can use side-effects to get the results of two subp calls - # needed for is_container()/virtual() which is_virtual depends on. - # We also have to clear cache between each of those assertions. - - cls = distros.fetch("freebsd") - with mock.patch( - "cloudinit.distros.subp.subp", return_value=("", None) - ): - d = cls("freebsd", {}, None) - # This mock is called by `sysctl -n security.jail.jailed` - with mock.patch( - "cloudinit.distros.subp.subp", - side_effect=[("0\n", None), ("literaly any truthy value", None)], - ): - self.assertFalse(d.is_container()) - d.is_container.cache_clear() - self.assertTrue(d.is_container()) - d.is_container.cache_clear() - - # This mock is called by `sysctl -n kern.vm_guest` - with mock.patch( - "cloudinit.distros.subp.subp", - # fmt: off - side_effect=[ - ("0\n", None), ("hv\n", None), # virtual - ("0\n", None), ("none\n", None), # physical - ("0\n", None), ("hv\n", None) # virtual - ], - # fmt: on - ): - self.assertEqual(d.virtual(), "microsoft") - d.is_container.cache_clear() - d.virtual.cache_clear() - self.assertEqual(d.virtual(), "none") - d.is_container.cache_clear() - d.virtual.cache_clear() - - self.assertTrue(d.is_virtual) - class TestGetPackageMirrors: def return_first(self, mlist): diff --git a/tests/unittests/sources/test_openstack.py b/tests/unittests/sources/test_openstack.py index 02516772..2b3d83f4 100644 --- 
a/tests/unittests/sources/test_openstack.py +++ b/tests/unittests/sources/test_openstack.py @@ -301,12 +301,12 @@ class TestOpenStackDataSource(test_helpers.ResponsesTestCase): responses_mock=self.responses, ) distro = mock.MagicMock(spec=Distro) - distro.is_virtual = False ds_os = ds.DataSourceOpenStack( settings.CFG_BUILTIN, distro, helpers.Paths({"run_dir": self.tmp}) ) self.assertIsNone(ds_os.version) - self.assertTrue(ds_os.get_data()) + with mock.patch.object(ds_os, "ds_detect", return_value=True): + self.assertTrue(ds_os.get_data()) self.assertEqual(2, ds_os.version) md = dict(ds_os.metadata) md.pop("instance-id", None) @@ -351,7 +351,7 @@ class TestOpenStackDataSource(test_helpers.ResponsesTestCase): self.assertIsNone(ds_os_local.version) with test_helpers.mock.patch.object( - ds_os_local, "detect_openstack" + ds_os_local, "ds_detect" ) as m_detect_os: m_detect_os.return_value = True found = ds_os_local.get_data() @@ -383,9 +383,7 @@ class TestOpenStackDataSource(test_helpers.ResponsesTestCase): settings.CFG_BUILTIN, distro, helpers.Paths({"run_dir": self.tmp}) ) self.assertIsNone(ds_os.version) - with test_helpers.mock.patch.object( - ds_os, "detect_openstack" - ) as m_detect_os: + with test_helpers.mock.patch.object(ds_os, "ds_detect") as m_detect_os: m_detect_os.return_value = True found = ds_os.get_data() self.assertFalse(found) @@ -414,7 +412,8 @@ class TestOpenStackDataSource(test_helpers.ResponsesTestCase): "timeout": 0, } self.assertIsNone(ds_os.version) - self.assertFalse(ds_os.get_data()) + with mock.patch.object(ds_os, "ds_detect", return_value=True): + self.assertFalse(ds_os.get_data()) self.assertIsNone(ds_os.version) def test_network_config_disabled_by_datasource_config(self): @@ -489,9 +488,7 @@ class TestOpenStackDataSource(test_helpers.ResponsesTestCase): "timeout": 0, } self.assertIsNone(ds_os.version) - with test_helpers.mock.patch.object( - ds_os, "detect_openstack" - ) as m_detect_os: + with test_helpers.mock.patch.object(ds_os, 
"ds_detect") as m_detect_os: m_detect_os.return_value = True found = ds_os.get_data() self.assertFalse(found) @@ -589,53 +586,17 @@ class TestDetectOpenStack(test_helpers.CiTestCase): settings.CFG_BUILTIN, distro, helpers.Paths({"run_dir": self.tmp}) ) - def test_detect_openstack_non_intel_x86(self, m_is_x86): + def test_ds_detect_non_intel_x86(self, m_is_x86): """Return True on non-intel platforms because dmi isn't conclusive.""" m_is_x86.return_value = False self.assertTrue( - self._fake_ds().detect_openstack(), - "Expected detect_openstack == True", + self._fake_ds().ds_detect(), + "Expected ds_detect == True", ) - def test_detect_openstack_bare_metal(self, m_is_x86): - """Return True if the distro is non-virtual.""" - m_is_x86.return_value = True - - distro = mock.MagicMock(spec=Distro) - distro.is_virtual = False - - fake_ds = self._fake_ds() - fake_ds.distro = distro - - self.assertFalse( - fake_ds.distro.is_virtual, - "Expected distro.is_virtual == False", - ) - - with test_helpers.mock.patch.object( - fake_ds, "wait_for_metadata_service" - ) as m_wait_for_metadata_service: - m_wait_for_metadata_service.return_value = True - - self.assertTrue( - fake_ds.wait_for_metadata_service(), - "Expected wait_for_metadata_service == True", - ) - - self.assertTrue( - fake_ds.detect_openstack(), "Expected detect_openstack == True" - ) - - self.assertTrue( - m_wait_for_metadata_service.called, - "Expected wait_for_metadata_service to be called", - ) - @test_helpers.mock.patch(MOCK_PATH + "util.get_proc_env") @test_helpers.mock.patch(MOCK_PATH + "dmi.read_dmi_data") - def test_not_detect_openstack_intel_x86_ec2( - self, m_dmi, m_proc_env, m_is_x86 - ): + def test_not_ds_detect_intel_x86_ec2(self, m_dmi, m_proc_env, m_is_x86): """Return False on EC2 platforms.""" m_is_x86.return_value = True # No product_name in proc/1/environ @@ -650,15 +611,13 @@ class TestDetectOpenStack(test_helpers.CiTestCase): m_dmi.side_effect = fake_dmi_read self.assertFalse( - 
self._fake_ds().detect_openstack(), - "Expected detect_openstack == False on EC2", + self._fake_ds().ds_detect(), + "Expected ds_detect == False on EC2", ) m_proc_env.assert_called_with(1) @test_helpers.mock.patch(MOCK_PATH + "dmi.read_dmi_data") - def test_detect_openstack_intel_product_name_compute( - self, m_dmi, m_is_x86 - ): + def test_ds_detect_intel_product_name_compute(self, m_dmi, m_is_x86): """Return True on OpenStack compute and nova instances.""" m_is_x86.return_value = True openstack_product_names = ["OpenStack Nova", "OpenStack Compute"] @@ -666,12 +625,12 @@ class TestDetectOpenStack(test_helpers.CiTestCase): for product_name in openstack_product_names: m_dmi.return_value = product_name self.assertTrue( - self._fake_ds().detect_openstack(), - "Failed to detect_openstack", + self._fake_ds().ds_detect(), + "Failed to ds_detect", ) @test_helpers.mock.patch(MOCK_PATH + "dmi.read_dmi_data") - def test_detect_openstack_opentelekomcloud_chassis_asset_tag( + def test_ds_detect_opentelekomcloud_chassis_asset_tag( self, m_dmi, m_is_x86 ): """Return True on OpenStack reporting OpenTelekomCloud asset-tag.""" @@ -686,14 +645,12 @@ class TestDetectOpenStack(test_helpers.CiTestCase): m_dmi.side_effect = fake_dmi_read self.assertTrue( - self._fake_ds().detect_openstack(), - "Expected detect_openstack == True on OpenTelekomCloud", + self._fake_ds().ds_detect(), + "Expected ds_detect == True on OpenTelekomCloud", ) @test_helpers.mock.patch(MOCK_PATH + "dmi.read_dmi_data") - def test_detect_openstack_sapccloud_chassis_asset_tag( - self, m_dmi, m_is_x86 - ): + def test_ds_detect_sapccloud_chassis_asset_tag(self, m_dmi, m_is_x86): """Return True on OpenStack reporting SAP CCloud VM asset-tag.""" m_is_x86.return_value = True @@ -706,14 +663,12 @@ class TestDetectOpenStack(test_helpers.CiTestCase): m_dmi.side_effect = fake_dmi_read self.assertTrue( - self._fake_ds().detect_openstack(), - "Expected detect_openstack == True on SAP CCloud VM", + self._fake_ds().ds_detect(), + 
"Expected ds_detect == True on SAP CCloud VM", ) @test_helpers.mock.patch(MOCK_PATH + "dmi.read_dmi_data") - def test_detect_openstack_huaweicloud_chassis_asset_tag( - self, m_dmi, m_is_x86 - ): + def test_ds_detect_huaweicloud_chassis_asset_tag(self, m_dmi, m_is_x86): """Return True on OpenStack reporting Huawei Cloud VM asset-tag.""" m_is_x86.return_value = True @@ -726,14 +681,12 @@ class TestDetectOpenStack(test_helpers.CiTestCase): m_dmi.side_effect = fake_asset_tag_dmi_read self.assertTrue( - self._fake_ds().detect_openstack(), - "Expected detect_openstack == True on Huawei Cloud VM", + self._fake_ds().ds_detect(), + "Expected ds_detect == True on Huawei Cloud VM", ) @test_helpers.mock.patch(MOCK_PATH + "dmi.read_dmi_data") - def test_detect_openstack_oraclecloud_chassis_asset_tag( - self, m_dmi, m_is_x86 - ): + def test_ds_detect_oraclecloud_chassis_asset_tag(self, m_dmi, m_is_x86): """Return True on OpenStack reporting Oracle cloud asset-tag.""" m_is_x86.return_value = True @@ -745,16 +698,19 @@ class TestDetectOpenStack(test_helpers.CiTestCase): assert False, "Unexpected dmi read of %s" % dmi_key m_dmi.side_effect = fake_dmi_read + ds = self._fake_ds() + ds.sys_cfg = {"datasource_list": ["Oracle"]} self.assertTrue( - self._fake_ds().detect_openstack(accept_oracle=True), - "Expected detect_openstack == True on OracleCloud.com", + ds.ds_detect(), + "Expected ds_detect == True on OracleCloud.com", ) + ds.sys_cfg = {"datasource_list": []} self.assertFalse( - self._fake_ds().detect_openstack(accept_oracle=False), - "Expected detect_openstack == False.", + ds.ds_detect(), + "Expected ds_detect == False.", ) - def _test_detect_openstack_nova_compute_chassis_asset_tag( + def _test_ds_detect_nova_compute_chassis_asset_tag( self, m_dmi, m_is_x86, chassis_tag ): """Return True on OpenStack reporting generic asset-tag.""" @@ -769,27 +725,25 @@ class TestDetectOpenStack(test_helpers.CiTestCase): m_dmi.side_effect = fake_dmi_read self.assertTrue( - 
self._fake_ds().detect_openstack(), - "Expected detect_openstack == True on Generic OpenStack Platform", + self._fake_ds().ds_detect(), + "Expected ds_detect == True on Generic OpenStack Platform", ) @test_helpers.mock.patch(MOCK_PATH + "dmi.read_dmi_data") - def test_detect_openstack_nova_chassis_asset_tag(self, m_dmi, m_is_x86): - self._test_detect_openstack_nova_compute_chassis_asset_tag( + def test_ds_detect_nova_chassis_asset_tag(self, m_dmi, m_is_x86): + self._test_ds_detect_nova_compute_chassis_asset_tag( m_dmi, m_is_x86, "OpenStack Nova" ) @test_helpers.mock.patch(MOCK_PATH + "dmi.read_dmi_data") - def test_detect_openstack_compute_chassis_asset_tag(self, m_dmi, m_is_x86): - self._test_detect_openstack_nova_compute_chassis_asset_tag( + def test_ds_detect_compute_chassis_asset_tag(self, m_dmi, m_is_x86): + self._test_ds_detect_nova_compute_chassis_asset_tag( m_dmi, m_is_x86, "OpenStack Compute" ) @test_helpers.mock.patch(MOCK_PATH + "util.get_proc_env") @test_helpers.mock.patch(MOCK_PATH + "dmi.read_dmi_data") - def test_detect_openstack_by_proc_1_environ( - self, m_dmi, m_proc_env, m_is_x86 - ): + def test_ds_detect_by_proc_1_environ(self, m_dmi, m_proc_env, m_is_x86): """Return True when nova product_name specified in /proc/1/environ.""" m_is_x86.return_value = True # Nova product_name in proc/1/environ @@ -807,8 +761,8 @@ class TestDetectOpenStack(test_helpers.CiTestCase): m_dmi.side_effect = fake_dmi_read self.assertTrue( - self._fake_ds().detect_openstack(), - "Expected detect_openstack == True on OpenTelekomCloud", + self._fake_ds().ds_detect(), + "Expected ds_detect == True on OpenTelekomCloud", ) m_proc_env.assert_called_with(1) diff --git a/tests/unittests/test_ds_identify.py b/tests/unittests/test_ds_identify.py index 03be0c92..cc75209e 100644 --- a/tests/unittests/test_ds_identify.py +++ b/tests/unittests/test_ds_identify.py @@ -950,7 +950,7 @@ class TestOracle(DsIdentifyBase): """Simple negative test of Oracle.""" mycfg = 
copy.deepcopy(VALID_CFG["Oracle"]) mycfg["files"][P_CHASSIS_ASSET_TAG] = "Not Oracle" - self._check_via_dict(mycfg, ds=["openstack", "none"], rc=RC_FOUND) + self._check_via_dict(mycfg, rc=RC_NOT_FOUND) def blkid_out(disks=None): @@ -1056,7 +1056,6 @@ VALID_CFG = { "Ec2-brightbox-negative": { "ds": "Ec2", "files": {P_PRODUCT_SERIAL: "tricky-host.bobrightbox.com\n"}, - "mocks": [MOCK_VIRT_IS_KVM], }, "GCE": { "ds": "GCE", @@ -1598,7 +1597,6 @@ VALID_CFG = { "Ec2-E24Cloud-negative": { "ds": "Ec2", "files": {P_SYS_VENDOR: "e24cloudyday\n"}, - "mocks": [MOCK_VIRT_IS_KVM], }, "VMware-NoValidTransports": { "ds": "VMware", @@ -1757,7 +1755,6 @@ VALID_CFG = { "VMware-GuestInfo-NoVirtID": { "ds": "VMware", "mocks": [ - MOCK_VIRT_IS_KVM, { "name": "vmware_has_rpctool", "ret": 0, @@ -1863,7 +1860,6 @@ VALID_CFG = { P_PRODUCT_NAME: "3DS Outscale VM\n", P_SYS_VENDOR: "Not 3DS Outscale\n", }, - "mocks": [MOCK_VIRT_IS_KVM], }, "Ec2-Outscale-negative-productname": { "ds": "Ec2", @@ -1871,7 +1867,6 @@ VALID_CFG = { P_PRODUCT_NAME: "Not 3DS Outscale VM\n", P_SYS_VENDOR: "3DS Outscale\n", }, - "mocks": [MOCK_VIRT_IS_KVM], }, } diff --git a/tests/unittests/util.py b/tests/unittests/util.py index da04c6b2..e7094ec5 100644 --- a/tests/unittests/util.py +++ b/tests/unittests/util.py @@ -1,5 +1,4 @@ # This file is part of cloud-init. See LICENSE file for license information. 
-from typing import Optional from unittest import mock from cloudinit import cloud, distros, helpers @@ -146,10 +145,6 @@ class MockDistro(distros.Distro): def package_command(self, command, args=None, pkgs=None): pass - @property - def is_virtual(self) -> Optional[bool]: - return True - def update_package_sources(self): return (True, "yay") diff --git a/tools/ds-identify b/tools/ds-identify index da23e836..cd07565d 100755 --- a/tools/ds-identify +++ b/tools/ds-identify @@ -1262,13 +1262,6 @@ dscheck_OpenStack() { *) return ${DS_MAYBE};; esac - # If we are on bare metal, then we maybe are on a - # bare metal Ironic environment. - detect_virt - if [ "${_RET}" = "none" ]; then - return ${DS_MAYBE} - fi - return ${DS_NOT_FOUND} } -- cgit v1.2.1 From 993ac641abb7ac2696f6e89993d87c35f0936c98 Mon Sep 17 00:00:00 2001 From: James Falcon Date: Thu, 2 Mar 2023 11:35:39 -0600 Subject: Release 23.1.1 Bump the version in cloudinit/version.py to 23.1.1 and update ChangeLog. --- ChangeLog | 5 +++++ cloudinit/version.py | 2 +- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/ChangeLog b/ChangeLog index 1044b7d5..ea3ed8c2 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,8 @@ +23.1.1 + - source: Force OpenStack when it is only option (#2045) + - sources/azure: fix regressions in IMDS behavior (#2041) + [Chris Patterson] + 23.1 - Support transactional-updates for SUSE based distros (#1997) [Robert Schweikert] diff --git a/cloudinit/version.py b/cloudinit/version.py index fd83ebf6..a9bfffed 100644 --- a/cloudinit/version.py +++ b/cloudinit/version.py @@ -4,7 +4,7 @@ # # This file is part of cloud-init. See LICENSE file for license information. -__VERSION__ = "23.1" +__VERSION__ = "23.1.1" _PACKAGED_VERSION = "@@PACKAGED_VERSION@@" FEATURES = [ -- cgit v1.2.1