diff options
34 files changed, 404 insertions, 168 deletions
diff --git a/doc/source/admin/config-dhcp-ha.rst b/doc/source/admin/config-dhcp-ha.rst index 82780551b7..fcfbc87d40 100644 --- a/doc/source/admin/config-dhcp-ha.rst +++ b/doc/source/admin/config-dhcp-ha.rst @@ -441,6 +441,38 @@ To test the HA of DHCP agent: #. Start DHCP agent on HostB. The VM gets the wanted IP again. +No HA for metadata service on isolated networks +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +All Neutron backends using the DHCP agent can also provide `metadata service +<https://docs.openstack.org/nova/latest/user/metadata.html>`_ in isolated +networks (i.e. networks without a router). In this case the DHCP agent manages +the metadata service (see config option `enable_isolated_metadata +<https://docs.openstack.org/neutron/latest/configuration/dhcp-agent.html#DEFAULT.enable_isolated_metadata>`_). + +Note however that the metadata service is only redundant for IPv4, and not +IPv6, even when the DHCP service is configured to be highly available +(config option `dhcp_agents_per_network +<https://docs.openstack.org/neutron/latest/configuration/neutron.html#DEFAULT.dhcp_agents_per_network>`_ +> 1). This is because the DHCP agent will insert a route to the well known +metadata IPv4 address (`169.254.169.254`) via its own IP address, so it will +be reachable as long as the DHCP service is available at that IP address. +This also means that recovery after a failure is tied to the renewal of the +DHCP lease, since that route will only change if the DHCP server for a VM +changes. + +With IPv6, the well known metadata IPv6 address (`fe80::a9fe:a9fe`) is used, +but directly configured in the DHCP agent network namespace. +Due to the enforcement of duplicate address detection (DAD), this address +can only be configured in at most one DHCP network namespace at any time. +See `RFC 4862 <https://www.rfc-editor.org/rfc/rfc4862#section-5.4>`_ for +details on the DAD process. 
+ +For this reason, even when you have multiple DHCP agents, an arbitrary one +(where the metadata IPv6 address is not in `dadfailed` state) will serve all +metadata requests over IPv6. When that metadata service instance becomes +unreachable there is no failover and the service will become unreachable. + Disabling and removing an agent ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/doc/source/admin/config-qos.rst b/doc/source/admin/config-qos.rst index b5713dba79..b46273fc97 100644 --- a/doc/source/admin/config-qos.rst +++ b/doc/source/admin/config-qos.rst @@ -126,10 +126,11 @@ updated: Valid DSCP Marks ---------------- -Valid DSCP mark values are even numbers between 0 and 56, except 2-6, 42, 44, -and 50-54. The full list of valid DSCP marks is: +Valid DSCP mark values are even numbers between 0 and 56, except 2-6, 42, and +50-54. The full list of valid DSCP marks is: -0, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 46, 48, 56 +0, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 44, 46, +48, 56 L3 QoS support @@ -165,7 +166,7 @@ traffic directions (from the VM point of view) for **bandwidth limiting**. Rule \\ L3 service L3 router OVN L3 ==================== =================== =================== Floating IP Egress \\ Ingress Egress \\ Ingress - Gateway IP Egress \\ Ingress - + Gateway IP Egress \\ Ingress Egress \\ Ingress ==================== =================== =================== diff --git a/doc/source/contributor/testing/ci_scenario_jobs.rst b/doc/source/contributor/testing/ci_scenario_jobs.rst index af81cef608..6d0e030374 100644 --- a/doc/source/contributor/testing/ci_scenario_jobs.rst +++ b/doc/source/contributor/testing/ci_scenario_jobs.rst @@ -139,11 +139,6 @@ Currently we have in that queue jobs like listed below. 
| |(only tests related to | | | | | | | | | | | |Neutron and Nova) | | | | | | | | | | +----------------------------------------------+----------------------------------+-------+------------------+-------------+-----------------+----------+-------+--------+------------+-------------+ - |neutron-tempest-with-uwsgi-loki |tempest.api (without slow tests) | 1 | Ubuntu Jammy | openvswitch | openvswitch | legacy | False | False | True | No | - |(non-voting) |tempest.scenario | | | | | | | | | | - | |(only tests related to | | | | | | | | | | - | |Neutron and Nova) | | | | | | | | | | - +----------------------------------------------+----------------------------------+-------+------------------+-------------+-----------------+----------+-------+--------+------------+-------------+ |neutron-ovn-tempest-ipv6-only-ovs-master |tempest.api (without slow tests) | 1 | Ubuntu Jammy | ovn | ovn | --- | False | False | True | Yes | | |(only tests related to | | | | | | | | | | | |Neutron and Nova) | | | | | | | | | | diff --git a/neutron/agent/linux/dhcp.py b/neutron/agent/linux/dhcp.py index 84d444c659..4f19d6cc73 100644 --- a/neutron/agent/linux/dhcp.py +++ b/neutron/agent/linux/dhcp.py @@ -41,6 +41,7 @@ from neutron.agent.linux import external_process from neutron.agent.linux import ip_lib from neutron.agent.linux import iptables_manager from neutron.cmd import runtime_checks as checks +from neutron.common import _constants as common_constants from neutron.common import utils as common_utils from neutron.ipam import utils as ipam_utils from neutron.privileged.agent.linux import dhcp as priv_dhcp @@ -1841,7 +1842,7 @@ class DeviceManager(object): if self.conf.force_metadata or self.conf.enable_isolated_metadata: ip_cidrs.append(constants.METADATA_CIDR) if netutils.is_ipv6_enabled(): - ip_cidrs.append(constants.METADATA_V6_CIDR) + ip_cidrs.append(common_constants.METADATA_V6_CIDR) self.driver.init_l3(interface_name, ip_cidrs, namespace=network.namespace) diff --git 
a/neutron/agent/linux/ip_lib.py b/neutron/agent/linux/ip_lib.py index 4d664381f8..9953729016 100644 --- a/neutron/agent/linux/ip_lib.py +++ b/neutron/agent/linux/ip_lib.py @@ -102,6 +102,10 @@ class AddressNotReady(exceptions.NeutronException): "become ready: %(reason)s") +class DADFailed(AddressNotReady): + pass + + InvalidArgument = privileged.InvalidArgument @@ -592,7 +596,7 @@ class IpAddrCommand(IpDeviceCommandBase): """Wait until an address is no longer marked 'tentative' or 'dadfailed' raises AddressNotReady if times out, address not present on interface - or DAD fails + raises DADFailed if Duplicate Address Detection fails """ def is_address_ready(): try: @@ -604,7 +608,7 @@ class IpAddrCommand(IpDeviceCommandBase): # Since both 'dadfailed' and 'tentative' will be set if DAD fails, # check 'dadfailed' first just to be explicit if addr_info['dadfailed']: - raise AddressNotReady( + raise DADFailed( address=address, reason=_('Duplicate address detected')) if addr_info['tentative']: return False diff --git a/neutron/agent/metadata/driver.py b/neutron/agent/metadata/driver.py index b7e69696a0..a4a62444e2 100644 --- a/neutron/agent/metadata/driver.py +++ b/neutron/agent/metadata/driver.py @@ -33,6 +33,7 @@ from neutron.agent.l3 import namespaces from neutron.agent.linux import external_process from neutron.agent.linux import ip_lib from neutron.agent.linux import utils as linux_utils +from neutron.common import _constants as common_constants from neutron.common import coordination from neutron.common import metadata as comm_meta from neutron.common import utils as common_utils @@ -241,9 +242,30 @@ class MetadataDriver(object): # HAProxy cannot bind() until IPv6 Duplicate Address Detection # completes. We must wait until the address leaves its 'tentative' # state. 
- ip_lib.IpAddrCommand( - parent=ip_lib.IPDevice(name=bind_interface, namespace=ns_name) - ).wait_until_address_ready(address=bind_address_v6) + try: + ip_lib.IpAddrCommand( + parent=ip_lib.IPDevice(name=bind_interface, + namespace=ns_name) + ).wait_until_address_ready(address=bind_address_v6) + except ip_lib.DADFailed as exc: + # This failure means that another DHCP agent has already + # configured this metadata address, so all requests will + # be via that single agent. + LOG.info('DAD failed for address %(address)s on interface ' + '%(interface)s in namespace %(namespace)s on network ' + '%(network)s, deleting it. Exception: %(exception)s', + {'address': bind_address_v6, + 'interface': bind_interface, + 'namespace': ns_name, + 'network': network_id, + 'exception': str(exc)}) + try: + ip_lib.delete_ip_address(bind_address_v6, bind_interface, + namespace=ns_name) + except Exception as exc: + # do not re-raise a delete failure, just log + LOG.info('Address deletion failure: %s', str(exc)) + return pm.enable() monitor.register(uuid, METADATA_SERVICE_NAME, pm) cls.monitors[router_id] = pm @@ -338,6 +360,6 @@ def apply_metadata_nat_rules(router, proxy): if netutils.is_ipv6_enabled(): for c, r in proxy.metadata_nat_rules( proxy.metadata_port, - metadata_address=(constants.METADATA_V6_IP + '/128')): + metadata_address=(common_constants.METADATA_V6_CIDR)): router.iptables_manager.ipv6['nat'].add_rule(c, r) router.iptables_manager.apply() diff --git a/neutron/common/_constants.py b/neutron/common/_constants.py index 55fc718c49..03c07eb71a 100644 --- a/neutron/common/_constants.py +++ b/neutron/common/_constants.py @@ -86,3 +86,6 @@ TRAIT_NETWORK_TUNNEL = 'CUSTOM_NETWORK_TUNNEL_PROVIDER' # The lowest binding index for L3 agents and DHCP agents. 
LOWEST_AGENT_BINDING_INDEX = 1 + +# Neutron-lib defines this with a /64 but it should be /128 +METADATA_V6_CIDR = constants.METADATA_V6_IP + '/128' diff --git a/neutron/common/ovn/constants.py b/neutron/common/ovn/constants.py index 2a4afbf4a9..71d2460410 100644 --- a/neutron/common/ovn/constants.py +++ b/neutron/common/ovn/constants.py @@ -32,8 +32,8 @@ OVN_ROUTER_NAME_EXT_ID_KEY = 'neutron:router_name' OVN_ROUTER_ID_EXT_ID_KEY = 'neutron:router_id' OVN_AZ_HINTS_EXT_ID_KEY = 'neutron:availability_zone_hints' OVN_ROUTER_IS_EXT_GW = 'neutron:is_ext_gw' -OVN_GW_PORT_EXT_ID_KEY = 'neutron:gw_port_id' -OVN_GW_NETWORK_EXT_ID_KEY = 'neutron:gw_network_id' +OVN_GW_PORT_EXT_ID_KEY = 'neutron:gw_port_id' # DEPRECATED, DON'T USE +OVN_GW_NETWORK_EXT_ID_KEY = 'neutron:gw_network_id' # DEPRECATED, DON'T USE OVN_SUBNET_EXT_ID_KEY = 'neutron:subnet_id' OVN_SUBNET_EXT_IDS_KEY = 'neutron:subnet_ids' OVN_SUBNET_POOL_EXT_ADDR_SCOPE4_KEY = 'neutron:subnet_pool_addr_scope4' @@ -434,4 +434,5 @@ OVN_SUPPORTED_VNIC_TYPES = [portbindings.VNIC_NORMAL, portbindings.VNIC_VHOST_VDPA, portbindings.VNIC_REMOTE_MANAGED, portbindings.VNIC_BAREMETAL, + portbindings.VNIC_VIRTIO_FORWARDER, ] diff --git a/neutron/conf/agent/database/agentschedulers_db.py b/neutron/conf/agent/database/agentschedulers_db.py index f58e0b2771..a46a18a262 100644 --- a/neutron/conf/agent/database/agentschedulers_db.py +++ b/neutron/conf/agent/database/agentschedulers_db.py @@ -33,7 +33,9 @@ AGENTS_SCHEDULER_OPTS = [ 'network. If this number is greater than 1, the ' 'scheduler automatically assigns multiple DHCP agents ' 'for a given tenant network, providing high ' - 'availability for the DHCP service.')), + 'availability for the DHCP service. 
However this does ' + 'not provide high availability for the IPv6 metadata ' + 'service in isolated networks.')), cfg.BoolOpt('enable_services_on_agents_with_admin_state_down', default=False, help=_('Enable services on an agent with admin_state_up ' diff --git a/neutron/plugins/ml2/drivers/ovn/mech_driver/mech_driver.py b/neutron/plugins/ml2/drivers/ovn/mech_driver/mech_driver.py index ef16d06227..00178067da 100644 --- a/neutron/plugins/ml2/drivers/ovn/mech_driver/mech_driver.py +++ b/neutron/plugins/ml2/drivers/ovn/mech_driver/mech_driver.py @@ -214,6 +214,10 @@ class OVNMechanismDriver(api.MechanismDriver): portbindings.CAP_PORT_FILTER: self.sg_enabled, portbindings.VIF_DETAILS_CONNECTIVITY: self.connectivity, }, + portbindings.VIF_TYPE_AGILIO_OVS: { + portbindings.CAP_PORT_FILTER: self.sg_enabled, + portbindings.VIF_DETAILS_CONNECTIVITY: self.connectivity, + }, portbindings.VIF_TYPE_VHOST_USER: { portbindings.CAP_PORT_FILTER: False, portbindings.VHOST_USER_MODE: @@ -1025,6 +1029,17 @@ class OVNMechanismDriver(api.MechanismDriver): vif_details = dict(self.vif_details[vif_type]) vif_details[portbindings.VHOST_USER_SOCKET] = ( vhost_user_socket) + elif (vnic_type == portbindings.VNIC_VIRTIO_FORWARDER): + vhost_user_socket = ovn_utils.ovn_vhu_sockpath( + ovn_conf.get_ovn_vhost_sock_dir(), port['id']) + vif_type = portbindings.VIF_TYPE_AGILIO_OVS + port[portbindings.VIF_DETAILS].update({ + portbindings.VHOST_USER_SOCKET: vhost_user_socket}) + vif_details = dict(self.vif_details[vif_type]) + vif_details[portbindings.VHOST_USER_SOCKET] = ( + vhost_user_socket) + vif_details[portbindings.VHOST_USER_MODE] = ( + portbindings.VHOST_USER_MODE_CLIENT) else: vif_type = portbindings.VIF_TYPE_OVS vif_details = self.vif_details[vif_type] diff --git a/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/commands.py b/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/commands.py index 10cdc3f031..b3a7bdca80 100644 --- a/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/commands.py 
+++ b/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/commands.py @@ -836,19 +836,8 @@ class DeleteLRouterExtGwCommand(command.BaseCommand): lrouter.delvalue('nat', nat) nat.delete() - lrouter_ext_ids = getattr(lrouter, 'external_ids', {}) - gw_port_id = lrouter_ext_ids.get(ovn_const.OVN_GW_PORT_EXT_ID_KEY) - if not gw_port_id: - return - - try: - lrouter_port = idlutils.row_by_value( - self.api.idl, 'Logical_Router_Port', 'name', - utils.ovn_lrouter_port_name(gw_port_id)) - except idlutils.RowNotFound: - return - - lrouter.delvalue('ports', lrouter_port) + for gw_port in self.api.get_lrouter_gw_ports(lrouter.name): + lrouter.delvalue('ports', gw_port) class SetLSwitchPortToVirtualTypeCommand(command.BaseCommand): diff --git a/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/extensions/qos.py b/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/extensions/qos.py index 45da53c93d..1b663c5a64 100644 --- a/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/extensions/qos.py +++ b/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/extensions/qos.py @@ -425,13 +425,6 @@ class OVNClientQosExtension(object): def disassociate_floatingip(self, txn, floatingip): self.delete_floatingip(txn, floatingip) - def _delete_gateway_ip_qos_rules(self, txn, router_id, network_id): - if network_id: - lswitch_name = utils.ovn_name(network_id) - txn.add(self.nb_idl.qos_del_ext_ids( - lswitch_name, - {ovn_const.OVN_ROUTER_ID_EXT_ID_KEY: router_id})) - def create_router(self, txn, router): self.update_router(txn, router) @@ -465,10 +458,6 @@ class OVNClientQosExtension(object): # Delete, if exists, the QoS rule in this direction. 
txn.add(self.nb_idl.qos_del(**ovn_rule, if_exists=True)) - def delete_router(self, txn, router): - self._delete_gateway_ip_qos_rules(txn, router['id'], - router['gw_network_id']) - def update_policy(self, context, policy): updated_port_ids = set([]) updated_fip_ids = set([]) diff --git a/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/impl_idl_ovn.py b/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/impl_idl_ovn.py index be5e7db367..8c74661c15 100644 --- a/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/impl_idl_ovn.py +++ b/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/impl_idl_ovn.py @@ -785,6 +785,22 @@ class OvsdbNbOvnIdl(nb_impl_idl.OvnNbApiIdlImpl, Backend): result = lrp.execute(check_error=True) return result[0] if result else None + def get_lrouter_gw_ports(self, lrouter_name): + lr = self.get_lrouter(lrouter_name) + gw_ports = [] + for lrp in getattr(lr, 'ports', []): + lrp_ext_ids = getattr(lrp, 'external_ids', {}) + if (ovn_const.OVN_ROUTER_NAME_EXT_ID_KEY not in lrp_ext_ids or + utils.ovn_name(lrp_ext_ids[ + ovn_const.OVN_ROUTER_NAME_EXT_ID_KEY]) != lr.name): + continue + lrp_ha_cfg = (getattr(lrp, 'gateway_chassis', None) or + getattr(lrp, 'options', {}).get( + ovn_const.OVN_GATEWAY_CHASSIS_KEY)) + if lrp_ha_cfg: + gw_ports.append(lrp) + return gw_ports + def delete_lrouter_ext_gw(self, lrouter_name, if_exists=True): return cmd.DeleteLRouterExtGwCommand(self, lrouter_name, if_exists) diff --git a/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/maintenance.py b/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/maintenance.py index 29a3848b28..8f2e933a68 100644 --- a/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/maintenance.py +++ b/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/maintenance.py @@ -775,30 +775,31 @@ class DBInconsistenciesPeriodics(SchemaAwarePeriodicsBase): txn.add(cmd) raise periodics.NeverAgain() - # TODO(ralonsoh): Remove this in the Z+3 cycle. 
This method adds the - "external_ids:OVN_GW_NETWORK_EXT_ID_KEY" to each router that has - a gateway (that means, that has "external_ids:OVN_GW_PORT_EXT_ID_KEY"). + # TODO(fnordahl): Remove this in the A+3 cycle. This method removes the + # now redundant "external_ids:OVN_GW_NETWORK_EXT_ID_KEY" and + # "external_ids:OVN_GW_PORT_EXT_ID_KEY" from each router. # A static spacing value is used here, but this method will only run # once per lock due to the use of periodics.NeverAgain(). @periodics.periodic(spacing=600, run_immediately=True) - def update_logical_router_with_gateway_network_id(self): - """Update all OVN logical router registers with the GW network ID""" + def remove_gw_ext_ids_from_logical_router(self): + """Remove `gw_port_id` and `gw_network_id` external_ids from LRs""" if not self.has_lock: return cmds = [] - context = n_context.get_admin_context() for lr in self._nb_idl.lr_list().execute(check_error=True): - gw_port = lr.external_ids.get(ovn_const.OVN_GW_PORT_EXT_ID_KEY) - gw_net = lr.external_ids.get(ovn_const.OVN_GW_NETWORK_EXT_ID_KEY) - if not gw_port or (gw_port and gw_net): - # This router does not have a gateway network assigned yet or - # it has a gateway port and its corresponding network. + if (ovn_const.OVN_GW_PORT_EXT_ID_KEY not in lr.external_ids and + ovn_const.OVN_GW_NETWORK_EXT_ID_KEY not in + lr.external_ids): + # This router has none of the deprecated external_ids. 
continue - port = self._ovn_client._plugin.get_port(context, gw_port) - external_ids = { - ovn_const.OVN_GW_NETWORK_EXT_ID_KEY: port['network_id']} + external_ids = lr.external_ids.copy() + for k in (ovn_const.OVN_GW_PORT_EXT_ID_KEY, + ovn_const.OVN_GW_NETWORK_EXT_ID_KEY): + if k in external_ids: + del(external_ids[k]) + cmds.append(self._nb_idl.db_set( 'Logical_Router', lr.uuid, ('external_ids', external_ids))) diff --git a/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/ovn_client.py b/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/ovn_client.py index 0e2603919d..37898652df 100644 --- a/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/ovn_client.py +++ b/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/ovn_client.py @@ -1329,20 +1329,17 @@ class OVNClient(object): 'device_owner': [const.DEVICE_OWNER_ROUTER_GW], 'device_id': [router_id]}) - def _get_router_ports(self, context, router_id, get_gw_port=False): + def _get_router_ports(self, context, router_id): # _get_router() will raise a RouterNotFound error if there's no router # with the router_id router_db = self._l3_plugin._get_router(context, router_id) - if get_gw_port: - return [p.port for p in router_db.attached_ports] - else: - # When the existing deployment is migrated to OVN - # we may need to consider other port types - DVR_INTERFACE/HA_INTF. - return [p.port for p in router_db.attached_ports - if p.port_type in [const.DEVICE_OWNER_ROUTER_INTF, - const.DEVICE_OWNER_DVR_INTERFACE, - const.DEVICE_OWNER_HA_REPLICATED_INT, - const.DEVICE_OWNER_ROUTER_HA_INTF]] + # When the existing deployment is migrated to OVN + # we may need to consider other port types - DVR_INTERFACE/HA_INTF. 
+ return [p.port for p in router_db.attached_ports + if p.port_type in [const.DEVICE_OWNER_ROUTER_INTF, + const.DEVICE_OWNER_DVR_INTERFACE, + const.DEVICE_OWNER_HA_REPLICATED_INT, + const.DEVICE_OWNER_ROUTER_HA_INTF]] def _get_v4_network_for_router_port(self, context, port): cidr = None @@ -1366,18 +1363,13 @@ class OVNClient(object): return networks def _gen_router_ext_ids(self, router): - gw_net_id = (router.get('external_gateway_info') or - {}).get('network_id') or '' return { ovn_const.OVN_ROUTER_NAME_EXT_ID_KEY: router.get('name', 'no_router_name'), - ovn_const.OVN_GW_PORT_EXT_ID_KEY: - router.get('gw_port_id') or '', ovn_const.OVN_REV_NUM_EXT_ID_KEY: str(utils.get_revision_number( router, ovn_const.TYPE_ROUTERS)), ovn_const.OVN_AZ_HINTS_EXT_ID_KEY: ','.join(common_utils.get_az_hints(router)), - ovn_const.OVN_GW_NETWORK_EXT_ID_KEY: gw_net_id, } def create_router(self, context, router, add_external_gateway=True): @@ -1498,13 +1490,8 @@ class OVNClient(object): def delete_router(self, context, router_id): """Delete a logical router.""" lrouter_name = utils.ovn_name(router_id) - ovn_router = self._nb_idl.get_lrouter(lrouter_name) - gw_network_id = ovn_router.external_ids.get( - ovn_const.OVN_GW_NETWORK_EXT_ID_KEY) if ovn_router else None - router_dict = {'id': router_id, 'gw_network_id': gw_network_id} with self._nb_idl.transaction(check_error=True) as txn: txn.add(self._nb_idl.delete_lrouter(lrouter_name)) - self._qos_driver.delete_router(txn, router_dict) db_rev.delete_revision(context, router_id, ovn_const.TYPE_ROUTERS) def get_candidates_for_scheduling(self, physnet, cms=None, diff --git a/neutron/tests/functional/agent/ovn/metadata/test_metadata_agent.py b/neutron/tests/functional/agent/ovn/metadata/test_metadata_agent.py index bfa4747038..d0d4aebac3 100644 --- a/neutron/tests/functional/agent/ovn/metadata/test_metadata_agent.py +++ b/neutron/tests/functional/agent/ovn/metadata/test_metadata_agent.py @@ -51,6 +51,18 @@ class 
MetadataAgentHealthEvent(event.WaitEvent): ovn_const.OVN_AGENT_METADATA_SB_CFG_KEY, 0)) >= self.sb_cfg +class MetadataPortCreateEvent(event.WaitEvent): + event_name = 'MetadataPortCreateEvent' + + def __init__(self, metadata_port, timeout=5): + table = 'Port_Binding' + events = (self.ROW_CREATE,) + conditions = (('logical_port', '=', metadata_port),) + super(MetadataPortCreateEvent, self).__init__( + events, table, conditions, timeout=timeout + ) + + class TestMetadataAgent(base.TestOVNFunctionalBase): OVN_BRIDGE = 'br-int' FAKE_CHASSIS_HOST = 'ovn-host-fake' @@ -132,8 +144,8 @@ class TestMetadataAgent(base.TestOVNFunctionalBase): # chassis with the nb_cfg, 1 revisions when listing the agents. self.assertTrue(row_event.wait()) - def _create_metadata_port(self, txn, lswitch_name): - mdt_port_name = 'ovn-mdt-' + uuidutils.generate_uuid() + def _create_metadata_port(self, txn, lswitch_name, port_name=None): + mdt_port_name = port_name or 'ovn-mdt-' + uuidutils.generate_uuid() txn.add( self.nb_api.lsp_add( lswitch_name, @@ -144,7 +156,6 @@ class TestMetadataAgent(base.TestOVNFunctionalBase): ovn_const.OVN_CIDRS_EXT_ID_KEY: '192.168.122.123/24', ovn_const.OVN_DEVID_EXT_ID_KEY: 'ovnmeta-' + lswitch_name })) - return mdt_port_name def _update_metadata_port_ip(self, metadata_port_name): external_ids = { @@ -224,7 +235,14 @@ class TestMetadataAgent(base.TestOVNFunctionalBase): if update and type_ == ovn_const.LSP_TYPE_LOCALPORT: with self.nb_api.transaction( check_error=True, log_errors=True) as txn: - mdt_port_name = self._create_metadata_port(txn, lswitch_name) + mdt_port_name = 'ovn-mdt-' + uuidutils.generate_uuid() + metadata_port_create_event = MetadataPortCreateEvent( + mdt_port_name) + self.agent.sb_idl.idl.notify_handler.watch_event( + metadata_port_create_event) + self._create_metadata_port(txn, lswitch_name, mdt_port_name) + self.assertTrue(metadata_port_create_event.wait()) + self.sb_api.lsp_bind(mdt_port_name, self.chassis_name).execute( check_error=True, 
log_errors=True) self._update_metadata_port_ip(mdt_port_name) diff --git a/neutron/tests/functional/base.py b/neutron/tests/functional/base.py index 8321c2a6ee..8ef793989e 100644 --- a/neutron/tests/functional/base.py +++ b/neutron/tests/functional/base.py @@ -21,13 +21,9 @@ from unittest import mock import warnings import fixtures -from neutron_lib import fixture from neutron_lib.plugins import constants from neutron_lib.plugins import directory -from oslo_concurrency import lockutils from oslo_config import cfg -from oslo_db import exception as os_db_exc -from oslo_db.sqlalchemy import provision from oslo_log import log from oslo_utils import timeutils from oslo_utils import uuidutils @@ -61,7 +57,6 @@ LOG = log.getLogger(__name__) # This is the directory from which infra fetches log files for functional tests DEFAULT_LOG_DIR = os.path.join(helpers.get_test_log_path(), 'dsvm-functional-logs') -SQL_FIXTURE_LOCK = 'sql_fixture_lock' def config_decorator(method_to_decorate, config_tuples): @@ -134,27 +129,6 @@ class BaseSudoTestCase(BaseLoggingTestCase): new=ovs_agent_decorator).start() -class OVNSqlFixture(fixture.StaticSqlFixture): - - @classmethod - @lockutils.synchronized(SQL_FIXTURE_LOCK) - def _init_resources(cls): - cls.schema_resource = provision.SchemaResource( - provision.DatabaseResource("sqlite"), - cls._generate_schema, teardown=False) - dependency_resources = {} - for name, resource in cls.schema_resource.resources: - dependency_resources[name] = resource.getResource() - cls.schema_resource.make(dependency_resources) - cls.engine = dependency_resources['database'].engine - - def _delete_from_schema(self, engine): - try: - super(OVNSqlFixture, self)._delete_from_schema(engine) - except os_db_exc.DBNonExistentTable: - pass - - class TestOVNFunctionalBase(test_plugin.Ml2PluginV2TestCase, BaseLoggingTestCase): @@ -251,16 +225,6 @@ class TestOVNFunctionalBase(test_plugin.Ml2PluginV2TestCase, raise FileNotFoundError( errno.ENOENT, os.strerror(errno.ENOENT), 
msg) - # FIXME(lucasagomes): Workaround for - # https://bugs.launchpad.net/networking-ovn/+bug/1808146. We should - # investigate and properly fix the problem. This method is just a - # workaround to alleviate the gate for now and should not be considered - # a proper fix. - def _setup_database_fixtures(self): - fixture = OVNSqlFixture() - self.useFixture(fixture) - self.engine = fixture.engine - def get_additional_service_plugins(self): p = super(TestOVNFunctionalBase, self).get_additional_service_plugins() p.update({'revision_plugin_name': 'revisions', diff --git a/neutron/tests/unit/agent/dhcp/test_agent.py b/neutron/tests/unit/agent/dhcp/test_agent.py index ecfd37b17c..7bf48f017f 100644 --- a/neutron/tests/unit/agent/dhcp/test_agent.py +++ b/neutron/tests/unit/agent/dhcp/test_agent.py @@ -37,6 +37,7 @@ from neutron.agent.linux import dhcp from neutron.agent.linux import interface from neutron.agent.linux import utils as linux_utils from neutron.agent.metadata import driver as metadata_driver +from neutron.common import _constants as common_constants from neutron.common import config as common_config from neutron.common import utils from neutron.conf.agent import common as config @@ -1929,7 +1930,7 @@ class TestDeviceManager(base.BaseTestCase): expected_ips = ['172.9.9.9/24', const.METADATA_CIDR] if ipv6_enabled: - expected_ips.append(const.METADATA_V6_CIDR) + expected_ips.append(common_constants.METADATA_V6_CIDR) expected = [mock.call.get_device_name(port)] diff --git a/neutron/tests/unit/agent/linux/test_dhcp.py b/neutron/tests/unit/agent/linux/test_dhcp.py index 064461b649..98ed27d93b 100644 --- a/neutron/tests/unit/agent/linux/test_dhcp.py +++ b/neutron/tests/unit/agent/linux/test_dhcp.py @@ -33,6 +33,7 @@ import testtools from neutron.agent.linux import dhcp from neutron.agent.linux import ip_lib from neutron.cmd import runtime_checks as checks +from neutron.common import _constants as common_constants from neutron.common import utils as common_utils from 
neutron.conf.agent import common as config from neutron.conf.agent import dhcp as dhcp_config @@ -3295,7 +3296,7 @@ class TestDeviceManager(TestConfBase): if enable_isolated_metadata or force_metadata: expect_ips.extend([ constants.METADATA_CIDR, - constants.METADATA_V6_CIDR]) + common_constants.METADATA_V6_CIDR]) mgr.driver.init_l3.assert_called_with('ns-XXX', expect_ips, namespace='qdhcp-ns') diff --git a/neutron/tests/unit/agent/linux/test_ip_lib.py b/neutron/tests/unit/agent/linux/test_ip_lib.py index da754464c3..c488e90ddc 100644 --- a/neutron/tests/unit/agent/linux/test_ip_lib.py +++ b/neutron/tests/unit/agent/linux/test_ip_lib.py @@ -792,7 +792,7 @@ class TestIpAddrCommand(TestIPCmdBase): def test_wait_until_address_dadfailed(self): self.addr_cmd.list = mock.Mock( return_value=[{'tentative': True, 'dadfailed': True}]) - with testtools.ExpectedException(ip_lib.AddressNotReady): + with testtools.ExpectedException(ip_lib.DADFailed): self.addr_cmd.wait_until_address_ready('abcd::1234') @mock.patch.object(common_utils, 'wait_until_true') diff --git a/neutron/tests/unit/agent/metadata/test_driver.py b/neutron/tests/unit/agent/metadata/test_driver.py index fc59b7fee8..e3b0b8ef6e 100644 --- a/neutron/tests/unit/agent/metadata/test_driver.py +++ b/neutron/tests/unit/agent/metadata/test_driver.py @@ -25,6 +25,7 @@ from oslo_utils import uuidutils from neutron.agent.l3 import agent as l3_agent from neutron.agent.l3 import router_info from neutron.agent.linux import external_process as ep +from neutron.agent.linux import ip_lib from neutron.agent.linux import iptables_manager from neutron.agent.linux import utils as linux_utils from neutron.agent.metadata import driver as metadata_driver @@ -76,6 +77,7 @@ class TestMetadataDriverProcess(base.BaseTestCase): EUNAME = 'neutron' EGNAME = 'neutron' METADATA_DEFAULT_IP = '169.254.169.254' + METADATA_DEFAULT_IPV6 = 'fe80::a9fe:a9fe' METADATA_PORT = 8080 METADATA_SOCKET = '/socket/path' PIDFILE = 'pidfile' @@ -140,7 +142,7 @@ 
class TestMetadataDriverProcess(base.BaseTestCase): agent._process_updated_router(router) f.assert_not_called() - def test_spawn_metadata_proxy(self): + def _test_spawn_metadata_proxy(self, dad_failed=False): router_id = _uuid() router_ns = 'qrouter-%s' % router_id service_name = 'haproxy' @@ -165,22 +167,32 @@ class TestMetadataDriverProcess(base.BaseTestCase): 'NamespaceManager.list_all', return_value={}),\ mock.patch( 'neutron.agent.linux.ip_lib.' - 'IpAddrCommand.wait_until_address_ready') as mock_wait: + 'IpAddrCommand.wait_until_address_ready') as mock_wait,\ + mock.patch( + 'neutron.agent.linux.ip_lib.' + 'delete_ip_address') as mock_del: agent = l3_agent.L3NATAgent('localhost') + agent.process_monitor = mock.Mock() cfg_file = os.path.join( metadata_driver.HaproxyConfigurator.get_config_path( agent.conf.state_path), "%s.conf" % router_id) mock_open = self.useFixture( lib_fixtures.OpenFixture(cfg_file)).mock_open - mock_wait.return_value = True + if dad_failed: + mock_wait.side_effect = ip_lib.DADFailed( + address=self.METADATA_DEFAULT_IP, reason='DAD failed') + else: + mock_wait.return_value = True agent.metadata_driver.spawn_monitored_metadata_proxy( agent.process_monitor, router_ns, self.METADATA_PORT, agent.conf, bind_address=self.METADATA_DEFAULT_IP, - router_id=router_id) + router_id=router_id, + bind_address_v6=self.METADATA_DEFAULT_IPV6, + bind_interface='fake-if') netns_execute_args = [ service_name, @@ -188,6 +200,8 @@ class TestMetadataDriverProcess(base.BaseTestCase): log_tag = ("haproxy-" + metadata_driver.METADATA_SERVICE_NAME + "-" + router_id) + bind_v6_line = 'bind %s:%s interface %s' % ( + self.METADATA_DEFAULT_IPV6, self.METADATA_PORT, 'fake-if') cfg_contents = metadata_driver._HAPROXY_CONFIG_TEMPLATE % { 'user': self.EUNAME, 'group': self.EGNAME, @@ -200,19 +214,35 @@ class TestMetadataDriverProcess(base.BaseTestCase): 'pidfile': self.PIDFILE, 'log_level': 'debug', 'log_tag': log_tag, - 'bind_v6_line': ''} - - mock_open.assert_has_calls([ 
- mock.call(cfg_file, 'w'), - mock.call().write(cfg_contents)], - any_order=True) - - env = {ep.PROCESS_TAG: service_name + '-' + router_id} - ip_mock.assert_has_calls([ - mock.call(namespace=router_ns), - mock.call().netns.execute(netns_execute_args, addl_env=env, - run_as_root=True) - ]) + 'bind_v6_line': bind_v6_line} + + if dad_failed: + agent.process_monitor.register.assert_not_called() + mock_del.assert_called_once_with(self.METADATA_DEFAULT_IPV6, + 'fake-if', + namespace=router_ns) + else: + mock_open.assert_has_calls([ + mock.call(cfg_file, 'w'), + mock.call().write(cfg_contents)], any_order=True) + + env = {ep.PROCESS_TAG: service_name + '-' + router_id} + ip_mock.assert_has_calls([ + mock.call(namespace=router_ns), + mock.call().netns.execute(netns_execute_args, addl_env=env, + run_as_root=True) + ]) + + agent.process_monitor.register.assert_called_once_with( + router_id, metadata_driver.METADATA_SERVICE_NAME, + mock.ANY) + mock_del.assert_not_called() + + def test_spawn_metadata_proxy(self): + self._test_spawn_metadata_proxy() + + def test_spawn_metadata_proxy_dad_failed(self): + self._test_spawn_metadata_proxy(dad_failed=True) def test_create_config_file_wrong_user(self): with mock.patch('pwd.getpwnam', side_effect=KeyError): diff --git a/neutron/tests/unit/fake_resources.py b/neutron/tests/unit/fake_resources.py index 2e2737fe63..ff2e0c5401 100644 --- a/neutron/tests/unit/fake_resources.py +++ b/neutron/tests/unit/fake_resources.py @@ -163,6 +163,7 @@ class FakeOvsdbNbOvnIdl(object): self.ha_chassis_group_del = mock.Mock() self.ha_chassis_group_add_chassis = mock.Mock() self.ha_chassis_group_del_chassis = mock.Mock() + self.get_lrouter_gw_ports = mock.Mock() class FakeOvsdbSbOvnIdl(object): diff --git a/neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_commands.py b/neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_commands.py index 39bf2a2334..864099f3b8 100644 --- 
a/neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_commands.py +++ b/neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_commands.py @@ -1214,6 +1214,7 @@ class TestDeleteLRouterExtGwCommand(TestBaseCommand): fake_lrouter = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs={'static_routes': [fake_route_1, fake_route_2], 'nat': []}) + self.ovn_api.get_lrouter_gw_ports.return_value = [] with mock.patch.object(self.ovn_api, "is_col_present", return_value=True): with mock.patch.object(idlutils, 'row_by_value', @@ -1234,6 +1235,7 @@ class TestDeleteLRouterExtGwCommand(TestBaseCommand): fake_lrouter = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs={'nat': [fake_nat_1, fake_nat_2], 'static_routes': []}) + self.ovn_api.get_lrouter_gw_ports.return_value = [] with mock.patch.object(self.ovn_api, "is_col_present", return_value=True): with mock.patch.object(idlutils, 'row_by_value', @@ -1246,10 +1248,11 @@ class TestDeleteLRouterExtGwCommand(TestBaseCommand): def test_delete_lrouter_extgw_ports(self): port_id = 'fake-port-id' + fake_lrp = fakes.FakeOvsdbRow.create_one_ovsdb_row( + attrs={'gateway_chassis': ['fake_gwc']}) fake_lrouter = fakes.FakeOvsdbRow.create_one_ovsdb_row( - attrs={'external_ids': - {ovn_const.OVN_GW_PORT_EXT_ID_KEY: port_id}, - 'static_routes': [], 'nat': []}) + attrs={'ports': [fake_lrp], 'static_routes': [], 'nat': []}) + self.ovn_api.get_lrouter_gw_ports.return_value = [fake_lrp] with mock.patch.object(self.ovn_api, "is_col_present", return_value=True): with mock.patch.object(idlutils, 'row_by_value', @@ -1258,22 +1261,21 @@ class TestDeleteLRouterExtGwCommand(TestBaseCommand): self.ovn_api, fake_lrouter.name, False) cmd.run_idl(self.transaction) fake_lrouter.delvalue.assert_called_once_with( - 'ports', port_id) + 'ports', fake_lrp) def test_delete_lrouter_extgw_ports_not_found(self): - port_id = 'fake-port-id' fake_lrouter = fakes.FakeOvsdbRow.create_one_ovsdb_row( - attrs={'external_ids': - 
{ovn_const.OVN_GW_PORT_EXT_ID_KEY: port_id}, - 'static_routes': [], 'nat': []}) + attrs={'static_routes': [], 'nat': []}) + self.ovn_api.get_lrouter_gw_ports.return_value = [] with mock.patch.object(self.ovn_api, "is_col_present", return_value=True): with mock.patch.object(idlutils, 'row_by_value', - side_effect=[fake_lrouter, - idlutils.RowNotFound]): + side_effect=[fake_lrouter]): cmd = commands.DeleteLRouterExtGwCommand( self.ovn_api, fake_lrouter.name, False) cmd.run_idl(self.transaction) + self.ovn_api.get_lrouter_gw_ports.assert_called_once_with( + fake_lrouter.name) fake_lrouter.delvalue.assert_not_called() def _test_delete_lrouter_no_lrouter_exist(self, if_exists=True): diff --git a/neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_impl_idl_ovn.py b/neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_impl_idl_ovn.py index 209a41bf98..2854672389 100644 --- a/neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_impl_idl_ovn.py +++ b/neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_impl_idl_ovn.py @@ -161,7 +161,10 @@ class TestNBImplIdlOvn(TestDBImplIdlOvn): 'lr-name-d'}}, {'name': utils.ovn_name('lr-id-e'), 'external_ids': {ovn_const.OVN_ROUTER_NAME_EXT_ID_KEY: - 'lr-name-e'}}], + 'lr-name-e'}}, + {'name': utils.ovn_name('lr-id-f'), + 'external_ids': {ovn_const.OVN_ROUTER_NAME_EXT_ID_KEY: + 'lr-name-f'}}], 'lrouter_ports': [ {'name': utils.ovn_lrouter_port_name('orp-id-a1'), 'external_ids': {ovn_const.OVN_ROUTER_NAME_EXT_ID_KEY: @@ -169,10 +172,14 @@ class TestNBImplIdlOvn(TestDBImplIdlOvn): 'networks': ['10.0.1.0/24'], 'options': {ovn_const.OVN_GATEWAY_CHASSIS_KEY: 'host-1'}}, {'name': utils.ovn_lrouter_port_name('orp-id-a2'), - 'external_ids': {}, 'networks': ['10.0.2.0/24'], + 'external_ids': {ovn_const.OVN_ROUTER_NAME_EXT_ID_KEY: + 'lr-id-a'}, + 'networks': ['10.0.2.0/24'], 'options': {ovn_const.OVN_GATEWAY_CHASSIS_KEY: 'host-1'}}, {'name': utils.ovn_lrouter_port_name('orp-id-a3'), - 
'external_ids': {}, 'networks': ['10.0.3.0/24'], + 'external_ids': {ovn_const.OVN_ROUTER_NAME_EXT_ID_KEY: + 'lr-id-a'}, + 'networks': ['10.0.3.0/24'], 'options': {ovn_const.OVN_GATEWAY_CHASSIS_KEY: ovn_const.OVN_GATEWAY_INVALID_CHASSIS}}, {'name': 'xrp-id-b1', @@ -182,7 +189,15 @@ class TestNBImplIdlOvn(TestDBImplIdlOvn): 'options': {ovn_const.OVN_GATEWAY_CHASSIS_KEY: 'host-2'}}, {'name': utils.ovn_lrouter_port_name('orp-id-b3'), 'external_ids': {}, 'networks': ['20.0.3.0/24'], + 'options': {}}, + {'name': utils.ovn_lrouter_port_name('gwc'), + 'external_ids': {ovn_const.OVN_ROUTER_NAME_EXT_ID_KEY: + 'lr-id-f'}, + 'networks': ['10.0.4.0/24'], 'options': {}}], + 'gateway_chassis': [ + {'chassis_name': 'fake-chassis', + 'name': utils.ovn_lrouter_port_name('gwc') + '_fake-chassis'}], 'static_routes': [{'ip_prefix': '20.0.0.0/16', 'nexthop': '10.0.3.253'}, {'ip_prefix': '10.0.0.0/16', @@ -317,7 +332,12 @@ class TestNBImplIdlOvn(TestDBImplIdlOvn): utils.ovn_lrouter_port_name('orp-id-a3')], utils.ovn_name('lr-id-b'): [ 'xrp-id-b1', - utils.ovn_lrouter_port_name('orp-id-b2')]}, + utils.ovn_lrouter_port_name('orp-id-b2')], + utils.ovn_name('lr-id-f'): [ + utils.ovn_lrouter_port_name('gwc')]}, + 'lrptogwc': { + utils.ovn_lrouter_port_name('gwc'): [ + utils.ovn_lrouter_port_name('gwc') + '_fake-chassis']}, 'lrtosroute': { utils.ovn_name('lr-id-a'): ['20.0.0.0/16'], utils.ovn_name('lr-id-b'): ['10.0.0.0/16'] @@ -346,6 +366,7 @@ class TestNBImplIdlOvn(TestDBImplIdlOvn): self.dhcp_table = fakes.FakeOvsdbTable.create_one_ovsdb_table() self.address_set_table = fakes.FakeOvsdbTable.create_one_ovsdb_table() self.lb_table = fakes.FakeOvsdbTable.create_one_ovsdb_table() + self.gwc_table = fakes.FakeOvsdbTable.create_one_ovsdb_table() self._tables = {} self._tables['Logical_Switch'] = self.lswitch_table @@ -358,6 +379,7 @@ class TestNBImplIdlOvn(TestDBImplIdlOvn): self._tables['Address_Set'] = self.address_set_table self._tables['Load_Balancer'] = self.lb_table self._tables['NAT'] = 
self.nat_table + self._tables['Gateway_Chassis'] = self.gwc_table with mock.patch.object(impl_idl_ovn.OvsdbNbOvnIdl, 'from_worker', return_value=mock.Mock()): @@ -379,16 +401,23 @@ class TestNBImplIdlOvn(TestDBImplIdlOvn): TestNBImplIdlOvn.fake_associations['lstolsp'], self.lswitch_table, self.lsp_table, 'name', 'name', 'ports') - # Load Routers and Router Ports + # Load Routers, Router Ports and Gateway Chassis fake_lrouters = TestNBImplIdlOvn.fake_set['lrouters'] self._load_ovsdb_fake_rows(self.lrouter_table, fake_lrouters) fake_lrps = TestNBImplIdlOvn.fake_set['lrouter_ports'] self._load_ovsdb_fake_rows(self.lrp_table, fake_lrps) + fake_gwc = TestNBImplIdlOvn.fake_set['gateway_chassis'] + self._load_ovsdb_fake_rows(self.gwc_table, fake_gwc) # Associate routers and router ports self._construct_ovsdb_references( TestNBImplIdlOvn.fake_associations['lrtolrp'], self.lrouter_table, self.lrp_table, 'name', 'name', 'ports') + # Associate router ports and gateway chassis + self._construct_ovsdb_references( + TestNBImplIdlOvn.fake_associations['lrptogwc'], + self.lrp_table, self.gwc_table, + 'name', 'name', 'gateway_chassis') # Load static routes fake_sroutes = TestNBImplIdlOvn.fake_set['static_routes'] self._load_ovsdb_fake_rows(self.sroute_table, fake_sroutes) @@ -484,6 +513,9 @@ class TestNBImplIdlOvn(TestDBImplIdlOvn): {'name': 'lr-id-d', 'ports': {}, 'static_routes': [], 'snats': [], 'dnat_and_snats': []}, {'name': 'lr-id-e', 'ports': {}, 'static_routes': [], + 'snats': [], 'dnat_and_snats': []}, + {'name': 'lr-id-f', 'static_routes': [], + 'ports': {'gwc': ['10.0.4.0/24']}, 'snats': [], 'dnat_and_snats': []}] self.assertCountEqual(mapping, expected) @@ -556,6 +588,11 @@ class TestNBImplIdlOvn(TestDBImplIdlOvn): def test_get_all_chassis_gateway_bindings(self): self._load_nb_db() + + # NOTE(fnordahl): The `Gateway_Chassis` table being present without + # proper associations fools the test, remove for now. 
+ del(self._tables['Gateway_Chassis']) + bindings = self.nb_ovn_idl.get_all_chassis_gateway_bindings() expected = {'host-1': [utils.ovn_lrouter_port_name('orp-id-a1'), utils.ovn_lrouter_port_name('orp-id-a2')], @@ -574,6 +611,11 @@ class TestNBImplIdlOvn(TestDBImplIdlOvn): def test_get_gateway_chassis_binding(self): self._load_nb_db() + + # NOTE(fnordahl): The `Gateway_Chassis` table being present without + # proper associations fools the test, remove for now. + del(self._tables['Gateway_Chassis']) + chassis = self.nb_ovn_idl.get_gateway_chassis_binding( utils.ovn_lrouter_port_name('orp-id-a1')) self.assertEqual(chassis, ['host-1']) @@ -591,6 +633,11 @@ class TestNBImplIdlOvn(TestDBImplIdlOvn): def test_get_unhosted_gateways(self): self._load_nb_db() + + # NOTE(fnordahl): The `Gateway_Chassis` table being present without + # proper associations fools the test, remove for now. + del(self._tables['Gateway_Chassis']) + # Port physnet-dict port_physnet_dict = { 'orp-id-a1': 'physnet1', # scheduled @@ -626,6 +673,11 @@ class TestNBImplIdlOvn(TestDBImplIdlOvn): def test_get_unhosted_gateways_deleted_physnet(self): self._load_nb_db() + + # NOTE(fnordahl): The `Gateway_Chassis` table being present without + # proper associations fools the test, remove for now. 
+ del(self._tables['Gateway_Chassis']) + # The LRP is on host-2 now router_row = self._find_ovsdb_fake_row(self.lrp_table, 'name', 'lrp-orp-id-a1') @@ -813,6 +865,29 @@ class TestNBImplIdlOvn(TestDBImplIdlOvn): lb = self.nb_ovn_idl.get_floatingip_in_nat_or_lb(fip_id) self.assertEqual(lb['_uuid'], lb_row.uuid) + def test_get_lrouter_gw_ports_legacy_option(self): + self._load_nb_db() + + gw1_row = self._find_ovsdb_fake_row( + self.lrp_table, 'name', utils.ovn_lrouter_port_name('orp-id-a1')) + gw2_row = self._find_ovsdb_fake_row( + self.lrp_table, 'name', utils.ovn_lrouter_port_name('orp-id-a2')) + gw3_row = self._find_ovsdb_fake_row( + self.lrp_table, 'name', utils.ovn_lrouter_port_name('orp-id-a3')) + + gw_ports = self.nb_ovn_idl.get_lrouter_gw_ports( + utils.ovn_name('lr-id-a')) + self.assertEqual([gw1_row, gw2_row, gw3_row], gw_ports) + + def test_get_lrouter_gw_ports_gwc(self): + self._load_nb_db() + gw1_row = self._find_ovsdb_fake_row( + self.lrp_table, 'name', utils.ovn_lrouter_port_name('gwc')) + + gw_ports = self.nb_ovn_idl.get_lrouter_gw_ports( + utils.ovn_name('lr-id-f')) + self.assertEqual([gw1_row], gw_ports) + class TestSBImplIdlOvnBase(TestDBImplIdlOvn): diff --git a/neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_maintenance.py b/neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_maintenance.py index b22e0a5470..302519c4f2 100644 --- a/neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_maintenance.py +++ b/neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_maintenance.py @@ -716,18 +716,18 @@ class TestDBInconsistenciesPeriodics(testlib_api.SqlTestCaseLight, self.fake_ovn_client._nb_idl.db_set.assert_has_calls( expected_calls) - def test_update_logical_router_with_gateway_network_id(self): + def test_remove_gw_ext_ids_from_logical_router(self): nb_idl = self.fake_ovn_client._nb_idl - # lr0: GW port ID, not GW network ID --> we need to add network ID. 
+ # lr0: GW port ID, not GW network ID --> we need to remove port ID. lr0 = fakes.FakeOvsdbRow.create_one_ovsdb_row(attrs={ 'name': 'lr0', 'external_ids': {constants.OVN_GW_PORT_EXT_ID_KEY: 'port0'}}) - # lr1: GW port ID and not GW network ID --> register already updated. + # lr1: GW port ID and GW network ID --> we need to remove both. lr1 = fakes.FakeOvsdbRow.create_one_ovsdb_row(attrs={ 'name': 'lr1', 'external_ids': {constants.OVN_GW_PORT_EXT_ID_KEY: 'port1', constants.OVN_GW_NETWORK_EXT_ID_KEY: 'net1'}}) - # lr2: no GW port ID (nor GW network ID) --> no QoS. + # lr2: no GW port ID (nor GW network ID) --> no action needed. lr2 = fakes.FakeOvsdbRow.create_one_ovsdb_row(attrs={ 'name': 'lr2', 'external_ids': {}}) nb_idl.lr_list.return_value.execute.return_value = (lr0, lr1, lr2) @@ -736,10 +736,11 @@ class TestDBInconsistenciesPeriodics(testlib_api.SqlTestCaseLight, self.assertRaises( periodics.NeverAgain, - self.periodic.update_logical_router_with_gateway_network_id) - ext_ids = {constants.OVN_GW_NETWORK_EXT_ID_KEY: 'net0'} + self.periodic.remove_gw_ext_ids_from_logical_router) expected_calls = [mock.call('Logical_Router', lr0.uuid, - ('external_ids', ext_ids))] + ('external_ids', {})), + mock.call('Logical_Router', lr1.uuid, + ('external_ids', {}))] nb_idl.db_set.assert_has_calls(expected_calls) def _test_check_baremetal_ports_dhcp_options(self, dhcp_disabled=False): diff --git a/neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/test_mech_driver.py b/neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/test_mech_driver.py index 72e1ce4b74..38d28bc8ed 100644 --- a/neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/test_mech_driver.py +++ b/neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/test_mech_driver.py @@ -1251,6 +1251,28 @@ class TestOVNMechanismDriver(TestOVNMechanismDriverBase): portbindings.VIF_TYPE_OVS, self.mech_driver.vif_details[portbindings.VIF_TYPE_OVS]) + def _test_bind_port_virtio_forwarder(self, fake_segments): + fake_port = 
fakes.FakePort.create_one_port( + attrs={'binding:vnic_type': 'virtio-forwarder'}).info() + fake_host = 'host' + fake_port_context = fakes.FakePortContext( + fake_port, fake_host, fake_segments) + self.mech_driver.bind_port(fake_port_context) + + vif_details = self.mech_driver.\ + vif_details[portbindings.VIF_TYPE_AGILIO_OVS] + vif_details.update({"vhostuser_socket": ovn_utils.ovn_vhu_sockpath( + ovn_conf.get_ovn_vhost_sock_dir(), fake_port['id'])}) + vif_details.update({"vhostuser_mode": "client"}) + + neutron_agent.AgentCache().get_agents.assert_called_once_with( + {'host': fake_host, + 'agent_type': ovn_const.OVN_CONTROLLER_TYPES}) + fake_port_context.set_binding.assert_called_once_with( + fake_segments[0]['id'], + portbindings.VIF_TYPE_AGILIO_OVS, + vif_details) + def _test_bind_port_remote_managed(self, fake_segments): fake_serial = 'fake-serial' fake_port = fakes.FakePort.create_one_port( @@ -1330,6 +1352,15 @@ class TestOVNMechanismDriver(TestOVNMechanismDriverBase): [fakes.FakeSegment.create_one_segment(attrs=segment_attrs).info()] self._test_bind_port_remote_managed(fake_segments) + def test_bind_virtio_forwarder_port_geneve(self): + """Test binding a VIRTIO_FORWARDER port to a geneve segment.""" + segment_attrs = {'network_type': 'geneve', + 'physical_network': None, + 'segmentation_id': 1023} + fake_segments = \ + [fakes.FakeSegment.create_one_segment(attrs=segment_attrs).info()] + self._test_bind_port_virtio_forwarder(fake_segments) + def test_bind_remote_managed_port_vlan(self): """Test binding a REMOTE_MANAGED port to a geneve segment.""" segment_attrs = {'network_type': 'vlan', @@ -1363,6 +1394,15 @@ class TestOVNMechanismDriver(TestOVNMechanismDriverBase): [fakes.FakeSegment.create_one_segment(attrs=segment_attrs).info()] self._test_bind_port(fake_segments) + def test_bind_virtio_forwarder_port_vxlan(self): + """Test binding a VIRTIO_FORWARDER port to a vxlan segment.""" + segment_attrs = {'network_type': 'vxlan', + 'physical_network': None, + 
'segmentation_id': 1024} + fake_segments = \ + [fakes.FakeSegment.create_one_segment(attrs=segment_attrs).info()] + self._test_bind_port_virtio_forwarder(fake_segments) + def test__is_port_provisioning_required(self): fake_port = fakes.FakePort.create_one_port( attrs={'binding:vnic_type': 'normal', @@ -3974,6 +4014,10 @@ class TestOVNMechanismDriverSecurityGroup(MechDriverSetupBase, self._test_create_port_with_vnic_type( portbindings.VNIC_BAREMETAL) + def test_create_port_with_vnic_virtio_forwarder(self): + self._test_create_port_with_vnic_type( + portbindings.VNIC_VIRTIO_FORWARDER) + def test_update_port_with_sgs(self): with self.network() as n, self.subnet(n): sg1 = self._create_empty_sg('sg1') diff --git a/neutron/tests/unit/plugins/ml2/extensions/test_dns_integration.py b/neutron/tests/unit/plugins/ml2/extensions/test_dns_integration.py index 75783ad0b4..d04e525a4f 100644 --- a/neutron/tests/unit/plugins/ml2/extensions/test_dns_integration.py +++ b/neutron/tests/unit/plugins/ml2/extensions/test_dns_integration.py @@ -27,6 +27,7 @@ from oslo_config import cfg from oslo_utils import uuidutils import testtools +from neutron.common import config from neutron.objects import ports as port_obj from neutron.plugins.ml2.extensions import dns_integration from neutron.services.externaldns.drivers.designate import driver @@ -53,6 +54,7 @@ class DNSIntegrationTestCase(test_plugin.Ml2PluginV2TestCase): _domain = DNSDOMAIN def setUp(self): + config.register_common_config_options() cfg.CONF.set_override('extension_drivers', self._extension_drivers, group='ml2') diff --git a/neutron/tests/unit/plugins/ml2/test_plugin.py b/neutron/tests/unit/plugins/ml2/test_plugin.py index d355dac420..9e40801adc 100644 --- a/neutron/tests/unit/plugins/ml2/test_plugin.py +++ b/neutron/tests/unit/plugins/ml2/test_plugin.py @@ -49,6 +49,7 @@ import webob from neutron._i18n import _ from neutron.agent import rpc as agent_rpc +from neutron.common import config from neutron.common import utils from 
neutron.db import agents_db from neutron.db import ipam_pluggable_backend @@ -673,6 +674,7 @@ class TestMl2NetworksWithVlanTransparencyBase(TestMl2NetworksV2): 'vlan_transparent': 'True'}} def setUp(self, plugin=None): + config.register_common_config_options() cfg.CONF.set_override('vlan_transparent', True) super(TestMl2NetworksWithVlanTransparencyBase, self).setUp(plugin) diff --git a/neutron/tests/unit/services/ovn_l3/test_plugin.py b/neutron/tests/unit/services/ovn_l3/test_plugin.py index 29a81f22d7..a4f1512555 100644 --- a/neutron/tests/unit/services/ovn_l3/test_plugin.py +++ b/neutron/tests/unit/services/ovn_l3/test_plugin.py @@ -434,8 +434,6 @@ class TestOVNL3RouterPlugin(test_mech_driver.Ml2PluginV2TestCase): {'router': updated_data}) self.l3_inst._nb_ovn.update_lrouter.assert_called_once_with( 'neutron-router-id', enabled=True, external_ids={ - ovn_const.OVN_GW_PORT_EXT_ID_KEY: '', - ovn_const.OVN_GW_NETWORK_EXT_ID_KEY: '', ovn_const.OVN_REV_NUM_EXT_ID_KEY: '1', ovn_const.OVN_ROUTER_NAME_EXT_ID_KEY: 'router', ovn_const.OVN_AZ_HINTS_EXT_ID_KEY: ''}) @@ -456,8 +454,6 @@ class TestOVNL3RouterPlugin(test_mech_driver.Ml2PluginV2TestCase): 'neutron-router-id', enabled=False, external_ids={ovn_const.OVN_ROUTER_NAME_EXT_ID_KEY: 'test', ovn_const.OVN_REV_NUM_EXT_ID_KEY: '1', - ovn_const.OVN_GW_PORT_EXT_ID_KEY: '', - ovn_const.OVN_GW_NETWORK_EXT_ID_KEY: '', ovn_const.OVN_AZ_HINTS_EXT_ID_KEY: ''}) @mock.patch.object(utils, 'get_lrouter_non_gw_routes') @@ -551,8 +547,6 @@ class TestOVNL3RouterPlugin(test_mech_driver.Ml2PluginV2TestCase): external_ids = {ovn_const.OVN_ROUTER_NAME_EXT_ID_KEY: 'router', ovn_const.OVN_REV_NUM_EXT_ID_KEY: '1', - ovn_const.OVN_GW_PORT_EXT_ID_KEY: 'gw-port-id', - ovn_const.OVN_GW_NETWORK_EXT_ID_KEY: 'ext-network-id', ovn_const.OVN_AZ_HINTS_EXT_ID_KEY: ''} self.l3_inst._nb_ovn.create_lrouter.assert_called_once_with( 'neutron-router-id', external_ids=external_ids, diff --git a/releasenotes/notes/bug-1953165-6e848ea2c0398f56.yaml 
b/releasenotes/notes/bug-1953165-6e848ea2c0398f56.yaml new file mode 100644 index 0000000000..6c79c0daef --- /dev/null +++ b/releasenotes/notes/bug-1953165-6e848ea2c0398f56.yaml @@ -0,0 +1,16 @@ +--- +issues: + - | + The high availability of metadata service on isolated networks is limited + or non-existent. IPv4 metadata is redundant when the DHCP agent managing + it is redundant, but recovery is tied to the renewal of the DHCP lease, + making most recoveries very slow. IPv6 metadata is not redundant at all + as the IPv6 metadata address can only be configured in a single place at + a time as it is link-local. Multiple agents trying to configure it will + generate an IPv6 duplicate address detection failure. + + Administrators may observe the IPv6 metadata address in "dadfailed" state + in the DHCP namespace for this reason, which is only an indication it is + not highly available. Until a redesign is made to the isolated metadata + service there is no better deployment option. See `bug 1953165 + <https://bugs.launchpad.net/neutron/+bug/1953165>`_ for more information. 
diff --git a/zuul.d/base.yaml b/zuul.d/base.yaml index 64301d9990..073ea3218f 100644 --- a/zuul.d/base.yaml +++ b/zuul.d/base.yaml @@ -117,12 +117,14 @@ - job: name: neutron-fullstack-with-uwsgi-with-neutron-lib-master + branches: ^master$ parent: neutron-fullstack-with-uwsgi required-projects: - openstack/neutron-lib - job: name: neutron-fullstack-with-pyroute2-master + branches: ^master$ parent: neutron-fullstack required-projects: - name: github.com/svinota/pyroute2 @@ -142,6 +144,7 @@ - job: name: neutron-functional-with-uwsgi-with-neutron-lib-master + branches: ^master$ parent: neutron-functional-with-uwsgi required-projects: - openstack/neutron-lib @@ -166,12 +169,14 @@ - job: name: neutron-functional-with-pyroute2-master + branches: ^master$ parent: neutron-functional required-projects: - name: github.com/svinota/pyroute2 - job: name: neutron-functional-with-oslo-master + branches: ^master$ parent: neutron-functional description: | This job installs all oslo libraries from source and executes the @@ -212,6 +217,7 @@ - job: name: neutron-functional-with-sqlalchemy-master + branches: ^master$ parent: neutron-functional required-projects: - name: github.com/sqlalchemy/sqlalchemy diff --git a/zuul.d/job-templates.yaml b/zuul.d/job-templates.yaml index fdc4f8a860..c092cabc4b 100644 --- a/zuul.d/job-templates.yaml +++ b/zuul.d/job-templates.yaml @@ -23,6 +23,9 @@ - ^roles/.*$ - ^rally-jobs/.*$ - ^zuul.d/(?!(job-templates)).*\.yaml + - openstack-tox-py39: # from openstack-python3-jobs template + timeout: 3600 + irrelevant-files: *irrelevant-files - openstack-tox-py310: # from openstack-python3-jobs template timeout: 3600 irrelevant-files: *irrelevant-files @@ -40,6 +43,9 @@ - openstack-tox-py38-arm64: # from openstack-python3-jobs-arm64 template timeout: 4800 irrelevant-files: *irrelevant-files + - openstack-tox-py39-arm64: # from openstack-python3-jobs-arm64 template + timeout: 4800 + irrelevant-files: *irrelevant-files - openstack-tox-py310-arm64: # from 
openstack-python3-jobs-arm64 template timeout: 4800 irrelevant-files: *irrelevant-files @@ -48,6 +54,9 @@ - openstack-tox-py38: # from openstack-python3-jobs template timeout: 3600 irrelevant-files: *irrelevant-files + - openstack-tox-py39: # from openstack-python3-jobs template + timeout: 3600 + irrelevant-files: *irrelevant-files - openstack-tox-py310: # from openstack-python3-jobs template timeout: 3600 irrelevant-files: *irrelevant-files diff --git a/zuul.d/tempest-multinode.yaml b/zuul.d/tempest-multinode.yaml index 740570ee59..d86914482a 100644 --- a/zuul.d/tempest-multinode.yaml +++ b/zuul.d/tempest-multinode.yaml @@ -528,6 +528,7 @@ # TODO(slaweq): propose job with ovs-release and move -master one to # experimental queue name: neutron-ovn-tempest-full-multinode-ovs-master + branches: ^master$ parent: neutron-ovn-multinode-base run: playbooks/multinode-devstack-custom.yaml vars: diff --git a/zuul.d/tempest-singlenode.yaml b/zuul.d/tempest-singlenode.yaml index 1fa3e5b716..e4823c56eb 100644 --- a/zuul.d/tempest-singlenode.yaml +++ b/zuul.d/tempest-singlenode.yaml @@ -295,6 +295,7 @@ - job: name: neutron-ovs-tempest-with-os-ken-master + branches: ^master$ parent: neutron-ovs-tempest-base timeout: 10800 required-projects: @@ -343,6 +344,7 @@ - job: name: neutron-ovn-tempest-with-neutron-lib-master + branches: ^master$ parent: tempest-integrated-networking timeout: 10800 required-projects: @@ -373,12 +375,14 @@ - job: name: neutron-ovs-tempest-with-neutron-lib-master + branches: ^master$ parent: neutron-ovs-tempest-base required-projects: - openstack/neutron-lib - job: name: neutron-ovs-tempest-with-oslo-master + branches: ^master$ parent: neutron-ovs-tempest-base description: | Job testing for devstack/tempest testing Neutron with OVS driver. 
@@ -420,7 +424,9 @@ timeout: 10800 vars: devstack_localrc: - NEUTRON_DEPLOY_MOD_WSGI: true + # TODO(ykarel) can be enabled once + # https://bugs.launchpad.net/neutron/+bug/1912359 fixed + NEUTRON_DEPLOY_MOD_WSGI: false MYSQL_REDUCE_MEMORY: true devstack_plugins: neutron: https://opendev.org/openstack/neutron.git @@ -468,7 +474,7 @@ - job: name: neutron-ovn-tempest-with-uwsgi-loki - parent: neutron-tempest-with-uwsgi + parent: neutron-ovn-tempest-with-uwsgi timeout: 10800 vars: devstack_services: @@ -622,6 +628,7 @@ - job: name: neutron-ovn-tempest-ovs-master + branches: ^master$ description: Job testing for devstack/tempest testing Neutron with ovn driver and OVN master branch parent: neutron-ovn-base vars: @@ -634,6 +641,7 @@ - job: name: neutron-ovn-tempest-ovs-release-with-oslo-master + branches: ^master$ description: | Job testing for devstack/tempest testing Neutron with OVN driver. This job installs all oslo libraries from source. @@ -768,6 +776,7 @@ - job: name: neutron-ovn-tempest-ipv6-only-ovs-master + branches: ^master$ parent: neutron-ovn-tempest-ipv6-only-base vars: devstack_localrc: @@ -779,6 +788,7 @@ - job: name: neutron-ovn-tempest-with-sqlalchemy-master + branches: ^master$ parent: tempest-integrated-networking timeout: 10800 required-projects: @@ -814,6 +824,7 @@ - job: name: neutron-ovs-tempest-with-sqlalchemy-master + branches: ^master$ parent: neutron-ovs-tempest-base required-projects: - name: github.com/sqlalchemy/sqlalchemy |