-rw-r--r--  doc/source/admin/config-dhcp-ha.rst  32
-rw-r--r--  doc/source/admin/config-ipv6.rst  13
-rw-r--r--  doc/source/admin/config-mtu.rst  7
-rw-r--r--  doc/source/admin/config-qos.rst  9
-rw-r--r--  doc/source/admin/fwaas-v2-scenario.rst  31
-rw-r--r--  doc/source/admin/ops-quotas.rst  323
-rw-r--r--  doc/source/admin/ovn/troubleshooting.rst  79
-rw-r--r--  doc/source/admin/shared/deploy-selfservice-initialnetworks.txt  7
-rw-r--r--  doc/source/contributor/testing/ci_scenario_jobs.rst  8
-rw-r--r--  doc/source/install/compute-install-option2-rdo.rst  2
-rw-r--r--  doc/source/install/compute-install-option2-ubuntu.rst  4
-rw-r--r--  doc/source/ovn/migration.rst  6
-rw-r--r--  neutron/agent/linux/dhcp.py  3
-rw-r--r--  neutron/agent/linux/ip_lib.py  8
-rw-r--r--  neutron/agent/metadata/driver.py  30
-rw-r--r--  neutron/agent/ovn/extensions/qos_hwol.py  4
-rw-r--r--  neutron/cmd/sanity/checks.py  34
-rw-r--r--  neutron/cmd/sanity_check.py  12
-rw-r--r--  neutron/common/_constants.py  6
-rw-r--r--  neutron/common/ovn/constants.py  6
-rw-r--r--  neutron/conf/agent/database/agentschedulers_db.py  4
-rw-r--r--  neutron/conf/plugins/ml2/drivers/ovn/ovn_conf.py  11
-rw-r--r--  neutron/conf/policies/availability_zone.py  8
-rw-r--r--  neutron/conf/policies/port.py  1
-rw-r--r--  neutron/db/db_base_plugin_v2.py  47
-rw-r--r--  neutron/db/dvr_mac_db.py  1
-rw-r--r--  neutron/db/ipam_backend_mixin.py  2
-rw-r--r--  neutron/db/l3_agentschedulers_db.py  4
-rw-r--r--  neutron/db/migration/alembic_migrations/env.py  18
-rw-r--r--  neutron/db/migration/alembic_migrations/versions/2023.2/expand/93f394357a27_remove_in_use_on_subnets.py  42
-rw-r--r--  neutron/db/migration/alembic_migrations/versions/EXPAND_HEAD  2
-rw-r--r--  neutron/db/models_v2.py  52
-rw-r--r--  neutron/db/securitygroups_db.py  37
-rw-r--r--  neutron/exceptions/mtu.py  28
-rw-r--r--  neutron/objects/db/api.py  18
-rw-r--r--  neutron/objects/router.py  20
-rw-r--r--  neutron/plugins/ml2/drivers/ovn/mech_driver/mech_driver.py  35
-rw-r--r--  neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/commands.py  15
-rw-r--r--  neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/extensions/qos.py  11
-rw-r--r--  neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/impl_idl_ovn.py  16
-rw-r--r--  neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/maintenance.py  59
-rw-r--r--  neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/ovn_client.py  60
-rw-r--r--  neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/ovn_db_sync.py  2
-rw-r--r--  neutron/policy.py  13
-rw-r--r--  neutron/scheduler/l3_agent_scheduler.py  17
-rw-r--r--  neutron/services/loki/loki_plugin.py  4
-rw-r--r--  neutron/services/ovn_l3/plugin.py  1
-rw-r--r--  neutron/services/qos/qos_plugin.py  4
-rw-r--r--  neutron/services/tag/tag_plugin.py  3
-rw-r--r--  neutron/tests/base.py  28
-rw-r--r--  neutron/tests/common/net_helpers.py  28
-rw-r--r--  neutron/tests/fullstack/test_dhcp_agent.py  2
-rw-r--r--  neutron/tests/functional/agent/linux/test_keepalived.py  17
-rw-r--r--  neutron/tests/functional/agent/ovn/extensions/test_qos_hwol.py  49
-rw-r--r--  neutron/tests/functional/agent/ovn/metadata/test_metadata_agent.py  26
-rw-r--r--  neutron/tests/functional/agent/test_ovs_lib.py  2
-rw-r--r--  neutron/tests/functional/base.py  36
-rw-r--r--  neutron/tests/functional/db/test_network.py  28
-rw-r--r--  neutron/tests/functional/pecan_wsgi/test_functional.py  7
-rw-r--r--  neutron/tests/functional/plugins/ml2/drivers/ovn/mech_driver/ovsdb/extensions/test_qos.py  202
-rw-r--r--  neutron/tests/functional/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_maintenance.py  21
-rw-r--r--  neutron/tests/functional/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_ovn_db_sync.py  27
-rw-r--r--  neutron/tests/functional/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_ovsdb_monitor.py  6
-rw-r--r--  neutron/tests/functional/plugins/ml2/drivers/ovn/mech_driver/test_mech_driver.py  60
-rw-r--r--  neutron/tests/functional/plugins/ml2/test_plugin.py  3
-rw-r--r--  neutron/tests/functional/services/l3_router/test_l3_dvr_ha_router_plugin.py  13
-rw-r--r--  neutron/tests/functional/services/l3_router/test_l3_dvr_router_plugin.py  78
-rw-r--r--  neutron/tests/functional/services/logapi/drivers/ovn/test_driver.py  9
-rw-r--r--  neutron/tests/functional/services/ovn_l3/test_plugin.py  2
-rw-r--r--  neutron/tests/functional/services/portforwarding/test_port_forwarding.py  3
-rw-r--r--  neutron/tests/unit/_test_extension_portbindings.py  107
-rw-r--r--  neutron/tests/unit/agent/dhcp/test_agent.py  3
-rw-r--r--  neutron/tests/unit/agent/linux/test_dhcp.py  3
-rw-r--r--  neutron/tests/unit/agent/linux/test_external_process.py  113
-rw-r--r--  neutron/tests/unit/agent/linux/test_ip_lib.py  2
-rw-r--r--  neutron/tests/unit/agent/metadata/test_driver.py  64
-rw-r--r--  neutron/tests/unit/api/test_extensions.py  3
-rw-r--r--  neutron/tests/unit/api/v2/test_base.py  102
-rw-r--r--  neutron/tests/unit/conf/policies/test_availability_zone.py  6
-rw-r--r--  neutron/tests/unit/db/metering/test_metering_db.py  42
-rw-r--r--  neutron/tests/unit/db/test_agentschedulers_db.py  63
-rw-r--r--  neutron/tests/unit/db/test_db_base_plugin_v2.py  651
-rw-r--r--  neutron/tests/unit/db/test_dvr_mac_db.py  6
-rw-r--r--  neutron/tests/unit/db/test_ipam_backend_mixin.py  6
-rw-r--r--  neutron/tests/unit/db/test_ipam_pluggable_backend.py  5
-rw-r--r--  neutron/tests/unit/db/test_l3_db.py  3
-rw-r--r--  neutron/tests/unit/db/test_ovn_revision_numbers_db.py  2
-rw-r--r--  neutron/tests/unit/db/test_securitygroups_db.py  32
-rw-r--r--  neutron/tests/unit/extensions/test_address_group.py  5
-rw-r--r--  neutron/tests/unit/extensions/test_address_scope.py  39
-rw-r--r--  neutron/tests/unit/extensions/test_agent.py  17
-rw-r--r--  neutron/tests/unit/extensions/test_availability_zone.py  21
-rw-r--r--  neutron/tests/unit/extensions/test_data_plane_status.py  12
-rw-r--r--  neutron/tests/unit/extensions/test_default_subnetpools.py  8
-rw-r--r--  neutron/tests/unit/extensions/test_dns.py  4
-rw-r--r--  neutron/tests/unit/extensions/test_expose_port_forwarding_in_fip.py  14
-rw-r--r--  neutron/tests/unit/extensions/test_external_net.py  23
-rw-r--r--  neutron/tests/unit/extensions/test_extraroute.py  19
-rw-r--r--  neutron/tests/unit/extensions/test_flavors.py  8
-rw-r--r--  neutron/tests/unit/extensions/test_floating_ip_port_forwarding.py  31
-rw-r--r--  neutron/tests/unit/extensions/test_l3.py  341
-rw-r--r--  neutron/tests/unit/extensions/test_l3_conntrack_helper.py  21
-rw-r--r--  neutron/tests/unit/extensions/test_l3_ext_gw_mode.py  17
-rw-r--r--  neutron/tests/unit/extensions/test_l3_ndp_proxy.py  51
-rw-r--r--  neutron/tests/unit/extensions/test_local_ip.py  16
-rw-r--r--  neutron/tests/unit/extensions/test_network_ip_availability.py  86
-rw-r--r--  neutron/tests/unit/extensions/test_network_segment_range.py  36
-rw-r--r--  neutron/tests/unit/extensions/test_portsecurity.py  39
-rw-r--r--  neutron/tests/unit/extensions/test_providernet.py  12
-rw-r--r--  neutron/tests/unit/extensions/test_qos_gateway_ip.py  8
-rw-r--r--  neutron/tests/unit/extensions/test_quotasv2.py  72
-rw-r--r--  neutron/tests/unit/extensions/test_securitygroup.py  88
-rw-r--r--  neutron/tests/unit/extensions/test_segment.py  140
-rw-r--r--  neutron/tests/unit/extensions/test_servicetype.py  6
-rw-r--r--  neutron/tests/unit/extensions/test_subnet_onboard.py  4
-rw-r--r--  neutron/tests/unit/extensions/test_subnet_service_types.py  5
-rw-r--r--  neutron/tests/unit/extensions/test_subnetpool_prefix_ops.py  4
-rw-r--r--  neutron/tests/unit/fake_resources.py  2
-rw-r--r--  neutron/tests/unit/objects/test_router.py  31
-rw-r--r--  neutron/tests/unit/plugins/ml2/drivers/l2pop/test_mech_driver.py  62
-rw-r--r--  neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_commands.py  22
-rw-r--r--  neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_impl_idl_ovn.py  85
-rw-r--r--  neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_maintenance.py  111
-rw-r--r--  neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/test_mech_driver.py  301
-rw-r--r--  neutron/tests/unit/plugins/ml2/drivers/ovn/test_db_migration.py  6
-rw-r--r--  neutron/tests/unit/plugins/ml2/extensions/test_dns_domain_keywords.py  6
-rw-r--r--  neutron/tests/unit/plugins/ml2/extensions/test_dns_integration.py  4
-rw-r--r--  neutron/tests/unit/plugins/ml2/extensions/test_tag_ports_during_bulk_creation.py  22
-rw-r--r--  neutron/tests/unit/plugins/ml2/test_extension_driver_api.py  16
-rw-r--r--  neutron/tests/unit/plugins/ml2/test_plugin.py  216
-rw-r--r--  neutron/tests/unit/plugins/ml2/test_port_binding.py  36
-rw-r--r--  neutron/tests/unit/plugins/ml2/test_security_group.py  3
-rw-r--r--  neutron/tests/unit/plugins/ml2/test_tracked_resources.py  43
-rw-r--r--  neutron/tests/unit/scheduler/test_l3_agent_scheduler.py  34
-rw-r--r--  neutron/tests/unit/services/metering/test_metering_plugin.py  211
-rw-r--r--  neutron/tests/unit/services/ovn_l3/test_plugin.py  6
-rw-r--r--  neutron/tests/unit/services/qos/test_qos_plugin.py  12
-rw-r--r--  neutron/tests/unit/services/revisions/test_revision_plugin.py  8
-rw-r--r--  plugin.spec  2
-rw-r--r--  releasenotes/notes/bug-1953165-6e848ea2c0398f56.yaml  16
-rw-r--r--  releasenotes/notes/enable-enforce-scope-and-new-defaults-1f82a9eb71125f5d.yaml  25
-rw-r--r--  releasenotes/notes/localnet-learn-fdb-22469280b49701fc.yaml  23
-rw-r--r--  releasenotes/notes/ovn-recreate-metadata-port-76e2c0e651267aa0.yaml  11
-rw-r--r--  releasenotes/releasenotes/notes/network_subnet_mtu_validation-c221f22efcfae927.yaml  22
-rw-r--r--  tools/ovn_migration/infrared/tripleo-ovn-migration/README.rst  6
-rw-r--r--  tools/ovn_migration/infrared/tripleo-ovn-migration/main.yml  2
-rw-r--r--  tox.ini  8
-rw-r--r--  zuul.d/base.yaml  8
-rw-r--r--  zuul.d/grenade.yaml  2
-rw-r--r--  zuul.d/job-templates.yaml  51
-rw-r--r--  zuul.d/project.yaml  4
-rw-r--r--  zuul.d/tempest-multinode.yaml  9
-rw-r--r--  zuul.d/tempest-singlenode.yaml  33
153 files changed, 3476 insertions, 2149 deletions
diff --git a/doc/source/admin/config-dhcp-ha.rst b/doc/source/admin/config-dhcp-ha.rst
index 82780551b7..fcfbc87d40 100644
--- a/doc/source/admin/config-dhcp-ha.rst
+++ b/doc/source/admin/config-dhcp-ha.rst
@@ -441,6 +441,38 @@ To test the HA of DHCP agent:
#. Start DHCP agent on HostB. The VM gets the wanted IP again.
+No HA for metadata service on isolated networks
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+All Neutron backends using the DHCP agent can also provide `metadata service
+<https://docs.openstack.org/nova/latest/user/metadata.html>`_ in isolated
+networks (i.e. networks without a router). In this case the DHCP agent manages
+the metadata service (see config option `enable_isolated_metadata
+<https://docs.openstack.org/neutron/latest/configuration/dhcp-agent.html#DEFAULT.enable_isolated_metadata>`_).
+
+Note, however, that the metadata service is redundant only for IPv4, not
+IPv6, even when the DHCP service is configured to be highly available
+(config option `dhcp_agents_per_network
+<https://docs.openstack.org/neutron/latest/configuration/neutron.html#DEFAULT.dhcp_agents_per_network>`_
+> 1). This is because the DHCP agent inserts a route to the well-known
+metadata IPv4 address (`169.254.169.254`) via its own IP address, so the
+service remains reachable as long as the DHCP service is available at that
+IP address.
+This also means that recovery after a failure is tied to the renewal of the
+DHCP lease, since that route will only change if the DHCP server for a VM
+changes.
+
+With IPv6, the well-known metadata IPv6 address (`fe80::a9fe:a9fe`) is used,
+but it is configured directly in the DHCP agent's network namespace.
+Because duplicate address detection (DAD) is enforced, this address can be
+configured in at most one DHCP network namespace at any time.
+See `RFC 4862 <https://www.rfc-editor.org/rfc/rfc4862#section-5.4>`_ for
+details on the DAD process.
+
+For this reason, even when you have multiple DHCP agents, an arbitrary one
+(where the metadata IPv6 address is not in the `dadfailed` state) will serve
+all metadata requests over IPv6. If that metadata service instance fails,
+there is no failover and the IPv6 metadata service becomes unreachable.
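+
+Which namespace currently holds the IPv6 metadata address can be checked
+from the host by inspecting the address state inside the DHCP namespaces.
+A minimal sketch (the namespace name is hypothetical; DHCP namespaces are
+named ``qdhcp-<network-id>``):
+
+.. code-block:: python
+
+   # Sketch: report whether the IPv6 metadata address is usable in a
+   # given DHCP namespace or stuck in the 'dadfailed' state.
+   import subprocess
+
+   ns = 'qdhcp-3f78a9b0-0000-0000-0000-000000000000'  # hypothetical
+   out = subprocess.run(
+       ['ip', 'netns', 'exec', ns, 'ip', '-6', '-o', 'addr', 'show'],
+       capture_output=True, text=True, check=True).stdout
+   for line in out.splitlines():
+       if 'fe80::a9fe:a9fe' in line:
+           print(ns, 'dadfailed' if 'dadfailed' in line else 'ok')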
+
Disabling and removing an agent
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/doc/source/admin/config-ipv6.rst b/doc/source/admin/config-ipv6.rst
index 65b5fb9a41..8f2395f6fb 100644
--- a/doc/source/admin/config-ipv6.rst
+++ b/doc/source/admin/config-ipv6.rst
@@ -195,8 +195,8 @@ Project network considerations
Dataplane
---------
-Both the Linux bridge and the Open vSwitch dataplane modules support
-forwarding IPv6
+All dataplane modules, including OVN, Open vSwitch and Linux bridge,
+support forwarding IPv6
packets amongst the guests and router ports. Similar to IPv4, there is no
special configuration or setup required to enable the dataplane to properly
forward packets from the source to the destination using IPv6. Note that these
@@ -204,6 +204,15 @@ dataplanes will forward Link-local Address (LLA) packets between hosts on the
same network just fine without any participation or setup by OpenStack
components after the ports are all connected and MAC addresses learned.
+.. warning::
+   The only exception is the MTU value of the network on which an IPv6
+   subnet is created. If that MTU is less than 1280 octets (the minimum
+   link MTU specified in
+   `RFC 8200 <https://www.rfc-editor.org/rfc/rfc8200>`__), configuring
+   both IPv6 and IPv4 addresses on the network can fail, leaving the
+   subnets unusable. For that reason, the API validates the MTU value
+   when subnets are created.
+
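+The rule the API applies can be summarized in a few lines; a sketch of the
+validation logic (``68`` is the IPv4 minimum MTU from RFC 791, ``1280`` the
+IPv6 minimum from RFC 8200):
+
+.. code-block:: python
+
+   IPV4_MIN_MTU = 68    # RFC 791
+   IPV6_MIN_MTU = 1280  # RFC 8200
+
+   def subnet_allowed(network_mtu, ip_version):
+       # A network that can carry minimum IPv6 frames supports both.
+       if not network_mtu or network_mtu >= IPV6_MIN_MTU:
+           return True
+       # Below the IPv4 minimum, no subnet can be configured.
+       if network_mtu < IPV4_MIN_MTU:
+           return False
+       # Between the two minimums, only IPv4 subnets are allowed.
+       return ip_version == 4
+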
Addresses for subnets
---------------------
diff --git a/doc/source/admin/config-mtu.rst b/doc/source/admin/config-mtu.rst
index 4207809e46..78892c7563 100644
--- a/doc/source/admin/config-mtu.rst
+++ b/doc/source/admin/config-mtu.rst
@@ -130,6 +130,13 @@ IPv6. IPv6 uses RA via the L3 agent because the DHCP agent only supports
IPv4. Instances using IPv4 and IPv6 should obtain the same MTU value
regardless of method.
+.. note::
+
+   If your network uses an MTU value below 1280, please read the
+   warning in the
+ `IPv6 configuration guide <./config-ipv6.html#project-network-considerations>`__
+ before creating any subnets.
+
Networks with enabled vlan transparency
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/doc/source/admin/config-qos.rst b/doc/source/admin/config-qos.rst
index b5713dba79..b46273fc97 100644
--- a/doc/source/admin/config-qos.rst
+++ b/doc/source/admin/config-qos.rst
@@ -126,10 +126,11 @@ updated:
Valid DSCP Marks
----------------
-Valid DSCP mark values are even numbers between 0 and 56, except 2-6, 42, 44,
-and 50-54. The full list of valid DSCP marks is:
+Valid DSCP mark values are even numbers between 0 and 56, except 2-6, 42, and
+50-54. The full list of valid DSCP marks is:
-0, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 46, 48, 56
+0, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 44, 46,
+48, 56
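+
+For illustration, the valid set can be derived programmatically (a sketch,
+not part of Neutron):
+
+.. code-block:: python
+
+   # Even code points 0..56, minus the excluded ranges listed above.
+   excluded = set(range(2, 7)) | {42} | set(range(50, 55))
+   valid = [m for m in range(0, 57, 2) if m not in excluded]
+   # -> [0, 8, 10, ..., 40, 44, 46, 48, 56]
+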
L3 QoS support
@@ -165,7 +166,7 @@ traffic directions (from the VM point of view) for **bandwidth limiting**.
Rule \\ L3 service L3 router OVN L3
==================== =================== ===================
Floating IP Egress \\ Ingress Egress \\ Ingress
- Gateway IP Egress \\ Ingress -
+ Gateway IP Egress \\ Ingress Egress \\ Ingress
==================== =================== ===================
diff --git a/doc/source/admin/fwaas-v2-scenario.rst b/doc/source/admin/fwaas-v2-scenario.rst
index bab66ae64c..a11983bf1c 100644
--- a/doc/source/admin/fwaas-v2-scenario.rst
+++ b/doc/source/admin/fwaas-v2-scenario.rst
@@ -5,6 +5,37 @@ Firewall-as-a-Service (FWaaS) v2 scenario
Firewall v2 has no support for OVN currently.
+Installation of FWaaS v2
+------------------------
+
+If possible, you should rely on packages provided by your Linux and/or
+OpenStack distribution:
+
+* For example, on Ubuntu you can install the ``neutron-fwaas-common``
+  package provided by Canonical.
+
+.. warning::
+
+   Always check the version of the available package against the releases
+   listed on https://releases.openstack.org/
+
+If you use ``pip``, follow these steps to install neutron-fwaas:
+
+* `identify the version of the neutron-fwaas package
+ <https://opendev.org/openstack/releases/src/branch/master/deliverables>`_
+ that matches your OpenStack version:
+
+ * 2023.1 Antelope: latest 18.0.x version
+ * Zed: latest 17.0.x version
+
+* instruct pip to (a) install exactly this version and (b) honor the
+  OpenStack upper constraints on dependency versions (example for
+  Antelope):
+
+ .. code-block:: console
+
+ pip install -c https://opendev.org/openstack/requirements/raw/branch/stable/2023.1/upper-constraints.txt neutron-fwaas==18.0.0
+
Enable FWaaS v2
---------------
diff --git a/doc/source/admin/ops-quotas.rst b/doc/source/admin/ops-quotas.rst
index e08c3f29cc..723a18848e 100644
--- a/doc/source/admin/ops-quotas.rst
+++ b/doc/source/admin/ops-quotas.rst
@@ -9,10 +9,10 @@ more resources than the quota allows, an error occurs:
.. code-block:: console
$ openstack network create test_net
- Quota exceeded for resources: ['network']
+ Error while executing command: ConflictException: 409, Quota exceeded for resources: ['network'].
Per-project quota configuration is also supported by the quota
-extension API. See :ref:`cfg_quotas_per_tenant` for details.
+extension API. See :ref:`cfg_quotas_per_project` for details.
Basic quota configuration
~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -28,75 +28,87 @@ default quota values:
.. code-block:: ini
[quotas]
- # number of networks allowed per tenant, and minus means unlimited
- quota_network = 10
+ # Default number of resources allowed per project. A negative value means
+ # unlimited. (integer value)
+ #default_quota = -1
- # number of subnets allowed per tenant, and minus means unlimited
- quota_subnet = 10
+ # Number of networks allowed per project. A negative value means unlimited.
+ # (integer value)
+ quota_network = 100
- # number of ports allowed per tenant, and minus means unlimited
- quota_port = 50
+   # Number of subnets allowed per project. A negative value means unlimited.
+ # (integer value)
+ quota_subnet = 100
- # default driver to use for quota checks
- quota_driver = neutron.quota.DbQuotaNoLockDriver
-
-OpenStack Networking also supports quotas for L3 resources:
-router and floating IP. Add these lines to the
-``quotas`` section in the ``/etc/neutron/neutron.conf`` file:
-
-.. code-block:: ini
+ # Number of ports allowed per project. A negative value means unlimited.
+ # (integer value)
+ quota_port = 500
- [quotas]
- # number of routers allowed per tenant, and minus means unlimited
+   # Default driver to use for quota checks
+ quota_driver = neutron.db.quota.driver_nolock.DbQuotaNoLockDriver
+
+   # When set to True, quota usage will be tracked in the Neutron database
+   # for each resource by mapping directly to a data model class, for
+   # example networks, subnets, ports, etc. When set to False, the quota
+   # engine will track usage as a direct count of the objects of each
+   # type. For more information, see the Quota Management and
+ # Enforcement guide.
+ # (boolean value)
+ track_quota_usage = true
+
+ #
+ # From neutron.extensions
+ #
+
+ # Number of routers allowed per project. A negative value means unlimited.
+ # (integer value)
quota_router = 10
- # number of floating IPs allowed per tenant, and minus means unlimited
+ # Number of floating IPs allowed per project. A negative value means
+ # unlimited.
+ # (integer value)
quota_floatingip = 50
-OpenStack Networking also supports quotas for security group
-resources: number of security groups and number of rules.
-Add these lines to the ``quotas`` section in the
-``/etc/neutron/neutron.conf`` file:
-
-.. code-block:: ini
-
- [quotas]
- # number of security groups per tenant, and minus means unlimited
+ # Number of security groups allowed per project. A negative value means
+ # unlimited.
+ # (integer value)
quota_security_group = 10
- # number of security rules allowed per tenant, and minus means unlimited
+ # Number of security group rules allowed per project. A negative value means
+ # unlimited.
+ # (integer value)
quota_security_group_rule = 100
-.. _cfg_quotas_per_tenant:
+.. _cfg_quotas_per_project:
Configure per-project quotas
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
OpenStack Networking also supports per-project quota limit by
quota extension API.
-.. todo:: This document needs to be migrated to using ``openstack`` commands
- rather than the deprecated ``neutron`` commands.
-
Use these commands to manage per-project quotas:
-neutron quota-delete
+openstack quota delete
Delete defined quotas for a specified project
-neutron quota-list
- Lists defined quotas for all projects
+openstack quota list
+ Lists defined quotas for all projects with non-default quota values
+
+openstack quota show
+  Shows quotas for the current project
-neutron quota-show
+openstack quota show <project>
Shows quotas for a specified project
-neutron quota-default-show
- Show default quotas for a specified tenant
+openstack quota show --default <project>
+ Show default quotas for a specified project
-neutron quota-update
+openstack quota set --<resource> <value> <project>
Updates quotas for a specified project
Only users with the ``admin`` role can change a quota value. By default,
the default set of quotas is enforced for all projects, so no
-:command:`quota-create` command exists.
+:command:`openstack quota create` command exists.
#. Configure Networking to show per-project quotas
@@ -104,7 +116,7 @@ the default set of quotas are enforced for all projects, so no
.. code-block:: ini
- quota_driver = neutron.db.quota_db.DbQuotaDriver
+ quota_driver = neutron.db.quota.driver.DbQuotaDriver
When you set this option, the output for Networking commands shows ``quotas``.
@@ -119,10 +131,6 @@ the default set of quotas are enforced for all projects, so no
The command shows the ``quotas`` extension, which provides
per-project quota management support.
- .. note::
-
- Many of the extensions shown below are supported in the Mitaka release and later.
-
.. code-block:: console
+------------------------+------------------------+--------------------------+
@@ -131,7 +139,7 @@ the default set of quotas are enforced for all projects, so no
| ... | ... | ... |
| Quota management | quotas | Expose functions for |
| support | | quotas management per |
- | | | tenant |
+ | | | project |
| ... | ... | ... |
+------------------------+------------------------+--------------------------+
@@ -141,215 +149,144 @@ the default set of quotas are enforced for all projects, so no
.. code-block:: console
- $ neutron ext-show quotas
- +-------------+------------------------------------------------------------+
- | Field | Value |
- +-------------+------------------------------------------------------------+
- | alias | quotas |
- | description | Expose functions for quotas management per tenant |
- | links | |
- | name | Quota management support |
- | namespace | https://docs.openstack.org/network/ext/quotas-sets/api/v2.0 |
- | updated | 2012-07-29T10:00:00-00:00 |
- +-------------+------------------------------------------------------------+
+ $ openstack extension show quotas
+ +-------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+ | Field | Value |
+ +-------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+ | alias | quotas |
+ | description | Expose functions for quotas management per project |
+ | id | quotas |
+ | links | [] |
+ | location | Munch({'cloud': '', 'region_name': 'RegionOne', 'zone': None, 'project': Munch({'id': 'afc55714081b4ef29f99ec128cb1fa30', 'name': 'demo', 'domain_id': 'default', 'domain_name': None})}) |
+ | name | Quota management support |
+ | updated | 2012-07-29T10:00:00-00:00 |
+ +-------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
.. note::
Only some plug-ins support per-project quotas.
- Specifically, Open vSwitch, Linux Bridge, and VMware NSX
+ Specifically, OVN, Open vSwitch, Linux Bridge, and VMware NSX
support them, but new versions of other plug-ins might
bring additional functionality. See the documentation for
each plug-in.
#. List projects who have per-project quota support.
- The :command:`neutron quota-list` command lists projects for which the
+ The :command:`openstack quota list` command lists projects for which the
per-project quota is enabled. The command does not list projects with
default quota support. You must be an administrative user to run this
command:
.. code-block:: console
- $ neutron quota-list
- +------------+---------+------+--------+--------+----------------------------------+
- | floatingip | network | port | router | subnet | tenant_id |
- +------------+---------+------+--------+--------+----------------------------------+
- | 20 | 5 | 20 | 10 | 5 | 6f88036c45344d9999a1f971e4882723 |
- | 25 | 10 | 30 | 10 | 10 | bff5c9455ee24231b5bc713c1b96d422 |
- +------------+---------+------+--------+--------+----------------------------------+
+ $ openstack quota list --network
+ +----------------------------------+--------------+----------+-------+---------------+---------+-----------------+----------------------+---------+--------------+
+ | Project ID | Floating IPs | Networks | Ports | RBAC Policies | Routers | Security Groups | Security Group Rules | Subnets | Subnet Pools |
+ +----------------------------------+--------------+----------+-------+---------------+---------+-----------------+----------------------+---------+--------------+
+ | 6f88036c45344d9999a1f971e4882723 | 50 | 100 | 500 | 10 | 20 | 10 | 100 | 100 | -1 |
+ | bff5c9455ee24231b5bc713c1b96d422 | 100 | 100 | 500 | 10 | 10 | 10 | 100 | 100 | -1 |
+ +----------------------------------+--------------+----------+-------+---------------+---------+-----------------+----------------------+---------+--------------+
#. Show per-project quota values.
- The :command:`neutron quota-show` command reports the current
+ The :command:`openstack quota show` command reports the current
set of quota limits for the specified project.
Non-administrative users can run this command without the
- ``--tenant_id`` parameter. If per-project quota limits are
+ ``<project>`` argument. If per-project quota limits are
not enabled for the project, the command shows the default
set of quotas.
- .. note::
-
- Additional quotas added in the Mitaka release include ``security_group``,
- ``security_group_rule``, ``subnet``, and ``subnetpool``.
-
.. code-block:: console
- $ neutron quota-show --tenant_id 6f88036c45344d9999a1f971e4882723
- +---------------------+-------+
- | Field | Value |
- +---------------------+-------+
- | floatingip | 50 |
- | network | 10 |
- | port | 50 |
- | rbac_policy | 10 |
- | router | 10 |
- | security_group | 10 |
- | security_group_rule | 100 |
- | subnet | 10 |
- | subnetpool | -1 |
- +---------------------+-------+
+ $ openstack quota show 6f88036c45344d9999a1f971e4882723
+ +----------------+-------+
+ | Resource | Limit |
+ +----------------+-------+
+ | networks | 100 |
+ | ports | 500 |
+ | rbac_policies | 10 |
+ | routers | 20 |
+ | subnets | 100 |
+ | subnet_pools | -1 |
+ | floating-ips | 50 |
+ | secgroup-rules | 100 |
+ | secgroups | 10 |
+ +----------------+-------+
    The following example shows the output for a
    non-administrative user.
.. code-block:: console
- $ neutron quota-show
- +---------------------+-------+
- | Field | Value |
- +---------------------+-------+
- | floatingip | 50 |
- | network | 10 |
- | port | 50 |
- | rbac_policy | 10 |
- | router | 10 |
- | security_group | 10 |
- | security_group_rule | 100 |
- | subnet | 10 |
- | subnetpool | -1 |
- +---------------------+-------+
+ $ openstack quota show
+ +----------------+-------+
+ | Resource | Limit |
+ +----------------+-------+
+ | networks | 100 |
+ | ports | 500 |
+ | rbac_policies | 10 |
+ | routers | 20 |
+ | subnets | 100 |
+ | subnet_pools | -1 |
+ | floating-ips | 50 |
+ | secgroup-rules | 100 |
+ | secgroups | 10 |
+ +----------------+-------+
#. Update quota values for a specified project.
- Use the :command:`neutron quota-update` command to
+ Use the :command:`openstack quota set` command to
update a quota for a specified project.
.. code-block:: console
- $ neutron quota-update --tenant_id 6f88036c45344d9999a1f971e4882723 --network 5
- +---------------------+-------+
- | Field | Value |
- +---------------------+-------+
- | floatingip | 50 |
- | network | 5 |
- | port | 50 |
- | rbac_policy | 10 |
- | router | 10 |
- | security_group | 10 |
- | security_group_rule | 100 |
- | subnet | 10 |
- | subnetpool | -1 |
- +---------------------+-------+
+ $ openstack quota set --routers 20 6f88036c45344d9999a1f971e4882723
You can update quotas for multiple resources through one
command.
.. code-block:: console
- $ neutron quota-update --tenant_id 6f88036c45344d9999a1f971e4882723 --subnet 5 --port 20
- +---------------------+-------+
- | Field | Value |
- +---------------------+-------+
- | floatingip | 50 |
- | network | 5 |
- | port | 20 |
- | rbac_policy | 10 |
- | router | 10 |
- | security_group | 10 |
- | security_group_rule | 100 |
- | subnet | 5 |
- | subnetpool | -1 |
- +---------------------+-------+
-
- To update the limits for an L3 resource such as, router
- or floating IP, you must define new values for the quotas
- after the ``--`` directive.
-
- This example updates the limit of the number of floating
- IPs for the specified project.
+ $ openstack quota set --subnets 50 --ports 100 6f88036c45344d9999a1f971e4882723
- .. code-block:: console
-
- $ neutron quota-update --tenant_id 6f88036c45344d9999a1f971e4882723 --floatingip 20
- +---------------------+-------+
- | Field | Value |
- +---------------------+-------+
- | floatingip | 20 |
- | network | 5 |
- | port | 20 |
- | rbac_policy | 10 |
- | router | 10 |
- | security_group | 10 |
- | security_group_rule | 100 |
- | subnet | 5 |
- | subnetpool | -1 |
- +---------------------+-------+
-
- You can update the limits of multiple resources by
- including L2 resources and L3 resource through one
- command:
+    You can combine updates to L2 and L3 resources in one
+    command:
.. code-block:: console
- $ neutron quota-update --tenant_id 6f88036c45344d9999a1f971e4882723 \
- --network 3 --subnet 3 --port 3 --floatingip 3 --router 3
- +---------------------+-------+
- | Field | Value |
- +---------------------+-------+
- | floatingip | 3 |
- | network | 3 |
- | port | 3 |
- | rbac_policy | 10 |
- | router | 3 |
- | security_group | 10 |
- | security_group_rule | 100 |
- | subnet | 3 |
- | subnetpool | -1 |
- +---------------------+-------+
+ $ openstack quota set --networks 50 --subnets 50 --ports 100 \
+ --floating-ips 20 --routers 5 6f88036c45344d9999a1f971e4882723
#. Delete per-project quota values.
To clear per-project quota limits, use the
- :command:`neutron quota-delete` command.
+ :command:`openstack quota delete` command.
.. code-block:: console
- $ neutron quota-delete --tenant_id 6f88036c45344d9999a1f971e4882723
- Deleted quota: 6f88036c45344d9999a1f971e4882723
+ $ openstack quota delete 6f88036c45344d9999a1f971e4882723
After you run this command, you can see that quota
values for the project are reset to the default values.
.. code-block:: console
- $ openstack quota show 6f88036c45344d9999a1f971e4882723
- +---------------------+-------+
- | Field | Value |
- +---------------------+-------+
- | floatingip | 50 |
- | network | 10 |
- | port | 50 |
- | rbac_policy | 10 |
- | router | 10 |
- | security_group | 10 |
- | security_group_rule | 100 |
- | subnet | 10 |
- | subnetpool | -1 |
- +---------------------+-------+
+ $ openstack quota show --network 6f88036c45344d9999a1f971e4882723
+ +----------------+-------+
+ | Resource | Limit |
+ +----------------+-------+
+ | networks | 100 |
+ | ports | 500 |
+ | rbac_policies | 10 |
+ | routers | 20 |
+ | subnets | 100 |
+ | subnet_pools | -1 |
+ | floating-ips | 50 |
+ | secgroup-rules | 100 |
+ | secgroups | 10 |
+ +----------------+-------+
.. note::
Listing default quotas with the OpenStack command line client will
- provide all quotas for networking and other services. Previously,
- the :command:`neutron quota-show --tenant_id` would list only networking
- quotas.
+ provide all quotas for networking and other services.
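+
+As an alternative to the CLI, per-project quotas can also be managed from
+Python with openstacksdk; a minimal sketch (assuming admin credentials are
+available via ``OS_*`` environment variables or ``clouds.yaml``):
+
+.. code-block:: python
+
+   import openstack
+
+   conn = openstack.connect()  # reads OS_* env vars / clouds.yaml
+   project_id = '6f88036c45344d9999a1f971e4882723'
+
+   # Read the current (or default) limits for the project.
+   quota = conn.network.get_quota(project_id)
+   print(quota.networks, quota.ports, quota.routers)
+
+   # Update several limits in one call, then reset them to the defaults.
+   conn.network.update_quota(project_id, networks=50, ports=100)
+   conn.network.delete_quota(project_id)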
diff --git a/doc/source/admin/ovn/troubleshooting.rst b/doc/source/admin/ovn/troubleshooting.rst
index 646a05648c..29b10dcc16 100644
--- a/doc/source/admin/ovn/troubleshooting.rst
+++ b/doc/source/admin/ovn/troubleshooting.rst
@@ -43,3 +43,82 @@ This problem is not unique to OVN but is amplified due to the possible larger
size of geneve header compared to other common tunneling protocols (VXLAN).
If you are using VM's as compute nodes make sure that you either lower the MTU
size on the virtual interface or enable fragmentation on it.
+
+Duplicated or deleted OVN agents
+--------------------------------
+
+The "ovn-controller" process is the local controller daemon for OVN. It runs
+on every host belonging to the OVN network and is in charge of registering
+the host in the OVN database by creating the corresponding "Chassis" and
+"Chassis_Private" registers in the Southbound database. When the process is
+gracefully stopped, it deletes both registers. Neutron uses these registers
+to track the OVN agents.
+
+ .. code-block:: console
+
+ $ openstack network agent list -c ID -c "Agent Type" -c Host -c Alive -c State
+ +--------------------------------------+------------------------------+--------+-------+-------+
+ | ID | Agent Type | Host | Alive | State |
+ +--------------------------------------+------------------------------+--------+-------+-------+
+ | a55c8d85-2071-4452-92cb-95d15c29bde7 | OVN Controller Gateway agent | u20ovn | :-) | UP |
+ | 62e29a01-a0ac-55c9-b4ec-e223d5c90853 | OVN Metadata agent | u20ovn | :-) | UP |
+ | ce9a1471-79c1-4472-adfc-9e5ce86eba07 | OVN Controller Gateway agent | u20ovn | XXX | DOWN |
+ | 3755938f-9aac-4f08-a1ab-32fcff56d1ce | OVN Metadata agent | u20ovn | XXX | DOWN |
+ +--------------------------------------+------------------------------+--------+-------+-------+
+
+
+If the OVS "system-id" changes during a system upgrade, the "Chassis" and
+"Chassis_Private" registers will be created again, but with a different
+UUID. If the previous registers are not deleted (which should happen when
+the "ovn-controller" process is gracefully stopped), Neutron will show
+duplicated agents for the same host. Only one agent will be alive; the
+other will be down because its "Chassis_Private.nb_cfg_timestamp" is not
+updated. The administrator should then manually delete the stale registers
+from the OVN Southbound database. For example:
+
+ * List the "Chassis" registers, filtering by hostname and name (OVS
+ "system-id"):
+
+ .. code-block:: console
+
+ $ sudo ovn-sbctl list Chassis | grep name
+ hostname : u20ovn
+ name : "a55c8d85-2071-4452-92cb-95d15c29bde7"
+ hostname : u20ovn
+ name : "ce9a1471-79c1-4472-adfc-9e5ce86eba07"
+
+
+ * Delete the stale "Chassis" register:
+
+ .. code-block:: console
+
+ $ sudo ovn-sbctl destroy Chassis ce9a1471-79c1-4472-adfc-9e5ce86eba07
+
+
+ * List the "Chassis_Private" registers, filtering by name:
+
+ .. code-block:: console
+
+ $ sudo ovn-sbctl list Chassis_Private | grep name
+ name : "a55c8d85-2071-4452-92cb-95d15c29bde7"
+ name : "ce9a1471-79c1-4472-adfc-9e5ce86eba07"
+
+
+ * Delete the stale "Chassis_Private" register:
+
+ .. code-block:: console
+
+ $ sudo ovn-sbctl destroy Chassis_Private ce9a1471-79c1-4472-adfc-9e5ce86eba07
+
+
+If the host name also changes during the system upgrade, the Neutron agent
+list may show entries for different host names, but the older ones will be
+down too. The procedure is the same.
+
+It can also happen that during a node decommission the "Chassis" register
+is deleted but the "Chassis_Private" one is not. In that case, the OVN
+agent list will show the corresponding agents with the following message:
+"('Chassis' register deleted)". Again, the procedure is the same: the
+administrator should manually delete the orphaned OVN Southbound database
+register. Neutron will receive this event and delete the associated
+OVN agents.
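+
+To spot stale agents programmatically before cleaning up the registers, you
+can list the agents that are not alive; a minimal openstacksdk sketch
+(admin credentials assumed):
+
+.. code-block:: python
+
+   import openstack
+
+   conn = openstack.connect()
+   for agent in conn.network.agents():
+       if not agent.is_alive:
+           # Candidates for stale "Chassis"/"Chassis_Private" registers.
+           print(agent.id, agent.agent_type, agent.host)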
diff --git a/doc/source/admin/shared/deploy-selfservice-initialnetworks.txt b/doc/source/admin/shared/deploy-selfservice-initialnetworks.txt
index 022bfca392..0e3f85dcfa 100644
--- a/doc/source/admin/shared/deploy-selfservice-initialnetworks.txt
+++ b/doc/source/admin/shared/deploy-selfservice-initialnetworks.txt
@@ -38,6 +38,13 @@ NAT for IPv4 network traffic and directly routes IPv6 network traffic.
| status | ACTIVE |
+-------------------------+--------------+
+ .. note::
+
+     If your network uses an MTU value below 1280, please read the
+     warning in the
+ `IPv6 configuration guide <../config-ipv6.html#project-network-considerations>`__
+ before creating any subnets.
+
#. Create an IPv4 subnet on the self-service network.
.. code-block:: console
diff --git a/doc/source/contributor/testing/ci_scenario_jobs.rst b/doc/source/contributor/testing/ci_scenario_jobs.rst
index af81cef608..9c97c9306c 100644
--- a/doc/source/contributor/testing/ci_scenario_jobs.rst
+++ b/doc/source/contributor/testing/ci_scenario_jobs.rst
@@ -82,9 +82,6 @@ Below is a summary of those jobs.
|tempest-integrated-networking |tempest.api (without slow tests) | 1 | Ubuntu Jammy | ovn | ovn | --- | False | False | True | Yes |
| |tempest.scenario | | | | | | | | | |
+----------------------------------------------+----------------------------------+-------+------------------+-------------+-----------------+----------+-------+--------+------------+-------------+
- |neutron-ovn-tempest-ovs-release-ubuntu-old |tempest.api (without slow tests) | 1 | Ubuntu Focal | ovn | ovn | --- | False | False | True | Yes |
- | |tempest.scenario | | | | | | | | | |
- +----------------------------------------------+----------------------------------+-------+------------------+-------------+-----------------+----------+-------+--------+------------+-------------+
|neutron-ovn-tempest-ipv6-only-ovs-release |tempest.api (without slow tests) | 1 | Ubuntu Jammy | ovn | ovn | --- | False | False | True | Yes |
| |(only tests related to | | | | | | | | | |
| |Neutron and Nova) | | | | | | | | | |
@@ -139,11 +136,6 @@ Currently we have in that queue jobs like listed below.
| |(only tests related to | | | | | | | | | |
| |Neutron and Nova) | | | | | | | | | |
+----------------------------------------------+----------------------------------+-------+------------------+-------------+-----------------+----------+-------+--------+------------+-------------+
- |neutron-tempest-with-uwsgi-loki |tempest.api (without slow tests) | 1 | Ubuntu Jammy | openvswitch | openvswitch | legacy | False | False | True | No |
- |(non-voting) |tempest.scenario | | | | | | | | | |
- | |(only tests related to | | | | | | | | | |
- | |Neutron and Nova) | | | | | | | | | |
- +----------------------------------------------+----------------------------------+-------+------------------+-------------+-----------------+----------+-------+--------+------------+-------------+
|neutron-ovn-tempest-ipv6-only-ovs-master |tempest.api (without slow tests) | 1 | Ubuntu Jammy | ovn | ovn | --- | False | False | True | Yes |
| |(only tests related to | | | | | | | | | |
| |Neutron and Nova) | | | | | | | | | |
diff --git a/doc/source/install/compute-install-option2-rdo.rst b/doc/source/install/compute-install-option2-rdo.rst
index d7b26398ae..e166368e59 100644
--- a/doc/source/install/compute-install-option2-rdo.rst
+++ b/doc/source/install/compute-install-option2-rdo.rst
@@ -24,7 +24,7 @@ networking infrastructure for instances and handles security groups.
.. end
Replace ``PROVIDER_INTERFACE_NAME`` with the name of the underlying
- provider physical network interface. See :doc:`environment-networking-obs`
+ provider physical network interface. See :doc:`environment-networking-rdo`
for more information.
* In the ``[vxlan]`` section, configure the IP address of the physical
diff --git a/doc/source/install/compute-install-option2-ubuntu.rst b/doc/source/install/compute-install-option2-ubuntu.rst
index d7b26398ae..ef003646a8 100644
--- a/doc/source/install/compute-install-option2-ubuntu.rst
+++ b/doc/source/install/compute-install-option2-ubuntu.rst
@@ -24,8 +24,8 @@ networking infrastructure for instances and handles security groups.
.. end
Replace ``PROVIDER_INTERFACE_NAME`` with the name of the underlying
- provider physical network interface. See :doc:`environment-networking-obs`
- for more information.
+ provider physical network interface.
+ See :doc:`environment-networking-ubuntu` for more information.
* In the ``[vxlan]`` section, configure the IP address of the physical
network interface that handles overlay networks and enable layer-2
diff --git a/doc/source/ovn/migration.rst b/doc/source/ovn/migration.rst
index 7fa733ce4f..2123767882 100644
--- a/doc/source/ovn/migration.rst
+++ b/doc/source/ovn/migration.rst
@@ -49,11 +49,11 @@ Perform the following steps in the overcloud/undercloud
Perform the following steps in the undercloud
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-1. Install python-networking-ovn-migration-tool.
+1. Install openstack-neutron-ovn-migration-tool.
.. code-block:: console
- # yum install python-networking-ovn-migration-tool
+ # yum install openstack-neutron-ovn-migration-tool
2. Create a working directory on the undercloud, and copy the ansible playbooks
@@ -61,7 +61,7 @@ Perform the following steps in the undercloud
$ mkdir ~/ovn_migration
$ cd ~/ovn_migration
- $ cp -rfp /usr/share/ansible/networking-ovn-migration/playbooks .
+ $ cp -rfp /usr/share/ansible/neutron-ovn-migration/playbooks .
3. Create ``~/overcloud-deploy-ovn.sh`` script in your ``$HOME``.
This script must source your stackrc file, and then execute an ``openstack
diff --git a/neutron/agent/linux/dhcp.py b/neutron/agent/linux/dhcp.py
index 84d444c659..4f19d6cc73 100644
--- a/neutron/agent/linux/dhcp.py
+++ b/neutron/agent/linux/dhcp.py
@@ -41,6 +41,7 @@ from neutron.agent.linux import external_process
from neutron.agent.linux import ip_lib
from neutron.agent.linux import iptables_manager
from neutron.cmd import runtime_checks as checks
+from neutron.common import _constants as common_constants
from neutron.common import utils as common_utils
from neutron.ipam import utils as ipam_utils
from neutron.privileged.agent.linux import dhcp as priv_dhcp
@@ -1841,7 +1842,7 @@ class DeviceManager(object):
if self.conf.force_metadata or self.conf.enable_isolated_metadata:
ip_cidrs.append(constants.METADATA_CIDR)
if netutils.is_ipv6_enabled():
- ip_cidrs.append(constants.METADATA_V6_CIDR)
+ ip_cidrs.append(common_constants.METADATA_V6_CIDR)
self.driver.init_l3(interface_name, ip_cidrs,
namespace=network.namespace)
diff --git a/neutron/agent/linux/ip_lib.py b/neutron/agent/linux/ip_lib.py
index 4d664381f8..9953729016 100644
--- a/neutron/agent/linux/ip_lib.py
+++ b/neutron/agent/linux/ip_lib.py
@@ -102,6 +102,10 @@ class AddressNotReady(exceptions.NeutronException):
"become ready: %(reason)s")
+class DADFailed(AddressNotReady):
+ pass
+
+
InvalidArgument = privileged.InvalidArgument
@@ -592,7 +596,7 @@ class IpAddrCommand(IpDeviceCommandBase):
"""Wait until an address is no longer marked 'tentative' or 'dadfailed'
raises AddressNotReady if times out, address not present on interface
- or DAD fails
+ raises DADFailed if Duplicate Address Detection fails
"""
def is_address_ready():
try:
@@ -604,7 +608,7 @@ class IpAddrCommand(IpDeviceCommandBase):
# Since both 'dadfailed' and 'tentative' will be set if DAD fails,
# check 'dadfailed' first just to be explicit
if addr_info['dadfailed']:
- raise AddressNotReady(
+ raise DADFailed(
address=address, reason=_('Duplicate address detected'))
if addr_info['tentative']:
return False
diff --git a/neutron/agent/metadata/driver.py b/neutron/agent/metadata/driver.py
index b7e69696a0..a4a62444e2 100644
--- a/neutron/agent/metadata/driver.py
+++ b/neutron/agent/metadata/driver.py
@@ -33,6 +33,7 @@ from neutron.agent.l3 import namespaces
from neutron.agent.linux import external_process
from neutron.agent.linux import ip_lib
from neutron.agent.linux import utils as linux_utils
+from neutron.common import _constants as common_constants
from neutron.common import coordination
from neutron.common import metadata as comm_meta
from neutron.common import utils as common_utils
@@ -241,9 +242,30 @@ class MetadataDriver(object):
# HAProxy cannot bind() until IPv6 Duplicate Address Detection
# completes. We must wait until the address leaves its 'tentative'
# state.
- ip_lib.IpAddrCommand(
- parent=ip_lib.IPDevice(name=bind_interface, namespace=ns_name)
- ).wait_until_address_ready(address=bind_address_v6)
+ try:
+ ip_lib.IpAddrCommand(
+ parent=ip_lib.IPDevice(name=bind_interface,
+ namespace=ns_name)
+ ).wait_until_address_ready(address=bind_address_v6)
+ except ip_lib.DADFailed as exc:
+ # This failure means that another DHCP agent has already
+ # configured this metadata address, so all requests will
+ # be via that single agent.
+ LOG.info('DAD failed for address %(address)s on interface '
+ '%(interface)s in namespace %(namespace)s on network '
+ '%(network)s, deleting it. Exception: %(exception)s',
+ {'address': bind_address_v6,
+ 'interface': bind_interface,
+ 'namespace': ns_name,
+ 'network': network_id,
+ 'exception': str(exc)})
+ try:
+ ip_lib.delete_ip_address(bind_address_v6, bind_interface,
+ namespace=ns_name)
+ except Exception as exc:
+ # do not re-raise a delete failure, just log
+ LOG.info('Address deletion failure: %s', str(exc))
+ return
pm.enable()
monitor.register(uuid, METADATA_SERVICE_NAME, pm)
cls.monitors[router_id] = pm
@@ -338,6 +360,6 @@ def apply_metadata_nat_rules(router, proxy):
if netutils.is_ipv6_enabled():
for c, r in proxy.metadata_nat_rules(
proxy.metadata_port,
- metadata_address=(constants.METADATA_V6_IP + '/128')):
+ metadata_address=(common_constants.METADATA_V6_CIDR)):
router.iptables_manager.ipv6['nat'].add_rule(c, r)
router.iptables_manager.apply()
diff --git a/neutron/agent/ovn/extensions/qos_hwol.py b/neutron/agent/ovn/extensions/qos_hwol.py
index abe0783e2b..a813418d04 100644
--- a/neutron/agent/ovn/extensions/qos_hwol.py
+++ b/neutron/agent/ovn/extensions/qos_hwol.py
@@ -48,7 +48,7 @@ class OVSInterfaceEvent(row_event.RowEvent):
def __init__(self, ovn_agent):
self.ovn_agent = ovn_agent
- events = (self.ROW_CREATE, self.ROW_DELETE)
+ events = (self.ROW_CREATE, self.ROW_UPDATE, self.ROW_DELETE)
table = 'Interface'
super().__init__(events, table, None)
@@ -58,7 +58,7 @@ class OVSInterfaceEvent(row_event.RowEvent):
return True
def run(self, event, row, old):
- if event == self.ROW_CREATE:
+ if event in (self.ROW_CREATE, self.ROW_UPDATE):
self.ovn_agent.qos_hwol_ext.add_port(
row.external_ids['iface-id'], row.name)
elif event == self.ROW_DELETE:
diff --git a/neutron/cmd/sanity/checks.py b/neutron/cmd/sanity/checks.py
index b111c1c2f4..329e99463a 100644
--- a/neutron/cmd/sanity/checks.py
+++ b/neutron/cmd/sanity/checks.py
@@ -49,10 +49,11 @@ DNSMASQ_VERSION_HOST_ADDR6_LIST = '2.81'
DIRECT_PORT_QOS_MIN_OVS_VERSION = '2.11'
MINIMUM_DIBBLER_VERSION = '1.0.1'
CONNTRACK_GRE_MODULE = 'nf_conntrack_proto_gre'
-OVN_NB_DB_SCHEMA_GATEWAY_CHASSIS = '5.7'
-OVN_NB_DB_SCHEMA_PORT_GROUP = '5.11'
-OVN_NB_DB_SCHEMA_STATELESS_NAT = '5.17'
-OVN_SB_DB_SCHEMA_VIRTUAL_PORT = '2.5'
+OVN_NB_DB_SCHEMA_GATEWAY_CHASSIS = '5.7.0'
+OVN_NB_DB_SCHEMA_PORT_GROUP = '5.11.0'
+OVN_NB_DB_SCHEMA_STATELESS_NAT = '5.17.0'
+OVN_SB_DB_SCHEMA_VIRTUAL_PORT = '2.5.0'
+OVN_LOCALNET_LEARN_FDB = '22.09.0'
class OVNCheckType(enum.Enum):
@@ -63,6 +64,14 @@ class OVNCheckType(enum.Enum):
def _get_ovn_version(check_type):
+    """Retrieves the OVN nbctl, sbctl, NB schema or SB schema version
+
+ :param check_type: ``OVNCheckType`` enum element. This method can return
+ the nbctl version, the sbctl version, the NB schema
+ version or the SB schema version.
+ :return: (tuple) 3 element tuple: (major, minor, revision). (0, 0, 0) by
+ default.
+ """
if check_type in (OVNCheckType.nb_version, OVNCheckType.nb_db_schema):
cmd = ['ovn-nbctl', '--version']
    elif check_type in (OVNCheckType.sb_version, OVNCheckType.sb_db_schema):
@@ -78,9 +87,9 @@ def _get_ovn_version(check_type):
else:
matched_line = re.search(r"DB Schema.*", out)
- matched_version = re.search(r"(\d+\.\d+)", matched_line.group(0))
+ matched_version = re.search(r"(\d+\.\d+\.\d+)", matched_line.group(0))
return versionutils.convert_version_to_tuple(matched_version.group(1) if
- matched_version else '0.0')
+ matched_version else '0.0.0')
def ovs_vxlan_supported(from_ip='192.0.2.1', to_ip='192.0.2.2'):
@@ -657,3 +666,16 @@ def ovn_nb_db_schema_gateway_chassis_supported():
'Exception: %s', e)
return False
return True
+
+
+def ovn_localnet_learn_fdb_support():
+ try:
+ ver = _get_ovn_version(OVNCheckType.nb_version)
+ minver = versionutils.convert_version_to_tuple(OVN_LOCALNET_LEARN_FDB)
+ if ver < minver:
+ return False
+ except (OSError, RuntimeError, ValueError) as e:
+ LOG.debug('Exception while checking OVN version. '
+ 'Exception: %s', e)
+ return False
+ return True
diff --git a/neutron/cmd/sanity_check.py b/neutron/cmd/sanity_check.py
index 903b49db0d..8bd4318305 100644
--- a/neutron/cmd/sanity_check.py
+++ b/neutron/cmd/sanity_check.py
@@ -347,6 +347,14 @@ def check_ovn_nb_db_schema_gateway_chassis():
return result
+def check_ovn_localnet_learn_fdb_support():
+ result = checks.ovn_localnet_learn_fdb_support()
+ if not result:
+ LOG.warning('OVN does not support localnet_learn_fdb option. '
+ 'This support was added in OVN 22.09.')
+ return result
+
+
# Define CLI opts to test specific features, with a callback for the test
OPTS = [
BoolOptCallback('ovs_vxlan', check_ovs_vxlan, default=False,
@@ -431,6 +439,10 @@ OPTS = [
check_ovn_nb_db_schema_gateway_chassis,
help=_('Check OVN NB DB schema support Gateway_Chassis'),
default=False),
+ BoolOptCallback('ovn_localnet_learn_fdb_support',
+ check_ovn_localnet_learn_fdb_support,
+ help=_('Check OVN supports localnet_learn_fdb option'),
+ default=False),
]
diff --git a/neutron/common/_constants.py b/neutron/common/_constants.py
index 55fc718c49..40745ecc86 100644
--- a/neutron/common/_constants.py
+++ b/neutron/common/_constants.py
@@ -86,3 +86,9 @@ TRAIT_NETWORK_TUNNEL = 'CUSTOM_NETWORK_TUNNEL_PROVIDER'
# The lowest binding index for L3 agents and DHCP agents.
LOWEST_AGENT_BINDING_INDEX = 1
+
+# Neutron-lib defines this with a /64 but it should be /128
+METADATA_V6_CIDR = constants.METADATA_V6_IP + '/128'
+
+# TODO(haleyb): move this constant to neutron_lib.constants
+IPV4_MIN_MTU = 68
diff --git a/neutron/common/ovn/constants.py b/neutron/common/ovn/constants.py
index 2a4afbf4a9..d537e2b801 100644
--- a/neutron/common/ovn/constants.py
+++ b/neutron/common/ovn/constants.py
@@ -32,8 +32,8 @@ OVN_ROUTER_NAME_EXT_ID_KEY = 'neutron:router_name'
OVN_ROUTER_ID_EXT_ID_KEY = 'neutron:router_id'
OVN_AZ_HINTS_EXT_ID_KEY = 'neutron:availability_zone_hints'
OVN_ROUTER_IS_EXT_GW = 'neutron:is_ext_gw'
-OVN_GW_PORT_EXT_ID_KEY = 'neutron:gw_port_id'
-OVN_GW_NETWORK_EXT_ID_KEY = 'neutron:gw_network_id'
+OVN_GW_PORT_EXT_ID_KEY = 'neutron:gw_port_id' # DEPRECATED, DON'T USE
+OVN_GW_NETWORK_EXT_ID_KEY = 'neutron:gw_network_id' # DEPRECATED, DON'T USE
OVN_SUBNET_EXT_ID_KEY = 'neutron:subnet_id'
OVN_SUBNET_EXT_IDS_KEY = 'neutron:subnet_ids'
OVN_SUBNET_POOL_EXT_ADDR_SCOPE4_KEY = 'neutron:subnet_pool_addr_scope4'
@@ -391,6 +391,7 @@ LSP_OPTIONS_REQUESTED_CHASSIS_KEY = 'requested-chassis'
LSP_OPTIONS_MCAST_FLOOD_REPORTS = 'mcast_flood_reports'
LSP_OPTIONS_MCAST_FLOOD = 'mcast_flood'
LSP_OPTIONS_QOS_MIN_RATE = 'qos_min_rate'
+LSP_OPTIONS_LOCALNET_LEARN_FDB = 'localnet_learn_fdb'
LRP_OPTIONS_RESIDE_REDIR_CH = 'reside-on-redirect-chassis'
LRP_OPTIONS_REDIRECT_TYPE = 'redirect-type'
@@ -434,4 +435,5 @@ OVN_SUPPORTED_VNIC_TYPES = [portbindings.VNIC_NORMAL,
portbindings.VNIC_VHOST_VDPA,
portbindings.VNIC_REMOTE_MANAGED,
portbindings.VNIC_BAREMETAL,
+ portbindings.VNIC_VIRTIO_FORWARDER,
]
diff --git a/neutron/conf/agent/database/agentschedulers_db.py b/neutron/conf/agent/database/agentschedulers_db.py
index f58e0b2771..a46a18a262 100644
--- a/neutron/conf/agent/database/agentschedulers_db.py
+++ b/neutron/conf/agent/database/agentschedulers_db.py
@@ -33,7 +33,9 @@ AGENTS_SCHEDULER_OPTS = [
'network. If this number is greater than 1, the '
'scheduler automatically assigns multiple DHCP agents '
'for a given tenant network, providing high '
- 'availability for the DHCP service.')),
+                  'availability for the DHCP service. However, this does '
+ 'not provide high availability for the IPv6 metadata '
+ 'service in isolated networks.')),
cfg.BoolOpt('enable_services_on_agents_with_admin_state_down',
default=False,
help=_('Enable services on an agent with admin_state_up '
diff --git a/neutron/conf/plugins/ml2/drivers/ovn/ovn_conf.py b/neutron/conf/plugins/ml2/drivers/ovn/ovn_conf.py
index be75c405a8..0706477c4f 100644
--- a/neutron/conf/plugins/ml2/drivers/ovn/ovn_conf.py
+++ b/neutron/conf/plugins/ml2/drivers/ovn/ovn_conf.py
@@ -217,6 +217,13 @@ ovn_opts = [
'order to disable the ``stateful-security-group`` API '
'extension as ``allow-stateless`` keyword is only '
'supported by OVN >= 21.06.')),
+ cfg.BoolOpt('localnet_learn_fdb',
+ default=False,
+                help=_('If enabled, localnet ports will learn MAC '
+                       'addresses and store them in the FDB table of the '
+                       'OVN Southbound database. This avoids flooding for '
+                       'traffic towards unknown IPs when port security is '
+                       'disabled. It requires OVN 22.09 or newer.')),
]
@@ -330,3 +337,7 @@ def is_igmp_snooping_enabled():
def is_ovn_dhcp_disabled_for_baremetal():
return cfg.CONF.ovn.disable_ovn_dhcp_for_baremetal_ports
+
+
+def is_learn_fdb_enabled():
+ return cfg.CONF.ovn.localnet_learn_fdb
diff --git a/neutron/conf/policies/availability_zone.py b/neutron/conf/policies/availability_zone.py
index 9f265dfab4..bb94f17f64 100644
--- a/neutron/conf/policies/availability_zone.py
+++ b/neutron/conf/policies/availability_zone.py
@@ -14,8 +14,6 @@ from neutron_lib import policy as neutron_policy
from oslo_log import versionutils
from oslo_policy import policy
-from neutron.conf.policies import base
-
DEPRECATION_REASON = (
"The Availability Zone API now supports project scope and default roles.")
@@ -23,7 +21,11 @@ DEPRECATION_REASON = (
rules = [
policy.DocumentedRuleDefault(
name='get_availability_zone',
- check_str=base.ADMIN,
+    # NOTE: this can't be the ADMIN_OR_PROJECT_READER constant from the base
+    # module because that one uses "project_id" in the check string, and the
+    # availability_zone resource doesn't belong to any project, so such a
+    # check string would fail enforcement.
+ check_str='role:reader',
description='List availability zones',
operations=[
{
diff --git a/neutron/conf/policies/port.py b/neutron/conf/policies/port.py
index faab5c8f62..5783f080bb 100644
--- a/neutron/conf/policies/port.py
+++ b/neutron/conf/policies/port.py
@@ -276,6 +276,7 @@ rules = [
check_str=neutron_policy.policy_or(
base.ADMIN,
neutron_policy.RULE_ADVSVC,
+ base.RULE_NET_OWNER,
base.PROJECT_READER
),
scope_types=['project'],
diff --git a/neutron/db/db_base_plugin_v2.py b/neutron/db/db_base_plugin_v2.py
index ca430b2cd5..d80e7b3fc7 100644
--- a/neutron/db/db_base_plugin_v2.py
+++ b/neutron/db/db_base_plugin_v2.py
@@ -58,6 +58,7 @@ from neutron.db import ipam_pluggable_backend
from neutron.db import models_v2
from neutron.db import rbac_db_mixin as rbac_mixin
from neutron.db import standardattrdescription_db as stattr_db
+from neutron.exceptions import mtu as mtu_exc
from neutron.extensions import subnetpool_prefix_ops
from neutron import ipam
from neutron.ipam import exceptions as ipam_exc
@@ -75,7 +76,7 @@ LOG = logging.getLogger(__name__)
def _ensure_subnet_not_used(context, subnet_id):
- models_v2.Subnet.lock_register(
+ models_v2.Subnet.write_lock_register(
context, exc.SubnetInUse(subnet_id=subnet_id), id=subnet_id)
try:
registry.publish(
@@ -466,6 +467,10 @@ class NeutronDbPluginV2(db_base_plugin_common.DbBasePluginCommon,
# context.
getattr(network, 'rbac_entries')
+ # validate 'mtu' parameter
+ if 'mtu' in n:
+ self._validate_change_network_mtu(context, id, n['mtu'])
+
# The filter call removes attributes from the body received from
# the API that are logically tied to network resources but are
# stored in other database tables handled by extensions
@@ -473,6 +478,28 @@ class NeutronDbPluginV2(db_base_plugin_common.DbBasePluginCommon,
ndb_utils.filter_non_model_columns(n, models_v2.Network))
return self._make_network_dict(network, context=context)
+ def _validate_change_network_mtu(self, context, id, mtu):
+ # can support either ip_version
+ if mtu >= constants.IPV6_MIN_MTU:
+ return
+
+ subnets = self._get_subnets_by_network(context, id)
+ if len(subnets) == 0:
+ return
+
+ # at least one subnet is present; if below the IPv4 minimum, fail early
+ if mtu < _constants.IPV4_MIN_MTU:
+ raise mtu_exc.NetworkMTUSubnetConflict(
+ net_id=id, mtu=_constants.IPV4_MIN_MTU)
+
+ # We do not need to check IPv4 subnets as they will have been
+ # caught by the IPV4_MIN_MTU check above
+ for subnet in subnets:
+ if (subnet.ip_version == constants.IP_VERSION_6 and
+ mtu < constants.IPV6_MIN_MTU):
+ raise mtu_exc.NetworkMTUSubnetConflict(
+ net_id=id, mtu=constants.IPV6_MIN_MTU)
+
def _ensure_network_not_in_use(self, context, net_id):
non_auto_ports = context.session.query(
models_v2.Port.id).filter_by(network_id=net_id).filter(
@@ -715,6 +742,23 @@ class NeutronDbPluginV2(db_base_plugin_common.DbBasePluginCommon,
"Prefix Delegation.")
raise exc.BadRequest(resource='subnets', msg=reason)
+ def _validate_subnet_network_mtu(self, network, subnet):
+ """Validates that network mtu is correct for subnet association"""
+ mtu = network.mtu
+ if not mtu or mtu >= constants.IPV6_MIN_MTU:
+ return
+
+ # if below IPv4 minimum we fail early
+ if mtu < _constants.IPV4_MIN_MTU:
+ raise mtu_exc.NetworkMTUSubnetConflict(net_id=network.id, mtu=mtu)
+
+ # We do not need to check IPv4 subnets as they will have been
+ # caught by the IPV4_MIN_MTU check above
+ ip_version = subnet.get('ip_version')
+ if (ip_version == constants.IP_VERSION_6 and
+ mtu < constants.IPV6_MIN_MTU):
+ raise mtu_exc.NetworkMTUSubnetConflict(net_id=network.id, mtu=mtu)
+
def _update_router_gw_ports(self, context, network, subnet):
l3plugin = directory.get_plugin(plugin_constants.L3)
if l3plugin:
@@ -876,6 +920,7 @@ class NeutronDbPluginV2(db_base_plugin_common.DbBasePluginCommon,
with db_api.CONTEXT_WRITER.using(context):
network = self._get_network(context,
subnet['subnet']['network_id'])
+ self._validate_subnet_network_mtu(network, s)
subnet, ipam_subnet = self.ipam.allocate_subnet(context,
network,
subnet['subnet'],
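Taken together, the two validators above enforce a simple rule set; a standalone sketch (the numeric minima are the assumed values of _constants.IPV4_MIN_MTU and constants.IPV6_MIN_MTU, per RFC 791 and RFC 8200):

# Sketch of the MTU rules enforced above, outside of any plugin context.
IPV4_MIN_MTU = 68      # assumed value of _constants.IPV4_MIN_MTU (RFC 791)
IPV6_MIN_MTU = 1280    # assumed value of constants.IPV6_MIN_MTU (RFC 8200)

def min_mtu_required(subnet_ip_versions):
    # No subnets yet: any MTU is acceptable for now.
    if not subnet_ip_versions:
        return 0
    # Any IPv6 subnet raises the floor to 1280; otherwise 68 suffices.
    return IPV6_MIN_MTU if 6 in subnet_ip_versions else IPV4_MIN_MTU

assert min_mtu_required(set()) == 0
assert min_mtu_required({4}) == 68
assert min_mtu_required({4, 6}) == 1280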
diff --git a/neutron/db/dvr_mac_db.py b/neutron/db/dvr_mac_db.py
index 68ec42ae59..4f69ffd995 100644
--- a/neutron/db/dvr_mac_db.py
+++ b/neutron/db/dvr_mac_db.py
@@ -157,6 +157,7 @@ class DVRDbMixin(ext_dvr.DVRMacAddressPluginBase):
@log_helpers.log_method_call
@db_api.retry_if_session_inactive()
+ @db_api.CONTEXT_READER
def get_ports_on_host_by_subnet(self, context, host, subnet):
"""Returns DVR serviced ports on a given subnet in the input host
diff --git a/neutron/db/ipam_backend_mixin.py b/neutron/db/ipam_backend_mixin.py
index e734996900..9c245ae689 100644
--- a/neutron/db/ipam_backend_mixin.py
+++ b/neutron/db/ipam_backend_mixin.py
@@ -683,7 +683,7 @@ class IpamBackendMixin(db_base_plugin_common.DbBasePluginCommon):
msg = ('This subnet is being modified by another concurrent '
'operation')
for subnet in subnets:
- subnet.lock_register(
+ subnet.read_lock_register(
context, exc.SubnetInUse(subnet_id=subnet.id, reason=msg),
id=subnet.id)
subnet_dicts = [self._make_subnet_dict(subnet, context=context)
diff --git a/neutron/db/l3_agentschedulers_db.py b/neutron/db/l3_agentschedulers_db.py
index 41a2464419..ac07371185 100644
--- a/neutron/db/l3_agentschedulers_db.py
+++ b/neutron/db/l3_agentschedulers_db.py
@@ -397,12 +397,12 @@ class L3AgentSchedulerDbMixin(l3agentscheduler.L3AgentSchedulerPluginBase,
return {'agents': [self._make_agent_dict(agent)
for agent in agents]}
- def get_routers_l3_agents_count(self, context):
+ def get_routers_l3_agents_count(self, context, ha=False, less_than=0):
"""Return a map between routers and agent counts for all routers."""
# TODO(sshank): This portion needs Router OVO integration when it is
# merged.
l3_model_list = l3_objs.RouterExtraAttributes.get_router_agents_count(
- context)
+ context, ha=ha, less_than=less_than)
return [(self._make_router_dict(router_model),
agent_count if agent_count else 0)
for router_model, agent_count in l3_model_list]
diff --git a/neutron/db/migration/alembic_migrations/env.py b/neutron/db/migration/alembic_migrations/env.py
index dee4f9c474..921e1c5671 100644
--- a/neutron/db/migration/alembic_migrations/env.py
+++ b/neutron/db/migration/alembic_migrations/env.py
@@ -13,14 +13,16 @@
# under the License.
from alembic import context
+from neutron_lib import context as n_context
+from neutron_lib.db import api as db_api
from neutron_lib.db import model_base
from oslo_config import cfg
+from oslo_db import options as db_options
import sqlalchemy as sa
from sqlalchemy import event # noqa
from neutron.db.migration.alembic_migrations import external
from neutron.db.migration import autogen
-from neutron.db.migration.connection import DBConnection
from neutron.db.migration.models import head # noqa
try:
@@ -53,6 +55,13 @@ def set_mysql_engine():
model_base.BASEV2.__table_args__['mysql_engine'])
+def setup_conf():
+ conf = cfg.CONF
+ conf.register_opts(db_options.database_opts, 'database')
+ conf.set_override('connection', neutron_config.database.connection,
+ group='database')
+
+
def include_object(object_, name, type_, reflected, compare_to):
if type_ == 'table' and name in external.TABLES:
return False
@@ -101,11 +110,12 @@ def run_migrations_online():
and associate a connection with the context.
"""
+ setup_conf()
set_mysql_engine()
- connection = config.attributes.get('connection')
- with DBConnection(neutron_config.database.connection, connection) as conn:
+ admin_ctx = n_context.get_admin_context()
+ with db_api.CONTEXT_WRITER.using(admin_ctx) as session:
context.configure(
- connection=conn,
+ connection=session.connection(),
target_metadata=target_metadata,
include_object=include_object,
process_revision_directives=autogen.process_revision_directives
diff --git a/neutron/db/migration/alembic_migrations/versions/2023.2/expand/93f394357a27_remove_in_use_on_subnets.py b/neutron/db/migration/alembic_migrations/versions/2023.2/expand/93f394357a27_remove_in_use_on_subnets.py
new file mode 100644
index 0000000000..0e9ceb6071
--- /dev/null
+++ b/neutron/db/migration/alembic_migrations/versions/2023.2/expand/93f394357a27_remove_in_use_on_subnets.py
@@ -0,0 +1,42 @@
+# Copyright 2023 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+from alembic import op
+import sqlalchemy as sa
+
+
+"""remove in_use from subnet
+
+Revision ID: 93f394357a27
+Revises: fc153938cdc1
+Create Date: 2023-03-07 14:48:15.763633
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '93f394357a27'
+down_revision = 'fc153938cdc1'
+
+
+def upgrade():
+ op.drop_column('subnets', 'in_use')
+
+
+def expand_drop_exceptions():
+ """Support dropping 'in_use' column in table 'subnets'"""
+
+ return {
+ sa.Column: ['subnets.in_use']
+ }
diff --git a/neutron/db/migration/alembic_migrations/versions/EXPAND_HEAD b/neutron/db/migration/alembic_migrations/versions/EXPAND_HEAD
index 961bd474e5..88323e719e 100644
--- a/neutron/db/migration/alembic_migrations/versions/EXPAND_HEAD
+++ b/neutron/db/migration/alembic_migrations/versions/EXPAND_HEAD
@@ -1 +1 @@
-fc153938cdc1
+93f394357a27
diff --git a/neutron/db/models_v2.py b/neutron/db/models_v2.py
index 9a03b46763..8ab9707b17 100644
--- a/neutron/db/models_v2.py
+++ b/neutron/db/models_v2.py
@@ -33,25 +33,47 @@ from neutron.db import rbac_db_models
class HasInUse(object):
"""NeutronBaseV2 mixin, to add the flag "in_use" to a DB model.
- The content of this flag (boolean) parameter is not relevant. The goal of
- this field is to be used in a write transaction to mark a DB register as
- "in_use". Writing any value on this DB parameter will lock the container
- register. At the end of the DB transaction, the DB engine will check if
- this register was modified or deleted. In such case, the transaction will
- fail and won't be committed.
-
- "lock_register" is the method to write the register "in_use" column.
- Because the lifespan of this DB lock is the DB transaction, there isn't an
- unlock method. The lock will finish once the transaction ends.
+ The goal of this class is to allow users to lock specific database rows
+ with a shared or exclusive lock (without necessarily introducing a
+ change in the table itself). Having these locks allows the DB engine to
+ prevent concurrent modifications (e.g. the deletion of a resource while
+ we are currently adding a new dependency on the resource).
+
+ "read_lock_register" takes a shared DB lock on the row specified by the
+ filters. The lock is automatically released once the transaction ends.
+ You can have any number of parallel read locks on the same DB row, but
+ you cannot have any write lock in parallel.
+
+ "write_lock_register" takes an exclusive DB lock on the row specified by
+ the filters. The lock is automatically released once the transaction
+ ends. You may only have one write lock on each row at a time; it
+ therefore blocks all other read and write locks on this row.
"""
- in_use = sa.Column(sa.Boolean(), nullable=False,
- server_default=sql.false(), default=False)
@classmethod
- def lock_register(cls, context, exception, **filters):
+ def write_lock_register(cls, context, exception, **filters):
+ # we use `with_for_update()` to include `FOR UPDATE` in the sql
+ # statement.
+ # we need to set `enable_eagerloads(False)` so that we do not try to
+ # load attached resources (e.g. standardattributes) as this breaks the
+ # `FOR UPDATE` statement.
num_reg = context.session.query(
- cls).filter_by(**filters).update({'in_use': True})
- if num_reg != 1:
+ cls).filter_by(**filters).enable_eagerloads(
+ False).with_for_update().first()
+ if num_reg is None:
+ raise exception
+
+ @classmethod
+ def read_lock_register(cls, context, exception, **filters):
+ # we use `with_for_update(read=True)` to include `LOCK IN SHARE MODE`
+ # in the sql statement.
+ # we need to set `enable_eagerloads(False)` so that we do not try to
+ # load attached resources (e.g. standardattributes) as this breaks the
+ # `LOCK IN SHARE MODE` statement.
+ num_reg = context.session.query(
+ cls).filter_by(**filters).enable_eagerloads(
+ False).with_for_update(read=True).first()
+ if num_reg is None:
raise exception
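A minimal usage sketch of the two lock helpers, mirroring the call sites changed earlier in this patch (context, subnet_id and the exception class are assumed to be in scope):

# Both helpers raise the supplied exception if no row matches the filters;
# the locks live until the surrounding transaction ends.
with db_api.CONTEXT_WRITER.using(context):
    # shared lock: parallel readers are fine, writers are blocked
    models_v2.Subnet.read_lock_register(
        context, exc.SubnetInUse(subnet_id=subnet_id), id=subnet_id)
    # exclusive lock: blocks all other read and write locks on this row
    models_v2.Subnet.write_lock_register(
        context, exc.SubnetInUse(subnet_id=subnet_id), id=subnet_id)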
diff --git a/neutron/db/securitygroups_db.py b/neutron/db/securitygroups_db.py
index f65b1d8fc3..57cf316ee0 100644
--- a/neutron/db/securitygroups_db.py
+++ b/neutron/db/securitygroups_db.py
@@ -251,6 +251,12 @@ class SecurityGroupDbMixin(ext_sg.SecurityGroupPluginBase,
if sg['name'] == 'default' and not context.is_admin:
raise ext_sg.SecurityGroupCannotRemoveDefault()
+ # Check if there are rules with remote_group_id pointing to
+ # the security_group to be deleted
+ rules_ids_as_remote = self._get_security_group_rules_by_remote(
+ context=context, remote_id=id,
+ )
+
self._registry_publish(resources.SECURITY_GROUP,
events.BEFORE_DELETE,
exc_cls=ext_sg.SecurityGroupInUse, id=id,
@@ -279,6 +285,20 @@ class SecurityGroupDbMixin(ext_sg.SecurityGroupPluginBase,
context, resource_id=id, states=(sec_group,),
metadata={'security_group_rule_ids': sgr_ids,
'name': sg['name']}))
+ for rule in rules_ids_as_remote:
+ registry.publish(
+ resources.SECURITY_GROUP_RULE,
+ events.AFTER_DELETE,
+ self,
+ payload=events.DBEventPayload(
+ context,
+ resource_id=rule['id'],
+ metadata={'security_group_id': rule['security_group_id'],
+ 'remote_group_id': rule['remote_group_id'],
+ 'rule': rule
+ }
+ )
+ )
@db_api.retry_if_session_inactive()
def update_security_group(self, context, id, security_group):
@@ -365,6 +385,23 @@ class SecurityGroupDbMixin(ext_sg.SecurityGroupPluginBase,
self._make_security_group_binding_dict,
filters=filters, fields=fields)
+ def _get_security_group_rules_by_remote(self, context, remote_id):
+ return model_query.get_collection(
+ context, sg_models.SecurityGroupRule,
+ self._make_security_group_rule_dict,
+ filters={'remote_group_id': [remote_id]},
+ fields=['id',
+ 'remote_group_id',
+ 'security_group_id',
+ 'direction',
+ 'ethertype',
+ 'protocol',
+ 'port_range_min',
+ 'port_range_max',
+ 'normalized_cidr'
+ ]
+ )
+
@db_api.retry_if_session_inactive()
def _delete_port_security_group_bindings(self, context, port_id):
with db_api.CONTEXT_WRITER.using(context):
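The new AFTER_DELETE notifications can be consumed with a standard callback subscription; a minimal sketch of a hypothetical consumer (the OVN mechanism driver change later in this diff does exactly this):

from neutron_lib.callbacks import events, registry, resources

def _on_sg_rule_after_delete(resource, event, trigger, payload):
    # payload.metadata carries the full rule dict published above
    rule = payload.metadata['rule']
    remote = payload.metadata['remote_group_id']
    ...  # drop backend state derived from the deleted rule

registry.subscribe(_on_sg_rule_after_delete,
                   resources.SECURITY_GROUP_RULE,
                   events.AFTER_DELETE)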
diff --git a/neutron/exceptions/mtu.py b/neutron/exceptions/mtu.py
new file mode 100644
index 0000000000..02672d8059
--- /dev/null
+++ b/neutron/exceptions/mtu.py
@@ -0,0 +1,28 @@
+# Copyright (c) 2023 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from neutron_lib import exceptions as e
+
+from neutron._i18n import _
+
+
+# TODO(haleyb): Move to n-lib
+class NetworkMTUSubnetConflict(e.Conflict):
+ """A conflict error due to MTU being invalid on said network.
+
+ :param net_id: The UUID of the network
+ :param mtu: The minimum MTU required by a subnet for the network
+ """
+ message = _("MTU of %(net_id)s is not valid, subnet requires a "
+ "minimum of %(mtu)s")
diff --git a/neutron/objects/db/api.py b/neutron/objects/db/api.py
index 482728c314..8ece717d99 100644
--- a/neutron/objects/db/api.py
+++ b/neutron/objects/db/api.py
@@ -30,19 +30,19 @@ def _get_filter_query(obj_cls, context, query_field=None, query_limit=None,
return query
-@db_api.CONTEXT_READER
def get_object(obj_cls, context, **kwargs):
- return _get_filter_query(obj_cls, context, **kwargs).first()
+ with db_api.CONTEXT_READER.using(context):
+ return _get_filter_query(obj_cls, context, **kwargs).first()
-@db_api.CONTEXT_READER
def count(obj_cls, context, query_field=None, query_limit=None, **kwargs):
- if not query_field and obj_cls.primary_keys:
- query_field = obj_cls.primary_keys[0]
- if query_field in obj_cls.fields_need_translation:
- query_field = obj_cls.fields_need_translation[query_field]
- return _get_filter_query(obj_cls, context, query_field=query_field,
- query_limit=query_limit, **kwargs).count()
+ with db_api.CONTEXT_READER.using(context):
+ if not query_field and obj_cls.primary_keys:
+ query_field = obj_cls.primary_keys[0]
+ if query_field in obj_cls.fields_need_translation:
+ query_field = obj_cls.fields_need_translation[query_field]
+ return _get_filter_query(obj_cls, context, query_field=query_field,
+ query_limit=query_limit, **kwargs).count()
def _kwargs_to_filters(**kwargs):
diff --git a/neutron/objects/router.py b/neutron/objects/router.py
index 0539f80676..f462a1b57c 100644
--- a/neutron/objects/router.py
+++ b/neutron/objects/router.py
@@ -109,7 +109,7 @@ class RouterExtraAttributes(base.NeutronDbObject):
@classmethod
@db_api.CONTEXT_READER
- def get_router_agents_count(cls, context):
+ def get_router_agents_count(cls, context, ha=False, less_than=0):
# TODO(sshank): This is pulled out from l3_agentschedulers_db.py
# until a way to handle joins is figured out.
binding_model = rb_model.RouterL3AgentBinding
@@ -121,9 +121,12 @@ class RouterExtraAttributes(base.NeutronDbObject):
l3_attrs.RouterExtraAttributes.router_id).
join(l3.Router).
group_by(binding_model.router_id).subquery())
-
- query = (context.session.query(l3.Router, sub_query.c.count).
- outerjoin(sub_query))
+ count = func.coalesce(sub_query.c.count, 0)
+ query = (context.session.query(l3.Router, count).
+ outerjoin(sub_query).join(l3_attrs.RouterExtraAttributes).
+ filter(l3_attrs.RouterExtraAttributes.ha == ha))
+ if less_than > 0:
+ query = query.filter(count < less_than)
return list(query)
@@ -160,6 +163,15 @@ class RouterPort(base.NeutronDbObject):
query = query.distinct()
return [r[0] for r in query]
+ @classmethod
+ @db_api.CONTEXT_READER
+ def get_gw_port_ids_by_router_id(cls, context, router_id):
+ query = context.session.query(l3.RouterPort)
+ query = query.filter(
+ l3.RouterPort.router_id == router_id,
+ l3.RouterPort.port_type == n_const.DEVICE_OWNER_ROUTER_GW)
+ return [rp.port_id for rp in query]
+
@base.NeutronObjectRegistry.register
class DVRMacAddress(base.NeutronDbObject):
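How the new filters are meant to be consumed (compare the l3_agent_scheduler change later in this diff); a sketch assuming context and max_agents_for_ha are in scope:

# Non-HA routers scheduled to zero agents:
plain = RouterExtraAttributes.get_router_agents_count(
    context, ha=False, less_than=1)
# HA routers scheduled to fewer agents than the configured maximum:
ha = RouterExtraAttributes.get_router_agents_count(
    context, ha=True, less_than=max_agents_for_ha)
# Each call returns (router, agent_count) pairs already pre-filtered
# in SQL, so callers no longer scan every router in Python.
underscheduled = [router for router, _count in plain + ha]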
diff --git a/neutron/plugins/ml2/drivers/ovn/mech_driver/mech_driver.py b/neutron/plugins/ml2/drivers/ovn/mech_driver/mech_driver.py
index ef16d06227..c92e942161 100644
--- a/neutron/plugins/ml2/drivers/ovn/mech_driver/mech_driver.py
+++ b/neutron/plugins/ml2/drivers/ovn/mech_driver/mech_driver.py
@@ -214,6 +214,10 @@ class OVNMechanismDriver(api.MechanismDriver):
portbindings.CAP_PORT_FILTER: self.sg_enabled,
portbindings.VIF_DETAILS_CONNECTIVITY: self.connectivity,
},
+ portbindings.VIF_TYPE_AGILIO_OVS: {
+ portbindings.CAP_PORT_FILTER: self.sg_enabled,
+ portbindings.VIF_DETAILS_CONNECTIVITY: self.connectivity,
+ },
portbindings.VIF_TYPE_VHOST_USER: {
portbindings.CAP_PORT_FILTER: False,
portbindings.VHOST_USER_MODE:
@@ -265,9 +269,6 @@ class OVNMechanismDriver(api.MechanismDriver):
registry.subscribe(self._create_security_group,
resources.SECURITY_GROUP,
events.AFTER_CREATE)
- registry.subscribe(self._delete_security_group_precommit,
- resources.SECURITY_GROUP,
- events.PRECOMMIT_DELETE)
registry.subscribe(self._delete_security_group,
resources.SECURITY_GROUP,
events.AFTER_DELETE)
@@ -280,6 +281,9 @@ class OVNMechanismDriver(api.MechanismDriver):
registry.subscribe(self._process_sg_rule_notification,
resources.SECURITY_GROUP_RULE,
events.BEFORE_DELETE)
+ registry.subscribe(self._process_sg_rule_after_del_notification,
+ resources.SECURITY_GROUP_RULE,
+ events.AFTER_DELETE)
def _clean_hash_ring(self, *args, **kwargs):
admin_context = n_context.get_admin_context()
@@ -396,14 +400,6 @@ class OVNMechanismDriver(api.MechanismDriver):
self._ovn_client.create_security_group(context,
security_group)
- def _delete_security_group_precommit(self, resource, event, trigger,
- payload):
- context = n_context.get_admin_context()
- security_group_id = payload.resource_id
- for sg_rule in self._plugin.get_security_group_rules(
- context, filters={'remote_group_id': [security_group_id]}):
- self._ovn_client.delete_security_group_rule(context, sg_rule)
-
def _delete_security_group(self, resource, event, trigger, payload):
context = payload.context
security_group_id = payload.resource_id
@@ -461,6 +457,12 @@ class OVNMechanismDriver(api.MechanismDriver):
context,
sg_rule)
+ def _process_sg_rule_after_del_notification(
+ self, resource, event, trigger, payload):
+ context = payload.context
+ sg_rule = payload.metadata['rule']
+ self._ovn_client.delete_security_group_rule(context, sg_rule)
+
def _sg_has_rules_with_same_normalized_cidr(self, sg_rule):
compare_keys = [
'ethertype', 'direction', 'protocol',
@@ -1025,6 +1027,17 @@ class OVNMechanismDriver(api.MechanismDriver):
vif_details = dict(self.vif_details[vif_type])
vif_details[portbindings.VHOST_USER_SOCKET] = (
vhost_user_socket)
+ elif (vnic_type == portbindings.VNIC_VIRTIO_FORWARDER):
+ vhost_user_socket = ovn_utils.ovn_vhu_sockpath(
+ ovn_conf.get_ovn_vhost_sock_dir(), port['id'])
+ vif_type = portbindings.VIF_TYPE_AGILIO_OVS
+ port[portbindings.VIF_DETAILS].update({
+ portbindings.VHOST_USER_SOCKET: vhost_user_socket})
+ vif_details = dict(self.vif_details[vif_type])
+ vif_details[portbindings.VHOST_USER_SOCKET] = (
+ vhost_user_socket)
+ vif_details[portbindings.VHOST_USER_MODE] = (
+ portbindings.VHOST_USER_MODE_CLIENT)
else:
vif_type = portbindings.VIF_TYPE_OVS
vif_details = self.vif_details[vif_type]
diff --git a/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/commands.py b/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/commands.py
index 10cdc3f031..b3a7bdca80 100644
--- a/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/commands.py
+++ b/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/commands.py
@@ -836,19 +836,8 @@ class DeleteLRouterExtGwCommand(command.BaseCommand):
lrouter.delvalue('nat', nat)
nat.delete()
- lrouter_ext_ids = getattr(lrouter, 'external_ids', {})
- gw_port_id = lrouter_ext_ids.get(ovn_const.OVN_GW_PORT_EXT_ID_KEY)
- if not gw_port_id:
- return
-
- try:
- lrouter_port = idlutils.row_by_value(
- self.api.idl, 'Logical_Router_Port', 'name',
- utils.ovn_lrouter_port_name(gw_port_id))
- except idlutils.RowNotFound:
- return
-
- lrouter.delvalue('ports', lrouter_port)
+ for gw_port in self.api.get_lrouter_gw_ports(lrouter.name):
+ lrouter.delvalue('ports', gw_port)
class SetLSwitchPortToVirtualTypeCommand(command.BaseCommand):
diff --git a/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/extensions/qos.py b/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/extensions/qos.py
index 45da53c93d..1b663c5a64 100644
--- a/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/extensions/qos.py
+++ b/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/extensions/qos.py
@@ -425,13 +425,6 @@ class OVNClientQosExtension(object):
def disassociate_floatingip(self, txn, floatingip):
self.delete_floatingip(txn, floatingip)
- def _delete_gateway_ip_qos_rules(self, txn, router_id, network_id):
- if network_id:
- lswitch_name = utils.ovn_name(network_id)
- txn.add(self.nb_idl.qos_del_ext_ids(
- lswitch_name,
- {ovn_const.OVN_ROUTER_ID_EXT_ID_KEY: router_id}))
-
def create_router(self, txn, router):
self.update_router(txn, router)
@@ -465,10 +458,6 @@ class OVNClientQosExtension(object):
# Delete, if exists, the QoS rule in this direction.
txn.add(self.nb_idl.qos_del(**ovn_rule, if_exists=True))
- def delete_router(self, txn, router):
- self._delete_gateway_ip_qos_rules(txn, router['id'],
- router['gw_network_id'])
-
def update_policy(self, context, policy):
updated_port_ids = set([])
updated_fip_ids = set([])
diff --git a/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/impl_idl_ovn.py b/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/impl_idl_ovn.py
index be5e7db367..8c74661c15 100644
--- a/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/impl_idl_ovn.py
+++ b/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/impl_idl_ovn.py
@@ -785,6 +785,22 @@ class OvsdbNbOvnIdl(nb_impl_idl.OvnNbApiIdlImpl, Backend):
result = lrp.execute(check_error=True)
return result[0] if result else None
+ def get_lrouter_gw_ports(self, lrouter_name):
+ lr = self.get_lrouter(lrouter_name)
+ gw_ports = []
+ for lrp in getattr(lr, 'ports', []):
+ lrp_ext_ids = getattr(lrp, 'external_ids', {})
+ if (ovn_const.OVN_ROUTER_NAME_EXT_ID_KEY not in lrp_ext_ids or
+ utils.ovn_name(lrp_ext_ids[
+ ovn_const.OVN_ROUTER_NAME_EXT_ID_KEY]) != lr.name):
+ continue
+ lrp_ha_cfg = (getattr(lrp, 'gateway_chassis', None) or
+ getattr(lrp, 'options', {}).get(
+ ovn_const.OVN_GATEWAY_CHASSIS_KEY))
+ if lrp_ha_cfg:
+ gw_ports.append(lrp)
+ return gw_ports
+
def delete_lrouter_ext_gw(self, lrouter_name, if_exists=True):
return cmd.DeleteLRouterExtGwCommand(self, lrouter_name, if_exists)
diff --git a/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/maintenance.py b/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/maintenance.py
index 29a3848b28..0f3deed9a1 100644
--- a/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/maintenance.py
+++ b/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/maintenance.py
@@ -650,6 +650,36 @@ class DBInconsistenciesPeriodics(SchemaAwarePeriodicsBase):
raise periodics.NeverAgain()
+ # A static spacing value is used here, but this method will only run
+ # once per lock due to the use of periodics.NeverAgain().
+ @periodics.periodic(spacing=600, run_immediately=True)
+ def check_localnet_port_has_learn_fdb(self):
+ if not self.has_lock:
+ return
+
+ ports = self._nb_idl.db_find_rows(
+ "Logical_Switch_Port", ("type", "=", ovn_const.LSP_TYPE_LOCALNET)
+ ).execute(check_error=True)
+
+ with self._nb_idl.transaction(check_error=True) as txn:
+ for port in ports:
+ if ovn_conf.is_learn_fdb_enabled():
+ fdb_opt = port.options.get(
+ ovn_const.LSP_OPTIONS_LOCALNET_LEARN_FDB)
+ if not fdb_opt or fdb_opt == 'false':
+ txn.add(self._nb_idl.db_set(
+ 'Logical_Switch_Port', port.name,
+ ('options',
+ {ovn_const.LSP_OPTIONS_LOCALNET_LEARN_FDB: 'true'}
+ )))
+ elif port.options.get(
+ ovn_const.LSP_OPTIONS_LOCALNET_LEARN_FDB) == 'true':
+ txn.add(self._nb_idl.db_set(
+ 'Logical_Switch_Port', port.name,
+ ('options',
+ {ovn_const.LSP_OPTIONS_LOCALNET_LEARN_FDB: 'false'})))
+ raise periodics.NeverAgain()
+
# TODO(lucasagomes): Remove this in the Z cycle
# A static spacing value is used here, but this method will only run
# once per lock due to the use of periodics.NeverAgain().
@@ -775,30 +805,31 @@ class DBInconsistenciesPeriodics(SchemaAwarePeriodicsBase):
txn.add(cmd)
raise periodics.NeverAgain()
- # TODO(ralonsoh): Remove this in the Z+3 cycle. This method adds the
- # "external_ids:OVN_GW_NETWORK_EXT_ID_KEY" to each router that has
- # a gateway (that means, that has "external_ids:OVN_GW_PORT_EXT_ID_KEY").
+ # TODO(fnordahl): Remove this in the B+3 cycle. This method removes the
+ # now redundant "external_ids:OVN_GW_NETWORK_EXT_ID_KEY" and
+ # "external_ids:OVN_GW_PORT_EXT_ID_KEY" from each router.
# A static spacing value is used here, but this method will only run
# once per lock due to the use of periodics.NeverAgain().
@periodics.periodic(spacing=600, run_immediately=True)
- def update_logical_router_with_gateway_network_id(self):
- """Update all OVN logical router registers with the GW network ID"""
+ def remove_gw_ext_ids_from_logical_router(self):
+ """Remove `gw_port_id` and `gw_network_id` external_ids from LRs"""
if not self.has_lock:
return
cmds = []
- context = n_context.get_admin_context()
for lr in self._nb_idl.lr_list().execute(check_error=True):
- gw_port = lr.external_ids.get(ovn_const.OVN_GW_PORT_EXT_ID_KEY)
- gw_net = lr.external_ids.get(ovn_const.OVN_GW_NETWORK_EXT_ID_KEY)
- if not gw_port or (gw_port and gw_net):
- # This router does not have a gateway network assigned yet or
- # it has a gateway port and its corresponding network.
+ if (ovn_const.OVN_GW_PORT_EXT_ID_KEY not in lr.external_ids and
+ ovn_const.OVN_GW_NETWORK_EXT_ID_KEY not in
+ lr.external_ids):
+ # This router has none of the deprecated external_ids.
continue
- port = self._ovn_client._plugin.get_port(context, gw_port)
- external_ids = {
- ovn_const.OVN_GW_NETWORK_EXT_ID_KEY: port['network_id']}
+ external_ids = lr.external_ids.copy()
+ for k in (ovn_const.OVN_GW_PORT_EXT_ID_KEY,
+ ovn_const.OVN_GW_NETWORK_EXT_ID_KEY):
+ if k in external_ids:
+ del(external_ids[k])
+
cmds.append(self._nb_idl.db_set(
'Logical_Router', lr.uuid, ('external_ids', external_ids)))
diff --git a/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/ovn_client.py b/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/ovn_client.py
index f5607e2849..11650493ef 100644
--- a/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/ovn_client.py
+++ b/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/ovn_client.py
@@ -1329,20 +1329,17 @@ class OVNClient(object):
'device_owner': [const.DEVICE_OWNER_ROUTER_GW],
'device_id': [router_id]})
- def _get_router_ports(self, context, router_id, get_gw_port=False):
+ def _get_router_ports(self, context, router_id):
# _get_router() will raise a RouterNotFound error if there's no router
# with the router_id
router_db = self._l3_plugin._get_router(context, router_id)
- if get_gw_port:
- return [p.port for p in router_db.attached_ports]
- else:
- # When the existing deployment is migrated to OVN
- # we may need to consider other port types - DVR_INTERFACE/HA_INTF.
- return [p.port for p in router_db.attached_ports
- if p.port_type in [const.DEVICE_OWNER_ROUTER_INTF,
- const.DEVICE_OWNER_DVR_INTERFACE,
- const.DEVICE_OWNER_HA_REPLICATED_INT,
- const.DEVICE_OWNER_ROUTER_HA_INTF]]
+ # When the existing deployment is migrated to OVN
+ # we may need to consider other port types - DVR_INTERFACE/HA_INTF.
+ return [p.port for p in router_db.attached_ports
+ if p.port_type in [const.DEVICE_OWNER_ROUTER_INTF,
+ const.DEVICE_OWNER_DVR_INTERFACE,
+ const.DEVICE_OWNER_HA_REPLICATED_INT,
+ const.DEVICE_OWNER_ROUTER_HA_INTF]]
def _get_v4_network_for_router_port(self, context, port):
cidr = None
@@ -1366,18 +1363,13 @@ class OVNClient(object):
return networks
def _gen_router_ext_ids(self, router):
- gw_net_id = (router.get('external_gateway_info') or
- {}).get('network_id') or ''
return {
ovn_const.OVN_ROUTER_NAME_EXT_ID_KEY:
router.get('name', 'no_router_name'),
- ovn_const.OVN_GW_PORT_EXT_ID_KEY:
- router.get('gw_port_id') or '',
ovn_const.OVN_REV_NUM_EXT_ID_KEY: str(utils.get_revision_number(
router, ovn_const.TYPE_ROUTERS)),
ovn_const.OVN_AZ_HINTS_EXT_ID_KEY:
','.join(common_utils.get_az_hints(router)),
- ovn_const.OVN_GW_NETWORK_EXT_ID_KEY: gw_net_id,
}
def create_router(self, context, router, add_external_gateway=True):
@@ -1498,13 +1490,8 @@ class OVNClient(object):
def delete_router(self, context, router_id):
"""Delete a logical router."""
lrouter_name = utils.ovn_name(router_id)
- ovn_router = self._nb_idl.get_lrouter(lrouter_name)
- gw_network_id = ovn_router.external_ids.get(
- ovn_const.OVN_GW_NETWORK_EXT_ID_KEY) if ovn_router else None
- router_dict = {'id': router_id, 'gw_network_id': gw_network_id}
with self._nb_idl.transaction(check_error=True) as txn:
txn.add(self._nb_idl.delete_lrouter(lrouter_name))
- self._qos_driver.delete_router(txn, router_dict)
db_rev.delete_revision(context, router_id, ovn_const.TYPE_ROUTERS)
def get_candidates_for_scheduling(self, physnet, cms=None,
@@ -1914,9 +1901,12 @@ class OVNClient(object):
def create_provnet_port(self, network_id, segment, txn=None):
tag = segment.get(segment_def.SEGMENTATION_ID, [])
physnet = segment.get(segment_def.PHYSICAL_NETWORK)
+ fdb_enabled = ('true' if ovn_conf.is_learn_fdb_enabled()
+ else 'false')
options = {'network_name': physnet,
ovn_const.LSP_OPTIONS_MCAST_FLOOD_REPORTS: 'true',
- ovn_const.LSP_OPTIONS_MCAST_FLOOD: 'false'}
+ ovn_const.LSP_OPTIONS_MCAST_FLOOD: 'false',
+ ovn_const.LSP_OPTIONS_LOCALNET_LEARN_FDB: fdb_enabled}
cmd = self._nb_idl.create_lswitch_port(
lport_name=utils.ovn_provnet_port_name(segment['id']),
lswitch_name=utils.ovn_name(network_id),
@@ -2336,7 +2326,7 @@ class OVNClient(object):
mport_updated = False
if subnet['ip_version'] == const.IP_VERSION_4:
mport_updated = self.update_metadata_port(
- context, network['id'], subnet=subnet)
+ context, network, subnet=subnet)
if subnet['ip_version'] == const.IP_VERSION_6 or not mport_updated:
# NOTE(ralonsoh): if IPv4 but the metadata port has not been
# updated, the DHCP options register has not been created.
@@ -2356,7 +2346,7 @@ class OVNClient(object):
subnet['id'])['subnet']
if subnet['enable_dhcp'] or ovn_subnet:
- self.update_metadata_port(context, network['id'], subnet=subnet)
+ self.update_metadata_port(context, network, subnet=subnet)
check_rev_cmd = self._nb_idl.check_revision_number(
subnet['id'], subnet, ovn_const.TYPE_SUBNETS)
@@ -2452,8 +2442,9 @@ class OVNClient(object):
if not ovn_conf.is_ovn_metadata_enabled():
return
- if self._find_metadata_port(context, network['id']):
- return
+ metadata_port = self._find_metadata_port(context, network['id'])
+ if metadata_port:
+ return metadata_port
# Create a neutron port for DHCP/metadata services
filters = {'network_id': [network['id']]}
@@ -2468,16 +2459,19 @@ class OVNClient(object):
}
}
# TODO(boden): rehome create_port into neutron-lib
- p_utils.create_port(self._plugin, context, port)
+ return p_utils.create_port(self._plugin, context, port)
- def update_metadata_port(self, context, network_id, subnet=None):
+ def update_metadata_port(self, context, network, subnet=None):
"""Update metadata port.
This function will allocate an IP address for the metadata port of
the given network in all its IPv4 subnets or the given subnet. Returns
"True" if the metadata port has been updated and "False" if OVN
- metadata is disabled or the metadata port does not exist.
+ metadata is disabled or the metadata port does not exist or
+ cannot be created.
"""
+ network_id = network['id']
+
def update_metadata_port_fixed_ips(metadata_port, add_subnet_ids,
del_subnet_ids):
wanted_fixed_ips = [
@@ -2496,11 +2490,11 @@ class OVNClient(object):
if not ovn_conf.is_ovn_metadata_enabled():
return False
- # Retrieve the metadata port of this network
- metadata_port = self._find_metadata_port(context, network_id)
+ # Retrieve or create the metadata port of this network
+ metadata_port = self.create_metadata_port(context, network)
if not metadata_port:
- LOG.error("Metadata port couldn't be found for network %s",
- network_id)
+ LOG.error("Metadata port could not be found or created "
+ "for network %s", network_id)
return False
port_subnet_ids = set(ip['subnet_id'] for ip in
diff --git a/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/ovn_db_sync.py b/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/ovn_db_sync.py
index 0a307f78ee..f53bc8423b 100644
--- a/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/ovn_db_sync.py
+++ b/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/ovn_db_sync.py
@@ -960,7 +960,7 @@ class OvnNbSynchronizer(OvnDbSynchronizer):
try:
# Make sure that this port has an IP address in all the
# subnets
- self._ovn_client.update_metadata_port(ctx, net['id'])
+ self._ovn_client.update_metadata_port(ctx, net)
except n_exc.IpAddressGenerationFailure:
LOG.error('Could not allocate IP addresses for '
'metadata port in network %s', net['id'])
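With the create-or-update behaviour above, callers no longer need to pre-check that the metadata port exists; a sketch assuming ovn_client, context, network and subnet are in scope:

# update_metadata_port() now creates the missing port itself via
# create_metadata_port(), which returns the existing or new port.
updated = ovn_client.update_metadata_port(context, network, subnet=subnet)
if not updated:
    ...  # OVN metadata disabled, or the port could not be created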
diff --git a/neutron/policy.py b/neutron/policy.py
index dca079b3c9..e4f0b093bd 100644
--- a/neutron/policy.py
+++ b/neutron/policy.py
@@ -51,12 +51,15 @@ _RESOURCE_FOREIGN_KEYS = {
'security_groups': 'security_group_id'
}
-
-# TODO(gmann): Remove setting the default value of config policy_file
-# once oslo_policy change the default value to 'policy.yaml'.
-# https://github.com/openstack/oslo.policy/blob/a626ad12fe5a3abd49d70e3e5b95589d279ab578/oslo_policy/opts.py#L49
+# TODO(slaweq): Remove overriding the default value of the config options
+# 'policy_file', 'enforce_scope', and 'enforce_new_defaults' once
+# oslo_policy changes their default values to what is overridden here.
DEFAULT_POLICY_FILE = 'policy.yaml'
-opts.set_defaults(cfg.CONF, DEFAULT_POLICY_FILE)
+opts.set_defaults(
+ cfg.CONF,
+ DEFAULT_POLICY_FILE,
+ enforce_scope=True,
+ enforce_new_defaults=True)
def reset():
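The same defaults expressed as an illustrative neutron.conf fragment (redundant once this code is deployed; shown only to make the operator-visible effect explicit):

[oslo_policy]
policy_file = policy.yaml
enforce_scope = True
enforce_new_defaults = True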
diff --git a/neutron/scheduler/l3_agent_scheduler.py b/neutron/scheduler/l3_agent_scheduler.py
index 66a41dfc86..7553733a5e 100644
--- a/neutron/scheduler/l3_agent_scheduler.py
+++ b/neutron/scheduler/l3_agent_scheduler.py
@@ -103,11 +103,18 @@ class L3Scheduler(object, metaclass=abc.ABCMeta):
underscheduled_routers = []
max_agents_for_ha = plugin.get_number_of_agents_for_scheduling(context)
- for router, count in plugin.get_routers_l3_agents_count(context):
- if (count < 1 or
- router.get('ha', False) and count < max_agents_for_ha):
- # Either the router was un-scheduled (scheduled to 0 agents),
- # or it's an HA router and it was under-scheduled (scheduled to
+ # Since working out a unified SQL query for both regular and HA
+ # routers is hard, split it up and run the queries separately.
+ for router, count in plugin.get_routers_l3_agents_count(
+ context, ha=False, less_than=1):
+ if count < 1:
+ # the router was un-scheduled (scheduled to 0 agents).
+ underscheduled_routers.append(router)
+
+ for router, count in plugin.get_routers_l3_agents_count(
+ context, ha=True, less_than=max_agents_for_ha):
+ if count < max_agents_for_ha:
+ # it's an HA router and it was under-scheduled (scheduled to
# less than max_agents_for_ha). Either way, it should be added
# to the list of routers we want to handle.
underscheduled_routers.append(router)
diff --git a/neutron/services/loki/loki_plugin.py b/neutron/services/loki/loki_plugin.py
index 491f914b86..9d496f523e 100644
--- a/neutron/services/loki/loki_plugin.py
+++ b/neutron/services/loki/loki_plugin.py
@@ -34,11 +34,13 @@ class LokiPlugin(service_base.ServicePluginBase):
def random_deadlock(self, session, flush_context, instances):
if random.randrange(0, 51) > 49: # 1/50 probability
+ LOG.info("Loki has raised a DBDeadlock exception, instances %s",
+ instances)
raise db_exc.DBDeadlock()
def random_delay(self, session, instance):
if random.randrange(0, 201) > 199: # 1/200 probability
- LOG.debug("Loki has delayed loading of instance %s", instance)
+ LOG.info("Loki has delayed loading of instance %s", instance)
time.sleep(1)
def get_plugin_type(self):
diff --git a/neutron/services/ovn_l3/plugin.py b/neutron/services/ovn_l3/plugin.py
index 56a58d3f5e..7d370089a2 100644
--- a/neutron/services/ovn_l3/plugin.py
+++ b/neutron/services/ovn_l3/plugin.py
@@ -170,6 +170,7 @@ class OVNL3RouterPlugin(service_base.ServicePluginBase,
def create_router_precommit(self, resource, event, trigger, payload):
context = payload.context
+ context.session.flush()
router_id = payload.resource_id
router_db = payload.metadata['router_db']
diff --git a/neutron/services/qos/qos_plugin.py b/neutron/services/qos/qos_plugin.py
index 776dae6da2..d4f5d5038e 100644
--- a/neutron/services/qos/qos_plugin.py
+++ b/neutron/services/qos/qos_plugin.py
@@ -323,7 +323,7 @@ class QoSPlugin(qos.QoSPluginBase):
def _get_ports_with_policy(self, context, policy):
networks_ids = policy.get_bound_networks()
ports_with_net_policy = ports_object.Port.get_objects(
- context, network_id=networks_ids)
+ context, network_id=networks_ids) if networks_ids else []
# Filter only those ports which don't have an overridden policy
ports_with_net_policy = [
@@ -333,7 +333,7 @@ class QoSPlugin(qos.QoSPluginBase):
ports_ids = policy.get_bound_ports()
ports_with_policy = ports_object.Port.get_objects(
- context, id=ports_ids)
+ context, id=ports_ids) if ports_ids else []
return list(set(ports_with_policy + ports_with_net_policy))
def _validate_create_port_callback(self, resource, event, trigger,
diff --git a/neutron/services/tag/tag_plugin.py b/neutron/services/tag/tag_plugin.py
index c44860034d..23f3cb9ed1 100644
--- a/neutron/services/tag/tag_plugin.py
+++ b/neutron/services/tag/tag_plugin.py
@@ -100,6 +100,7 @@ class TagPlugin(tagging.TagPluginBase):
tag=tag).create()
@log_helpers.log_method_call
+ @db_api.retry_if_session_inactive()
def update_tag(self, context, resource, resource_id, tag):
res = self._get_resource(context, resource, resource_id)
if any(tag == tag_db.tag for tag_db in res.standard_attr.tags):
@@ -111,12 +112,14 @@ class TagPlugin(tagging.TagPluginBase):
pass
@log_helpers.log_method_call
+ @db_api.retry_if_session_inactive()
def delete_tags(self, context, resource, resource_id):
res = self._get_resource(context, resource, resource_id)
tag_obj.Tag.delete_objects(context,
standard_attr_id=res.standard_attr_id)
@log_helpers.log_method_call
+ @db_api.retry_if_session_inactive()
def delete_tag(self, context, resource, resource_id, tag):
res = self._get_resource(context, resource, resource_id)
if not tag_obj.Tag.delete_objects(
diff --git a/neutron/tests/base.py b/neutron/tests/base.py
index 92782099f2..53bd380fc2 100644
--- a/neutron/tests/base.py
+++ b/neutron/tests/base.py
@@ -46,7 +46,6 @@ from oslotest import base
from osprofiler import profiler
from sqlalchemy import exc as sqlalchemy_exc
import testtools
-from testtools import content
from neutron._i18n import _
from neutron.agent.linux import external_process
@@ -68,8 +67,6 @@ ETCDIR = os.path.join(ROOTDIR, 'etc')
SUDO_CMD = 'sudo -n'
-TESTCASE_RETRIES = 3
-
def etcdir(*p):
return os.path.join(ETCDIR, *p)
@@ -177,26 +174,10 @@ class AttributeDict(dict):
def _catch_timeout(f):
@functools.wraps(f)
def func(self, *args, **kwargs):
- for idx in range(1, TESTCASE_RETRIES + 1):
- try:
- return f(self, *args, **kwargs)
- except eventlet.Timeout as e:
- self.fail('Execution of this test timed out: %s' % e)
- # NOTE(ralonsoh): exception catch added due to the constant
- # occurrences of this exception during FT and UT execution.
- # This is due to [1]. Once the sync decorators are removed or the
- # privsep ones are decorated by those ones (swap decorator
- # declarations) this catch can be remove.
- # [1] https://review.opendev.org/#/c/631275/
- except fixtures.TimeoutException:
- if idx < TESTCASE_RETRIES:
- msg = ('"fixtures.TimeoutException" during test case '
- 'execution no %s; test case re-executed' % idx)
- self.addDetail('DietTestCase',
- content.text_content(msg))
- self._set_timeout()
- else:
- self.fail('Execution of this test timed out')
+ try:
+ return f(self, *args, **kwargs)
+ except eventlet.Timeout as e:
+ self.fail('Execution of this test timed out: %s' % e)
return func
@@ -553,6 +534,7 @@ class PluginFixture(fixtures.Fixture):
self.core_plugin = core_plugin
def _setUp(self):
+ config.register_common_config_options()
# Do not load default service plugins in the testing framework
# as all the mocking involved can cause havoc.
self.default_svc_plugins_p = mock.patch(
diff --git a/neutron/tests/common/net_helpers.py b/neutron/tests/common/net_helpers.py
index 89f9042db9..ce8926f4bb 100644
--- a/neutron/tests/common/net_helpers.py
+++ b/neutron/tests/common/net_helpers.py
@@ -24,6 +24,7 @@ import shlex
import signal
import subprocess
import time
+from unittest import mock
import fixtures
import netaddr
@@ -880,20 +881,19 @@ class OVSPortFixture(PortFixture):
interface_config = cfg.ConfigOpts()
config.register_interface_opts(interface_config)
ovs_interface = interface.OVSInterfaceDriver(interface_config)
- ovs_interface.plug_new(
- None,
- self.port_id,
- port_name,
- self.mac,
- bridge=self.bridge.br_name,
- namespace=self.namespace)
- # NOTE(mangelajo): for OVS implementations remove the DEAD VLAN tag
- # on ports that we intend to use as fake vm interfaces, they
- # need to be flat. This is related to lp#1767422
- self.bridge.clear_db_attribute("Port", port_name, "tag")
- # Clear vlan_mode that is added for each new port. lp#1930414
- self.bridge.clear_db_attribute("Port", port_name, "vlan_mode")
- self.bridge.clear_db_attribute("Port", port_name, "trunks")
+ # NOTE(slaweq): for the OVS implementation there would normally be a
+ # DEAD VLAN tag set on the port, and we would need to remove it here
+ # as the tests need untagged ports. But to avoid setting and then
+ # removing the tag, we can simply mock the _set_port_dead method so
+ # the port will not be tagged with the DEAD VLAN tag initially.
+ with mock.patch.object(ovs_lib.OVSBridge, '_set_port_dead'):
+ ovs_interface.plug_new(
+ None,
+ self.port_id,
+ port_name,
+ self.mac,
+ bridge=self.bridge.br_name,
+ namespace=self.namespace)
self.addCleanup(self.bridge.delete_port, port_name)
self.port = ip_lib.IPDevice(port_name, self.namespace)
diff --git a/neutron/tests/fullstack/test_dhcp_agent.py b/neutron/tests/fullstack/test_dhcp_agent.py
index e6c05b6bca..6d19a30736 100644
--- a/neutron/tests/fullstack/test_dhcp_agent.py
+++ b/neutron/tests/fullstack/test_dhcp_agent.py
@@ -19,7 +19,6 @@ from oslo_utils import uuidutils
from neutron.agent.linux import ip_lib
from neutron.common import utils as common_utils
-from neutron.tests import base as test_base
from neutron.tests.fullstack.agents import dhcp_agent
from neutron.tests.fullstack import base
from neutron.tests.fullstack.resources import environment
@@ -185,7 +184,6 @@ class TestDhcpAgentHA(BaseDhcpAgentTest):
# check if new vm will get IP from new DHCP agent
self._spawn_vm()
- @test_base.unstable_test('bug 2000150')
def test_multiple_agents_for_network(self):
network_dhcp_agents = self.client.list_dhcp_agent_hosting_networks(
self.network['id'])['agents']
diff --git a/neutron/tests/functional/agent/linux/test_keepalived.py b/neutron/tests/functional/agent/linux/test_keepalived.py
index aa36cd250f..699dc25213 100644
--- a/neutron/tests/functional/agent/linux/test_keepalived.py
+++ b/neutron/tests/functional/agent/linux/test_keepalived.py
@@ -13,7 +13,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import signal
from oslo_config import cfg
@@ -27,6 +26,7 @@ from neutron.tests.common import net_helpers
from neutron.tests.functional.agent.linux import helpers
from neutron.tests.functional import base
from neutron.tests.unit.agent.linux import test_keepalived
+from neutron_lib.exceptions import ProcessExecutionError
class KeepalivedManagerTestCase(base.BaseSudoTestCase,
@@ -51,12 +51,17 @@ class KeepalivedManagerTestCase(base.BaseSudoTestCase,
self.addCleanup(self._stop_keepalived_manager)
def _stop_keepalived_manager(self):
- self.manager.disable()
try:
- common_utils.wait_until_true(
- lambda: not self.manager.get_process().active, timeout=5)
- except common_utils.WaitTimeout:
- self.manager.get_process().disable(sig=signal.SIGKILL)
+ self.manager.disable()
+ except ProcessExecutionError as process_err:
+ # self.manager.disable() will perform SIGTERM->wait->SIGKILL
+ # (if needed) on the process. However, it is sometimes possible
+ # that SIGKILL gets called on a process that just exited due to
+ # SIGTERM. Ignore this condition so the test is not marked as
+ # failed.
+ if not (len(process_err.args) > 0 and
+ "No such process" in process_err.args[0]):
+ raise
def _prepare_devices(self):
# NOTE(slaweq): those are devices used in keepalived config file,
diff --git a/neutron/tests/functional/agent/ovn/extensions/test_qos_hwol.py b/neutron/tests/functional/agent/ovn/extensions/test_qos_hwol.py
index 83d5c58ab9..a6457dcd0e 100644
--- a/neutron/tests/functional/agent/ovn/extensions/test_qos_hwol.py
+++ b/neutron/tests/functional/agent/ovn/extensions/test_qos_hwol.py
@@ -17,27 +17,28 @@ from unittest import mock
from oslo_utils import uuidutils
-from neutron.agent.common import ovs_lib
from neutron.agent.ovn.agent import ovsdb as agent_ovsdb
from neutron.agent.ovn.extensions import qos_hwol
from neutron.common.ovn import constants as ovn_const
from neutron.common.ovn import utils
from neutron.common import utils as n_utils
from neutron.tests import base as test_base
-from neutron.tests.common import net_helpers
from neutron.tests.functional import base
class OVSInterfaceEventTestCase(base.TestOVNFunctionalBase):
- @test_base.unstable_test(
- 'LP#2006603, it is being addressed in '
- 'https://review.opendev.org/c/openstack/neutron/+/873118')
+ def _cleanup(self):
+ self.ovs_idl.del_port(self.port_name, bridge=self.br_name).execute(
+ check_error=False)
+ self.ovs_idl.del_br(self.br_name).execute(check_error=False)
+
+ @test_base.unstable_test('bug 2006603')
def test_port_creation_and_deletion(self):
def check_add_port_called():
try:
mock_agent.qos_hwol_ext.add_port.assert_has_calls(
- [mock.call('port_iface-id', port_name)])
+ [mock.call(port_iface_id, self.port_name)])
return True
except AssertionError:
return False
@@ -45,24 +46,36 @@ class OVSInterfaceEventTestCase(base.TestOVNFunctionalBase):
def check_remove_egress_called():
try:
mock_agent.qos_hwol_ext.remove_egress.assert_has_calls(
- [mock.call('port_iface-id')])
+ [mock.call(port_iface_id)])
return True
except AssertionError:
return False
+ port_iface_id = 'port_iface-id'
mock_agent = mock.Mock()
events = [qos_hwol.OVSInterfaceEvent(mock_agent)]
- agent_ovsdb.MonitorAgentOvsIdl(events=events).start()
- br = self.useFixture(net_helpers.OVSBridgeFixture()).bridge
- self.ovs_bridge = ovs_lib.OVSBridge(br.br_name)
- port_name = ('port-' + uuidutils.generate_uuid())[:8]
-
- self.ovs_bridge.add_port(
- port_name, ('external_ids', {'iface-id': 'port_iface-id'}))
- n_utils.wait_until_true(check_add_port_called, timeout=5)
-
- self.ovs_bridge.delete_port(port_name)
- n_utils.wait_until_true(check_remove_egress_called, timeout=5)
+ self.ovs_idl = agent_ovsdb.MonitorAgentOvsIdl(events=events).start()
+ self.br_name = ('brtest-' + uuidutils.generate_uuid())[:13]
+ self.port_name = ('port-' + uuidutils.generate_uuid())[:13]
+ self.addCleanup(self._cleanup)
+ with self.ovs_idl.transaction() as txn:
+ txn.add(self.ovs_idl.add_br(self.br_name))
+ txn.add(self.ovs_idl.add_port(self.br_name, self.port_name))
+ txn.add(self.ovs_idl.iface_set_external_id(
+ self.port_name, 'iface-id', port_iface_id))
+ txn.add(self.ovs_idl.db_set(
+ 'Interface', self.port_name, ('type', 'internal')))
+
+ exc = Exception('Port %s was not added to the bridge %s' %
+ (self.port_name, self.br_name))
+ n_utils.wait_until_true(check_add_port_called, timeout=5,
+ exception=exc)
+
+ self.ovs_idl.del_port(self.port_name).execute(check_error=True)
+ exc = Exception('Port %s was not deleted from the bridge %s' %
+ (self.port_name, self.br_name))
+ n_utils.wait_until_true(check_remove_egress_called, timeout=5,
+ exception=exc)
class QoSBandwidthLimitEventTestCase(base.TestOVNFunctionalBase):
diff --git a/neutron/tests/functional/agent/ovn/metadata/test_metadata_agent.py b/neutron/tests/functional/agent/ovn/metadata/test_metadata_agent.py
index bfa4747038..d0d4aebac3 100644
--- a/neutron/tests/functional/agent/ovn/metadata/test_metadata_agent.py
+++ b/neutron/tests/functional/agent/ovn/metadata/test_metadata_agent.py
@@ -51,6 +51,18 @@ class MetadataAgentHealthEvent(event.WaitEvent):
ovn_const.OVN_AGENT_METADATA_SB_CFG_KEY, 0)) >= self.sb_cfg
+class MetadataPortCreateEvent(event.WaitEvent):
+ event_name = 'MetadataPortCreateEvent'
+
+ def __init__(self, metadata_port, timeout=5):
+ table = 'Port_Binding'
+ events = (self.ROW_CREATE,)
+ conditions = (('logical_port', '=', metadata_port),)
+ super(MetadataPortCreateEvent, self).__init__(
+ events, table, conditions, timeout=timeout
+ )
+
+
class TestMetadataAgent(base.TestOVNFunctionalBase):
OVN_BRIDGE = 'br-int'
FAKE_CHASSIS_HOST = 'ovn-host-fake'
@@ -132,8 +144,8 @@ class TestMetadataAgent(base.TestOVNFunctionalBase):
# chassis with the nb_cfg, 1 revisions when listing the agents.
self.assertTrue(row_event.wait())
- def _create_metadata_port(self, txn, lswitch_name):
- mdt_port_name = 'ovn-mdt-' + uuidutils.generate_uuid()
+ def _create_metadata_port(self, txn, lswitch_name, port_name=None):
+ mdt_port_name = port_name or 'ovn-mdt-' + uuidutils.generate_uuid()
txn.add(
self.nb_api.lsp_add(
lswitch_name,
@@ -144,7 +156,6 @@ class TestMetadataAgent(base.TestOVNFunctionalBase):
ovn_const.OVN_CIDRS_EXT_ID_KEY: '192.168.122.123/24',
ovn_const.OVN_DEVID_EXT_ID_KEY: 'ovnmeta-' + lswitch_name
}))
- return mdt_port_name
def _update_metadata_port_ip(self, metadata_port_name):
external_ids = {
@@ -224,7 +235,14 @@ class TestMetadataAgent(base.TestOVNFunctionalBase):
if update and type_ == ovn_const.LSP_TYPE_LOCALPORT:
with self.nb_api.transaction(
check_error=True, log_errors=True) as txn:
- mdt_port_name = self._create_metadata_port(txn, lswitch_name)
+ mdt_port_name = 'ovn-mdt-' + uuidutils.generate_uuid()
+ metadata_port_create_event = MetadataPortCreateEvent(
+ mdt_port_name)
+ self.agent.sb_idl.idl.notify_handler.watch_event(
+ metadata_port_create_event)
+ self._create_metadata_port(txn, lswitch_name, mdt_port_name)
+ self.assertTrue(metadata_port_create_event.wait())
+
self.sb_api.lsp_bind(mdt_port_name, self.chassis_name).execute(
check_error=True, log_errors=True)
self._update_metadata_port_ip(mdt_port_name)
diff --git a/neutron/tests/functional/agent/test_ovs_lib.py b/neutron/tests/functional/agent/test_ovs_lib.py
index b8968d86c0..056369a02d 100644
--- a/neutron/tests/functional/agent/test_ovs_lib.py
+++ b/neutron/tests/functional/agent/test_ovs_lib.py
@@ -489,7 +489,7 @@ class OVSBridgeTestCase(OVSBridgeTestBase):
txn.add(ovsdb.del_port(port_name, self.br.br_name,
if_exists=False))
txn.add(ovsdb.db_set('Interface', port_name,
- ('type', 'internal')))
+ ('type', 'internal'), if_exists=False))
self.assertRaises((RuntimeError, idlutils.RowNotFound),
del_port_mod_iface)
diff --git a/neutron/tests/functional/base.py b/neutron/tests/functional/base.py
index 8321c2a6ee..8ef793989e 100644
--- a/neutron/tests/functional/base.py
+++ b/neutron/tests/functional/base.py
@@ -21,13 +21,9 @@ from unittest import mock
import warnings
import fixtures
-from neutron_lib import fixture
from neutron_lib.plugins import constants
from neutron_lib.plugins import directory
-from oslo_concurrency import lockutils
from oslo_config import cfg
-from oslo_db import exception as os_db_exc
-from oslo_db.sqlalchemy import provision
from oslo_log import log
from oslo_utils import timeutils
from oslo_utils import uuidutils
@@ -61,7 +57,6 @@ LOG = log.getLogger(__name__)
# This is the directory from which infra fetches log files for functional tests
DEFAULT_LOG_DIR = os.path.join(helpers.get_test_log_path(),
'dsvm-functional-logs')
-SQL_FIXTURE_LOCK = 'sql_fixture_lock'
def config_decorator(method_to_decorate, config_tuples):
@@ -134,27 +129,6 @@ class BaseSudoTestCase(BaseLoggingTestCase):
new=ovs_agent_decorator).start()
-class OVNSqlFixture(fixture.StaticSqlFixture):
-
- @classmethod
- @lockutils.synchronized(SQL_FIXTURE_LOCK)
- def _init_resources(cls):
- cls.schema_resource = provision.SchemaResource(
- provision.DatabaseResource("sqlite"),
- cls._generate_schema, teardown=False)
- dependency_resources = {}
- for name, resource in cls.schema_resource.resources:
- dependency_resources[name] = resource.getResource()
- cls.schema_resource.make(dependency_resources)
- cls.engine = dependency_resources['database'].engine
-
- def _delete_from_schema(self, engine):
- try:
- super(OVNSqlFixture, self)._delete_from_schema(engine)
- except os_db_exc.DBNonExistentTable:
- pass
-
-
class TestOVNFunctionalBase(test_plugin.Ml2PluginV2TestCase,
BaseLoggingTestCase):
@@ -251,16 +225,6 @@ class TestOVNFunctionalBase(test_plugin.Ml2PluginV2TestCase,
raise FileNotFoundError(
errno.ENOENT, os.strerror(errno.ENOENT), msg)
- # FIXME(lucasagomes): Workaround for
- # https://bugs.launchpad.net/networking-ovn/+bug/1808146. We should
- # investigate and properly fix the problem. This method is just a
- # workaround to alleviate the gate for now and should not be considered
- # a proper fix.
- def _setup_database_fixtures(self):
- fixture = OVNSqlFixture()
- self.useFixture(fixture)
- self.engine = fixture.engine
-
def get_additional_service_plugins(self):
p = super(TestOVNFunctionalBase, self).get_additional_service_plugins()
p.update({'revision_plugin_name': 'revisions',
diff --git a/neutron/tests/functional/db/test_network.py b/neutron/tests/functional/db/test_network.py
index ddf4390e8a..550a719a95 100644
--- a/neutron/tests/functional/db/test_network.py
+++ b/neutron/tests/functional/db/test_network.py
@@ -35,7 +35,7 @@ class NetworkRBACTestCase(testlib_api.SqlTestCase):
DB_PLUGIN_KLASS = 'neutron.plugins.ml2.plugin.Ml2Plugin'
self.setup_coreplugin(DB_PLUGIN_KLASS)
self.plugin = ml2_plugin.Ml2Plugin()
- self.cxt = context.Context(user_id=None,
+ self.ctx = context.Context(user_id=None,
tenant_id=None,
is_admin=True,
overwrite=False)
@@ -56,10 +56,10 @@ class NetworkRBACTestCase(testlib_api.SqlTestCase):
'shared': shared,
extnet_apidef.EXTERNAL: external,
'status': constants.NET_STATUS_ACTIVE}
- return self.plugin.create_network(self.cxt, {'network': network})
+ return self.plugin.create_network(self.ctx, {'network': network})
def _update_network(self, network_id, network):
- return self.plugin.update_network(self.cxt, network_id,
+ return self.plugin.update_network(self.ctx, network_id,
{'network': network})
def _create_subnet(self, tenant_id, subnet_id, shared, cidr=None):
@@ -76,7 +76,7 @@ class NetworkRBACTestCase(testlib_api.SqlTestCase):
'allocation_pools': constants.ATTR_NOT_SPECIFIED,
'dns_nameservers': constants.ATTR_NOT_SPECIFIED,
'host_routes': constants.ATTR_NOT_SPECIFIED}
- return self.plugin.create_subnet(self.cxt, {'subnet': subnet})
+ return self.plugin.create_subnet(self.ctx, {'subnet': subnet})
def _create_port(self, tenant_id, network_id, port_id):
port = {'tenant_id': tenant_id,
@@ -89,7 +89,7 @@ class NetworkRBACTestCase(testlib_api.SqlTestCase):
'device_id': 'test_dev_id',
'device_owner': constants.DEVICE_OWNER_COMPUTE_PREFIX,
'fixed_ips': constants.ATTR_NOT_SPECIFIED}
- return self.plugin.create_port(self.cxt, {'port': port})
+ return self.plugin.create_port(self.ctx, {'port': port})
def _list_networks(self, ctx):
return self.plugin.get_networks(ctx)
@@ -100,7 +100,7 @@ class NetworkRBACTestCase(testlib_api.SqlTestCase):
else:
action = 'access_as_shared'
rbac = network_obj.NetworkRBAC.get_object(
- self.cxt, object_id=network_id, action=action, target_project='*')
+ self.ctx, object_id=network_id, action=action, target_project='*')
if is_none:
self.assertIsNone(rbac)
else:
@@ -214,24 +214,24 @@ class NetworkRBACTestCase(testlib_api.SqlTestCase):
def test_ensure_no_port_in_asterisk(self):
self._create_network(self.tenant_1, self.network_id, True)
self.plugin.ensure_no_tenant_ports_on_network(
- self.cxt, self.network_id, self.tenant_1, '*')
+ self.ctx, self.network_id, self.tenant_1, '*')
def test_ensure_no_port_in_tenant_1(self):
self._create_network(self.tenant_1, self.network_id, True)
self.plugin.ensure_no_tenant_ports_on_network(
- self.cxt, self.network_id, self.tenant_1, self.tenant_1)
+ self.ctx, self.network_id, self.tenant_1, self.tenant_1)
def test_ensure_no_port_in_tenant_2(self):
self._create_network(self.tenant_1, self.network_id, True)
self.plugin.ensure_no_tenant_ports_on_network(
- self.cxt, self.network_id, self.tenant_1, self.tenant_2)
+ self.ctx, self.network_id, self.tenant_1, self.tenant_2)
def test_ensure_port_tenant_1_in_asterisk(self):
self._create_network(self.tenant_1, self.network_id, True)
self._create_subnet(self.tenant_1, self.subnet_1_id, True)
self._create_port(self.tenant_1, self.network_id, self.port_id)
self.plugin.ensure_no_tenant_ports_on_network(
- self.cxt, self.network_id, self.tenant_1, '*')
+ self.ctx, self.network_id, self.tenant_1, '*')
def test_ensure_port_tenant_2_in_asterisk(self):
self._create_network(self.tenant_1, self.network_id, True)
@@ -239,21 +239,21 @@ class NetworkRBACTestCase(testlib_api.SqlTestCase):
self._create_port(self.tenant_2, self.network_id, self.port_id)
self.assertRaises(n_exc.InvalidSharedSetting,
self.plugin.ensure_no_tenant_ports_on_network,
- self.cxt, self.network_id, self.tenant_1, '*')
+ self.ctx, self.network_id, self.tenant_1, '*')
def test_ensure_port_tenant_1_in_tenant_1(self):
self._create_network(self.tenant_1, self.network_id, True)
self._create_subnet(self.tenant_1, self.subnet_1_id, True)
self._create_port(self.tenant_1, self.network_id, self.port_id)
self.plugin.ensure_no_tenant_ports_on_network(
- self.cxt, self.network_id, self.tenant_1, self.tenant_1)
+ self.ctx, self.network_id, self.tenant_1, self.tenant_1)
def test_ensure_no_share_port_tenant_2_in_tenant_1(self):
self._create_network(self.tenant_1, self.network_id, False)
self._create_subnet(self.tenant_1, self.subnet_1_id, True)
self._create_port(self.tenant_2, self.network_id, self.port_id)
self.plugin.ensure_no_tenant_ports_on_network(
- self.cxt, self.network_id, self.tenant_1, self.tenant_1)
+ self.ctx, self.network_id, self.tenant_1, self.tenant_1)
def test_ensure_no_share_port_tenant_2_in_tenant_2(self):
self._create_network(self.tenant_1, self.network_id, False)
@@ -261,5 +261,5 @@ class NetworkRBACTestCase(testlib_api.SqlTestCase):
self._create_port(self.tenant_2, self.network_id, self.port_id)
self.assertRaises(n_exc.InvalidSharedSetting,
self.plugin.ensure_no_tenant_ports_on_network,
- self.cxt, self.network_id, self.tenant_1,
+ self.ctx, self.network_id, self.tenant_1,
self.tenant_2)
diff --git a/neutron/tests/functional/pecan_wsgi/test_functional.py b/neutron/tests/functional/pecan_wsgi/test_functional.py
index f5b5c7eb33..4698acf201 100644
--- a/neutron/tests/functional/pecan_wsgi/test_functional.py
+++ b/neutron/tests/functional/pecan_wsgi/test_functional.py
@@ -43,8 +43,11 @@ class InjectContext(base.ConfigurableMiddleware):
# Determine the tenant
tenant_id = req.headers.get('X_PROJECT_ID')
- # Suck out the roles
- roles = [r.strip() for r in req.headers.get('X_ROLES', '').split(',')]
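+ # Default to plain member/reader roles when no X_ROLES header is set.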
+ roles = ['member', 'reader']
+ # Extract the roles if any are set
+ custom_roles = req.headers.get('X_ROLES')
+ if custom_roles:
+ roles = [r.strip() for r in custom_roles.split(',')]
# Human-friendly names
tenant_name = req.headers.get('X_PROJECT_NAME')
diff --git a/neutron/tests/functional/plugins/ml2/drivers/ovn/mech_driver/ovsdb/extensions/test_qos.py b/neutron/tests/functional/plugins/ml2/drivers/ovn/mech_driver/ovsdb/extensions/test_qos.py
index 60222c9cf2..8edcf60b11 100644
--- a/neutron/tests/functional/plugins/ml2/drivers/ovn/mech_driver/ovsdb/extensions/test_qos.py
+++ b/neutron/tests/functional/plugins/ml2/drivers/ovn/mech_driver/ovsdb/extensions/test_qos.py
@@ -15,12 +15,15 @@
import copy
from unittest import mock
+from neutron_lib.api.definitions import external_net
+from neutron_lib.api.definitions import provider_net as pnet
from neutron_lib import constants
from neutron_lib.services.qos import constants as qos_constants
from ovsdbapp.backend.ovs_idl import idlutils
from neutron.common.ovn import constants as ovn_const
from neutron.common.ovn import utils as ovn_utils
+from neutron.common import utils
from neutron.db import l3_db
from neutron.plugins.ml2.drivers.ovn.mech_driver.ovsdb.extensions \
import qos as qos_extension
@@ -62,39 +65,11 @@ QOS_RULES_3 = {
}
-class TestOVNClientQosExtension(base.TestOVNFunctionalBase):
-
- def setUp(self, maintenance_worker=False):
- super(TestOVNClientQosExtension, self).setUp(
- maintenance_worker=maintenance_worker)
- self._add_logical_switch()
- self.qos_driver = qos_extension.OVNClientQosExtension(
- nb_idl=self.nb_api)
- self.gw_port_id = 'gw_port_id'
- self._mock_get_router = mock.patch.object(l3_db.L3_NAT_dbonly_mixin,
- '_get_router')
- self.mock_get_router = self._mock_get_router.start()
- self.mock_get_router.return_value = {'gw_port_id': self.gw_port_id}
- self._mock_qos_rules = mock.patch.object(self.qos_driver,
- '_qos_rules')
- self.mock_qos_rules = self._mock_qos_rules.start()
- self.fip = {'router_id': 'router_id', 'qos_policy_id': 'qos_policy_id',
- 'floating_network_id': self.network_1,
- 'id': 'fip_id', 'floating_ip_address': '1.2.3.4'}
-
- def _add_logical_switch(self):
- self.network_1 = 'network_1'
- with self.nb_api.transaction(check_error=True) as txn:
- txn.add(self.nb_api.ls_add(ovn_utils.ovn_name(self.network_1)))
-
- def _add_logical_switch_port(self, port_id):
- with self.nb_api.transaction(check_error=True) as txn:
- txn.add(self.nb_api.lsp_add(
- ovn_utils.ovn_name(self.network_1), port_id,
- options={'requested-chassis': 'compute1'}))
+class TestOVNClientQosExtensionBase(base.TestOVNFunctionalBase):
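+ # Shared rule-checking helpers used by the QoS test classes below.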
def _check_rules(self, rules, port_id, network_id, fip_id=None,
- ip_address=None):
+ ip_address=None, check_min_rate=True,
+ expected_ext_ids=None):
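+ # check_min_rate skips the LSP min-rate option validation; if
+ # expected_ext_ids is set, every rule's external_ids must match it.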
egress_ovn_rule = self.qos_driver._ovn_qos_rule(
constants.EGRESS_DIRECTION, rules.get(constants.EGRESS_DIRECTION),
port_id, network_id, fip_id=fip_id, ip_address=ip_address)
@@ -105,7 +80,7 @@ class TestOVNClientQosExtension(base.TestOVNFunctionalBase):
with self.nb_api.transaction(check_error=True):
ls = self.qos_driver.nb_idl.lookup(
- 'Logical_Switch', ovn_utils.ovn_name(self.network_1))
+ 'Logical_Switch', ovn_utils.ovn_name(network_id))
try:
lsp = self.qos_driver.nb_idl.lsp_get(port_id).execute(
check_error=True)
@@ -118,6 +93,8 @@ class TestOVNClientQosExtension(base.TestOVNFunctionalBase):
self.assertEqual(len(rules), len(ls.qos_rules))
for rule in ls.qos_rules:
+ if expected_ext_ids:
+ self.assertDictEqual(expected_ext_ids, rule.external_ids)
ref_rule = (egress_ovn_rule if rule.direction == 'from-lport'
else ingress_ovn_rule)
action = {}
@@ -133,10 +110,42 @@ class TestOVNClientQosExtension(base.TestOVNFunctionalBase):
self.assertEqual(bandwidth, rule.bandwidth)
min_rate = rules.get(constants.EGRESS_DIRECTION, {}).get(
qos_constants.RULE_TYPE_MINIMUM_BANDWIDTH)
- if min_rate is not None:
+ if min_rate is not None and check_min_rate:
min_ovn = lsp.options.get(ovn_const.LSP_OPTIONS_QOS_MIN_RATE)
self.assertEqual(str(min_rate['min_kbps']), min_ovn)
+
+class TestOVNClientQosExtension(TestOVNClientQosExtensionBase):
+
+ def setUp(self, maintenance_worker=False):
+ super(TestOVNClientQosExtension, self).setUp(
+ maintenance_worker=maintenance_worker)
+ self._add_logical_switch()
+ self.qos_driver = qos_extension.OVNClientQosExtension(
+ nb_idl=self.nb_api)
+ self.gw_port_id = 'gw_port_id'
+ self._mock_get_router = mock.patch.object(l3_db.L3_NAT_dbonly_mixin,
+ '_get_router')
+ self.mock_get_router = self._mock_get_router.start()
+ self.mock_get_router.return_value = {'gw_port_id': self.gw_port_id}
+ self._mock_qos_rules = mock.patch.object(self.qos_driver,
+ '_qos_rules')
+ self.mock_qos_rules = self._mock_qos_rules.start()
+ self.fip = {'router_id': 'router_id', 'qos_policy_id': 'qos_policy_id',
+ 'floating_network_id': self.network_1,
+ 'id': 'fip_id', 'floating_ip_address': '1.2.3.4'}
+
+ def _add_logical_switch(self):
+ self.network_1 = 'network_1'
+ with self.nb_api.transaction(check_error=True) as txn:
+ txn.add(self.nb_api.ls_add(ovn_utils.ovn_name(self.network_1)))
+
+ def _add_logical_switch_port(self, port_id):
+ with self.nb_api.transaction(check_error=True) as txn:
+ txn.add(self.nb_api.lsp_add(
+ ovn_utils.ovn_name(self.network_1), port_id,
+ options={'requested-chassis': 'compute1'}))
+
def test__update_port_qos_rules(self):
port = 'port1'
self._add_logical_switch_port(port)
@@ -183,3 +192,130 @@ class TestOVNClientQosExtension(base.TestOVNFunctionalBase):
fip_dict = {'floating_network_id': self.fip['floating_network_id'],
'id': self.fip['id']}
self._update_fip_and_check(fip_dict, {})
+
+
+class TestOVNClientQosExtensionEndToEnd(TestOVNClientQosExtensionBase):
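+ """End-to-end tests driving QoS through the L3 plugin's OVN client."""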
+
+ def setUp(self, maintenance_worker=False):
+ super(TestOVNClientQosExtensionEndToEnd, self).setUp(
+ maintenance_worker=maintenance_worker)
+ self.qos_driver = self.l3_plugin._ovn_client._qos_driver
+ self._mock_qos_rules = mock.patch.object(self.qos_driver, '_qos_rules')
+ self.mock_qos_rules = self._mock_qos_rules.start()
+
+ def _create_router(self, name, gw_info=None, az_hints=None):
+ router = {'router':
+ {'name': name,
+ 'admin_state_up': True,
+ 'tenant_id': self._tenant_id}}
+ if az_hints:
+ router['router']['availability_zone_hints'] = az_hints
+ if gw_info:
+ router['router']['external_gateway_info'] = gw_info
+ return self.l3_plugin.create_router(self.context, router)
+
+ def _create_ext_network(self, name, net_type, physnet, seg,
+ gateway, cidr):
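+ # Helper to build an external provider network, optionally with
+ # a segmentation ID, physnet and subnet, as an admin request.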
+ arg_list = (pnet.NETWORK_TYPE, external_net.EXTERNAL,)
+ net_arg = {pnet.NETWORK_TYPE: net_type,
+ external_net.EXTERNAL: True}
+ if seg:
+ arg_list = arg_list + (pnet.SEGMENTATION_ID,)
+ net_arg[pnet.SEGMENTATION_ID] = seg
+ if physnet:
+ arg_list = arg_list + (pnet.PHYSICAL_NETWORK,)
+ net_arg[pnet.PHYSICAL_NETWORK] = physnet
+ network = self._make_network(self.fmt, name, True,
+ as_admin=True,
+ arg_list=arg_list, **net_arg)
+ if cidr:
+ self._make_subnet(self.fmt, network, gateway, cidr,
+ ip_version=constants.IP_VERSION_4)
+ return network
+
+ def test_create_router_gateway_ip_qos(self):
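+ # Make sure both traffic directions are present in the mocked rules;
+ # missing directions default to an empty rule set.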
+ _qos_rules = copy.deepcopy(QOS_RULES_1)
+ for direction in constants.VALID_DIRECTIONS:
+ _qos_rules[direction] = _qos_rules.get(direction, {})
+ self.mock_qos_rules.return_value = _qos_rules
+
+ network = self._create_ext_network(
+ utils.get_rand_name(), 'flat', 'physnet4',
+ None, "110.0.0.1", "110.0.0.0/24")
+ gw_info = {'network_id': network['network']['id']}
+ router = self._create_router(utils.get_rand_name(), gw_info=gw_info)
+
+ self._check_rules(
+ _qos_rules, router['gw_port_id'],
+ network['network']['id'],
+ check_min_rate=False,
+ expected_ext_ids={
+ ovn_const.OVN_ROUTER_ID_EXT_ID_KEY: router['id']})
+ self.l3_plugin.delete_router(self.context, router['id'])
+
+ def test_delete_router_gateway_ip_qos_rules_removed(self):
+ _qos_rules = copy.deepcopy(QOS_RULES_1)
+ for direction in constants.VALID_DIRECTIONS:
+ _qos_rules[direction] = _qos_rules.get(direction, {})
+ self.mock_qos_rules.return_value = _qos_rules
+
+ network = self._create_ext_network(
+ utils.get_rand_name(), 'flat', 'physnet4',
+ None, "120.0.0.1", "120.0.0.0/24")
+ gw_info = {'network_id': network['network']['id']}
+ router = self._create_router(utils.get_rand_name(), gw_info=gw_info)
+
+ self._check_rules(
+ _qos_rules, router['gw_port_id'],
+ network['network']['id'],
+ check_min_rate=False,
+ expected_ext_ids={
+ ovn_const.OVN_ROUTER_ID_EXT_ID_KEY: router['id']})
+ ls = self.qos_driver.nb_idl.lookup(
+ 'Logical_Switch', ovn_utils.ovn_name(network['network']['id']))
+ self.assertNotEqual([], ls.qos_rules)
+
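+ # Deleting the router should remove its gateway QoS rules from the
+ # logical switch.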
+ self.l3_plugin.delete_router(self.context, router['id'])
+ self.assertEqual([], ls.qos_rules)
+
+ def test_update_gateway_ip_qos(self):
+ network = self._create_ext_network(
+ utils.get_rand_name(), 'flat', 'physnet4',
+ None, "130.0.0.1", "130.0.0.0/24")
+ gw_info = {'network_id': network['network']['id']}
+ router = self._create_router(utils.get_rand_name(), gw_info=gw_info)
+
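+ # No QoS policy has been applied yet, so the gateway logical switch
+ # should have no QoS rules.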
+ ls = self.qos_driver.nb_idl.lookup(
+ 'Logical_Switch', ovn_utils.ovn_name(network['network']['id']))
+ self.assertEqual([], ls.qos_rules)
+
+ def update_and_check(qos_rules):
+ _qos_rules = copy.deepcopy(qos_rules)
+ for direction in constants.VALID_DIRECTIONS:
+ _qos_rules[direction] = _qos_rules.get(direction, {})
+ self.mock_qos_rules.return_value = _qos_rules
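+ # Toggle admin_state_up to force a router update that re-applies
+ # the gateway QoS rules.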
+ self.l3_plugin.update_router(
+ self.context, router['id'],
+ {'router': {'admin_state_up': False}})
+ self.l3_plugin.update_router(
+ self.context, router['id'],
+ {'router': {'admin_state_up': True}})
+ self._check_rules(
+ qos_rules, router['gw_port_id'], network['network']['id'],
+ check_min_rate=False,
+ expected_ext_ids={
+ ovn_const.OVN_ROUTER_ID_EXT_ID_KEY: router['id']})
+
+ update_and_check(QOS_RULES_0)
+ update_and_check(QOS_RULES_1)
+ update_and_check(QOS_RULES_2)
+ update_and_check(QOS_RULES_3)
+ update_and_check({})
+
+ self.l3_plugin.delete_router(self.context, router['id'])
diff --git a/neutron/tests/functional/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_maintenance.py b/neutron/tests/functional/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_maintenance.py
index 6a23b3cc32..a4111d453a 100644
--- a/neutron/tests/functional/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_maintenance.py
+++ b/neutron/tests/functional/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_maintenance.py
@@ -55,9 +55,9 @@ class _TestMaintenanceHelper(base.TestOVNFunctionalBase):
return row
def _create_network(self, name, external=False):
- data = {'network': {'name': name, 'tenant_id': self._tenant_id,
- extnet_apidef.EXTERNAL: external}}
- req = self.new_create_request('networks', data, self.fmt)
+ data = {'network': {'name': name, extnet_apidef.EXTERNAL: external}}
+ req = self.new_create_request('networks', data, self.fmt,
+ as_admin=True)
res = req.get_response(self.api)
return self.deserialize(self.fmt, res)['network']
@@ -70,7 +70,6 @@ class _TestMaintenanceHelper(base.TestOVNFunctionalBase):
def _create_port(self, name, net_id, security_groups=None,
device_owner=None):
data = {'port': {'name': name,
- 'tenant_id': self._tenant_id,
'network_id': net_id}}
if security_groups is not None:
@@ -125,7 +124,6 @@ class _TestMaintenanceHelper(base.TestOVNFunctionalBase):
data = {'subnet': {'name': name,
'network_id': net_id,
'ip_version': ip_version,
- 'tenant_id': self._tenant_id,
'cidr': cidr,
'enable_dhcp': True}}
data['subnet'].update(kwargs)
@@ -146,10 +144,13 @@ class _TestMaintenanceHelper(base.TestOVNFunctionalBase):
return row
def _create_router(self, name, external_gateway_info=None):
- data = {'router': {'name': name, 'tenant_id': self._tenant_id}}
+ data = {'router': {'name': name}}
+ as_admin = False
if external_gateway_info is not None:
data['router']['external_gateway_info'] = external_gateway_info
- req = self.new_create_request('routers', data, self.fmt)
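+ # Setting enable_snat explicitly is an admin-only operation.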
+ as_admin = bool(external_gateway_info.get('enable_snat'))
+ req = self.new_create_request('routers', data, self.fmt,
+ as_admin=as_admin)
res = req.get_response(self.api)
return self.deserialize(self.fmt, res)['router']
@@ -167,7 +168,6 @@ class _TestMaintenanceHelper(base.TestOVNFunctionalBase):
def _create_security_group(self):
data = {'security_group': {'name': 'sgtest',
- 'tenant_id': self._tenant_id,
'description': 'SpongeBob Rocks!'}}
req = self.new_create_request('security-groups', data, self.fmt)
res = req.get_response(self.api)
@@ -183,8 +183,7 @@ class _TestMaintenanceHelper(base.TestOVNFunctionalBase):
'protocol': n_const.PROTO_NAME_TCP,
'ethertype': n_const.IPv4,
'port_range_min': 22,
- 'port_range_max': 22,
- 'tenant_id': self._tenant_id}}
+ 'port_range_max': 22}}
req = self.new_create_request('security-group-rules', data, self.fmt)
res = req.get_response(self.api)
return self.deserialize(self.fmt, res)['security_group_rule']
@@ -772,8 +771,8 @@ class TestMaintenance(_TestMaintenanceHelper):
p1 = self._create_port('testp1', net1['id'])
logical_ip = p1['fixed_ips'][0]['ip_address']
fip_info = {'floatingip': {
- 'description': 'test_fip',
'tenant_id': self._tenant_id,
+ 'description': 'test_fip',
'floating_network_id': ext_net['id'],
'port_id': p1['id'],
'fixed_ip_address': logical_ip}}
diff --git a/neutron/tests/functional/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_ovn_db_sync.py b/neutron/tests/functional/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_ovn_db_sync.py
index 892280f3bc..67aee6ba9a 100644
--- a/neutron/tests/functional/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_ovn_db_sync.py
+++ b/neutron/tests/functional/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_ovn_db_sync.py
@@ -365,7 +365,7 @@ class TestOvnNbSync(base.TestOVNFunctionalBase):
port_req.get_response(self.api)
# External network and subnet
- e1 = self._make_network(self.fmt, 'e1', True,
+ e1 = self._make_network(self.fmt, 'e1', True, as_admin=True,
arg_list=('router:external',
'provider:network_type',
'provider:physical_network'),
@@ -1608,20 +1608,23 @@ class TestOvnNbSync(base.TestOVNFunctionalBase):
net = self.deserialize(self.fmt, res)['network']
self._create_subnet(self.fmt, net['id'], '10.0.0.0/24')
- res = self._create_qos_policy(self.fmt, 'qos_maxbw')
+ res = self._create_qos_policy(self.fmt, 'qos_maxbw', is_admin=True)
qos_maxbw = self.deserialize(self.fmt, res)['policy']
self._create_qos_rule(self.fmt, qos_maxbw['id'],
qos_const.RULE_TYPE_BANDWIDTH_LIMIT,
- max_kbps=1000, max_burst_kbps=800)
+ max_kbps=1000, max_burst_kbps=800,
+ is_admin=True)
self._create_qos_rule(self.fmt, qos_maxbw['id'],
qos_const.RULE_TYPE_BANDWIDTH_LIMIT,
direction=constants.INGRESS_DIRECTION,
- max_kbps=700, max_burst_kbps=600)
+ max_kbps=700, max_burst_kbps=600,
+ is_admin=True)
- res = self._create_qos_policy(self.fmt, 'qos_maxbw')
+ res = self._create_qos_policy(self.fmt, 'qos_maxbw', is_admin=True)
qos_dscp = self.deserialize(self.fmt, res)['policy']
self._create_qos_rule(self.fmt, qos_dscp['id'],
- qos_const.RULE_TYPE_DSCP_MARKING, dscp_mark=14)
+ qos_const.RULE_TYPE_DSCP_MARKING, dscp_mark=14,
+ is_admin=True)
res = self._create_port(
self.fmt, net['id'], arg_list=('qos_policy_id', ),
@@ -1677,7 +1680,7 @@ class TestOvnNbSync(base.TestOVNFunctionalBase):
{'floatingip': body})
def test_sync_fip_qos_policies(self):
- res = self._create_network(self.fmt, 'n1_ext', True,
+ res = self._create_network(self.fmt, 'n1_ext', True, as_admin=True,
arg_list=('router:external', ),
**{'router:external': True})
net_ext = self.deserialize(self.fmt, res)['network']
@@ -1687,15 +1690,17 @@ class TestOvnNbSync(base.TestOVNFunctionalBase):
net_int = self.deserialize(self.fmt, res)['network']
self._create_subnet(self.fmt, net_int['id'], '10.10.0.0/24')
- res = self._create_qos_policy(self.fmt, 'qos_maxbw')
+ res = self._create_qos_policy(self.fmt, 'qos_maxbw', is_admin=True)
qos_maxbw = self.deserialize(self.fmt, res)['policy']
self._create_qos_rule(self.fmt, qos_maxbw['id'],
qos_const.RULE_TYPE_BANDWIDTH_LIMIT,
- max_kbps=1000, max_burst_kbps=800)
+ max_kbps=1000, max_burst_kbps=800,
+ is_admin=True)
self._create_qos_rule(self.fmt, qos_maxbw['id'],
qos_const.RULE_TYPE_BANDWIDTH_LIMIT,
direction=constants.INGRESS_DIRECTION,
- max_kbps=700, max_burst_kbps=600)
+ max_kbps=700, max_burst_kbps=600,
+ is_admin=True)
# Create a router with net_ext as GW network and net_int as internal
# one, and a floating IP on the external network.
@@ -1750,7 +1755,7 @@ class TestOvnNbSync(base.TestOVNFunctionalBase):
self._validate_qos_records()
def test_fip_nat_revert_to_stateful(self):
- res = self._create_network(self.fmt, 'n1_ext', True,
+ res = self._create_network(self.fmt, 'n1_ext', True, as_admin=True,
arg_list=('router:external', ),
**{'router:external': True})
net_ext = self.deserialize(self.fmt, res)['network']
diff --git a/neutron/tests/functional/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_ovsdb_monitor.py b/neutron/tests/functional/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_ovsdb_monitor.py
index 5a6073cfb2..1eea038c01 100644
--- a/neutron/tests/functional/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_ovsdb_monitor.py
+++ b/neutron/tests/functional/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_ovsdb_monitor.py
@@ -103,12 +103,13 @@ class TestNBDbMonitor(base.TestOVNFunctionalBase):
allowedaddresspairs.ADDRESS_PAIRS: allowed_address_pairs
}
port_res = self._create_port(self.fmt, self.net['network']['id'],
+ is_admin=True,
arg_list=arg_list, **host_arg)
port = self.deserialize(self.fmt, port_res)['port']
return port
def _create_fip(self, port, fip_address):
- e1 = self._make_network(self.fmt, 'e1', True,
+ e1 = self._make_network(self.fmt, 'e1', True, as_admin=True,
arg_list=('router:external',
'provider:network_type',
'provider:physical_network'),
@@ -403,7 +404,8 @@ class TestSBDbMonitor(base.TestOVNFunctionalBase, test_l3.L3NatTestCaseMixin):
kwargs = {'arg_list': (external_net.EXTERNAL,),
external_net.EXTERNAL: True}
- ext_net = self._make_network(self.fmt, 'ext_net', True, **kwargs)
+ ext_net = self._make_network(self.fmt, 'ext_net', True, as_admin=True,
+ **kwargs)
self._make_subnet(self.fmt, ext_net, '10.251.0.1', '10.251.0.0/24',
enable_dhcp=True)
router = self._make_router(self.fmt, self._tenant_id)
diff --git a/neutron/tests/functional/plugins/ml2/drivers/ovn/mech_driver/test_mech_driver.py b/neutron/tests/functional/plugins/ml2/drivers/ovn/mech_driver/test_mech_driver.py
index 18f3f0d554..e1abd67c8e 100644
--- a/neutron/tests/functional/plugins/ml2/drivers/ovn/mech_driver/test_mech_driver.py
+++ b/neutron/tests/functional/plugins/ml2/drivers/ovn/mech_driver/test_mech_driver.py
@@ -101,20 +101,21 @@ class TestPortBinding(base.TestOVNFunctionalBase):
'network_id': self.n1['network']['id'],
'tenant_id': self._tenant_id})
- port_req = self.new_create_request('ports', port_data, self.fmt)
+ port_req = self.new_create_request('ports', port_data, self.fmt,
+ as_admin=True)
port_res = port_req.get_response(self.api)
p = self.deserialize(self.fmt, port_res)
port_id = p['port']['id']
else:
port_req = self.new_update_request('ports', port_data, port_id,
- self.fmt)
+ self.fmt, as_admin=True)
port_res = port_req.get_response(self.api)
self.deserialize(self.fmt, port_res)
return port_id
def _port_show(self, port_id):
- port_req = self.new_show_request('ports', port_id)
+ port_req = self.new_show_request('ports', port_id, as_admin=True)
port_res = port_req.get_response(self.api)
return self.deserialize(self.fmt, port_res)
@@ -715,13 +716,13 @@ class TestExternalPorts(base.TestOVNFunctionalBase):
def _test_external_port_create_switchdev(self, vnic_type):
port_data = {
'port': {'network_id': self.n1['network']['id'],
- 'tenant_id': self._tenant_id,
portbindings.VNIC_TYPE: vnic_type,
ovn_const.OVN_PORT_BINDING_PROFILE: {
ovn_const.PORT_CAP_PARAM: [
ovn_const.PORT_CAP_SWITCHDEV]}}}
- port_req = self.new_create_request('ports', port_data, self.fmt)
+ port_req = self.new_create_request('ports', port_data, self.fmt,
+ as_admin=True)
port_res = port_req.get_response(self.api)
port = self.deserialize(self.fmt, port_res)['port']
@@ -769,7 +770,8 @@ class TestExternalPorts(base.TestOVNFunctionalBase):
ovn_const.PORT_CAP_PARAM: [
ovn_const.PORT_CAP_SWITCHDEV]}}}
port_req = self.new_update_request(
- 'ports', port_upt_data, port['id'], self.fmt)
+ 'ports', port_upt_data, port['id'], self.fmt,
+ as_admin=True)
port_res = port_req.get_response(self.api)
port = self.deserialize(self.fmt, port_res)['port']
@@ -948,7 +950,7 @@ class TestProvnetPorts(base.TestOVNFunctionalBase):
def test_network_segments_localnet_ports(self):
n1 = self._make_network(
- self.fmt, 'n1', True,
+ self.fmt, 'n1', True, as_admin=True,
arg_list=('provider:network_type',
'provider:segmentation_id',
'provider:physical_network'),
@@ -995,6 +997,7 @@ class TestMetadataPorts(base.TestOVNFunctionalBase):
def setUp(self, *args, **kwargs):
super().setUp(*args, **kwargs)
+ self.plugin = self.mech_driver._plugin
self._ovn_client = self.mech_driver._ovn_client
self.meta_regex = re.compile(r'%s,(\d+\.\d+\.\d+\.\d+)' %
constants.METADATA_V4_CIDR)
@@ -1017,7 +1020,7 @@ class TestMetadataPorts(base.TestOVNFunctionalBase):
res = self._list_ports(self.fmt, net_id=net_id)
return self.deserialize(self.fmt, res)['ports']
- def _check_metadata_port(self, net_id, fixed_ip):
+ def _check_metadata_port(self, net_id, fixed_ip, fail=True):
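+ # With fail=False, return None instead of failing the test when the
+ # metadata port is absent.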
for port in self._list_ports_ovn(net_id=net_id):
if ovn_client.OVNClient.is_metadata_port(port):
self.assertEqual(net_id, port['network_id'])
@@ -1027,13 +1030,17 @@ class TestMetadataPorts(base.TestOVNFunctionalBase):
self.assertEqual([], port['fixed_ips'])
return port['id']
- self.fail('Metadata port is not present in network %s or data is not '
- 'correct' % self.n1_id)
+ if fail:
+ self.fail('Metadata port is not present in network %s or data is '
+ 'not correct' % self.n1_id)
def _check_subnet_dhcp_options(self, subnet_id, cidr):
- # This method checks the DHCP options CIDR and returns, if exits, the
- # metadata port IP address, included in the classless static routes.
+ # This method checks DHCP options for a subnet ID, and if they exist,
+ # verifies the CIDR matches. Returns the metadata port IP address
+ # if it is included in the classless static routes, else returns None.
dhcp_opts = self._ovn_client._nb_idl.get_subnet_dhcp_options(subnet_id)
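+ # No DHCP options exist for this subnet (e.g. DHCP is disabled).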
+ if not dhcp_opts['subnet']:
+ return
self.assertEqual(cidr, dhcp_opts['subnet']['cidr'])
routes = dhcp_opts['subnet']['options'].get('classless_static_route')
if not routes:
@@ -1062,6 +1069,35 @@ class TestMetadataPorts(base.TestOVNFunctionalBase):
fixed_ip = {'subnet_id': subnet['id'], 'ip_address': metatada_ip}
self._check_metadata_port(self.n1_id, fixed_ip)
+ def test_update_subnet_ipv4(self):
+ self._create_network_ovn(metadata_enabled=True)
+ subnet = self._create_subnet_ovn('10.0.0.0/24')
+ metadata_ip = self._check_subnet_dhcp_options(subnet['id'],
+ '10.0.0.0/24')
+ fixed_ip = {'subnet_id': subnet['id'], 'ip_address': metadata_ip}
+ port_id = self._check_metadata_port(self.n1_id, fixed_ip)
+
+ # Disable DHCP; the metadata port should still be present
+ subnet['enable_dhcp'] = False
+ self._ovn_client.update_subnet(self.context, subnet,
+ self.n1['network'])
+ port_id = self._check_metadata_port(self.n1_id, None)
+ self.assertIsNone(self._check_subnet_dhcp_options(subnet['id'], []))
+
+ # Delete metadata port
+ self.plugin.delete_port(self.context, port_id)
+ port_id = self._check_metadata_port(self.n1_id, None, fail=False)
+ self.assertIsNone(port_id)
+
+ # Re-enable DHCP; the metadata port should be re-created
+ subnet['enable_dhcp'] = True
+ self._ovn_client.update_subnet(self.context, subnet,
+ self.n1['network'])
+ metadata_ip = self._check_subnet_dhcp_options(subnet['id'],
+ '10.0.0.0/24')
+ fixed_ip = {'subnet_id': subnet['id'], 'ip_address': metadata_ip}
+ port_id = self._check_metadata_port(self.n1_id, fixed_ip)
+
def test_subnet_ipv4_no_metadata(self):
self._create_network_ovn(metadata_enabled=False)
subnet = self._create_subnet_ovn('10.0.0.0/24')
diff --git a/neutron/tests/functional/plugins/ml2/test_plugin.py b/neutron/tests/functional/plugins/ml2/test_plugin.py
index 667ef651a4..16fde03b09 100644
--- a/neutron/tests/functional/plugins/ml2/test_plugin.py
+++ b/neutron/tests/functional/plugins/ml2/test_plugin.py
@@ -50,6 +50,7 @@ class TestMl2PortBinding(ml2_test_base.ML2TestFramework,
with self.subnet(network=network) as subnet:
with self.port(
subnet=subnet, device_owner=DEVICE_OWNER_COMPUTE,
+ is_admin=True,
arg_list=(portbindings.HOST_ID, 'admin_state_up',),
**self.host_args) as port:
# Note: Port creation invokes _bind_port_if_needed(),
@@ -65,6 +66,7 @@ class TestMl2PortBinding(ml2_test_base.ML2TestFramework,
with self.subnet(network=network) as subnet:
with self.port(
subnet=subnet, device_owner=DEVICE_OWNER_COMPUTE,
+ is_admin=True,
arg_list=(portbindings.HOST_ID, 'admin_state_up',),
**self.host_args) as port:
# Since the agent is dead, expect binding to fail
@@ -88,6 +90,7 @@ class TestMl2PortBinding(ml2_test_base.ML2TestFramework,
with self.subnet(network=network) as subnet:
with self.port(
subnet=subnet, device_owner=DEVICE_OWNER_COMPUTE,
+ is_admin=True,
arg_list=(portbindings.HOST_ID, 'admin_state_up',),
**self.host_args) as port:
pass
diff --git a/neutron/tests/functional/services/l3_router/test_l3_dvr_ha_router_plugin.py b/neutron/tests/functional/services/l3_router/test_l3_dvr_ha_router_plugin.py
index 0b6019fefb..ac2aac1cdc 100644
--- a/neutron/tests/functional/services/l3_router/test_l3_dvr_ha_router_plugin.py
+++ b/neutron/tests/functional/services/l3_router/test_l3_dvr_ha_router_plugin.py
@@ -134,21 +134,25 @@ class L3DvrHATestCase(test_l3_dvr_router_plugin.L3DvrTestCase):
self.subnet(cidr='30.0.0.0/24') as subnet2, \
self.subnet(cidr='40.0.0.0/24') as subnet3, \
self.port(subnet=subnet1,
+ is_admin=True,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=arg_list,
**{portbindings.HOST_ID: HOST1}), \
self.port(subnet=subnet2,
+ is_admin=True,
device_owner=constants.DEVICE_OWNER_DHCP,
arg_list=arg_list,
**{portbindings.HOST_ID: HOST2}), \
self.port(subnet=subnet3,
+ is_admin=True,
device_owner=constants.DEVICE_OWNER_NETWORK_PREFIX,
arg_list=arg_list,
**{portbindings.HOST_ID: HOST3}):
# make net external
ext_net_id = ext_subnet['subnet']['network_id']
self._update('networks', ext_net_id,
- {'network': {extnet_apidef.EXTERNAL: True}})
+ {'network': {extnet_apidef.EXTERNAL: True}},
+ as_admin=True)
with mock.patch.object(self.l3_plugin.l3_rpc_notifier.client,
'prepare') as mock_prepare:
# add external gateway to router
@@ -231,7 +235,7 @@ class L3DvrHATestCase(test_l3_dvr_router_plugin.L3DvrTestCase):
kwargs = {'arg_list': (extnet_apidef.EXTERNAL,),
extnet_apidef.EXTERNAL: True}
with self.subnet() as subnet, \
- self.network(**kwargs) as ext_net, \
+ self.network(as_admin=True, **kwargs) as ext_net, \
self.subnet(network=ext_net, cidr='20.0.0.0/24'):
gw_info = {'network_id': ext_net['network']['id']}
self.l3_plugin.update_router(
@@ -256,7 +260,7 @@ class L3DvrHATestCase(test_l3_dvr_router_plugin.L3DvrTestCase):
router = self._create_router(distributed=True, ha=True)
kwargs = {'arg_list': (extnet_apidef.EXTERNAL,),
extnet_apidef.EXTERNAL: True}
- with self.network(**kwargs) as ext_net, \
+ with self.network(as_admin=True, **kwargs) as ext_net, \
self.subnet(network=ext_net), \
self.subnet(cidr='20.0.0.0/24') as subnet, \
self.port(subnet=subnet,
@@ -300,7 +304,8 @@ class L3DvrHATestCase(test_l3_dvr_router_plugin.L3DvrTestCase):
def _create_external_network(self):
kwargs = {'arg_list': (extnet_apidef.EXTERNAL,),
extnet_apidef.EXTERNAL: True}
- ext_net = self._make_network(self.fmt, 'ext_net', True, **kwargs)
+ ext_net = self._make_network(self.fmt, 'ext_net', True, as_admin=True,
+ **kwargs)
self._make_subnet(
self.fmt, ext_net, '10.0.0.1', '10.0.0.0/24',
ip_version=constants.IP_VERSION_4, enable_dhcp=True)
diff --git a/neutron/tests/functional/services/l3_router/test_l3_dvr_router_plugin.py b/neutron/tests/functional/services/l3_router/test_l3_dvr_router_plugin.py
index 17e37cda8d..097be093a3 100644
--- a/neutron/tests/functional/services/l3_router/test_l3_dvr_router_plugin.py
+++ b/neutron/tests/functional/services/l3_router/test_l3_dvr_router_plugin.py
@@ -24,7 +24,6 @@ from neutron_lib.callbacks import resources
from neutron_lib.db import api as db_api
from neutron_lib import constants
-from neutron_lib import context
from neutron.api.rpc.handlers import l3_rpc
from neutron.tests.common import helpers
@@ -112,7 +111,8 @@ class L3DvrTestCase(L3DvrTestCaseBase):
self.fmt, net1, '10.1.0.1', '10.1.0.0/24', enable_dhcp=True)
subnet2 = self._make_subnet(
self.fmt, net1, '10.2.0.1', '10.2.0.0/24', enable_dhcp=True)
- ext_net = self._make_network(self.fmt, 'ext_net', True, **kwargs)
+ ext_net = self._make_network(self.fmt, 'ext_net', True, as_admin=True,
+ **kwargs)
self._make_subnet(
self.fmt, ext_net, '20.0.0.1', '20.0.0.0/24', enable_dhcp=True)
# Create first router and add an interface
@@ -170,7 +170,7 @@ class L3DvrTestCase(L3DvrTestCaseBase):
self.subnet(cidr='20.0.0.0/24') as subnet2:
kwargs = {'arg_list': (extnet_apidef.EXTERNAL,),
extnet_apidef.EXTERNAL: True}
- with self.network(**kwargs) as ext_net, \
+ with self.network(as_admin=True, **kwargs) as ext_net, \
self.subnet(network=ext_net,
cidr='30.0.0.0/24'):
router = self._create_router()
@@ -287,7 +287,8 @@ class L3DvrTestCase(L3DvrTestCaseBase):
self.fmt, net1, '10.1.0.1', '10.1.0.0/24', enable_dhcp=True)
subnet2 = self._make_subnet(
self.fmt, net2, '10.1.0.1', '10.1.0.0/24', enable_dhcp=True)
- ext_net = self._make_network(self.fmt, 'ext_net', True, **kwargs)
+ ext_net = self._make_network(self.fmt, 'ext_net', True, as_admin=True,
+ **kwargs)
self._make_subnet(
self.fmt, ext_net, '20.0.0.1', '20.0.0.0/24', enable_dhcp=True)
# Create first router and add an interface
@@ -358,7 +359,8 @@ class L3DvrTestCase(L3DvrTestCaseBase):
# make net external
ext_net_id = ext_subnet['subnet']['network_id']
self._update('networks', ext_net_id,
- {'network': {extnet_apidef.EXTERNAL: True}})
+ {'network': {extnet_apidef.EXTERNAL: True}},
+ as_admin=True)
router = self._create_router(distributed=dvr)
self.l3_plugin.update_router(
@@ -447,7 +449,8 @@ class L3DvrTestCase(L3DvrTestCaseBase):
# make net external
ext_net_id = ext_subnet['subnet']['network_id']
self._update('networks', ext_net_id,
- {'network': {extnet_apidef.EXTERNAL: True}})
+ {'network': {extnet_apidef.EXTERNAL: True}},
+ as_admin=True)
router1 = self._create_router(distributed=dvr)
router2 = self._create_router(distributed=dvr)
@@ -559,7 +562,8 @@ class L3DvrTestCase(L3DvrTestCaseBase):
# make net external
ext_net_id = ext_subnet['subnet']['network_id']
self._update('networks', ext_net_id,
- {'network': {extnet_apidef.EXTERNAL: True}})
+ {'network': {extnet_apidef.EXTERNAL: True}},
+ as_admin=True)
router = self._create_router(distributed=dvr)
self.l3_plugin.update_router(
@@ -636,7 +640,8 @@ class L3DvrTestCase(L3DvrTestCaseBase):
def test_router_with_ipv4_and_multiple_ipv6_on_same_network(self):
kwargs = {'arg_list': (extnet_apidef.EXTERNAL,),
extnet_apidef.EXTERNAL: True}
- ext_net = self._make_network(self.fmt, '', True, **kwargs)
+ ext_net = self._make_network(self.fmt, '', True, as_admin=True,
+ **kwargs)
self._make_subnet(
self.fmt, ext_net, '10.0.0.1', '10.0.0.0/24',
ip_version=constants.IP_VERSION_4, enable_dhcp=True)
@@ -710,7 +715,8 @@ class L3DvrTestCase(L3DvrTestCaseBase):
fixed_vrrp_ip = [{'ip_address': '10.1.0.201'}]
kwargs = {'arg_list': (extnet_apidef.EXTERNAL,),
extnet_apidef.EXTERNAL: True}
- ext_net = self._make_network(self.fmt, '', True, **kwargs)
+ ext_net = self._make_network(self.fmt, '', True, as_admin=True,
+ **kwargs)
self._make_subnet(
self.fmt, ext_net, '10.20.0.1', '10.20.0.0/24',
ip_version=constants.IP_VERSION_4, enable_dhcp=True)
@@ -820,7 +826,8 @@ class L3DvrTestCase(L3DvrTestCaseBase):
private_net1 = self._make_network(self.fmt, 'net1', True)
kwargs = {'arg_list': (extnet_apidef.EXTERNAL,),
extnet_apidef.EXTERNAL: True}
- ext_net = self._make_network(self.fmt, '', True, **kwargs)
+ ext_net = self._make_network(self.fmt, '', True, as_admin=True,
+ **kwargs)
self._make_subnet(
self.fmt, ext_net, '10.20.0.1', '10.20.0.0/24',
ip_version=constants.IP_VERSION_4, enable_dhcp=True)
@@ -904,7 +911,8 @@ class L3DvrTestCase(L3DvrTestCaseBase):
private_net1 = self._make_network(self.fmt, 'net1', True)
kwargs = {'arg_list': (extnet_apidef.EXTERNAL,),
extnet_apidef.EXTERNAL: True}
- ext_net = self._make_network(self.fmt, '', True, **kwargs)
+ ext_net = self._make_network(self.fmt, '', True, as_admin=True,
+ **kwargs)
self._make_subnet(
self.fmt, ext_net, '10.20.0.1', '10.20.0.0/24',
ip_version=constants.IP_VERSION_4, enable_dhcp=True)
@@ -982,7 +990,8 @@ class L3DvrTestCase(L3DvrTestCaseBase):
private_net1 = self._make_network(self.fmt, 'net1', True)
kwargs = {'arg_list': (extnet_apidef.EXTERNAL,),
extnet_apidef.EXTERNAL: True}
- ext_net = self._make_network(self.fmt, '', True, **kwargs)
+ ext_net = self._make_network(self.fmt, '', True, as_admin=True,
+ **kwargs)
self._make_subnet(
self.fmt, ext_net, '10.20.0.1', '10.20.0.0/24',
ip_version=constants.IP_VERSION_4, enable_dhcp=True)
@@ -1067,7 +1076,8 @@ class L3DvrTestCase(L3DvrTestCaseBase):
fixed_vrrp_ip = [{'ip_address': '10.1.0.201'}]
kwargs = {'arg_list': (extnet_apidef.EXTERNAL,),
extnet_apidef.EXTERNAL: True}
- ext_net = self._make_network(self.fmt, '', True, **kwargs)
+ ext_net = self._make_network(self.fmt, '', True, as_admin=True,
+ **kwargs)
self._make_subnet(
self.fmt, ext_net, '10.20.0.1', '10.20.0.0/24',
ip_version=constants.IP_VERSION_4, enable_dhcp=True)
@@ -1200,7 +1210,8 @@ class L3DvrTestCase(L3DvrTestCaseBase):
private_net1 = self._make_network(self.fmt, 'net1', True)
kwargs = {'arg_list': (extnet_apidef.EXTERNAL,),
extnet_apidef.EXTERNAL: True}
- ext_net = self._make_network(self.fmt, '', True, **kwargs)
+ ext_net = self._make_network(self.fmt, '', True, as_admin=True,
+ **kwargs)
self._make_subnet(
self.fmt, ext_net, '10.20.0.1', '10.20.0.0/24',
ip_version=constants.IP_VERSION_4, enable_dhcp=True)
@@ -1243,7 +1254,8 @@ class L3DvrTestCase(L3DvrTestCaseBase):
fixed_vrrp_ip = [{'ip_address': '10.1.0.201'}]
kwargs = {'arg_list': (extnet_apidef.EXTERNAL,),
extnet_apidef.EXTERNAL: True}
- ext_net = self._make_network(self.fmt, '', True, **kwargs)
+ ext_net = self._make_network(self.fmt, '', True, as_admin=True,
+ **kwargs)
self._make_subnet(
self.fmt, ext_net, '10.20.0.1', '10.20.0.0/24',
ip_version=constants.IP_VERSION_4, enable_dhcp=True)
@@ -1382,7 +1394,7 @@ class L3DvrTestCase(L3DvrTestCaseBase):
router = self._create_router()
kwargs = {'arg_list': (extnet_apidef.EXTERNAL,),
extnet_apidef.EXTERNAL: True}
- with self.network(**kwargs) as ext_net,\
+ with self.network(as_admin=True, **kwargs) as ext_net,\
self.subnet(network=ext_net),\
self.subnet(cidr='20.0.0.0/24') as subnet,\
self.port(subnet=subnet):
@@ -1412,7 +1424,7 @@ class L3DvrTestCase(L3DvrTestCaseBase):
router = self._create_router()
kwargs = {'arg_list': (extnet_apidef.EXTERNAL,),
extnet_apidef.EXTERNAL: True}
- with self.network(**kwargs) as ext_net,\
+ with self.network(as_admin=True, **kwargs) as ext_net,\
self.subnet(network=ext_net),\
self.subnet(cidr='20.0.0.0/24') as subnet,\
self.port(subnet=subnet,
@@ -1450,7 +1462,7 @@ class L3DvrTestCase(L3DvrTestCaseBase):
helpers.register_l3_agent(
host=HOST, agent_mode=constants.L3_AGENT_MODE_DVR)
router = self._create_router()
- with self.network(shared=True) as net,\
+ with self.network(as_admin=True, shared=True) as net,\
self.subnet(network=net) as subnet,\
self.port(subnet=subnet,
device_owner=DEVICE_OWNER_COMPUTE,
@@ -1465,9 +1477,8 @@ class L3DvrTestCase(L3DvrTestCaseBase):
with mock.patch.object(self.l3_plugin.l3_rpc_notifier,
'router_removed_from_agent') as remove_mock:
- ctx = context.Context(
- '', non_admin_tenant) if non_admin_port else self.context
- self._delete('ports', port['port']['id'], neutron_context=ctx)
+ self._delete('ports', port['port']['id'],
+ tenant_id=non_admin_tenant)
remove_mock.assert_called_once_with(
mock.ANY, router['id'], HOST)
@@ -1501,13 +1512,15 @@ class L3DvrTestCase(L3DvrTestCaseBase):
with self.subnet() as ext_subnet,\
self.subnet(cidr='20.0.0.0/24') as subnet1,\
self.port(subnet=subnet1,
+ is_admin=True,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=arg_list,
**{portbindings.HOST_ID: HOST1}) as vm_port:
# make net external
ext_net_id = ext_subnet['subnet']['network_id']
self._update('networks', ext_net_id,
- {'network': {extnet_apidef.EXTERNAL: True}})
+ {'network': {extnet_apidef.EXTERNAL: True}},
+ as_admin=True)
# add external gateway to router
self.l3_plugin.update_router(
self.context, router['id'],
@@ -1576,21 +1589,25 @@ class L3DvrTestCase(L3DvrTestCaseBase):
self.subnet(cidr='30.0.0.0/24') as subnet2,\
self.subnet(cidr='40.0.0.0/24') as subnet3,\
self.port(subnet=subnet1,
+ is_admin=True,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=arg_list,
**{portbindings.HOST_ID: HOST1}),\
self.port(subnet=subnet2,
+ is_admin=True,
device_owner=constants.DEVICE_OWNER_DHCP,
arg_list=arg_list,
**{portbindings.HOST_ID: HOST2}),\
self.port(subnet=subnet3,
+ is_admin=True,
device_owner=constants.DEVICE_OWNER_NETWORK_PREFIX,
arg_list=arg_list,
**{portbindings.HOST_ID: HOST3}):
# make net external
ext_net_id = ext_subnet['subnet']['network_id']
self._update('networks', ext_net_id,
- {'network': {extnet_apidef.EXTERNAL: True}})
+ {'network': {extnet_apidef.EXTERNAL: True}},
+ as_admin=True)
with mock.patch.object(self.l3_plugin.l3_rpc_notifier.client,
'prepare') as mock_prepare:
@@ -1661,7 +1678,7 @@ class L3DvrTestCase(L3DvrTestCaseBase):
kwargs = {'arg_list': (extnet_apidef.EXTERNAL,),
extnet_apidef.EXTERNAL: True}
with self.subnet() as subnet,\
- self.network(**kwargs) as ext_net,\
+ self.network(as_admin=True, **kwargs) as ext_net,\
self.subnet(network=ext_net, cidr='20.0.0.0/24'):
gw_info = {'network_id': ext_net['network']['id']}
request_body = {
@@ -1693,7 +1710,7 @@ class L3DvrTestCase(L3DvrTestCaseBase):
router = self._create_router()
kwargs = {'arg_list': (extnet_apidef.EXTERNAL,),
extnet_apidef.EXTERNAL: True}
- with self.network(**kwargs) as ext_net,\
+ with self.network(as_admin=True, **kwargs) as ext_net,\
self.subnet(network=ext_net),\
self.subnet(cidr='20.0.0.0/24') as subnet,\
self.port(subnet=subnet,
@@ -1796,10 +1813,12 @@ class L3DvrTestCase(L3DvrTestCaseBase):
with self.subnet(cidr='20.0.0.0/24') as subnet1,\
self.subnet(cidr='30.0.0.0/24') as subnet2,\
self.port(subnet=subnet1,
+ is_admin=True,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=arg_list,
**{portbindings.HOST_ID: host}),\
self.port(subnet=subnet2,
+ is_admin=True,
device_owner=constants.DEVICE_OWNER_DHCP,
arg_list=arg_list,
**{portbindings.HOST_ID: host}):
@@ -1834,10 +1853,12 @@ class L3DvrTestCase(L3DvrTestCaseBase):
self.subnet(cidr='20.0.0.0/24') as subnet1,\
self.subnet(cidr='30.0.0.0/24') as subnet2,\
self.port(subnet=subnet1,
+ is_admin=True,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=arg_list,
**{portbindings.HOST_ID: host}),\
self.port(subnet=subnet2,
+ is_admin=True,
device_owner=constants.DEVICE_OWNER_DHCP,
arg_list=arg_list,
**{portbindings.HOST_ID: host}):
@@ -1883,7 +1904,8 @@ class L3DvrTestCase(L3DvrTestCaseBase):
# make net external
ext_net_id = ext_subnet['subnet']['network_id']
self._update('networks', ext_net_id,
- {'network': {extnet_apidef.EXTERNAL: True}})
+ {'network': {extnet_apidef.EXTERNAL: True}},
+ as_admin=True)
# add external gateway to router
self.l3_plugin.update_router(
self.context, router3['id'],
@@ -1915,6 +1937,7 @@ class L3DvrTestCase(L3DvrTestCaseBase):
self.port(subnet=wan_subnet) as wan_port1,\
self.port(subnet=wan_subnet) as wan_port2,\
self.port(subnet=subnet1,
+ is_admin=True,
device_owner=constants.DEVICE_OWNER_DHCP,
arg_list=arg_list,
**{portbindings.HOST_ID: host}):
@@ -1958,6 +1981,7 @@ class L3DvrTestCase(L3DvrTestCaseBase):
arg_list = (portbindings.HOST_ID,)
with self.subnet() as subnet,\
self.port(subnet=subnet,
+ is_admin=True,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=arg_list,
**{portbindings.HOST_ID: HOST1}):
@@ -2067,7 +2091,7 @@ class L3DvrTestCaseMigration(L3DvrTestCaseBase):
with self.subnet() as subnet1:
kwargs = {'arg_list': (extnet_apidef.EXTERNAL,),
extnet_apidef.EXTERNAL: True}
- with self.network(**kwargs) as ext_net, \
+ with self.network(as_admin=True, **kwargs) as ext_net, \
self.subnet(network=ext_net,
cidr='30.0.0.0/24'):
router = self._create_router(distributed=False)
diff --git a/neutron/tests/functional/services/logapi/drivers/ovn/test_driver.py b/neutron/tests/functional/services/logapi/drivers/ovn/test_driver.py
index 57c7978b8f..fb5bd58c68 100644
--- a/neutron/tests/functional/services/logapi/drivers/ovn/test_driver.py
+++ b/neutron/tests/functional/services/logapi/drivers/ovn/test_driver.py
@@ -28,7 +28,7 @@ class LogApiTestCaseBase(functional_base.TestOVNFunctionalBase):
super().setUp()
self.log_driver = self.mech_driver.log_driver
self._check_is_supported()
- self.ctxt = context.Context('admin', 'fake_tenant')
+ self.ctxt = context.Context('admin', self._tenant_id)
def _check_is_supported(self):
if not self.log_driver.network_logging_supported(self.nb_api):
@@ -110,7 +110,6 @@ class LogApiTestCaseComplex(LogApiTestCaseBase):
def _create_port(self, name, net_id, security_groups):
data = {'port': {'name': name,
- 'tenant_id': self.ctxt.project_id,
'network_id': net_id,
'security_groups': security_groups}}
req = self.new_create_request('ports', data, self.fmt)
@@ -118,8 +117,7 @@ class LogApiTestCaseComplex(LogApiTestCaseBase):
return self.deserialize(self.fmt, res)['port']['id']
def _create_security_group(self, name):
- data = {'security_group': {'name': name,
- 'tenant_id': self.ctxt.project_id}}
+ data = {'security_group': {'name': name}}
req = self.new_create_request('security-groups', data, self.fmt)
res = req.get_response(self.api)
return self.deserialize(self.fmt, res)['security_group']['id']
@@ -130,8 +128,7 @@ class LogApiTestCaseComplex(LogApiTestCaseBase):
'protocol': n_const.PROTO_NAME_TCP,
'ethertype': n_const.IPv4,
'port_range_min': tcp_port,
- 'port_range_max': tcp_port,
- 'tenant_id': self.ctxt.project_id}}
+ 'port_range_max': tcp_port}}
req = self.new_create_request('security-group-rules', data, self.fmt)
res = req.get_response(self.api)
return self.deserialize(self.fmt, res)['security_group_rule']['id']
diff --git a/neutron/tests/functional/services/ovn_l3/test_plugin.py b/neutron/tests/functional/services/ovn_l3/test_plugin.py
index 91859c02ef..fbaa0b8ade 100644
--- a/neutron/tests/functional/services/ovn_l3/test_plugin.py
+++ b/neutron/tests/functional/services/ovn_l3/test_plugin.py
@@ -63,7 +63,7 @@ class TestRouter(base.TestOVNFunctionalBase):
if physnet:
arg_list = arg_list + (pnet.PHYSICAL_NETWORK,)
net_arg[pnet.PHYSICAL_NETWORK] = physnet
- network = self._make_network(self.fmt, name, True,
+ network = self._make_network(self.fmt, name, True, as_admin=True,
arg_list=arg_list, **net_arg)
if cidr:
self._make_subnet(self.fmt, network, gateway, cidr,
diff --git a/neutron/tests/functional/services/portforwarding/test_port_forwarding.py b/neutron/tests/functional/services/portforwarding/test_port_forwarding.py
index 9881c1c4a0..e0b4dbf733 100644
--- a/neutron/tests/functional/services/portforwarding/test_port_forwarding.py
+++ b/neutron/tests/functional/services/portforwarding/test_port_forwarding.py
@@ -98,7 +98,8 @@ class PortForwardingTestCase(PortForwardingTestCaseBase):
def _prepare_env(self):
self.router = self._create_router(distributed=True)
self.ext_net = self._create_network(
- self.fmt, 'ext-net', True, arg_list=("router:external",),
+ self.fmt, 'ext-net', True, as_admin=True,
+ arg_list=("router:external",),
**{"router:external": True}).json['network']
self.ext_subnet = self._create_subnet(
self.fmt, self.ext_net['id'], '172.24.2.0/24').json['subnet']
diff --git a/neutron/tests/unit/_test_extension_portbindings.py b/neutron/tests/unit/_test_extension_portbindings.py
index ea18b96e10..763e28b6d8 100644
--- a/neutron/tests/unit/_test_extension_portbindings.py
+++ b/neutron/tests/unit/_test_extension_portbindings.py
@@ -55,24 +55,16 @@ class PortBindingsTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
self.assertNotIn(portbindings.VIF_TYPE, port)
self.assertNotIn(portbindings.VIF_DETAILS, port)
- def _get_non_admin_context(self):
- return context.Context(user_id=None,
- tenant_id=self._tenant_id,
- is_admin=False)
-
def test_port_vif_details(self):
- with self.port(name='name') as port:
+ with self.port(is_admin=True, name='name') as port:
port_id = port['port']['id']
# Check a response of create_port
self._check_response_portbindings(port['port'])
# Check a response of get_port
- ctx = context.get_admin_context()
- port = self._show('ports', port_id, neutron_context=ctx)['port']
+ port = self._show('ports', port_id, as_admin=True)['port']
self._check_response_portbindings(port)
# By default user is admin - now test non admin user
- ctx = self._get_non_admin_context()
- non_admin_port = self._show(
- 'ports', port_id, neutron_context=ctx)['port']
+ non_admin_port = self._show('ports', port_id)['port']
self._check_response_no_portbindings(non_admin_port)
def test_ports_vif_details(self):
@@ -83,9 +75,7 @@ class PortBindingsTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
self.assertEqual(len(ports), 2)
for port in ports:
self._check_response_portbindings(port)
- # By default user is admin - now test non admin user
- ctx = self._get_non_admin_context()
- ports = self._list('ports', neutron_context=ctx)['ports']
+ ports = self._list('ports')['ports']
self.assertEqual(len(ports), 2)
for non_admin_port in ports:
self._check_response_no_portbindings(non_admin_port)
@@ -97,11 +87,12 @@ class PortBindingsTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
def _test_create_port_binding_profile(self, profile):
profile_arg = {portbindings.PROFILE: profile}
- with self.port(arg_list=(portbindings.PROFILE,),
+ with self.port(is_admin=True,
+ arg_list=(portbindings.PROFILE,),
**profile_arg) as port:
port_id = port['port']['id']
self._check_port_binding_profile(port['port'], profile)
- port = self._show('ports', port_id)
+ port = self._show('ports', port_id, as_admin=True)
self._check_port_binding_profile(port['port'], profile)
def test_create_port_binding_profile_none(self):
@@ -112,14 +103,13 @@ class PortBindingsTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
def _test_update_port_binding_profile(self, profile):
profile_arg = {portbindings.PROFILE: profile}
- with self.port() as port:
+ with self.port(is_admin=True) as port:
self._check_port_binding_profile(port['port'])
port_id = port['port']['id']
- ctx = context.get_admin_context()
port = self._update('ports', port_id, {'port': profile_arg},
- neutron_context=ctx)['port']
+ as_admin=True)['port']
self._check_port_binding_profile(port, profile)
- port = self._show('ports', port_id)['port']
+ port = self._show('ports', port_id, as_admin=True)['port']
self._check_port_binding_profile(port, profile)
def test_update_port_binding_profile_none(self):
@@ -131,18 +121,16 @@ class PortBindingsTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
def test_port_create_portinfo_non_admin(self):
profile_arg = {portbindings.PROFILE: {dummy_plugin.RESOURCE_NAME:
dummy_plugin.RESOURCE_NAME}}
- with self.network(set_context=True, tenant_id='test') as net1:
+ with self.network() as net1:
with self.subnet(network=net1) as subnet1:
# succeed without binding:profile
- with self.port(subnet=subnet1,
- set_context=True, tenant_id='test'):
+ with self.port(subnet=subnet1):
pass
# fail with binding:profile
try:
with self.port(subnet=subnet1,
expected_res_status=403,
arg_list=(portbindings.PROFILE,),
- set_context=True, tenant_id='test',
**profile_arg):
pass
except exc.HTTPClientError:
@@ -156,11 +144,9 @@ class PortBindingsTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
with self.port(subnet=subnet1) as port:
# By default user is admin - now test non admin user
port_id = port['port']['id']
- ctx = self._get_non_admin_context()
port = self._update('ports', port_id,
{'port': profile_arg},
- expected_code=exc.HTTPForbidden.code,
- neutron_context=ctx)
+ expected_code=exc.HTTPForbidden.code)
class PortBindingsHostTestCaseMixin(object):
@@ -192,74 +178,70 @@ class PortBindingsHostTestCaseMixin(object):
def test_port_vif_host(self):
host_arg = {portbindings.HOST_ID: self.hostname}
- with self.port(name='name', arg_list=(portbindings.HOST_ID,),
+ with self.port(name='name', is_admin=True,
+ arg_list=(portbindings.HOST_ID,),
**host_arg) as port:
port_id = port['port']['id']
# Check a response of create_port
self._check_response_portbindings_host(port['port'])
# Check a response of get_port
- ctx = context.get_admin_context()
- port = self._show('ports', port_id, neutron_context=ctx)['port']
+ port = self._show('ports', port_id, as_admin=True)['port']
self._check_response_portbindings_host(port)
- # By default user is admin - now test non admin user
- ctx = context.Context(user_id=None,
- tenant_id=self._tenant_id,
- is_admin=False)
- non_admin_port = self._show(
- 'ports', port_id, neutron_context=ctx)['port']
+ non_admin_port = self._show('ports', port_id)['port']
self._check_response_no_portbindings_host(non_admin_port)
def test_ports_vif_host(self):
host_arg = {portbindings.HOST_ID: self.hostname}
with self.port(name='name1',
+ is_admin=True,
arg_list=(portbindings.HOST_ID,),
**host_arg), self.port(name='name2'):
- ctx = context.get_admin_context()
- ports = self._list('ports', neutron_context=ctx)['ports']
+ ports = self._list('ports', as_admin=True)['ports']
self.assertEqual(2, len(ports))
for port in ports:
if port['name'] == 'name1':
self._check_response_portbindings_host(port)
else:
self.assertFalse(port[portbindings.HOST_ID])
- # By default user is admin - now test non admin user
- ctx = context.Context(user_id=None,
- tenant_id=self._tenant_id,
- is_admin=False)
- ports = self._list('ports', neutron_context=ctx)['ports']
+ ports = self._list('ports')['ports']
self.assertEqual(2, len(ports))
for non_admin_port in ports:
self._check_response_no_portbindings_host(non_admin_port)
def test_ports_vif_host_update(self):
host_arg = {portbindings.HOST_ID: self.hostname}
- with self.port(name='name1', arg_list=(portbindings.HOST_ID,),
+ with self.port(name='name1', is_admin=True,
+ arg_list=(portbindings.HOST_ID,),
**host_arg) as port1, self.port(name='name2') as port2:
data = {'port': {portbindings.HOST_ID: 'testhosttemp'}}
- req = self.new_update_request('ports', data, port1['port']['id'])
+ req = self.new_update_request('ports', data, port1['port']['id'],
+ as_admin=True)
req.get_response(self.api)
- req = self.new_update_request('ports', data, port2['port']['id'])
- ctx = context.get_admin_context()
+ req = self.new_update_request('ports', data, port2['port']['id'],
+ as_admin=True)
req.get_response(self.api)
- ports = self._list('ports', neutron_context=ctx)['ports']
+ ports = self._list('ports', as_admin=True)['ports']
self.assertEqual(2, len(ports))
for port in ports:
self.assertEqual('testhosttemp', port[portbindings.HOST_ID])
def test_ports_vif_non_host_update(self):
host_arg = {portbindings.HOST_ID: self.hostname}
- with self.port(name='name', arg_list=(portbindings.HOST_ID,),
+ with self.port(name='name', is_admin=True,
+ arg_list=(portbindings.HOST_ID,),
**host_arg) as port:
data = {'port': {'admin_state_up': False}}
- req = self.new_update_request('ports', data, port['port']['id'])
+ req = self.new_update_request('ports', data, port['port']['id'],
+ as_admin=True)
res = self.deserialize(self.fmt, req.get_response(self.api))
self.assertEqual(port['port'][portbindings.HOST_ID],
res['port'][portbindings.HOST_ID])
def test_ports_vif_non_host_update_when_host_null(self):
- with self.port() as port:
+ with self.port(is_admin=True) as port:
data = {'port': {'admin_state_up': False}}
- req = self.new_update_request('ports', data, port['port']['id'])
+ req = self.new_update_request('ports', data, port['port']['id'],
+ as_admin=True)
res = self.deserialize(self.fmt, req.get_response(self.api))
self.assertEqual(port['port'][portbindings.HOST_ID],
res['port'][portbindings.HOST_ID])
@@ -267,10 +249,12 @@ class PortBindingsHostTestCaseMixin(object):
def test_ports_vif_host_list(self):
host_arg = {portbindings.HOST_ID: self.hostname}
with self.port(name='name1',
+ is_admin=True,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port1,\
self.port(name='name2'),\
self.port(name='name3',
+ is_admin=True,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port3:
self._test_list_resources(
@@ -308,23 +292,16 @@ class PortBindingsVnicTestCaseMixin(object):
# Check a response of create_port
self._check_response_portbindings_vnic_type(port['port'])
# Check a response of get_port
- ctx = context.get_admin_context()
- port = self._show('ports', port_id, neutron_context=ctx)['port']
+ port = self._show('ports', port_id, as_admin=True)['port']
self._check_response_portbindings_vnic_type(port)
- # By default user is admin - now test non admin user
- ctx = context.Context(user_id=None,
- tenant_id=self._tenant_id,
- is_admin=False)
- non_admin_port = self._show(
- 'ports', port_id, neutron_context=ctx)['port']
+ non_admin_port = self._show('ports', port_id)['port']
self._check_response_portbindings_vnic_type(non_admin_port)
def test_ports_vnic_type(self):
vnic_arg = {portbindings.VNIC_TYPE: self.vnic_type}
with self.port(name='name1', arg_list=(portbindings.VNIC_TYPE,),
**vnic_arg), self.port(name='name2'):
- ctx = context.get_admin_context()
- ports = self._list('ports', neutron_context=ctx)['ports']
+ ports = self._list('ports', as_admin=True)['ports']
self.assertEqual(2, len(ports))
for port in ports:
if port['name'] == 'name1':
@@ -332,11 +309,7 @@ class PortBindingsVnicTestCaseMixin(object):
else:
self.assertEqual(portbindings.VNIC_NORMAL,
port[portbindings.VNIC_TYPE])
- # By default user is admin - now test non admin user
- ctx = context.Context(user_id=None,
- tenant_id=self._tenant_id,
- is_admin=False)
- ports = self._list('ports', neutron_context=ctx)['ports']
+ ports = self._list('ports')['ports']
self.assertEqual(2, len(ports))
for non_admin_port in ports:
self._check_response_portbindings_vnic_type(non_admin_port)
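The recurring edit through these mixins replaces hand-built admin contexts with an as_admin flag on the request helpers. A minimal sketch of what that flag plausibly expands to, mirroring the role handling added later in this diff (helper name hypothetical):

    from neutron_lib import context

    def _request_context(tenant_id, as_admin=False):
        # member/reader by default; the admin role is opt-in per request
        roles = ['member', 'reader']
        if as_admin:
            roles.append('admin')
        return context.Context('', tenant_id, roles=roles, is_admin=as_admin)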
diff --git a/neutron/tests/unit/agent/dhcp/test_agent.py b/neutron/tests/unit/agent/dhcp/test_agent.py
index ecfd37b17c..7bf48f017f 100644
--- a/neutron/tests/unit/agent/dhcp/test_agent.py
+++ b/neutron/tests/unit/agent/dhcp/test_agent.py
@@ -37,6 +37,7 @@ from neutron.agent.linux import dhcp
from neutron.agent.linux import interface
from neutron.agent.linux import utils as linux_utils
from neutron.agent.metadata import driver as metadata_driver
+from neutron.common import _constants as common_constants
from neutron.common import config as common_config
from neutron.common import utils
from neutron.conf.agent import common as config
@@ -1929,7 +1930,7 @@ class TestDeviceManager(base.BaseTestCase):
expected_ips = ['172.9.9.9/24', const.METADATA_CIDR]
if ipv6_enabled:
- expected_ips.append(const.METADATA_V6_CIDR)
+ expected_ips.append(common_constants.METADATA_V6_CIDR)
expected = [mock.call.get_device_name(port)]
diff --git a/neutron/tests/unit/agent/linux/test_dhcp.py b/neutron/tests/unit/agent/linux/test_dhcp.py
index 064461b649..98ed27d93b 100644
--- a/neutron/tests/unit/agent/linux/test_dhcp.py
+++ b/neutron/tests/unit/agent/linux/test_dhcp.py
@@ -33,6 +33,7 @@ import testtools
from neutron.agent.linux import dhcp
from neutron.agent.linux import ip_lib
from neutron.cmd import runtime_checks as checks
+from neutron.common import _constants as common_constants
from neutron.common import utils as common_utils
from neutron.conf.agent import common as config
from neutron.conf.agent import dhcp as dhcp_config
@@ -3295,7 +3296,7 @@ class TestDeviceManager(TestConfBase):
if enable_isolated_metadata or force_metadata:
expect_ips.extend([
constants.METADATA_CIDR,
- constants.METADATA_V6_CIDR])
+ common_constants.METADATA_V6_CIDR])
mgr.driver.init_l3.assert_called_with('ns-XXX',
expect_ips,
namespace='qdhcp-ns')
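Both DHCP test modules now take METADATA_V6_CIDR from neutron.common._constants instead of neutron_lib. A sketch of the assumed definition; the address matches the metadata driver tests further down, but the prefix length is an assumption:

    # neutron/common/_constants.py (sketch; prefix length assumed)
    METADATA_V6_IP = 'fe80::a9fe:a9fe'
    METADATA_V6_CIDR = METADATA_V6_IP + '/64'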
diff --git a/neutron/tests/unit/agent/linux/test_external_process.py b/neutron/tests/unit/agent/linux/test_external_process.py
index 6da2b7f5aa..4430746699 100644
--- a/neutron/tests/unit/agent/linux/test_external_process.py
+++ b/neutron/tests/unit/agent/linux/test_external_process.py
@@ -13,6 +13,7 @@
# under the License.
import os.path
+import shutil
import tempfile
from unittest import mock
@@ -203,56 +204,6 @@ class TestProcessManager(base.BaseTestCase):
except common_utils.WaitTimeout:
self.fail('ProcessManager.enable() raised WaitTimeout')
- def _create_env_var_testing_environment(self, script_content, _create_cmd):
- with tempfile.NamedTemporaryFile('w+', dir='/tmp/',
- delete=False) as script:
- script.write(script_content)
- output = tempfile.NamedTemporaryFile('w+', dir='/tmp/', delete=False)
- os.chmod(script.name, 0o777)
- service_name = 'my_new_service'
- uuid = uuidutils.generate_uuid()
- pm = ep.ProcessManager(self.conf, uuid, service=service_name,
- default_cmd_callback=_create_cmd)
- return script, output, service_name, uuid, pm
-
- def test_enable_check_process_id_env_var(self):
- def _create_cmd(*args):
- return [script.name, output.name]
-
- self.execute_p.stop()
- script, output, service_name, uuid, pm = (
- self._create_env_var_testing_environment(SCRIPT, _create_cmd))
- with mock.patch.object(ep.ProcessManager, 'active') as active:
- active.__get__ = mock.Mock(return_value=False)
- pm.enable()
-
- with open(output.name, 'r') as f:
- ret_value = f.readline().strip()
- expected_value = ('Variable PROCESS_TAG set: %s-%s' %
- (service_name, uuid))
- self.assertEqual(expected_value, ret_value)
-
- def test_disable_check_process_id_env_var(self):
- def _create_cmd(*args):
- return [script.name, output.name]
-
- self.execute_p.stop()
- script, output, service_name, uuid, pm = (
- self._create_env_var_testing_environment(SCRIPT, _create_cmd))
- with mock.patch.object(ep.ProcessManager, 'active') as active, \
- mock.patch.object(pm, 'get_kill_cmd') as mock_kill_cmd:
- active.__get__ = mock.Mock(return_value=True)
- # NOTE(ralonsoh): the script we are using for testing does not
- # expect to receive the SIG number as the first argument.
- mock_kill_cmd.return_value = [script.name, output.name]
- pm.disable(sig='15')
-
- with open(output.name, 'r') as f:
- ret_value = f.readline().strip()
- expected_value = ('Variable PROCESS_TAG set: %s-%s' %
- (service_name, uuid))
- self.assertEqual(expected_value, ret_value)
-
def test_reload_cfg_without_custom_reload_callback(self):
with mock.patch.object(ep.ProcessManager, 'disable') as disable:
manager = ep.ProcessManager(self.conf, 'uuid', namespace='ns')
@@ -439,3 +390,65 @@ class TestProcessManager(base.BaseTestCase):
manager = ep.ProcessManager(self.conf, 'uuid')
self.assertIsNone(manager.cmdline)
proc.assert_called_once_with(4)
+
+
+class TestProcessManagerScript(TestProcessManager):
+ def setUp(self):
+ super().setUp()
+ self.env_path = tempfile.mkdtemp(prefix="pm_env_", dir="/tmp/")
+ os.chmod(self.env_path, 0o755)
+ self.addCleanup(self._clean_env_path)
+
+ def _create_env_var_testing_environment(self, script_content, _create_cmd):
+ with tempfile.NamedTemporaryFile('w+', dir=self.env_path,
+ delete=False) as script:
+ script.write(script_content)
+ output = tempfile.NamedTemporaryFile('w+', dir=self.env_path,
+ delete=False)
+ os.chmod(script.name, 0o777)
+ service_name = 'my_new_service'
+ uuid = uuidutils.generate_uuid()
+ pm = ep.ProcessManager(self.conf, uuid, service=service_name,
+ default_cmd_callback=_create_cmd)
+ return script, output, service_name, uuid, pm
+
+ def _clean_env_path(self):
+ shutil.rmtree(self.env_path, ignore_errors=True)
+
+ def test_enable_check_process_id_env_var(self):
+ def _create_cmd(*args):
+ return [script.name, output.name]
+
+ self.execute_p.stop()
+ script, output, service_name, uuid, pm = (
+ self._create_env_var_testing_environment(SCRIPT, _create_cmd))
+ with mock.patch.object(ep.ProcessManager, 'active') as active:
+ active.__get__ = mock.Mock(return_value=False)
+ pm.enable()
+
+ with open(output.name, 'r') as f:
+ ret_value = f.readline().strip()
+ expected_value = ('Variable PROCESS_TAG set: %s-%s' %
+ (service_name, uuid))
+ self.assertEqual(expected_value, ret_value)
+
+ def test_disable_check_process_id_env_var(self):
+ def _create_cmd(*args):
+ return [script.name, output.name]
+
+ self.execute_p.stop()
+ script, output, service_name, uuid, pm = (
+ self._create_env_var_testing_environment(SCRIPT, _create_cmd))
+ with mock.patch.object(ep.ProcessManager, 'active') as active, \
+ mock.patch.object(pm, 'get_kill_cmd') as mock_kill_cmd:
+ active.__get__ = mock.Mock(return_value=True)
+ # NOTE(ralonsoh): the script we are using for testing does not
+ # expect to receive the SIG number as the first argument.
+ mock_kill_cmd.return_value = [script.name, output.name]
+ pm.disable(sig='15')
+
+ with open(output.name, 'r') as f:
+ ret_value = f.readline().strip()
+ expected_value = ('Variable PROCESS_TAG set: %s-%s' %
+ (service_name, uuid))
+ self.assertEqual(expected_value, ret_value)
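The relocated tests now write their throwaway scripts into a private temporary directory that is removed on teardown, instead of leaving files under /tmp. A self-contained sketch of the fixture pattern:

    import os
    import shutil
    import tempfile

    def make_script_dir(testcase):
        # world-readable scratch dir, removed automatically on teardown
        path = tempfile.mkdtemp(prefix='pm_env_')
        os.chmod(path, 0o755)
        testcase.addCleanup(shutil.rmtree, path, ignore_errors=True)
        return path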
diff --git a/neutron/tests/unit/agent/linux/test_ip_lib.py b/neutron/tests/unit/agent/linux/test_ip_lib.py
index da754464c3..c488e90ddc 100644
--- a/neutron/tests/unit/agent/linux/test_ip_lib.py
+++ b/neutron/tests/unit/agent/linux/test_ip_lib.py
@@ -792,7 +792,7 @@ class TestIpAddrCommand(TestIPCmdBase):
def test_wait_until_address_dadfailed(self):
self.addr_cmd.list = mock.Mock(
return_value=[{'tentative': True, 'dadfailed': True}])
- with testtools.ExpectedException(ip_lib.AddressNotReady):
+ with testtools.ExpectedException(ip_lib.DADFailed):
self.addr_cmd.wait_until_address_ready('abcd::1234')
@mock.patch.object(common_utils, 'wait_until_true')
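The tightened assertion presumes a dedicated duplicate-address-detection exception. A minimal sketch of the assumed hierarchy in neutron.agent.linux.ip_lib, with DADFailed specializing AddressNotReady so existing except clauses keep working:

    from neutron_lib import exceptions

    class AddressNotReady(exceptions.NeutronException):
        message = ('Failure waiting for address %(address)s to become '
                   'ready: %(reason)s')

    class DADFailed(AddressNotReady):
        pass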
diff --git a/neutron/tests/unit/agent/metadata/test_driver.py b/neutron/tests/unit/agent/metadata/test_driver.py
index fc59b7fee8..e3b0b8ef6e 100644
--- a/neutron/tests/unit/agent/metadata/test_driver.py
+++ b/neutron/tests/unit/agent/metadata/test_driver.py
@@ -25,6 +25,7 @@ from oslo_utils import uuidutils
from neutron.agent.l3 import agent as l3_agent
from neutron.agent.l3 import router_info
from neutron.agent.linux import external_process as ep
+from neutron.agent.linux import ip_lib
from neutron.agent.linux import iptables_manager
from neutron.agent.linux import utils as linux_utils
from neutron.agent.metadata import driver as metadata_driver
@@ -76,6 +77,7 @@ class TestMetadataDriverProcess(base.BaseTestCase):
EUNAME = 'neutron'
EGNAME = 'neutron'
METADATA_DEFAULT_IP = '169.254.169.254'
+ METADATA_DEFAULT_IPV6 = 'fe80::a9fe:a9fe'
METADATA_PORT = 8080
METADATA_SOCKET = '/socket/path'
PIDFILE = 'pidfile'
@@ -140,7 +142,7 @@ class TestMetadataDriverProcess(base.BaseTestCase):
agent._process_updated_router(router)
f.assert_not_called()
- def test_spawn_metadata_proxy(self):
+ def _test_spawn_metadata_proxy(self, dad_failed=False):
router_id = _uuid()
router_ns = 'qrouter-%s' % router_id
service_name = 'haproxy'
@@ -165,22 +167,32 @@ class TestMetadataDriverProcess(base.BaseTestCase):
'NamespaceManager.list_all', return_value={}),\
mock.patch(
'neutron.agent.linux.ip_lib.'
- 'IpAddrCommand.wait_until_address_ready') as mock_wait:
+ 'IpAddrCommand.wait_until_address_ready') as mock_wait,\
+ mock.patch(
+ 'neutron.agent.linux.ip_lib.'
+ 'delete_ip_address') as mock_del:
agent = l3_agent.L3NATAgent('localhost')
+ agent.process_monitor = mock.Mock()
cfg_file = os.path.join(
metadata_driver.HaproxyConfigurator.get_config_path(
agent.conf.state_path),
"%s.conf" % router_id)
mock_open = self.useFixture(
lib_fixtures.OpenFixture(cfg_file)).mock_open
- mock_wait.return_value = True
+ if dad_failed:
+ mock_wait.side_effect = ip_lib.DADFailed(
+ address=self.METADATA_DEFAULT_IP, reason='DAD failed')
+ else:
+ mock_wait.return_value = True
agent.metadata_driver.spawn_monitored_metadata_proxy(
agent.process_monitor,
router_ns,
self.METADATA_PORT,
agent.conf,
bind_address=self.METADATA_DEFAULT_IP,
- router_id=router_id)
+ router_id=router_id,
+ bind_address_v6=self.METADATA_DEFAULT_IPV6,
+ bind_interface='fake-if')
netns_execute_args = [
service_name,
@@ -188,6 +200,8 @@ class TestMetadataDriverProcess(base.BaseTestCase):
log_tag = ("haproxy-" + metadata_driver.METADATA_SERVICE_NAME +
"-" + router_id)
+ bind_v6_line = 'bind %s:%s interface %s' % (
+ self.METADATA_DEFAULT_IPV6, self.METADATA_PORT, 'fake-if')
cfg_contents = metadata_driver._HAPROXY_CONFIG_TEMPLATE % {
'user': self.EUNAME,
'group': self.EGNAME,
@@ -200,19 +214,35 @@ class TestMetadataDriverProcess(base.BaseTestCase):
'pidfile': self.PIDFILE,
'log_level': 'debug',
'log_tag': log_tag,
- 'bind_v6_line': ''}
-
- mock_open.assert_has_calls([
- mock.call(cfg_file, 'w'),
- mock.call().write(cfg_contents)],
- any_order=True)
-
- env = {ep.PROCESS_TAG: service_name + '-' + router_id}
- ip_mock.assert_has_calls([
- mock.call(namespace=router_ns),
- mock.call().netns.execute(netns_execute_args, addl_env=env,
- run_as_root=True)
- ])
+ 'bind_v6_line': bind_v6_line}
+
+ if dad_failed:
+ agent.process_monitor.register.assert_not_called()
+ mock_del.assert_called_once_with(self.METADATA_DEFAULT_IPV6,
+ 'fake-if',
+ namespace=router_ns)
+ else:
+ mock_open.assert_has_calls([
+ mock.call(cfg_file, 'w'),
+ mock.call().write(cfg_contents)], any_order=True)
+
+ env = {ep.PROCESS_TAG: service_name + '-' + router_id}
+ ip_mock.assert_has_calls([
+ mock.call(namespace=router_ns),
+ mock.call().netns.execute(netns_execute_args, addl_env=env,
+ run_as_root=True)
+ ])
+
+ agent.process_monitor.register.assert_called_once_with(
+ router_id, metadata_driver.METADATA_SERVICE_NAME,
+ mock.ANY)
+ mock_del.assert_not_called()
+
+ def test_spawn_metadata_proxy(self):
+ self._test_spawn_metadata_proxy()
+
+ def test_spawn_metadata_proxy_dad_failed(self):
+ self._test_spawn_metadata_proxy(dad_failed=True)
def test_create_config_file_wrong_user(self):
with mock.patch('pwd.getpwnam', side_effect=KeyError):
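A hedged sketch of the driver behavior the two tests above pin down (simplified, not the real code in neutron/agent/metadata/driver.py; the function name is hypothetical): if DAD fails on the IPv6 bind address, the address is rolled back and the proxy is never registered with the process monitor.

    from neutron.agent.linux import ip_lib

    def bind_v6_or_bail(router_ns, bind_interface, bind_address_v6):
        device = ip_lib.IPDevice(bind_interface, namespace=router_ns)
        try:
            device.addr.wait_until_address_ready(bind_address_v6)
        except ip_lib.DADFailed:
            # roll back the address and skip process-monitor registration
            ip_lib.delete_ip_address(bind_address_v6, bind_interface,
                                     namespace=router_ns)
            return False
        return True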
diff --git a/neutron/tests/unit/api/test_extensions.py b/neutron/tests/unit/api/test_extensions.py
index 7e15bc4077..14155f1b02 100644
--- a/neutron/tests/unit/api/test_extensions.py
+++ b/neutron/tests/unit/api/test_extensions.py
@@ -17,6 +17,7 @@ import copy
from unittest import mock
import fixtures
+from neutron_lib import context
from neutron_lib import exceptions
from neutron_lib.plugins import constants as lib_const
from neutron_lib.plugins import directory
@@ -1045,6 +1046,8 @@ class ExtensionExtendedAttributeTestCase(base.BaseTestCase):
req = testlib_api.create_request(
path, body, content_type,
method, query_string=params)
+ req.environ['neutron.context'] = context.Context(
+ '', self._tenant_id, roles=['member', 'reader'])
res = req.get_response(self._api)
if res.status_code >= 400:
raise webexc.HTTPClientError(detail=res.body, code=res.status_code)
diff --git a/neutron/tests/unit/api/v2/test_base.py b/neutron/tests/unit/api/v2/test_base.py
index f0cb1f1a26..2e2a32e8e0 100644
--- a/neutron/tests/unit/api/v2/test_base.py
+++ b/neutron/tests/unit/api/v2/test_base.py
@@ -74,6 +74,14 @@ def _get_path(resource, id=None, action=None,
return path
+def _get_neutron_env(tenant_id=None, as_admin=False):
+ tenant_id = tenant_id or _uuid()
+ roles = ['member', 'reader']
+ if as_admin:
+ roles.append('admin')
+ return {'neutron.context': context.Context('', tenant_id, roles=roles)}
+
+
class APIv2TestBase(base.BaseTestCase):
def setUp(self):
super(APIv2TestBase, self).setUp()
@@ -98,6 +106,8 @@ class APIv2TestBase(base.BaseTestCase):
api = router.APIRouter()
self.api = webtest.TestApp(api)
+ self._tenant_id = "api-test-tenant"
+
quota.QUOTAS._driver = None
cfg.CONF.set_override('quota_driver', quota_conf.QUOTA_DB_DRIVER,
group='QUOTAS')
@@ -105,6 +115,27 @@ class APIv2TestBase(base.BaseTestCase):
# APIRouter initialization resets policy module, re-initializing it
policy.init()
+ def _post_request(self, path, initial_input, expect_errors=None,
+ req_tenant_id=None, as_admin=False):
+ req_tenant_id = req_tenant_id or self._tenant_id
+ return self.api.post_json(
+ path, initial_input, expect_errors=expect_errors,
+ extra_environ=_get_neutron_env(req_tenant_id, as_admin))
+
+ def _put_request(self, path, initial_input, expect_errors=None,
+ req_tenant_id=None, as_admin=False):
+ req_tenant_id = req_tenant_id or self._tenant_id
+ return self.api.put_json(
+ path, initial_input, expect_errors=expect_errors,
+ extra_environ=_get_neutron_env(req_tenant_id, as_admin))
+
+ def _delete_request(self, path, expect_errors=None,
+ req_tenant_id=None, as_admin=False):
+ req_tenant_id = req_tenant_id or self._tenant_id
+ return self.api.delete_json(
+ path, expect_errors=expect_errors,
+ extra_environ=_get_neutron_env(req_tenant_id, as_admin))
+
class _ArgMatcher(object):
"""An adapter to assist mock assertions, used to custom compare."""
@@ -512,17 +543,16 @@ class JSONV2TestCase(APIv2TestBase, testlib_api.WebTestCase):
def _test_list(self, req_tenant_id, real_tenant_id):
env = {}
if req_tenant_id:
- env = {'neutron.context': context.Context('', req_tenant_id)}
+ env = _get_neutron_env(req_tenant_id)
input_dict = {'id': uuidutils.generate_uuid(),
'name': 'net1',
'admin_state_up': True,
'status': "ACTIVE",
- 'tenant_id': real_tenant_id,
+ 'project_id': real_tenant_id,
'shared': False,
'subnets': []}
- return_value = [input_dict]
instance = self.plugin.return_value
- instance.get_networks.return_value = return_value
+ instance.get_networks.return_value = [input_dict]
res = self.api.get(_get_path('networks',
fmt=self.fmt), extra_environ=env)
@@ -789,7 +819,7 @@ class JSONV2TestCase(APIv2TestBase, testlib_api.WebTestCase):
def test_create_with_keystone_env(self):
tenant_id = _uuid()
net_id = _uuid()
- env = {'neutron.context': context.Context('', tenant_id)}
+ env = _get_neutron_env(tenant_id)
# tenant_id should be fetched from env
initial_input = {'network': {'name': 'net1'}}
full_input = {'network': {'admin_state_up': True,
@@ -947,8 +977,9 @@ class JSONV2TestCase(APIv2TestBase, testlib_api.WebTestCase):
def test_create_return_extra_attr(self):
net_id = _uuid()
+ project_id = _uuid()
data = {'network': {'name': 'net1', 'admin_state_up': True,
- 'tenant_id': _uuid()}}
+ 'tenant_id': project_id}}
return_value = {'subnets': [], 'status': "ACTIVE",
'id': net_id, 'v2attrs:something': "123"}
return_value.update(data['network'].copy())
@@ -959,7 +990,8 @@ class JSONV2TestCase(APIv2TestBase, testlib_api.WebTestCase):
res = self.api.post(_get_path('networks', fmt=self.fmt),
self.serialize(data),
- content_type='application/' + self.fmt)
+ content_type='application/' + self.fmt,
+ extra_environ=_get_neutron_env(project_id))
self.assertEqual(exc.HTTPCreated.code, res.status_int)
res = self.deserialize(res)
self.assertIn('network', res)
@@ -969,23 +1001,25 @@ class JSONV2TestCase(APIv2TestBase, testlib_api.WebTestCase):
self.assertNotIn('v2attrs:something', net)
def test_fields(self):
+ project_id = _uuid()
return_value = {'name': 'net1', 'admin_state_up': True,
- 'subnets': []}
+ 'project_id': project_id, 'subnets': []}
instance = self.plugin.return_value
instance.get_network.return_value = return_value
self.api.get(_get_path('networks',
id=uuidutils.generate_uuid(),
- fmt=self.fmt))
+ fmt=self.fmt),
+ extra_environ=_get_neutron_env(project_id))
def _test_delete(self, req_tenant_id, real_tenant_id, expected_code,
expect_errors=False):
env = {}
if req_tenant_id:
- env = {'neutron.context': context.Context('', req_tenant_id)}
+ env = _get_neutron_env(req_tenant_id)
instance = self.plugin.return_value
- instance.get_network.return_value = {'tenant_id': real_tenant_id,
+ instance.get_network.return_value = {'project_id': real_tenant_id,
'shared': False}
instance.delete_network.return_value = None
@@ -1010,15 +1044,12 @@ class JSONV2TestCase(APIv2TestBase, testlib_api.WebTestCase):
def _test_get(self, req_tenant_id, real_tenant_id, expected_code,
expect_errors=False):
+ shared = req_tenant_id and req_tenant_id.endswith('another')
env = {}
- shared = False
if req_tenant_id:
- env = {'neutron.context': context.Context('', req_tenant_id)}
- if req_tenant_id.endswith('another'):
- shared = True
- env['neutron.context'].roles = ['tenant_admin']
+ env = _get_neutron_env(req_tenant_id)
- data = {'tenant_id': real_tenant_id, 'shared': shared}
+ data = {'project_id': real_tenant_id, 'shared': shared}
instance = self.plugin.return_value
instance.get_network.return_value = data
@@ -1060,14 +1091,14 @@ class JSONV2TestCase(APIv2TestBase, testlib_api.WebTestCase):
expect_errors=False):
env = {}
if req_tenant_id:
- env = {'neutron.context': context.Context('', req_tenant_id)}
+ env = _get_neutron_env(req_tenant_id)
# leave out 'name' field intentionally
data = {'network': {'admin_state_up': True}}
return_value = {'subnets': []}
return_value.update(data['network'].copy())
instance = self.plugin.return_value
- instance.get_network.return_value = {'tenant_id': real_tenant_id,
+ instance.get_network.return_value = {'project_id': real_tenant_id,
'shared': False}
instance.update_network.return_value = return_value
@@ -1308,26 +1339,31 @@ class NotificationTest(APIv2TestBase):
group='QUOTAS')
def _resource_op_notifier(self, opname, resource, expected_errors=False):
- initial_input = {resource: {'name': 'myname'}}
+ tenant_id = _uuid()
+ network_obj = {'name': 'myname',
+ 'project_id': tenant_id}
+ initial_input = {resource: network_obj}
instance = self.plugin.return_value
- instance.get_networks.return_value = initial_input
+ instance.get_network.return_value = network_obj
instance.get_networks_count.return_value = 0
expected_code = exc.HTTPCreated.code
if opname == 'create':
- initial_input[resource]['tenant_id'] = _uuid()
- res = self.api.post_json(
+ res = self._post_request(
_get_path('networks'),
- initial_input, expect_errors=expected_errors)
+ initial_input, expect_errors=expected_errors,
+ req_tenant_id=tenant_id)
if opname == 'update':
- res = self.api.put_json(
- _get_path('networks', id=_uuid()),
- initial_input, expect_errors=expected_errors)
+ op_input = {resource: {'name': 'myname'}}
+ res = self._put_request(
+ _get_path('networks', id=tenant_id),
+ op_input, expect_errors=expected_errors,
+ req_tenant_id=tenant_id)
expected_code = exc.HTTPOk.code
if opname == 'delete':
- initial_input[resource]['tenant_id'] = _uuid()
- res = self.api.delete(
- _get_path('networks', id=_uuid()),
- expect_errors=expected_errors)
+ res = self._delete_request(
+ _get_path('networks', id=tenant_id),
+ expect_errors=expected_errors,
+ req_tenant_id=tenant_id)
expected_code = exc.HTTPNoContent.code
expected_events = ('.'.join([resource, opname, "start"]),
@@ -1472,7 +1508,9 @@ class ExtensionTestCase(base.BaseTestCase):
instance.create_network.return_value = return_value
instance.get_networks_count.return_value = 0
- res = self.api.post_json(_get_path('networks'), initial_input)
+ res = self.api.post_json(
+ _get_path('networks'), initial_input,
+ extra_environ=_get_neutron_env(tenant_id))
instance.create_network.assert_called_with(mock.ANY,
network=data)
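Usage sketch for the new request helpers (hypothetical values): the same POST issued as a plain project member, then again with the admin role added.

    res = self._post_request(_get_path('networks'),
                             {'network': {'name': 'net1'}},
                             req_tenant_id='project-a')
    res_admin = self._post_request(_get_path('networks'),
                                   {'network': {'name': 'net2'}},
                                   req_tenant_id='project-a', as_admin=True)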
diff --git a/neutron/tests/unit/conf/policies/test_availability_zone.py b/neutron/tests/unit/conf/policies/test_availability_zone.py
index ad797da9b4..85d9679121 100644
--- a/neutron/tests/unit/conf/policies/test_availability_zone.py
+++ b/neutron/tests/unit/conf/policies/test_availability_zone.py
@@ -70,12 +70,6 @@ class ProjectMemberTests(AdminTests):
super(ProjectMemberTests, self).setUp()
self.context = self.project_member_ctx
- def test_get_availability_zone(self):
- self.assertRaises(
- base_policy.PolicyNotAuthorized,
- policy.enforce,
- self.context, "get_availability_zone", self.target)
-
class ProjectReaderTests(ProjectMemberTests):
diff --git a/neutron/tests/unit/db/metering/test_metering_db.py b/neutron/tests/unit/db/metering/test_metering_db.py
index 35d7f733c1..1c2af72a4e 100644
--- a/neutron/tests/unit/db/metering/test_metering_db.py
+++ b/neutron/tests/unit/db/metering/test_metering_db.py
@@ -16,7 +16,6 @@ import contextlib
from neutron_lib.api.definitions import metering as metering_apidef
from neutron_lib import constants as n_consts
-from neutron_lib import context
from neutron_lib.db import constants as db_const
from neutron_lib.plugins import constants
from oslo_utils import uuidutils
@@ -42,18 +41,12 @@ _fake_uuid = uuidutils.generate_uuid
class MeteringPluginDbTestCaseMixin(object):
def _create_metering_label(self, fmt, name, description, **kwargs):
data = {'metering_label': {'name': name,
- 'tenant_id': kwargs.get('tenant_id',
- 'test-tenant'),
'shared': kwargs.get('shared', False),
'description': description}}
- req = self.new_create_request('metering-labels', data,
- fmt)
-
- if kwargs.get('set_context') and 'tenant_id' in kwargs:
- # create a specific auth context for this request
- req.environ['neutron.context'] = (
- context.Context('', kwargs['tenant_id'],
- is_admin=kwargs.get('is_admin', True)))
+ req = self.new_create_request(
+ 'metering-labels', data, fmt,
+ tenant_id=kwargs.get('tenant_id', self._tenant_id),
+ as_admin=kwargs.get('is_admin', True))
return req.get_response(self.ext_api)
@@ -71,7 +64,6 @@ class MeteringPluginDbTestCaseMixin(object):
data = {
'metering_label_rule': {
'metering_label_id': metering_label_id,
- 'tenant_id': kwargs.get('tenant_id', 'test-tenant'),
'direction': direction,
'excluded': excluded,
}
@@ -87,13 +79,10 @@ class MeteringPluginDbTestCaseMixin(object):
data['metering_label_rule']['destination_ip_prefix'] =\
destination_ip_prefix
- req = self.new_create_request('metering-label-rules',
- data, fmt)
-
- if kwargs.get('set_context') and 'tenant_id' in kwargs:
- # create a specific auth context for this request
- req.environ['neutron.context'] = (
- context.Context('', kwargs['tenant_id']))
+ req = self.new_create_request(
+ 'metering-label-rules', data, fmt,
+ tenant_id=kwargs.get('tenant_id', self._tenant_id),
+ as_admin=kwargs.get('is_admin', True))
return req.get_response(self.ext_api)
@@ -203,7 +192,8 @@ class TestMetering(MeteringPluginDbTestCase):
with self.metering_label(name, description) as metering_label:
metering_label_id = metering_label['metering_label']['id']
- self._delete('metering-labels', metering_label_id, 204)
+ self._delete('metering-labels', metering_label_id, 204,
+ as_admin=True)
def test_list_metering_label(self):
name = 'my label'
@@ -258,7 +248,7 @@ class TestMetering(MeteringPluginDbTestCase):
remote_ip_prefix=remote_ip_prefix) as label_rule:
rule_id = label_rule['metering_label_rule']['id']
self._update('metering-label-rules', rule_id, data,
- webob.exc.HTTPNotImplemented.code)
+ webob.exc.HTTPNotImplemented.code, as_admin=True)
def test_delete_metering_label_rule(self):
name = 'my label'
@@ -275,7 +265,8 @@ class TestMetering(MeteringPluginDbTestCase):
metering_label_id, direction, excluded,
remote_ip_prefix=remote_ip_prefix) as label_rule:
rule_id = label_rule['metering_label_rule']['id']
- self._delete('metering-label-rules', rule_id, 204)
+ self._delete('metering-label-rules', rule_id, 204,
+ as_admin=True)
def test_list_metering_label_rule(self):
name = 'my label'
@@ -297,7 +288,7 @@ class TestMetering(MeteringPluginDbTestCase):
metering_label_rule = (v1, v2)
self._test_list_resources('metering-label-rule',
- metering_label_rule)
+ metering_label_rule, as_admin=True)
def test_create_metering_label_rules(self):
name = 'my label'
@@ -319,7 +310,7 @@ class TestMetering(MeteringPluginDbTestCase):
metering_label_rule = (v1, v2)
self._test_list_resources('metering-label-rule',
- metering_label_rule)
+ metering_label_rule, as_admin=True)
def test_create_overlap_metering_label_rules(self):
name = 'my label'
@@ -365,4 +356,5 @@ class TestMetering(MeteringPluginDbTestCase):
metering_label_rule = (v1, v2)
self._test_list_resources('metering-label-rule',
- metering_label_rule)
+ metering_label_rule,
+ as_admin=True)
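The same migration pattern recurs through the rest of the diff: rather than building a context.Context by hand and injecting it into req.environ, tests pass tenant_id/as_admin to the request builders. A condensed before/after sketch:

    # before: manual context injection
    req = self.new_create_request('metering-labels', data, fmt)
    req.environ['neutron.context'] = context.Context('', tenant_id)

    # after: the builder derives a role-based context itself
    req = self.new_create_request('metering-labels', data, fmt,
                                  tenant_id=tenant_id, as_admin=is_admin)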
diff --git a/neutron/tests/unit/db/test_agentschedulers_db.py b/neutron/tests/unit/db/test_agentschedulers_db.py
index d503aee1ec..df4edfaa9b 100644
--- a/neutron/tests/unit/db/test_agentschedulers_db.py
+++ b/neutron/tests/unit/db/test_agentschedulers_db.py
@@ -45,6 +45,7 @@ from neutron.db.models import agent as agent_model
from neutron.extensions import l3agentscheduler
from neutron.objects import agent as ag_obj
from neutron.objects import l3agent as rb_obj
+from neutron import policy
from neutron.tests.common import helpers
from neutron.tests.unit.api import test_extensions
from neutron.tests.unit.db import test_db_base_plugin_v2 as test_plugin
@@ -78,18 +79,21 @@ class AgentSchedulerTestMixIn(object):
def _path_req(self, path, method='GET', data=None,
query_string=None,
- admin_context=True):
+ admin_context=True,
+ req_tenant_id=None):
content_type = 'application/%s' % self.fmt
body = None
if data is not None: # empty dict is valid
body = wsgi.Serializer().serialize(data, content_type)
+ roles = ['member', 'reader']
+ req_tenant_id = req_tenant_id or self._tenant_id
if admin_context:
- return testlib_api.create_request(
- path, body, content_type, method, query_string=query_string)
- else:
- return testlib_api.create_request(
- path, body, content_type, method, query_string=query_string,
- context=context.Context('', 'tenant_id'))
+ roles.append('admin')
+ req = testlib_api.create_request(
+ path, body, content_type, method, query_string=query_string)
+ req.environ['neutron.context'] = context.Context(
+ '', req_tenant_id, roles=roles, is_admin=admin_context)
+ return req
def _path_create_request(self, path, data, admin_context=True):
return self._path_req(path, method='POST', data=data,
@@ -218,7 +222,7 @@ class AgentSchedulerTestMixIn(object):
new_agent = {}
new_agent['agent'] = {}
new_agent['agent']['admin_state_up'] = admin_state_up
- self._update('agents', agent_id, new_agent)
+ self._update('agents', agent_id, new_agent, as_admin=True)
def _get_agent_id(self, agent_type, host):
agents = self._list_agents()
@@ -269,6 +273,7 @@ class OvsAgentSchedulerTestCaseBase(test_l3.L3NatTestCaseMixin,
self.dhcp_notify_p = mock.patch(
'neutron.extensions.dhcpagentscheduler.notify')
self.patched_dhcp_notify = self.dhcp_notify_p.start()
+ policy.init()
class OvsAgentSchedulerTestCase(OvsAgentSchedulerTestCaseBase):
@@ -911,10 +916,12 @@ class OvsAgentSchedulerTestCase(OvsAgentSchedulerTestCaseBase):
self.assertNotEqual(agent['host'], new_agent_host)
def test_router_auto_schedule_with_invalid_router(self):
- with self.router() as router:
+ project_id = uuidutils.generate_uuid()
+ with self.router(project_id=project_id) as router:
l3_rpc_cb = l3_rpc.L3RpcCallback()
self._register_agent_states()
- self._delete('routers', router['router']['id'])
+ self._delete('routers', router['router']['id'],
+ tenant_id=project_id)
# deleted router
ret_a = l3_rpc_cb.sync_routers(self.adminContext, host=L3_HOSTA,
@@ -1106,19 +1113,22 @@ class OvsAgentSchedulerTestCase(OvsAgentSchedulerTestCaseBase):
self.assertEqual(0, len(router_ids))
def test_router_without_l3_agents(self):
+ project_id = uuidutils.generate_uuid()
with self.subnet() as s:
self._set_net_external(s['subnet']['network_id'])
- data = {'router': {'tenant_id': uuidutils.generate_uuid()}}
+ data = {'router': {'tenant_id': project_id}}
data['router']['name'] = 'router1'
data['router']['external_gateway_info'] = {
'network_id': s['subnet']['network_id']}
- router_req = self.new_create_request('routers', data, self.fmt)
+ router_req = self.new_create_request(
+ 'routers', data, self.fmt, tenant_id=project_id)
res = router_req.get_response(self.ext_api)
router = self.deserialize(self.fmt, res)
l3agents = (
self.l3plugin.get_l3_agents_hosting_routers(
self.adminContext, [router['router']['id']]))
- self._delete('routers', router['router']['id'])
+ self._delete(
+ 'routers', router['router']['id'], tenant_id=project_id)
self.assertEqual(0, len(l3agents))
def test_dvr_router_scheduling_to_only_dvr_snat_agent(self):
@@ -1217,26 +1227,30 @@ class OvsAgentSchedulerTestCase(OvsAgentSchedulerTestCaseBase):
self.assertEqual(agent['id'], new_agent['id'])
def test_router_sync_data(self):
- with self.subnet() as s1,\
- self.subnet(cidr='10.0.2.0/24') as s2,\
- self.subnet(cidr='10.0.3.0/24') as s3:
+ project_id = uuidutils.generate_uuid()
+ with self.subnet(project_id=project_id) as s1,\
+ self.subnet(project_id=project_id, cidr='10.0.2.0/24') as s2,\
+ self.subnet(project_id=project_id, cidr='10.0.3.0/24') as s3:
self._register_agent_states()
self._set_net_external(s1['subnet']['network_id'])
- data = {'router': {'tenant_id': uuidutils.generate_uuid()}}
+ data = {'router': {'tenant_id': project_id}}
data['router']['name'] = 'router1'
data['router']['external_gateway_info'] = {
'network_id': s1['subnet']['network_id']}
- router_req = self.new_create_request('routers', data, self.fmt)
+ router_req = self.new_create_request(
+ 'routers', data, self.fmt, tenant_id=project_id)
res = router_req.get_response(self.ext_api)
router = self.deserialize(self.fmt, res)
self._router_interface_action('add',
router['router']['id'],
s2['subnet']['id'],
- None)
+ None,
+ tenant_id=project_id)
self._router_interface_action('add',
router['router']['id'],
s3['subnet']['id'],
- None)
+ None,
+ tenant_id=project_id)
l3agents = self._list_l3_agents_hosting_router(
router['router']['id'])
self.assertEqual(1, len(l3agents['agents']))
@@ -1267,7 +1281,8 @@ class OvsAgentSchedulerTestCase(OvsAgentSchedulerTestCaseBase):
self._router_interface_action('remove',
router['router']['id'],
s2['subnet']['id'],
- None)
+ None,
+ tenant_id=project_id)
l3agents = self._list_l3_agents_hosting_router(
router['router']['id'])
self.assertEqual(1,
@@ -1275,8 +1290,10 @@ class OvsAgentSchedulerTestCase(OvsAgentSchedulerTestCaseBase):
self._router_interface_action('remove',
router['router']['id'],
s3['subnet']['id'],
- None)
- self._delete('routers', router['router']['id'])
+ None,
+ tenant_id=project_id)
+ self._delete('routers', router['router']['id'],
+ tenant_id=project_id)
def _test_router_add_to_l3_agent(self, admin_state_up=True):
with self.router() as router1:
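With member-scoped requests, a delete has to come from the owning project (or carry the admin role), which is why these scheduler tests now thread an explicit project_id through creation and deletion. A short sketch of the pattern:

    project_id = uuidutils.generate_uuid()
    with self.router(project_id=project_id) as router:
        # delete as the owning project, not as an implicit admin
        self._delete('routers', router['router']['id'],
                     tenant_id=project_id)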
diff --git a/neutron/tests/unit/db/test_db_base_plugin_v2.py b/neutron/tests/unit/db/test_db_base_plugin_v2.py
index a25ccc4f0d..c4a1dce9ae 100644
--- a/neutron/tests/unit/db/test_db_base_plugin_v2.py
+++ b/neutron/tests/unit/db/test_db_base_plugin_v2.py
@@ -49,6 +49,7 @@ import neutron
from neutron.api import api_common
from neutron.api import extensions
from neutron.api.v2 import router
+from neutron.common import _constants as common_constants
from neutron.common import ipv6_utils
from neutron.common.ovn import utils as ovn_utils
from neutron.common import test_lib
@@ -60,6 +61,7 @@ from neutron.db import ipam_backend_mixin
from neutron.db.models import l3 as l3_models
from neutron.db.models import securitygroup as sg_models
from neutron.db import models_v2
+from neutron.exceptions import mtu as mtu_exc
from neutron.ipam.drivers.neutrondb_ipam import driver as ipam_driver
from neutron.ipam import exceptions as ipam_exc
from neutron.objects import network as network_obj
@@ -246,60 +248,117 @@ class NeutronDbPluginV2TestCase(testlib_api.WebTestCase):
query_string=params, context=context,
headers=headers)
+ def _admin_req(self, method, resource, data=None, fmt=None, id=None,
+ params=None, action=None, subresource=None, sub_id=None,
+ ctx=None, headers=None, tenant_id=None):
+ tenant_id = tenant_id or self._tenant_id
+ req = self._req(method, resource, data, fmt, id, params, action,
+ subresource, sub_id, ctx, headers)
+ req.environ['neutron.context'] = context.Context(
+ '', tenant_id, is_admin=True,
+ roles=['admin', 'member', 'reader'])
+ return req
+
+ def _member_req(self, method, resource, data=None, fmt=None, id=None,
+ params=None, action=None, subresource=None, sub_id=None,
+ ctx=None, headers=None, tenant_id=None):
+ tenant_id = tenant_id or self._tenant_id
+ req = self._req(method, resource, data, fmt, id, params, action,
+ subresource, sub_id, ctx, headers)
+ req.environ['neutron.context'] = context.Context(
+ '', tenant_id, roles=['member', 'reader'])
+ return req
+
+ def _reader_req(self, method, resource, data=None, fmt=None, id=None,
+ params=None, action=None, subresource=None, sub_id=None,
+ ctx=None, headers=None, tenant_id=None):
+ tenant_id = tenant_id or self._tenant_id
+ req = self._req(method, resource, data, fmt, id, params, action,
+ subresource, sub_id, ctx, headers)
+ req.environ['neutron.context'] = context.Context(
+ '', tenant_id, roles=['reader'])
+ return req
+
def new_create_request(self, resource, data, fmt=None, id=None,
- subresource=None, context=None):
- return self._req('POST', resource, data, fmt, id=id,
- subresource=subresource, context=context)
+ subresource=None, context=None, tenant_id=None,
+ as_admin=False):
+ tenant_id = tenant_id or self._tenant_id
+ if as_admin:
+ return self._admin_req(
+ 'POST', resource, data, fmt, id=id,
+ subresource=subresource, ctx=context, tenant_id=tenant_id)
+ return self._member_req('POST', resource, data, fmt, id=id,
+ subresource=subresource, ctx=context,
+ tenant_id=tenant_id)
def new_list_request(self, resource, fmt=None, params=None,
- subresource=None, parent_id=None):
- return self._req(
+ subresource=None, parent_id=None, tenant_id=None,
+ as_admin=False):
+ tenant_id = tenant_id or self._tenant_id
+ if as_admin:
+ return self._admin_req(
+ 'GET', resource, None, fmt, params=params, id=parent_id,
+ subresource=subresource, tenant_id=tenant_id
+ )
+ return self._reader_req(
'GET', resource, None, fmt, params=params, id=parent_id,
- subresource=subresource
+ subresource=subresource, tenant_id=tenant_id
)
def new_show_request(self, resource, id, fmt=None,
- subresource=None, fields=None, sub_id=None):
+ subresource=None, fields=None, sub_id=None,
+ tenant_id=None, as_admin=False):
+ tenant_id = tenant_id or self._tenant_id
if fields:
params = "&".join(["fields=%s" % x for x in fields])
else:
params = None
- return self._req('GET', resource, None, fmt, id=id,
- params=params, subresource=subresource, sub_id=sub_id)
+ if as_admin:
+ return self._admin_req('GET', resource, None, fmt, id=id,
+ params=params, subresource=subresource,
+ sub_id=sub_id, tenant_id=tenant_id)
+ return self._reader_req('GET', resource, None, fmt, id=id,
+ params=params, subresource=subresource,
+ sub_id=sub_id, tenant_id=tenant_id)
def new_delete_request(self, resource, id, fmt=None, subresource=None,
- sub_id=None, data=None, headers=None):
- return self._req(
- 'DELETE',
- resource,
- data,
- fmt,
- id=id,
- subresource=subresource,
- sub_id=sub_id,
- headers=headers
- )
+ sub_id=None, data=None, headers=None,
+ tenant_id=None, as_admin=False):
+ tenant_id = tenant_id or self._tenant_id
+ if as_admin:
+ return self._admin_req('DELETE', resource, data, fmt, id=id,
+ subresource=subresource, sub_id=sub_id,
+ headers=headers, tenant_id=tenant_id)
+ return self._member_req('DELETE', resource, data, fmt, id=id,
+ subresource=subresource, sub_id=sub_id,
+ headers=headers, tenant_id=tenant_id)
def new_update_request(self, resource, data, id, fmt=None,
subresource=None, context=None, sub_id=None,
- headers=None):
- return self._req(
+ headers=None, as_admin=False, tenant_id=None):
+ tenant_id = tenant_id or self._tenant_id
+ if as_admin:
+ return self._admin_req(
+ 'PUT', resource, data, fmt, id=id, subresource=subresource,
+ sub_id=sub_id, ctx=context, headers=headers,
+ tenant_id=tenant_id
+ )
+ return self._member_req(
'PUT', resource, data, fmt, id=id, subresource=subresource,
- sub_id=sub_id, context=context, headers=headers
+ sub_id=sub_id, ctx=context, headers=headers, tenant_id=tenant_id
)
def new_action_request(self, resource, data, id, action, fmt=None,
- subresource=None, sub_id=None):
- return self._req(
- 'PUT',
- resource,
- data,
- fmt,
- id=id,
- action=action,
- subresource=subresource,
- sub_id=sub_id
- )
+ subresource=None, sub_id=None, tenant_id=None,
+ as_admin=False):
+ tenant_id = tenant_id or self._tenant_id
+ if as_admin:
+ return self._admin_req('PUT', resource, data, fmt, id=id,
+ action=action, subresource=subresource,
+ sub_id=sub_id, tenant_id=tenant_id)
+ return self._member_req('PUT', resource, data, fmt, id=id,
+ action=action, subresource=subresource,
+ sub_id=sub_id, tenant_id=tenant_id)
def deserialize(self, content_type, response):
ctype = 'application/%s' % content_type
@@ -328,23 +387,19 @@ class NeutronDbPluginV2TestCase(testlib_api.WebTestCase):
return random.choice(ip_list)
return ip_list[0]
- def _create_bulk_from_list(self, fmt, resource, objects, **kwargs):
+ def _create_bulk_from_list(self, fmt, resource, objects, tenant_id=None,
+ as_admin=False, **kwargs):
"""Creates a bulk request from a list of objects."""
collection = "%ss" % resource
req_data = {collection: objects}
- req = self.new_create_request(collection, req_data, fmt)
- if ('set_context' in kwargs and
- kwargs['set_context'] is True and
- 'tenant_id' in kwargs):
- # create a specific auth context for this request
- req.environ['neutron.context'] = context.Context(
- '', kwargs['tenant_id'])
- elif 'context' in kwargs:
- req.environ['neutron.context'] = kwargs['context']
+ req = self.new_create_request(collection, req_data, fmt,
+ tenant_id=tenant_id, as_admin=as_admin)
return req.get_response(self.api)
- def _create_bulk(self, fmt, number, resource, data, name='test', **kwargs):
+ def _create_bulk(self, fmt, number, resource, data, name='test',
+ tenant_id=None, as_admin=False, **kwargs):
"""Creates a bulk request for any kind of resource."""
+ tenant_id = tenant_id or self._tenant_id
objects = []
collection = "%ss" % resource
for i in range(number):
@@ -354,35 +409,27 @@ class NeutronDbPluginV2TestCase(testlib_api.WebTestCase):
obj[resource].update(kwargs['override'][i])
objects.append(obj)
req_data = {collection: objects}
- req = self.new_create_request(collection, req_data, fmt)
- if ('set_context' in kwargs and
- kwargs['set_context'] is True and
- 'tenant_id' in kwargs):
- # create a specific auth context for this request
- req.environ['neutron.context'] = context.Context(
- '', kwargs['tenant_id'])
- elif 'context' in kwargs:
- req.environ['neutron.context'] = kwargs['context']
+ req = self.new_create_request(collection, req_data, fmt,
+ tenant_id=tenant_id,
+ as_admin=as_admin)
return req.get_response(self.api)
def _create_network(self, fmt, name, admin_state_up,
- arg_list=None, set_context=False, tenant_id=None,
+ arg_list=None, tenant_id=None, as_admin=False,
**kwargs):
tenant_id = tenant_id or self._tenant_id
data = {'network': {'name': name,
'admin_state_up': admin_state_up,
'tenant_id': tenant_id}}
for arg in (('admin_state_up', 'tenant_id', 'shared',
- 'vlan_transparent',
+ 'vlan_transparent', 'mtu',
'availability_zone_hints') + (arg_list or ())):
# Arg must be present
if arg in kwargs:
data['network'][arg] = kwargs[arg]
- network_req = self.new_create_request('networks', data, fmt)
- if set_context and tenant_id:
- # create a specific auth context for this request
- network_req.environ['neutron.context'] = context.Context(
- '', tenant_id)
+ network_req = self.new_create_request('networks', data, fmt,
+ tenant_id=tenant_id,
+ as_admin=as_admin)
return network_req.get_response(self.api)
@@ -392,11 +439,12 @@ class NeutronDbPluginV2TestCase(testlib_api.WebTestCase):
'tenant_id': self._tenant_id}}
return self._create_bulk(fmt, number, 'network', base_data, **kwargs)
- def _create_subnet(self, fmt, net_id, cidr,
- expected_res_status=None, **kwargs):
+ def _create_subnet(self, fmt, net_id, cidr, expected_res_status=None,
+ tenant_id=None, as_admin=False, **kwargs):
+ tenant_id = tenant_id or self._tenant_id
data = {'subnet': {'network_id': net_id,
'ip_version': constants.IP_VERSION_4,
- 'tenant_id': self._tenant_id}}
+ 'tenant_id': tenant_id}}
if cidr:
data['subnet']['cidr'] = cidr
for arg in ('ip_version', 'tenant_id', 'subnetpool_id', 'prefixlen',
@@ -412,11 +460,9 @@ class NeutronDbPluginV2TestCase(testlib_api.WebTestCase):
kwargs['gateway_ip'] is not constants.ATTR_NOT_SPECIFIED):
data['subnet']['gateway_ip'] = kwargs['gateway_ip']
- subnet_req = self.new_create_request('subnets', data, fmt)
- if (kwargs.get('set_context') and 'tenant_id' in kwargs):
- # create a specific auth context for this request
- subnet_req.environ['neutron.context'] = context.Context(
- '', kwargs['tenant_id'])
+ subnet_req = self.new_create_request('subnets', data, fmt,
+ tenant_id=tenant_id,
+ as_admin=as_admin)
subnet_res = subnet_req.get_response(self.api)
if expected_res_status:
@@ -443,24 +489,25 @@ class NeutronDbPluginV2TestCase(testlib_api.WebTestCase):
return self._create_bulk(fmt, number, 'subnet', base_data, **kwargs)
def _create_subnetpool(self, fmt, prefixes,
- expected_res_status=None, admin=False, **kwargs):
+ expected_res_status=None, admin=False,
+ tenant_id=None, **kwargs):
+ tenant_id = tenant_id or self._tenant_id
subnetpool = {'subnetpool': {'prefixes': prefixes}}
for k, v in kwargs.items():
subnetpool['subnetpool'][k] = str(v)
api = self._api_for_resource('subnetpools')
subnetpools_req = self.new_create_request('subnetpools',
- subnetpool, fmt)
- if not admin:
- neutron_context = context.Context('', kwargs['tenant_id'])
- subnetpools_req.environ['neutron.context'] = neutron_context
+ subnetpool, fmt,
+ tenant_id=tenant_id,
+ as_admin=admin)
subnetpool_res = subnetpools_req.get_response(api)
if expected_res_status:
self.assertEqual(expected_res_status, subnetpool_res.status_int)
return subnetpool_res
def _create_port(self, fmt, net_id, expected_res_status=None,
- arg_list=None, set_context=False, is_admin=False,
+ arg_list=None, is_admin=False,
tenant_id=None, **kwargs):
tenant_id = tenant_id or self._tenant_id
data = {'port': {'network_id': net_id,
@@ -481,11 +528,9 @@ class NeutronDbPluginV2TestCase(testlib_api.WebTestCase):
'device_id' not in kwargs):
device_id = utils.get_dhcp_agent_device_id(net_id, kwargs['host'])
data['port']['device_id'] = device_id
- port_req = self.new_create_request('ports', data, fmt)
- if set_context and tenant_id:
- # create a specific auth context for this request
- port_req.environ['neutron.context'] = context.Context(
- '', tenant_id, is_admin=is_admin)
+ port_req = self.new_create_request('ports', data, fmt,
+ tenant_id=tenant_id,
+ as_admin=is_admin)
port_res = port_req.get_response(self.api)
if expected_res_status:
@@ -499,28 +544,26 @@ class NeutronDbPluginV2TestCase(testlib_api.WebTestCase):
query_params.append("network_id=%s" % net_id)
if kwargs.get('device_owner'):
query_params.append("device_owner=%s" % kwargs.get('device_owner'))
- port_req = self.new_list_request('ports', fmt, '&'.join(query_params))
- if ('set_context' in kwargs and
- kwargs['set_context'] is True and
- 'tenant_id' in kwargs):
- # create a specific auth context for this request
- port_req.environ['neutron.context'] = context.Context(
- '', kwargs['tenant_id'])
-
+ port_req = self.new_list_request('ports', fmt, '&'.join(query_params),
+ tenant_id=kwargs.get('tenant_id'))
port_res = port_req.get_response(self.api)
if expected_res_status:
self.assertEqual(expected_res_status, port_res.status_int)
return port_res
def _create_port_bulk(self, fmt, number, net_id, name,
- admin_state_up, **kwargs):
+ admin_state_up, tenant_id=None, as_admin=False,
+ **kwargs):
base_data = {'port': {'network_id': net_id,
- 'admin_state_up': admin_state_up,
- 'tenant_id': self._tenant_id}}
- return self._create_bulk(fmt, number, 'port', base_data, **kwargs)
-
- def _make_network(self, fmt, name, admin_state_up, **kwargs):
- res = self._create_network(fmt, name, admin_state_up, **kwargs)
+ 'admin_state_up': admin_state_up}}
+ return self._create_bulk(fmt, number, 'port', base_data,
+ tenant_id=tenant_id, as_admin=as_admin,
+ **kwargs)
+
+ def _make_network(self, fmt, name, admin_state_up, as_admin=False,
+ **kwargs):
+ res = self._create_network(fmt, name, admin_state_up,
+ as_admin=as_admin, **kwargs)
# TODO(salvatore-orlando): do exception handling in this test module
# in a uniform way (we do it differently for ports, subnets, and nets)
# Things can go wrong - raise HTTP exc with res code only
@@ -533,7 +576,7 @@ class NeutronDbPluginV2TestCase(testlib_api.WebTestCase):
allocation_pools=None, ip_version=constants.IP_VERSION_4,
enable_dhcp=True, dns_nameservers=None, host_routes=None,
shared=None, ipv6_ra_mode=None, ipv6_address_mode=None,
- tenant_id=None, set_context=False, segment_id=None):
+ tenant_id=None, segment_id=None, as_admin=False):
res = self._create_subnet(fmt,
net_id=network['network']['id'],
cidr=cidr,
@@ -550,7 +593,7 @@ class NeutronDbPluginV2TestCase(testlib_api.WebTestCase):
shared=shared,
ipv6_ra_mode=ipv6_ra_mode,
ipv6_address_mode=ipv6_address_mode,
- set_context=set_context)
+ as_admin=as_admin)
# Things can go wrong - raise HTTP exc with res code only
# so it can be caught by unit tests
if res.status_int >= webob.exc.HTTPClientError.code:
@@ -572,11 +615,13 @@ class NeutronDbPluginV2TestCase(testlib_api.WebTestCase):
ipv6_ra_mode=ra_addr_mode,
ipv6_address_mode=ra_addr_mode))
- def _make_subnetpool(self, fmt, prefixes, admin=False, **kwargs):
+ def _make_subnetpool(self, fmt, prefixes, admin=False, tenant_id=None,
+ **kwargs):
res = self._create_subnetpool(fmt,
prefixes,
None,
admin,
+ tenant_id=tenant_id,
**kwargs)
# Things can go wrong - raise HTTP exc with res code only
# so it can be caught by unit tests
@@ -584,8 +629,10 @@ class NeutronDbPluginV2TestCase(testlib_api.WebTestCase):
raise webob.exc.HTTPClientError(code=res.status_int)
return self.deserialize(fmt, res)
- def _make_port(self, fmt, net_id, expected_res_status=None, **kwargs):
- res = self._create_port(fmt, net_id, expected_res_status, **kwargs)
+ def _make_port(self, fmt, net_id, expected_res_status=None,
+ as_admin=False, **kwargs):
+ res = self._create_port(fmt, net_id, expected_res_status,
+ is_admin=as_admin, **kwargs)
# Things can go wrong - raise HTTP exc with res code only
# so it can be caught by unit tests
if res.status_int >= webob.exc.HTTPClientError.code:
@@ -596,7 +643,7 @@ class NeutronDbPluginV2TestCase(testlib_api.WebTestCase):
max_burst_kbps=None, dscp_mark=None, min_kbps=None,
direction=constants.EGRESS_DIRECTION,
expected_res_status=None, project_id=None,
- set_context=False, is_admin=False):
+ is_admin=False):
# Accepted rule types: "bandwidth_limit", "dscp_marking" and
# "minimum_bandwidth"
self.assertIn(rule_type, [qos_const.RULE_TYPE_BANDWIDTH_LIMIT,
@@ -615,11 +662,9 @@ class NeutronDbPluginV2TestCase(testlib_api.WebTestCase):
data[type_req][qos_const.MIN_KBPS] = min_kbps
data[type_req][qos_const.DIRECTION] = direction
route = 'qos/policies/%s/%s' % (qos_policy_id, type_req + 's')
- qos_rule_req = self.new_create_request(route, data, fmt)
- if set_context and project_id:
- # create a specific auth context for this request
- qos_rule_req.environ['neutron.context'] = context.Context(
- '', project_id, is_admin=is_admin)
+ qos_rule_req = self.new_create_request(route, data, fmt,
+ tenant_id=project_id,
+ as_admin=is_admin)
qos_rule_res = qos_rule_req.get_response(self.api)
if expected_res_status:
@@ -628,16 +673,14 @@ class NeutronDbPluginV2TestCase(testlib_api.WebTestCase):
def _create_qos_policy(self, fmt, qos_policy_name=None,
expected_res_status=None, project_id=None,
- set_context=False, is_admin=False):
+ is_admin=False):
project_id = project_id or self._tenant_id
name = qos_policy_name or uuidutils.generate_uuid()
data = {'policy': {'name': name,
'project_id': project_id}}
- qos_req = self.new_create_request('policies', data, fmt)
- if set_context and project_id:
- # create a specific auth context for this request
- qos_req.environ['neutron.context'] = context.Context(
- '', project_id, is_admin=is_admin)
+ qos_req = self.new_create_request('policies', data, fmt,
+ tenant_id=project_id,
+ as_admin=is_admin)
qos_policy_res = qos_req.get_response(self.api)
if expected_res_status:
@@ -653,54 +696,49 @@ class NeutronDbPluginV2TestCase(testlib_api.WebTestCase):
def _delete(self, collection, id,
expected_code=webob.exc.HTTPNoContent.code,
- neutron_context=None, headers=None, subresource=None,
- sub_id=None):
+ headers=None, subresource=None, sub_id=None,
+ tenant_id=None, as_admin=False):
req = self.new_delete_request(collection, id, headers=headers,
- subresource=subresource, sub_id=sub_id)
- if neutron_context:
- # create a specific auth context for this request
- req.environ['neutron.context'] = neutron_context
+ subresource=subresource, sub_id=sub_id,
+ tenant_id=tenant_id, as_admin=as_admin)
+
res = req.get_response(self._api_for_resource(collection))
self.assertEqual(expected_code, res.status_int)
- def _show_response(self, resource, id, neutron_context=None):
- req = self.new_show_request(resource, id)
- if neutron_context:
- # create a specific auth context for this request
- req.environ['neutron.context'] = neutron_context
- elif hasattr(self, 'tenant_id'):
- req.environ['neutron.context'] = context.Context('',
- self.tenant_id)
+ def _show_response(self, resource, id, tenant_id=None, as_admin=False):
+ req = self.new_show_request(resource, id,
+ tenant_id=tenant_id,
+ as_admin=as_admin)
return req.get_response(self._api_for_resource(resource))
def _show(self, resource, id,
expected_code=webob.exc.HTTPOk.code,
- neutron_context=None):
- res = self._show_response(resource, id,
- neutron_context=neutron_context)
+ tenant_id=None, as_admin=False):
+ res = self._show_response(resource, id, tenant_id=tenant_id,
+ as_admin=as_admin)
self.assertEqual(expected_code, res.status_int)
return self.deserialize(self.fmt, res)
def _update(self, resource, id, new_data,
- expected_code=webob.exc.HTTPOk.code,
- neutron_context=None, headers=None):
- req = self.new_update_request(resource, new_data, id, headers=headers)
- if neutron_context:
- # create a specific auth context for this request
- req.environ['neutron.context'] = neutron_context
+ expected_code=webob.exc.HTTPOk.code, headers=None,
+ request_tenant_id=None, as_admin=False):
+ req = self.new_update_request(
+ resource, new_data, id, headers=headers,
+ tenant_id=request_tenant_id, as_admin=as_admin)
res = req.get_response(self._api_for_resource(resource))
self.assertEqual(expected_code, res.status_int)
return self.deserialize(self.fmt, res)
- def _list(self, resource, fmt=None, neutron_context=None,
+ def _list(self, resource, fmt=None,
query_params=None, expected_code=webob.exc.HTTPOk.code,
- parent_id=None, subresource=None):
+ parent_id=None, subresource=None,
+ tenant_id=None, as_admin=False):
fmt = fmt or self.fmt
req = self.new_list_request(resource, fmt, query_params,
subresource=subresource,
- parent_id=parent_id)
- if neutron_context:
- req.environ['neutron.context'] = neutron_context
+ parent_id=parent_id,
+ tenant_id=tenant_id,
+ as_admin=as_admin)
res = req.get_response(self._api_for_resource(resource))
self.assertEqual(expected_code, res.status_int)
return self.deserialize(fmt, res)
@@ -730,13 +768,14 @@ class NeutronDbPluginV2TestCase(testlib_api.WebTestCase):
self.assertEqual(items[0]['name'], 'test_0')
self.assertEqual(items[1]['name'], 'test_1')
- def _test_list_resources(self, resource, items, neutron_context=None,
- query_params=None,
- expected_code=webob.exc.HTTPOk.code):
+ def _test_list_resources(self, resource, items, query_params=None,
+ expected_code=webob.exc.HTTPOk.code,
+ tenant_id=None, as_admin=False):
res = self._list('%ss' % resource,
- neutron_context=neutron_context,
query_params=query_params,
- expected_code=expected_code)
+ expected_code=expected_code,
+ tenant_id=tenant_id,
+ as_admin=as_admin)
if expected_code == webob.exc.HTTPOk.code:
resource = resource.replace('-', '_')
self.assertCountEqual([i['id'] for i in res['%ss' % resource]],
@@ -771,7 +810,7 @@ class NeutronDbPluginV2TestCase(testlib_api.WebTestCase):
tenant_id=None,
project_id=None,
service_types=None,
- set_context=False):
+ as_admin=False):
if project_id:
tenant_id = project_id
cidr = netaddr.IPNetwork(cidr) if cidr else None
@@ -780,7 +819,6 @@ class NeutronDbPluginV2TestCase(testlib_api.WebTestCase):
gateway_ip = netaddr.IPAddress(gateway_ip)
with optional_ctx(network, self.network,
- set_context=set_context,
tenant_id=tenant_id) as network_to_use:
subnet = self._make_subnet(fmt or self.fmt,
network_to_use,
@@ -797,7 +835,7 @@ class NeutronDbPluginV2TestCase(testlib_api.WebTestCase):
ipv6_ra_mode=ipv6_ra_mode,
ipv6_address_mode=ipv6_address_mode,
tenant_id=tenant_id,
- set_context=set_context)
+ as_admin=as_admin)
yield subnet
@contextlib.contextmanager
@@ -811,22 +849,22 @@ class NeutronDbPluginV2TestCase(testlib_api.WebTestCase):
yield subnetpool
@contextlib.contextmanager
- def port(self, subnet=None, fmt=None, set_context=False, project_id=None,
+ def port(self, subnet=None, fmt=None, project_id=None, is_admin=False,
**kwargs):
tenant_id = project_id if project_id else kwargs.pop(
'tenant_id', None)
with optional_ctx(
subnet, self.subnet,
- set_context=set_context, tenant_id=tenant_id) as subnet_to_use:
+ tenant_id=tenant_id) as subnet_to_use:
net_id = subnet_to_use['subnet']['network_id']
port = self._make_port(
- fmt or self.fmt, net_id,
- set_context=set_context, tenant_id=tenant_id,
- **kwargs)
+ fmt or self.fmt, net_id, tenant_id=tenant_id,
+ as_admin=is_admin, **kwargs)
yield port
def _test_list_with_sort(self, resource,
- items, sorts, resources=None, query_params=''):
+ items, sorts, resources=None, query_params='',
+ tenant_id=None, as_admin=False):
query_str = query_params
for key, direction in sorts:
query_str = query_str + "&sort_key=%s&sort_dir=%s" % (key,
@@ -834,7 +872,9 @@ class NeutronDbPluginV2TestCase(testlib_api.WebTestCase):
if not resources:
resources = '%ss' % resource
req = self.new_list_request(resources,
- params=query_str)
+ params=query_str,
+ tenant_id=tenant_id,
+ as_admin=as_admin)
api = self._api_for_resource(resources)
res = self.deserialize(self.fmt, req.get_response(api))
resource = resource.replace('-', '_')
@@ -846,13 +886,17 @@ class NeutronDbPluginV2TestCase(testlib_api.WebTestCase):
limit, expected_page_num,
resources=None,
query_params='',
- verify_key='id'):
+ verify_key='id',
+ tenant_id=None,
+ as_admin=False):
if not resources:
resources = '%ss' % resource
query_str = query_params + '&' if query_params else ''
query_str = query_str + ("limit=%s&sort_key=%s&"
"sort_dir=%s") % (limit, sort[0], sort[1])
- req = self.new_list_request(resources, params=query_str)
+ req = self.new_list_request(resources, params=query_str,
+ tenant_id=tenant_id, as_admin=as_admin)
+ neutron_ctx = req.environ['neutron.context']
items_res = []
page_num = 0
api = self._api_for_resource(resources)
@@ -871,6 +915,7 @@ class NeutronDbPluginV2TestCase(testlib_api.WebTestCase):
content_type = 'application/%s' % self.fmt
req = testlib_api.create_request(link['href'],
'', content_type)
+ req.environ['neutron.context'] = neutron_ctx
self.assertEqual(len(res[resources]),
limit)
self.assertEqual(expected_page_num, page_num)
@@ -880,7 +925,9 @@ class NeutronDbPluginV2TestCase(testlib_api.WebTestCase):
def _test_list_with_pagination_reverse(self, resource, items, sort,
limit, expected_page_num,
resources=None,
- query_params=''):
+ query_params='',
+ tenant_id=None,
+ as_admin=False):
if not resources:
resources = '%ss' % resource
resource = resource.replace('-', '_')
@@ -891,7 +938,9 @@ class NeutronDbPluginV2TestCase(testlib_api.WebTestCase):
"sort_key=%s&sort_dir=%s&"
"marker=%s") % (limit, sort[0], sort[1],
marker)
- req = self.new_list_request(resources, params=query_str)
+ req = self.new_list_request(resources, params=query_str,
+ tenant_id=tenant_id, as_admin=as_admin)
+ neutron_ctx = req.environ['neutron.context']
item_res = [items[-1][resource]]
page_num = 0
resources = resources.replace('-', '_')
@@ -909,6 +958,7 @@ class NeutronDbPluginV2TestCase(testlib_api.WebTestCase):
content_type = 'application/%s' % self.fmt
req = testlib_api.create_request(link['href'],
'', content_type)
+ req.environ['neutron.context'] = neutron_ctx
self.assertEqual(len(res[resources]),
limit)
self.assertEqual(expected_page_num, page_num)
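Both pagination helpers now save the context built for the first request and re-attach it to every follow-up page fetched via the next/previous links, because those follow-ups are constructed from scratch. A standalone illustration of why (webob is used here purely for the demo):

import webob

first = webob.Request.blank('/v2.0/ports?limit=2')
first.environ['neutron.context'] = object()  # stand-in for a Context

# A "next page" request built from the returned link starts with a
# clean environ, so the tenant/admin context would be lost without
# copying it over explicitly.
nxt = webob.Request.blank('/v2.0/ports?limit=2&marker=abc')
assert 'neutron.context' not in nxt.environ
nxt.environ['neutron.context'] = first.environ['neutron.context']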
@@ -1001,10 +1051,9 @@ class TestV2HTTPResponse(NeutronDbPluginV2TestCase):
self._create_network(self.fmt,
'some_net',
True,
- tenant_id=tenant_id,
- set_context=True)
- req = self.new_list_request('networks', params="fields=name")
- req.environ['neutron.context'] = context.Context('', tenant_id)
+ tenant_id=tenant_id)
+ req = self.new_list_request(
+ 'networks', params="fields=name", tenant_id=tenant_id)
res = req.get_response(self.api)
self._check_list_with_fields(res, 'name')
@@ -1020,10 +1069,9 @@ class TestV2HTTPResponse(NeutronDbPluginV2TestCase):
self._create_network(self.fmt,
'some_net',
True,
- tenant_id=tenant_id,
- set_context=True)
- req = self.new_list_request('networks', params="fields=tenant_id")
- req.environ['neutron.context'] = context.Context('', tenant_id)
+ tenant_id=tenant_id)
+ req = self.new_list_request(
+ 'networks', params="fields=tenant_id", tenant_id=tenant_id)
res = req.get_response(self.api)
self._check_list_with_fields(res, 'tenant_id')
@@ -1086,7 +1134,7 @@ class TestPortsV2(NeutronDbPluginV2TestCase):
def test_create_port_json(self):
keys = [('admin_state_up', True), ('status', self.port_create_status)]
- with self.network(shared=True) as network:
+ with self.network(shared=True, as_admin=True) as network:
with self.subnet(network=network) as subnet:
with self.port(name='myname', subnet=subnet) as port:
for k, v in keys:
@@ -1108,7 +1156,7 @@ class TestPortsV2(NeutronDbPluginV2TestCase):
device_id='fake_device',
device_owner='fake_owner',
fixed_ips=[],
- set_context=False)
+ is_admin=True)
def test_create_port_bad_tenant(self):
with self.network() as network:
@@ -1118,17 +1166,15 @@ class TestPortsV2(NeutronDbPluginV2TestCase):
tenant_id='bad_tenant_id',
device_id='fake_device',
device_owner='fake_owner',
- fixed_ips=[],
- set_context=True)
+ fixed_ips=[])
def test_create_port_public_network(self):
keys = [('admin_state_up', True), ('status', self.port_create_status)]
- with self.network(shared=True) as network:
+ with self.network(shared=True, as_admin=True) as network:
port_res = self._create_port(self.fmt,
network['network']['id'],
webob.exc.HTTPCreated.code,
- tenant_id='another_tenant',
- set_context=True)
+ tenant_id='another_tenant')
port = self.deserialize(self.fmt, port_res)
for k, v in keys:
self.assertEqual(port['port'][k], v)
@@ -1147,11 +1193,10 @@ class TestPortsV2(NeutronDbPluginV2TestCase):
webob.exc.HTTPClientError.code,
tenant_id='tenant_id',
fixed_ips=[],
- set_context=False,
**kwargs)
def test_create_port_public_network_with_ip(self):
- with self.network(shared=True) as network:
+ with self.network(shared=True, as_admin=True) as network:
ip_net = netaddr.IPNetwork('10.0.0.0/24')
with self.subnet(network=network, cidr=str(ip_net)):
keys = [('admin_state_up', True),
@@ -1159,8 +1204,7 @@ class TestPortsV2(NeutronDbPluginV2TestCase):
port_res = self._create_port(self.fmt,
network['network']['id'],
webob.exc.HTTPCreated.code,
- tenant_id='another_tenant',
- set_context=True)
+ tenant_id='another_tenant')
port = self.deserialize(self.fmt, port_res)
for k, v in keys:
self.assertEqual(port['port'][k], v)
@@ -1170,7 +1214,7 @@ class TestPortsV2(NeutronDbPluginV2TestCase):
self._delete('ports', port['port']['id'])
def test_create_port_anticipating_allocation(self):
- with self.network(shared=True) as network:
+ with self.network(shared=True, as_admin=True) as network:
with self.subnet(network=network, cidr='10.0.0.0/24') as subnet:
fixed_ips = [{'subnet_id': subnet['subnet']['id']},
{'subnet_id': subnet['subnet']['id'],
@@ -1181,14 +1225,13 @@ class TestPortsV2(NeutronDbPluginV2TestCase):
def test_create_port_public_network_with_invalid_ip_no_subnet_id(self,
expected_error='InvalidIpForNetwork'):
- with self.network(shared=True) as network:
+ with self.network(shared=True, as_admin=True) as network:
with self.subnet(network=network, cidr='10.0.0.0/24'):
ips = [{'ip_address': '1.1.1.1'}]
res = self._create_port(self.fmt,
network['network']['id'],
webob.exc.HTTPBadRequest.code,
- fixed_ips=ips,
- set_context=True)
+ fixed_ips=ips)
data = self.deserialize(self.fmt, res)
msg = str(lib_exc.InvalidIpForNetwork(ip_address='1.1.1.1'))
self.assertEqual(expected_error, data['NeutronError']['type'])
@@ -1196,15 +1239,14 @@ class TestPortsV2(NeutronDbPluginV2TestCase):
def test_create_port_public_network_with_invalid_ip_and_subnet_id(self,
expected_error='InvalidIpForSubnet'):
- with self.network(shared=True) as network:
+ with self.network(shared=True, as_admin=True) as network:
with self.subnet(network=network, cidr='10.0.0.0/24') as subnet:
ips = [{'subnet_id': subnet['subnet']['id'],
'ip_address': '1.1.1.1'}]
res = self._create_port(self.fmt,
network['network']['id'],
webob.exc.HTTPBadRequest.code,
- fixed_ips=ips,
- set_context=True)
+ fixed_ips=ips)
data = self.deserialize(self.fmt, res)
msg = str(lib_exc.InvalidIpForSubnet(ip_address='1.1.1.1'))
self.assertEqual(expected_error, data['NeutronError']['type'])
@@ -1342,29 +1384,29 @@ fixed_ips=ip_address%%3D%s&fixed_ips=ip_address%%3D%s&fixed_ips=subnet_id%%3D%s
self._test_list_ports_filtered_by_fixed_ip(limit=500)
def test_list_ports_public_network(self):
- with self.network(shared=True) as network:
+ with self.network(shared=True, as_admin=True) as network:
with self.subnet(network) as subnet:
with self.port(subnet, tenant_id='tenant_1') as port1,\
self.port(subnet, tenant_id='tenant_2') as port2:
# Admin request - must return both ports
- self._test_list_resources('port', [port1, port2])
+ self._test_list_resources(
+ 'port', [port1, port2], as_admin=True)
# Tenant_1 request - must return single port
- n_context = context.Context('', 'tenant_1')
self._test_list_resources('port', [port1],
- neutron_context=n_context)
+ tenant_id='tenant_1')
# Tenant_2 request - must return single port
- n_context = context.Context('', 'tenant_2')
self._test_list_resources('port', [port2],
- neutron_context=n_context)
+ tenant_id='tenant_2')
def test_list_ports_for_network_owner(self):
with self.network(tenant_id='tenant_1') as network:
- with self.subnet(network) as subnet:
- with self.port(subnet, tenant_id='tenant_1') as port1,\
- self.port(subnet, tenant_id='tenant_2') as port2:
+ with self.subnet(network, tenant_id='tenant_1') as subnet:
+ with self.port(subnet, project_id='tenant_1') as port1,\
+ self.port(subnet, project_id='tenant_2',
+ is_admin=True) as port2:
# network owner request, should return all ports
port_res = self._list_ports(
- 'json', set_context=True, tenant_id='tenant_1')
+ 'json', tenant_id='tenant_1')
port_list = self.deserialize('json', port_res)['ports']
port_ids = [p['id'] for p in port_list]
self.assertEqual(2, len(port_list))
@@ -1373,7 +1415,7 @@ fixed_ips=ip_address%%3D%s&fixed_ips=ip_address%%3D%s&fixed_ips=subnet_id%%3D%s
# another tenant request, only returns ports belonging to it
port_res = self._list_ports(
- 'json', set_context=True, tenant_id='tenant_2')
+ 'json', tenant_id='tenant_2')
port_list = self.deserialize('json', port_res)['ports']
port_ids = [p['id'] for p in port_list]
self.assertEqual(1, len(port_list))
@@ -1467,12 +1509,11 @@ fixed_ips=ip_address%%3D%s&fixed_ips=ip_address%%3D%s&fixed_ips=subnet_id%%3D%s
expected_code=webob.exc.HTTPNotFound.code)
def test_delete_port_public_network(self):
- with self.network(shared=True) as network:
+ with self.network(shared=True, as_admin=True) as network:
port_res = self._create_port(self.fmt,
network['network']['id'],
webob.exc.HTTPCreated.code,
- tenant_id='another_tenant',
- set_context=True)
+ tenant_id='another_tenant')
port = self.deserialize(self.fmt, port_res)
self._delete('ports', port['port']['id'])
@@ -1482,15 +1523,15 @@ fixed_ips=ip_address%%3D%s&fixed_ips=ip_address%%3D%s&fixed_ips=subnet_id%%3D%s
def test_delete_port_by_network_owner(self):
with self.network(tenant_id='tenant_1') as network:
with self.subnet(network) as subnet:
- with self.port(subnet, tenant_id='tenant_2') as port:
+ with self.port(subnet, tenant_id='tenant_2',
+ is_admin=True) as port:
self._delete(
- 'ports', port['port']['id'],
- neutron_context=context.Context('', 'tenant_1'))
+ 'ports', port['port']['id'], tenant_id='tenant_1')
self._show('ports', port['port']['id'],
expected_code=webob.exc.HTTPNotFound.code)
def test_update_port_with_stale_subnet(self):
- with self.network(shared=True) as network:
+ with self.network(shared=True, as_admin=True) as network:
port = self._make_port(self.fmt, network['network']['id'])
subnet = self._make_subnet(self.fmt, network,
'10.0.0.1', '10.0.0.0/24')
@@ -1528,7 +1569,8 @@ fixed_ips=ip_address%%3D%s&fixed_ips=ip_address%%3D%s&fixed_ips=subnet_id%%3D%s
data = {'port': {'mac_address': new_mac}}
if updated_fixed_ips:
data['port']['fixed_ips'] = updated_fixed_ips
- req = self.new_update_request('ports', data, port['id'])
+ req = self.new_update_request(
+ 'ports', data, port['id'], as_admin=True)
return req.get_response(self.api), new_mac
def _verify_ips_after_mac_change(self, orig_port, new_port):
@@ -1553,6 +1595,7 @@ fixed_ips=ip_address%%3D%s&fixed_ips=ip_address%%3D%s&fixed_ips=subnet_id%%3D%s
host_arg = host_arg or {}
arg_list = arg_list or []
with self.port(device_owner=device_owner, subnet=subnet,
+ is_admin=True,
arg_list=arg_list, **host_arg) as port:
self.assertIn('mac_address', port['port'])
res, new_mac = self.update_port_mac(
@@ -1634,7 +1677,8 @@ fixed_ips=ip_address%%3D%s&fixed_ips=ip_address%%3D%s&fixed_ips=subnet_id%%3D%s
new_mac = port2['port']['mac_address']
data = {'port': {'mac_address': new_mac}}
req = self.new_update_request('ports', data,
- port['port']['id'])
+ port['port']['id'],
+ as_admin=True)
res = req.get_response(self.api)
self.assertEqual(webob.exc.HTTPConflict.code,
res.status_int)
@@ -1647,16 +1691,14 @@ fixed_ips=ip_address%%3D%s&fixed_ips=ip_address%%3D%s&fixed_ips=subnet_id%%3D%s
def test_update_port_not_admin(self):
res = self._create_network(self.fmt, 'net1', True,
- tenant_id='not_admin',
- set_context=True)
+ tenant_id='not_admin')
net1 = self.deserialize(self.fmt, res)
res = self._create_port(self.fmt, net1['network']['id'],
- tenant_id='not_admin', set_context=True)
+ tenant_id='not_admin')
port = self.deserialize(self.fmt, res)
data = {'port': {'admin_state_up': False}}
- neutron_context = context.Context('', 'not_admin')
port = self._update('ports', port['port']['id'], data,
- neutron_context=neutron_context)
+ request_tenant_id='not_admin')
self.assertFalse(port['port']['admin_state_up'])
def test_update_device_id_unchanged(self):
@@ -2746,7 +2788,7 @@ class TestNetworksV2(NeutronDbPluginV2TestCase):
name = 'public_net'
keys = [('subnets', []), ('name', name), ('admin_state_up', True),
('status', self.net_create_status), ('shared', True)]
- with self.network(name=name, shared=True) as net:
+ with self.network(name=name, shared=True, as_admin=True) as net:
for k, v in keys:
self.assertEqual(net['network'][k], v)
@@ -2756,8 +2798,7 @@ class TestNetworksV2(NeutronDbPluginV2TestCase):
webob.exc.HTTPClientError) as ctx_manager:
with self.network(name=name,
shared=True,
- tenant_id="another_tenant",
- set_context=True):
+ tenant_id="another_tenant"):
pass
self.assertEqual(webob.exc.HTTPForbidden.code,
ctx_manager.exception.code)
@@ -2773,12 +2814,12 @@ class TestNetworksV2(NeutronDbPluginV2TestCase):
res['network']['name'])
def test_update_shared_network_noadmin_returns_403(self):
- with self.network(shared=True) as network:
+ with self.network(shared=True, as_admin=True) as network:
data = {'network': {'name': 'a_brand_new_name'}}
req = self.new_update_request('networks',
data,
- network['network']['id'])
- req.environ['neutron.context'] = context.Context('', 'somebody')
+ network['network']['id'],
+ tenant_id='other-tenant')
res = req.get_response(self.api)
self.assertEqual(403, res.status_int)
@@ -2787,7 +2828,8 @@ class TestNetworksV2(NeutronDbPluginV2TestCase):
data = {'network': {'shared': True}}
req = self.new_update_request('networks',
data,
- network['network']['id'])
+ network['network']['id'],
+ as_admin=True)
res = self.deserialize(self.fmt, req.get_response(self.api))
self.assertTrue(res['network']['shared'])
@@ -2808,7 +2850,8 @@ class TestNetworksV2(NeutronDbPluginV2TestCase):
data = {'network': {'shared': True}}
req = self.new_update_request('networks',
data,
- network['network']['id'])
+ network['network']['id'],
+ as_admin=True)
res = self.deserialize(self.fmt, req.get_response(self.api))
self.assertTrue(res['network']['shared'])
# must query db to see whether subnet's shared attribute
@@ -2819,39 +2862,38 @@ class TestNetworksV2(NeutronDbPluginV2TestCase):
self.assertTrue(subnet_db['shared'])
def test_update_network_set_not_shared_single_tenant(self):
- with self.network(shared=True) as network:
+ with self.network(shared=True, as_admin=True) as network:
res1 = self._create_port(self.fmt,
network['network']['id'],
webob.exc.HTTPCreated.code,
- tenant_id=network['network']['tenant_id'],
- set_context=True)
+ tenant_id=network['network']['tenant_id'])
data = {'network': {'shared': False}}
req = self.new_update_request('networks',
data,
- network['network']['id'])
+ network['network']['id'],
+ as_admin=True)
res = self.deserialize(self.fmt, req.get_response(self.api))
self.assertFalse(res['network']['shared'])
port1 = self.deserialize(self.fmt, res1)
self._delete('ports', port1['port']['id'])
- def test_update_network_set_not_shared_other_tenant_returns_409(self):
- with self.network(shared=True) as network:
+ def test_update_network_set_not_shared_other_tenant_returns_403(self):
+ with self.network(shared=True, as_admin=True) as network:
res1 = self._create_port(self.fmt,
network['network']['id'],
webob.exc.HTTPCreated.code,
- tenant_id='somebody_else',
- set_context=True)
+ tenant_id='somebody_else')
data = {'network': {'shared': False}}
req = self.new_update_request('networks',
data,
network['network']['id'])
- self.assertEqual(webob.exc.HTTPConflict.code,
+ self.assertEqual(webob.exc.HTTPForbidden.code,
req.get_response(self.api).status_int)
port1 = self.deserialize(self.fmt, res1)
self._delete('ports', port1['port']['id'])
def test_update_network_set_not_shared_other_tenant_access_via_rbac(self):
- with self.network(shared=True) as network:
+ with self.network(shared=True, as_admin=True) as network:
ctx = context.get_admin_context()
with db_api.CONTEXT_WRITER.using(ctx):
network_obj.NetworkRBAC(
@@ -2867,33 +2909,32 @@ class TestNetworksV2(NeutronDbPluginV2TestCase):
res1 = self._create_port(self.fmt,
network['network']['id'],
webob.exc.HTTPCreated.code,
- tenant_id='somebody_else',
- set_context=True)
+ tenant_id='somebody_else')
data = {'network': {'shared': False}}
req = self.new_update_request('networks',
data,
- network['network']['id'])
+ network['network']['id'],
+ as_admin=True)
res = self.deserialize(self.fmt, req.get_response(self.api))
self.assertFalse(res['network']['shared'])
port1 = self.deserialize(self.fmt, res1)
self._delete('ports', port1['port']['id'])
def test_update_network_set_not_shared_multi_tenants_returns_409(self):
- with self.network(shared=True) as network:
+ with self.network(shared=True, as_admin=True) as network:
res1 = self._create_port(self.fmt,
network['network']['id'],
webob.exc.HTTPCreated.code,
- tenant_id='somebody_else',
- set_context=True)
+ tenant_id='somebody_else')
res2 = self._create_port(self.fmt,
network['network']['id'],
webob.exc.HTTPCreated.code,
- tenant_id=network['network']['tenant_id'],
- set_context=True)
+ tenant_id=network['network']['tenant_id'])
data = {'network': {'shared': False}}
req = self.new_update_request('networks',
data,
- network['network']['id'])
+ network['network']['id'],
+ as_admin=True)
self.assertEqual(webob.exc.HTTPConflict.code,
req.get_response(self.api).status_int)
port1 = self.deserialize(self.fmt, res1)
@@ -2902,22 +2943,21 @@ class TestNetworksV2(NeutronDbPluginV2TestCase):
self._delete('ports', port2['port']['id'])
def test_update_network_set_not_shared_multi_tenants2_returns_409(self):
- with self.network(shared=True) as network:
+ with self.network(shared=True, as_admin=True) as network:
res1 = self._create_port(self.fmt,
network['network']['id'],
webob.exc.HTTPCreated.code,
- tenant_id='somebody_else',
- set_context=True)
+ tenant_id='somebody_else')
self._create_subnet(self.fmt,
network['network']['id'],
'10.0.0.0/24',
webob.exc.HTTPCreated.code,
- tenant_id=network['network']['tenant_id'],
- set_context=True)
+ tenant_id=network['network']['tenant_id'])
data = {'network': {'shared': False}}
req = self.new_update_request('networks',
data,
- network['network']['id'])
+ network['network']['id'],
+ as_admin=True)
self.assertEqual(webob.exc.HTTPConflict.code,
req.get_response(self.api).status_int)
@@ -2967,7 +3007,8 @@ class TestNetworksV2(NeutronDbPluginV2TestCase):
{'network': {'name': 'n2',
'tenant_id': 't1'}}]
- res = self._create_bulk_from_list(self.fmt, 'network', networks)
+ res = self._create_bulk_from_list(self.fmt, 'network', networks,
+ as_admin=True)
self.assertEqual(webob.exc.HTTPCreated.code, res.status_int)
def test_create_networks_bulk_tenants_and_quotas_fail(self):
@@ -2987,7 +3028,8 @@ class TestNetworksV2(NeutronDbPluginV2TestCase):
{'network': {'name': 'n2',
'tenant_id': 't1'}}]
- res = self._create_bulk_from_list(self.fmt, 'network', networks)
+ res = self._create_bulk_from_list(self.fmt, 'network', networks,
+ as_admin=True)
self.assertEqual(webob.exc.HTTPConflict.code, res.status_int)
def test_create_networks_bulk_emulated(self):
@@ -3136,9 +3178,9 @@ class TestNetworksV2(NeutronDbPluginV2TestCase):
'neutron.api.v2.base.Controller._get_pagination_helper',
new=_fake_get_pagination_helper)
helper_patcher.start()
- with self.network(name='net1', shared=True) as net1,\
+ with self.network(name='net1', shared=True, as_admin=True) as net1,\
self.network(name='net2', shared=False) as net2,\
- self.network(name='net3', shared=True) as net3:
+ self.network(name='net3', shared=True, as_admin=True) as net3:
self._test_list_with_pagination('network',
(net1, net2, net3),
('name', 'asc'), 2, 2,
@@ -3215,14 +3257,13 @@ class TestNetworksV2(NeutronDbPluginV2TestCase):
tenant_id='tenant1') as net1,\
self.network(shared=True,
name='net2',
+ as_admin=True,
tenant_id='another_tenant') as net2,\
self.network(shared=False,
name='net3',
tenant_id='another_tenant'):
- ctx = context.Context(user_id='non_admin',
- tenant_id='tenant1',
- is_admin=False)
- self._test_list_resources('network', (net1, net2), ctx)
+ self._test_list_resources('network', (net1, net2),
+ tenant_id='tenant1')
def test_show_network(self):
with self.network(name='net1') as net:
@@ -3760,8 +3801,7 @@ class TestSubnetsV2(NeutronDbPluginV2TestCase):
ip_version=constants.IP_VERSION_4,
tenant_id='bad_tenant_id',
gateway_ip='10.0.2.1',
- device_owner='fake_owner',
- set_context=True)
+ device_owner='fake_owner')
def test_create_subnet_as_admin(self):
with self.network() as network:
@@ -3773,7 +3813,7 @@ class TestSubnetsV2(NeutronDbPluginV2TestCase):
tenant_id='bad_tenant_id',
gateway_ip='10.0.2.1',
device_owner='fake_owner',
- set_context=False)
+ as_admin=True)
def test_create_subnet_nonzero_cidr(self):
# Pass None as gateway_ip to prevent ip auto allocation for gw
@@ -4464,7 +4504,8 @@ class TestSubnetsV2(NeutronDbPluginV2TestCase):
'tenant_id': 'tenant_id',
'device_id': 'fake_device',
'device_owner': constants.DEVICE_OWNER_ROUTER_GW}
- res = self._create_port(self.fmt, net_id=net_id, **kwargs)
+ res = self._create_port(self.fmt, net_id=net_id,
+ is_admin=True, **kwargs)
self.assertEqual(webob.exc.HTTPCreated.code, res.status_int)
def test_create_subnet_ipv6_first_ip_owned_by_non_router(self):
@@ -4480,7 +4521,8 @@ class TestSubnetsV2(NeutronDbPluginV2TestCase):
'tenant_id': 'tenant_id',
'device_id': 'fake_device',
'device_owner': 'fake_owner'}
- res = self._create_port(self.fmt, net_id=net_id, **kwargs)
+ res = self._create_port(self.fmt, net_id=net_id,
+ is_admin=True, **kwargs)
self.assertEqual(webob.exc.HTTPClientError.code,
res.status_int)
@@ -4804,7 +4846,7 @@ class TestSubnetsV2(NeutronDbPluginV2TestCase):
ra_addr_mode=constants.DHCPV6_STATELESS)
def test_update_subnet_shared_returns_400(self):
- with self.network(shared=True) as network:
+ with self.network(shared=True, as_admin=True) as network:
with self.subnet(network=network) as subnet:
data = {'subnet': {'shared': True}}
req = self.new_update_request('subnets', data,
@@ -5294,7 +5336,8 @@ class TestSubnetsV2(NeutronDbPluginV2TestCase):
with self.subnet(network=network,
gateway_ip='10.0.0.1',
cidr='10.0.0.0/24',
- tenant_id=project_id),\
+ tenant_id=project_id,
+ as_admin=True),\
self.subnet(network=network,
gateway_ip='10.0.1.1',
cidr='10.0.1.0/24'),\
@@ -5351,7 +5394,7 @@ class TestSubnetsV2(NeutronDbPluginV2TestCase):
self._test_list_resources('subnet', subnets)
def test_list_subnets_shared(self):
- with self.network(shared=True) as network:
+ with self.network(shared=True, as_admin=True) as network:
with self.subnet(network=network, cidr='10.0.0.0/24') as subnet:
with self.subnet(cidr='10.0.1.0/24') as priv_subnet:
# normal user should see only 1 subnet
@@ -6117,8 +6160,7 @@ class TestSubnetPoolsV2(NeutronDbPluginV2TestCase):
min_prefixlen='24',
shared=True)
admin_res = self._list('subnetpools')
- mortal_res = self._list('subnetpools',
- neutron_context=context.Context('', 'not-the-owner'))
+ mortal_res = self._list('subnetpools', tenant_id='not-the-owner')
self.assertEqual(1, len(admin_res['subnetpools']))
self.assertEqual(1, len(mortal_res['subnetpools']))
@@ -6130,8 +6172,7 @@ class TestSubnetPoolsV2(NeutronDbPluginV2TestCase):
min_prefixlen='24',
shared=False)
admin_res = self._list('subnetpools')
- mortal_res = self._list('subnetpools',
- neutron_context=context.Context('', 'not-the-owner'))
+ mortal_res = self._list('subnetpools', tenant_id='not-the-owner')
self.assertEqual(1, len(admin_res['subnetpools']))
self.assertEqual(0, len(mortal_res['subnetpools']))
@@ -7066,7 +7107,8 @@ class NeutronDbPluginV2AsMixinTestCase(NeutronDbPluginV2TestCase,
super(NeutronDbPluginV2AsMixinTestCase, self).setUp()
self.plugin = importutils.import_object(DB_PLUGIN_KLASS)
self.context = context.get_admin_context()
- self.net_data = {'network': {'id': 'fake-id',
+ self.net_id = uuidutils.generate_uuid()
+ self.net_data = {'network': {'id': self.net_id,
'name': 'net1',
'admin_state_up': True,
'tenant_id': TEST_TENANT_ID,
@@ -7075,7 +7117,7 @@ class NeutronDbPluginV2AsMixinTestCase(NeutronDbPluginV2TestCase,
def test_create_network_with_default_status(self):
net = self.plugin.create_network(self.context, self.net_data)
default_net_create_status = 'ACTIVE'
- expected = [('id', 'fake-id'), ('name', 'net1'),
+ expected = [('id', self.net_id), ('name', 'net1'),
('admin_state_up', True), ('tenant_id', TEST_TENANT_ID),
('shared', False), ('status', default_net_create_status)]
for k, v in expected:
@@ -7113,6 +7155,81 @@ class NeutronDbPluginV2AsMixinTestCase(NeutronDbPluginV2TestCase,
new_subnetpool_id,
None)
+ def test_create_subnet_invalid_network_mtu_ipv4_returns_409(self):
+ self.net_data['network']['mtu'] = common_constants.IPV4_MIN_MTU - 1
+ net = self.plugin.create_network(self.context, self.net_data)
+ self._create_subnet(self.fmt,
+ net['id'],
+ '10.0.0.0/24',
+ webob.exc.HTTPConflict.code)
+
+ def test_create_subnet_invalid_network_mtu_ipv6_returns_409(self):
+ self.net_data['network']['mtu'] = constants.IPV6_MIN_MTU - 1
+ net = self.plugin.create_network(self.context, self.net_data)
+ self._create_subnet(self.fmt,
+ net['id'],
+ '2001:db8:0:1::/64',
+ webob.exc.HTTPConflict.code,
+ ip_version=constants.IP_VERSION_6)
+
+ def test_update_network_invalid_mtu(self):
+ self.net_data['network']['mtu'] = 1500
+ net = self.plugin.create_network(self.context, self.net_data)
+
+ # This should succeed with no subnets present
+ self.net_data['network']['mtu'] = common_constants.IPV4_MIN_MTU - 1
+ self.plugin.update_network(self.context, net['id'], self.net_data)
+
+ # reset mtu
+ self.net_data['network']['mtu'] = 1500
+ self.plugin.update_network(self.context, net['id'], self.net_data)
+
+ self._create_subnet(self.fmt,
+ net['id'],
+ '10.0.0.0/24',
+ ip_version=constants.IP_VERSION_4)
+
+ # These should succeed with just an IPv4 subnet present
+ self.net_data['network']['mtu'] = constants.IPV6_MIN_MTU
+ self.plugin.update_network(self.context, net['id'], self.net_data)
+ self.net_data['network']['mtu'] = constants.IPV6_MIN_MTU - 1
+ self.plugin.update_network(self.context, net['id'], self.net_data)
+ self.net_data['network']['mtu'] = common_constants.IPV4_MIN_MTU
+ self.plugin.update_network(self.context, net['id'], self.net_data)
+
+ # This should fail with any subnets present
+ self.net_data['network']['mtu'] = common_constants.IPV4_MIN_MTU - 1
+ with testlib_api.ExpectedException(mtu_exc.NetworkMTUSubnetConflict):
+ self.plugin.update_network(self.context, net['id'], self.net_data)
+
+ def test_update_network_invalid_mtu_ipv4_ipv6(self):
+ self.net_data['network']['mtu'] = 1500
+ net = self.plugin.create_network(self.context, self.net_data)
+
+ self._create_subnet(self.fmt,
+ net['id'],
+ '10.0.0.0/24',
+ ip_version=constants.IP_VERSION_4)
+ self._create_subnet(self.fmt,
+ net['id'],
+ '2001:db8:0:1::/64',
+ ip_version=constants.IP_VERSION_6)
+
+ # This should succeed with both subnets present
+ self.net_data['network']['mtu'] = constants.IPV6_MIN_MTU
+ self.plugin.update_network(self.context, net['id'], self.net_data)
+
+ # These should all fail with both subnets present
+ with testlib_api.ExpectedException(mtu_exc.NetworkMTUSubnetConflict):
+ self.net_data['network']['mtu'] = constants.IPV6_MIN_MTU - 1
+ self.plugin.update_network(self.context, net['id'], self.net_data)
+ with testlib_api.ExpectedException(mtu_exc.NetworkMTUSubnetConflict):
+ self.net_data['network']['mtu'] = common_constants.IPV4_MIN_MTU
+ self.plugin.update_network(self.context, net['id'], self.net_data)
+ with testlib_api.ExpectedException(mtu_exc.NetworkMTUSubnetConflict):
+ self.net_data['network']['mtu'] = common_constants.IPV4_MIN_MTU - 1
+ self.plugin.update_network(self.context, net['id'], self.net_data)
+
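The new MTU tests hinge on two protocol floors: 68 bytes for IPv4 (RFC 791) and 1280 for IPv6 (RFC 8200). A minimal sketch of the check they exercise; the patch's real validator raises mtu_exc.NetworkMTUSubnetConflict and its exact shape may differ:

IPV4_MIN_MTU = 68     # RFC 791
IPV6_MIN_MTU = 1280   # RFC 8200

def validate_network_mtu(mtu, subnet_ip_versions):
    # Only IP versions that actually have a subnet on the network
    # constrain the MTU, which is why lowering it below the floor
    # succeeds until the first subnet is created.
    if 6 in subnet_ip_versions and mtu < IPV6_MIN_MTU:
        raise ValueError('network MTU %d < IPv6 minimum %d'
                         % (mtu, IPV6_MIN_MTU))
    if 4 in subnet_ip_versions and mtu < IPV4_MIN_MTU:
        raise ValueError('network MTU %d < IPv4 minimum %d'
                         % (mtu, IPV4_MIN_MTU))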
class TestNetworks(testlib_api.SqlTestCase):
def setUp(self):
@@ -7197,10 +7314,10 @@ class DbOperationBoundMixin(object):
def get_api_kwargs(self):
context_ = self._get_context()
- return {'set_context': True, 'tenant_id': context_.project_id}
+ return {'tenant_id': context_.project_id}
def _list_and_record_queries(self, resource, query_params=None):
- kwargs = {'neutron_context': self._get_context()}
+ kwargs = {}
if query_params:
kwargs['query_params'] = query_params
# list once before tracking to flush out any quota recalculations.
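Across the whole patch, set_context/neutron_context pairs collapse into tenant_id/as_admin kwargs; presumably these funnel into a single environ-building helper such as the _get_neutron_env used by test_flavors further down. A hedged sketch of what that helper plausibly does (the user_id and default tenant are illustrative, not the patch's literal values):

from neutron_lib import context

def _get_neutron_env(tenant_id='test-tenant', as_admin=False):
    # Build the context once and hand it to the WSGI layer; webtest
    # callers pass this dict via extra_environ, webob callers assign
    # req.environ['neutron.context'] directly.
    ctx = context.Context(user_id='user', tenant_id=tenant_id,
                          is_admin=as_admin)
    return {'neutron.context': ctx}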
diff --git a/neutron/tests/unit/db/test_dvr_mac_db.py b/neutron/tests/unit/db/test_dvr_mac_db.py
index 80d650a7d8..6f87672712 100644
--- a/neutron/tests/unit/db/test_dvr_mac_db.py
+++ b/neutron/tests/unit/db/test_dvr_mac_db.py
@@ -188,22 +188,28 @@ class DvrDbMixinTestCase(test_plugin.Ml2PluginV2TestCase):
arg_list = (portbindings.HOST_ID,)
with self.subnet() as subnet,\
self.port(subnet=subnet,
+ is_admin=True,
device_owner=constants.DEVICE_OWNER_COMPUTE_PREFIX,
arg_list=arg_list, **host_arg) as compute_port,\
self.port(subnet=subnet,
device_owner=constants.DEVICE_OWNER_DHCP,
+ is_admin=True,
arg_list=arg_list, **host_arg) as dhcp_port,\
self.port(subnet=subnet,
device_owner=constants.DEVICE_OWNER_LOADBALANCER,
+ is_admin=True,
arg_list=arg_list, **host_arg) as lb_port,\
self.port(device_owner=constants.DEVICE_OWNER_COMPUTE_PREFIX,
+ is_admin=True,
arg_list=arg_list, **host_arg),\
self.port(subnet=subnet,
device_owner=constants.DEVICE_OWNER_COMPUTE_PREFIX,
+ is_admin=True,
arg_list=arg_list,
**{portbindings.HOST_ID: 'other'}),\
self.port(subnet=subnet,
device_owner=constants.DEVICE_OWNER_NETWORK_PREFIX,
+ is_admin=True,
arg_list=arg_list, **host_arg):
expected_ids = [port['port']['id'] for port in
[compute_port, dhcp_port, lb_port]]
diff --git a/neutron/tests/unit/db/test_ipam_backend_mixin.py b/neutron/tests/unit/db/test_ipam_backend_mixin.py
index e81a908ec2..fa2872a317 100644
--- a/neutron/tests/unit/db/test_ipam_backend_mixin.py
+++ b/neutron/tests/unit/db/test_ipam_backend_mixin.py
@@ -373,7 +373,8 @@ class TestPortUpdateIpam(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
net_id=network['network']['id'],
tenant_id=network['network']['tenant_id'],
arg_list=(portbindings.HOST_ID,),
- **{portbindings.HOST_ID: 'fakehost'})
+ **{portbindings.HOST_ID: 'fakehost'},
+ is_admin=True)
port = self.deserialize(self.fmt, response)
# Create the subnet and try to update the port to get an IP
@@ -381,7 +382,8 @@ class TestPortUpdateIpam(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
data = {'port': {
'fixed_ips': [{'subnet_id': subnet['subnet']['id']}]}}
port_id = port['port']['id']
- port_req = self.new_update_request('ports', data, port_id)
+ port_req = self.new_update_request('ports', data, port_id,
+ as_admin=True)
response = port_req.get_response(self.api)
res = self.deserialize(self.fmt, response)
diff --git a/neutron/tests/unit/db/test_ipam_pluggable_backend.py b/neutron/tests/unit/db/test_ipam_pluggable_backend.py
index 69fc9fc5c6..958c0abbc6 100644
--- a/neutron/tests/unit/db/test_ipam_pluggable_backend.py
+++ b/neutron/tests/unit/db/test_ipam_pluggable_backend.py
@@ -26,6 +26,7 @@ from oslo_utils import netutils
from oslo_utils import uuidutils
import webob.exc
+from neutron.conf import common as base_config
from neutron.db import ipam_backend_mixin
from neutron.db import ipam_pluggable_backend
from neutron.ipam import exceptions as ipam_exc
@@ -39,6 +40,7 @@ from neutron.tests.unit.db import test_db_base_plugin_v2 as test_db_base
class UseIpamMixin(object):
def setUp(self):
+ cfg.CONF.register_opts(base_config.core_opts)
cfg.CONF.set_override("ipam_driver", 'internal')
super(UseIpamMixin, self).setUp()
@@ -69,7 +71,6 @@ class TestDbBasePluginIpam(test_db_base.NeutronDbPluginV2TestCase):
plugin = 'neutron.tests.unit.db.test_ipam_backend_mixin.TestPlugin'
super(TestDbBasePluginIpam, self).setUp(plugin=plugin)
cfg.CONF.set_override("ipam_driver", 'internal')
- self.tenant_id = uuidutils.generate_uuid()
self.subnet_id = uuidutils.generate_uuid()
self.admin_context = ncontext.get_admin_context()
@@ -87,7 +88,7 @@ class TestDbBasePluginIpam(test_db_base.NeutronDbPluginV2TestCase):
'device_owner': constants.DEVICE_OWNER_COMPUTE_PREFIX + 'None'
},
'subnet_request': ipam_req.SpecificSubnetRequest(
- self.tenant_id,
+ self._tenant_id,
self.subnet_id,
'10.0.0.0/24',
'10.0.0.1',
diff --git a/neutron/tests/unit/db/test_l3_db.py b/neutron/tests/unit/db/test_l3_db.py
index 9b65b60f09..43d4ec4a49 100644
--- a/neutron/tests/unit/db/test_l3_db.py
+++ b/neutron/tests/unit/db/test_l3_db.py
@@ -928,7 +928,8 @@ class L3TestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
with db_api.CONTEXT_WRITER.using(self.ctx):
res = self._create_network(
self.fmt, name, True,
- arg_list=(extnet_apidef.EXTERNAL,), **kwargs)
+ arg_list=(extnet_apidef.EXTERNAL,),
+ as_admin=True, **kwargs)
if res.status_int >= webob.exc.HTTPClientError.code:
raise webob.exc.HTTPClientError(code=res.status_int)
return self.deserialize(self.fmt, res)
diff --git a/neutron/tests/unit/db/test_ovn_revision_numbers_db.py b/neutron/tests/unit/db/test_ovn_revision_numbers_db.py
index 62dfc9caa1..f375d3602d 100644
--- a/neutron/tests/unit/db/test_ovn_revision_numbers_db.py
+++ b/neutron/tests/unit/db/test_ovn_revision_numbers_db.py
@@ -237,7 +237,7 @@ class TestRevisionNumberMaintenance(test_securitygroup.SecurityGroupsTestCase,
'10.0.0.0/24')['subnet']
self._set_net_external(self.net['id'])
info = {'network_id': self.net['id']}
- router = self._make_router(self.fmt, None,
+ router = self._make_router(self.fmt, self._tenant_id,
external_gateway_info=info)['router']
fip = self._make_floatingip(self.fmt, self.net['id'])['floatingip']
port = self._make_port(self.fmt, self.net['id'])['port']
diff --git a/neutron/tests/unit/db/test_securitygroups_db.py b/neutron/tests/unit/db/test_securitygroups_db.py
index 593272027b..9036a11e9d 100644
--- a/neutron/tests/unit/db/test_securitygroups_db.py
+++ b/neutron/tests/unit/db/test_securitygroups_db.py
@@ -404,6 +404,38 @@ class SecurityGroupDbMixinTestCase(testlib_api.SqlTestCase):
self.assertEqual([mock.ANY, mock.ANY],
payload.metadata.get('security_group_rule_ids'))
+ def test_security_group_rule_after_delete_event_for_remote_group(self):
+ sg1_dict = self.mixin.create_security_group(self.ctx, FAKE_SECGROUP)
+ sg2_dict = self.mixin.create_security_group(self.ctx, FAKE_SECGROUP)
+
+ fake_rule = copy.deepcopy(FAKE_SECGROUP_RULE)
+ fake_rule['security_group_rule']['security_group_id'] = sg1_dict['id']
+ fake_rule['security_group_rule']['remote_group_id'] = sg2_dict['id']
+ fake_rule['security_group_rule']['remote_ip_prefix'] = None
+ remote_rule = self.mixin.create_security_group_rule(
+ self.ctx, fake_rule)
+
+ with mock.patch.object(registry, "publish") as mock_publish:
+ self.mixin.delete_security_group(self.ctx, sg2_dict['id'])
+ mock_publish.assert_has_calls(
+ [mock.call('security_group', 'before_delete',
+ mock.ANY, payload=mock.ANY),
+ mock.call('security_group', 'precommit_delete',
+ mock.ANY,
+ payload=mock.ANY),
+ mock.call('security_group', 'after_delete',
+ mock.ANY,
+ payload=mock.ANY),
+ mock.call('security_group_rule', 'after_delete',
+ mock.ANY,
+ payload=mock.ANY)])
+ rule_payload = mock_publish.mock_calls[3][2]['payload']
+ self.assertEqual(remote_rule['id'], rule_payload.resource_id)
+ self.assertEqual(sg1_dict['id'],
+ rule_payload.metadata['security_group_id'])
+ self.assertEqual(sg2_dict['id'],
+ rule_payload.metadata['remote_group_id'])
+
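The indexing in rule_payload = mock_publish.mock_calls[3][2]['payload'] relies on mock's call records unpacking as (name, args, kwargs); index 3 is the fourth publish, i.e. the security_group_rule after_delete event asserted just above. A quick standalone demonstration:

from unittest import mock

m = mock.Mock()
m('security_group_rule', 'after_delete', None, payload='the-payload')
name, args, kwargs = m.mock_calls[0]   # each record is a 3-tuple
assert args[0] == 'security_group_rule'
assert m.mock_calls[0][2]['payload'] == 'the-payload'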
def test_security_group_rule_precommit_create_event_fail(self):
registry.subscribe(fake_callback, resources.SECURITY_GROUP_RULE,
events.PRECOMMIT_CREATE)
diff --git a/neutron/tests/unit/extensions/test_address_group.py b/neutron/tests/unit/extensions/test_address_group.py
index ff37ba684f..e3339b0320 100644
--- a/neutron/tests/unit/extensions/test_address_group.py
+++ b/neutron/tests/unit/extensions/test_address_group.py
@@ -84,9 +84,8 @@ class AddressGroupTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
def _test_address_group_actions(self, addr_group_id, data, action,
expected=None, tenant_id=None):
act_req = self.new_action_request(
- 'address-groups', data, addr_group_id, action)
- act_req.environ['neutron.context'] = context.Context(
- '', tenant_id or self._tenant_id)
+ 'address-groups', data, addr_group_id, action,
+ tenant_id=tenant_id or self._tenant_id)
act_res = act_req.get_response(self.ext_api)
if expected:
diff --git a/neutron/tests/unit/extensions/test_address_scope.py b/neutron/tests/unit/extensions/test_address_scope.py
index 6d5eb30031..7e33980686 100644
--- a/neutron/tests/unit/extensions/test_address_scope.py
+++ b/neutron/tests/unit/extensions/test_address_scope.py
@@ -49,39 +49,40 @@ class AddressScopeTestExtensionManager(object):
class AddressScopeTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
def _create_address_scope(self, fmt, ip_version=constants.IP_VERSION_4,
- expected_res_status=None, admin=False, **kwargs):
+ expected_res_status=None, admin=False,
+ tenant_id=None, **kwargs):
address_scope = {'address_scope': {}}
address_scope['address_scope']['ip_version'] = ip_version
+ tenant_id = tenant_id or self._tenant_id
for k, v in kwargs.items():
address_scope['address_scope'][k] = str(v)
address_scope_req = self.new_create_request('address-scopes',
- address_scope, fmt)
-
- if not admin:
- neutron_context = context.Context('', kwargs.get('tenant_id',
- self._tenant_id))
- address_scope_req.environ['neutron.context'] = neutron_context
+ address_scope, fmt,
+ tenant_id=tenant_id,
+ as_admin=admin)
address_scope_res = address_scope_req.get_response(self.ext_api)
if expected_res_status:
self.assertEqual(expected_res_status, address_scope_res.status_int)
return address_scope_res
- def _make_address_scope(self, fmt, ip_version, admin=False, **kwargs):
+ def _make_address_scope(self, fmt, ip_version, admin=False, tenant_id=None,
+ **kwargs):
res = self._create_address_scope(fmt, ip_version,
- admin=admin, **kwargs)
+ admin=admin, tenant_id=tenant_id,
+ **kwargs)
if res.status_int >= webob.exc.HTTPClientError.code:
raise webob.exc.HTTPClientError(code=res.status_int)
return self.deserialize(fmt, res)
@contextlib.contextmanager
def address_scope(self, ip_version=constants.IP_VERSION_4,
- admin=False, **kwargs):
- if 'project_id' in kwargs:
- kwargs['tenant_id'] = kwargs['project_id']
+ admin=False, tenant_id=None, **kwargs):
+ tenant_id = tenant_id if tenant_id else kwargs.pop(
+ 'tenant_id', None)
addr_scope = self._make_address_scope(self.fmt, ip_version,
- admin, **kwargs)
+ admin, tenant_id, **kwargs)
yield addr_scope
def _test_create_address_scope(self, ip_version=constants.IP_VERSION_4,
@@ -99,9 +100,9 @@ class AddressScopeTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
def _test_update_address_scope(self, addr_scope_id, data, admin=False,
expected=None, tenant_id=None):
update_req = self.new_update_request(
- 'address-scopes', data, addr_scope_id)
- update_req.environ['neutron.context'] = context.Context(
- '', tenant_id or self._tenant_id, is_admin=admin)
+ 'address-scopes', data, addr_scope_id,
+ tenant_id=tenant_id or self._tenant_id,
+ as_admin=admin)
update_res = update_req.get_response(self.ext_api)
if expected:
@@ -244,8 +245,7 @@ class TestAddressScope(AddressScopeTestCase):
admin=True)
admin_res = self._list('address-scopes')
mortal_res = self._list(
- 'address-scopes',
- neutron_context=context.Context('', 'not-the-owner'))
+ 'address-scopes', tenant_id='not-the-owner')
self.assertEqual(1, len(admin_res['address_scopes']))
self.assertEqual(1, len(mortal_res['address_scopes']))
@@ -254,8 +254,7 @@ class TestAddressScope(AddressScopeTestCase):
name='foo-address-scope')
admin_res = self._list('address-scopes')
mortal_res = self._list(
- 'address-scopes',
- neutron_context=context.Context('', 'not-the-owner'))
+ 'address-scopes', tenant_id='not-the-owner')
self.assertEqual(1, len(admin_res['address_scopes']))
self.assertEqual(0, len(mortal_res['address_scopes']))
diff --git a/neutron/tests/unit/extensions/test_agent.py b/neutron/tests/unit/extensions/test_agent.py
index 2a084bf4dd..0cb1cb7037 100644
--- a/neutron/tests/unit/extensions/test_agent.py
+++ b/neutron/tests/unit/extensions/test_agent.py
@@ -59,11 +59,10 @@ class TestAgentPlugin(db_base_plugin_v2.NeutronDbPluginV2,
class AgentDBTestMixIn(object):
def _list_agents(self, expected_res_status=None,
- neutron_context=None,
query_string=None):
agent_res = self._list('agents',
- neutron_context=neutron_context,
- query_params=query_string)
+ query_params=query_string,
+ as_admin=True)
if expected_res_status:
self.assertEqual(expected_res_status, agent_res.status_int)
return agent_res
@@ -107,14 +106,12 @@ class AgentDBTestCase(AgentDBTestMixIn,
def test_create_agent(self):
data = {'agent': {}}
_req = self.new_create_request('agents', data, self.fmt)
- _req.environ['neutron.context'] = context.Context(
- '', 'tenant_id')
res = _req.get_response(self.ext_api)
self.assertEqual(exc.HTTPBadRequest.code, res.status_int)
def test_list_agent(self):
agents = self._register_agent_states()
- res = self._list('agents')
+ res = self._list('agents', as_admin=True)
self.assertEqual(len(agents), len(res['agents']))
def test_show_agent(self):
@@ -122,7 +119,7 @@ class AgentDBTestCase(AgentDBTestMixIn,
agents = self._list_agents(
query_string='binary=' + constants.AGENT_PROCESS_L3)
self.assertEqual(2, len(agents['agents']))
- agent = self._show('agents', agents['agents'][0]['id'])
+ agent = self._show('agents', agents['agents'][0]['id'], as_admin=True)
self.assertEqual(constants.AGENT_PROCESS_L3, agent['agent']['binary'])
def test_update_agent(self):
@@ -132,13 +129,13 @@ class AgentDBTestCase(AgentDBTestMixIn,
'&host=' + L3_HOSTB))
self.assertEqual(1, len(agents['agents']))
com_id = agents['agents'][0]['id']
- agent = self._show('agents', com_id)
+ agent = self._show('agents', com_id, as_admin=True)
new_agent = {}
new_agent['agent'] = {}
new_agent['agent']['admin_state_up'] = False
new_agent['agent']['description'] = 'description'
- self._update('agents', com_id, new_agent)
- agent = self._show('agents', com_id)
+ self._update('agents', com_id, new_agent, as_admin=True)
+ agent = self._show('agents', com_id, as_admin=True)
self.assertFalse(agent['agent']['admin_state_up'])
self.assertEqual('description', agent['agent']['description'])
diff --git a/neutron/tests/unit/extensions/test_availability_zone.py b/neutron/tests/unit/extensions/test_availability_zone.py
index 78534c541f..e00054a026 100644
--- a/neutron/tests/unit/extensions/test_availability_zone.py
+++ b/neutron/tests/unit/extensions/test_availability_zone.py
@@ -71,12 +71,11 @@ class TestAZAgentCase(AZTestCommon):
{'name': 'nova2', 'resource': 'network', 'state': 'available'},
{'name': 'nova2', 'resource': 'router', 'state': 'available'},
{'name': 'nova3', 'resource': 'router', 'state': 'unavailable'}]
- res = self._list('availability_zones')
+ res = self._list('availability_zones', as_admin=True)
azs = res['availability_zones']
self.assertCountEqual(expected, azs)
# not admin case
- ctx = context.Context('', 'noadmin')
- res = self._list('availability_zones', neutron_context=ctx)
+ res = self._list('availability_zones', as_admin=False)
azs = res['availability_zones']
self.assertCountEqual(expected, azs)
@@ -89,33 +88,37 @@ class TestAZAgentCase(AZTestCommon):
{'name': 'nova2', 'resource': 'network', 'state': 'available'},
{'name': 'nova2', 'resource': 'router', 'state': 'available'},
{'name': 'nova3', 'resource': 'router', 'state': 'unavailable'}]
- res = self._list('availability_zones')
+ res = self._list('availability_zones', as_admin=True)
azs = res['availability_zones']
self.assertCountEqual(expected, azs)
# list with filter of 'name'
res = self._list('availability_zones',
- query_params="name=nova1")
+ query_params="name=nova1",
+ as_admin=True)
azs = res['availability_zones']
self.assertCountEqual(expected[:1], azs)
# list with filter of 'resource'
res = self._list('availability_zones',
- query_params="resource=router")
+ query_params="resource=router",
+ as_admin=True)
azs = res['availability_zones']
self.assertCountEqual(expected[-2:], azs)
# list with filter of 'state' as 'available'
res = self._list('availability_zones',
- query_params="state=available")
+ query_params="state=available",
+ as_admin=True)
azs = res['availability_zones']
self.assertCountEqual(expected[:3], azs)
# list with filter of 'state' as 'unavailable'
res = self._list('availability_zones',
- query_params="state=unavailable")
+ query_params="state=unavailable",
+ as_admin=True)
azs = res['availability_zones']
self.assertCountEqual(expected[-1:], azs)
def test_list_agent_with_az(self):
helpers.register_dhcp_agent(host='host1', az='nova1')
- res = self._list('agents')
+ res = self._list('agents', as_admin=True)
self.assertEqual('nova1',
res['agents'][0]['availability_zone'])
diff --git a/neutron/tests/unit/extensions/test_data_plane_status.py b/neutron/tests/unit/extensions/test_data_plane_status.py
index a6c99f1b77..f48752c7be 100644
--- a/neutron/tests/unit/extensions/test_data_plane_status.py
+++ b/neutron/tests/unit/extensions/test_data_plane_status.py
@@ -80,7 +80,8 @@ class DataPlaneStatusExtensionTestCase(
data = {'port': {'data_plane_status': constants.ACTIVE}}
req = self.new_update_request(port_def.COLLECTION_NAME,
data,
- port['port']['id'])
+ port['port']['id'],
+ as_admin=True)
res = req.get_response(self.api)
p = self.deserialize(self.fmt, res)['port']
self.assertEqual(200, res.status_code)
@@ -106,9 +107,11 @@ class DataPlaneStatusExtensionTestCase(
with self.port(name='port1') as port:
res = self._update(port_def.COLLECTION_NAME, port['port']['id'],
{'port': {dps_lib.DATA_PLANE_STATUS:
- constants.ACTIVE}})
+ constants.ACTIVE}},
+ as_admin=True)
res = self._update(port_def.COLLECTION_NAME, port['port']['id'],
- {'port': {'name': 'port2'}})
+ {'port': {'name': 'port2'}},
+ as_admin=True)
self.assertEqual(res['port']['name'], 'port2')
self.assertEqual(res['port'][dps_lib.DATA_PLANE_STATUS],
constants.ACTIVE)
@@ -125,7 +128,8 @@ class DataPlaneStatusExtensionTestCase(
with self.port(name='port1') as port:
self._update(port_def.COLLECTION_NAME, port['port']['id'],
{'port': {dps_lib.DATA_PLANE_STATUS:
- constants.ACTIVE}})
+ constants.ACTIVE}},
+ as_admin=True)
notify = set(n['event_type'] for n in fake_notifier.NOTIFICATIONS)
duplicated_notify = expect_notify & notify
self.assertEqual(expect_notify, duplicated_notify)
diff --git a/neutron/tests/unit/extensions/test_default_subnetpools.py b/neutron/tests/unit/extensions/test_default_subnetpools.py
index b089cdc4ad..c0fb2e1bec 100644
--- a/neutron/tests/unit/extensions/test_default_subnetpools.py
+++ b/neutron/tests/unit/extensions/test_default_subnetpools.py
@@ -71,9 +71,13 @@ class DefaultSubnetpoolsExtensionTestCase(
return self.deserialize(self.fmt, res)['subnet']
- def _update_subnetpool(self, subnetpool_id, **data):
+ def _update_subnetpool(self, subnetpool_id, tenant_id=None,
+ as_admin=False, **data):
+ if 'shared' in data or 'is_default' in data:
+ as_admin = True
update_req = self.new_update_request(
- 'subnetpools', {'subnetpool': data}, subnetpool_id)
+ 'subnetpools', {'subnetpool': data}, subnetpool_id,
+ tenant_id=tenant_id, as_admin=as_admin)
res = update_req.get_response(self.api)
return self.deserialize(self.fmt, res)['subnetpool']
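The guard added to _update_subnetpool auto-escalates to admin only for the two admin-only attributes; every other update keeps the caller's tenant scope. The predicate it encodes, extracted here for illustration:

def needs_admin(data, as_admin=False):
    # 'shared' and 'is_default' are admin-only subnetpool attributes;
    # any other update runs with the requesting tenant's own context.
    return as_admin or 'shared' in data or 'is_default' in data

assert needs_admin({'is_default': True})
assert not needs_admin({'name': 'renamed'})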
diff --git a/neutron/tests/unit/extensions/test_dns.py b/neutron/tests/unit/extensions/test_dns.py
index 34d8f76c98..aca700c94e 100644
--- a/neutron/tests/unit/extensions/test_dns.py
+++ b/neutron/tests/unit/extensions/test_dns.py
@@ -109,10 +109,8 @@ class DnsExtensionTestCase(test_plugin.Ml2PluginV2TestCase):
self.assertEqual(expected_res_status, port_res.status_int)
return port_res
- def _test_list_resources(self, resource, items, neutron_context=None,
- query_params=None):
+ def _test_list_resources(self, resource, items, query_params=None):
res = self._list('%ss' % resource,
- neutron_context=neutron_context,
query_params=query_params)
resource = resource.replace('-', '_')
self.assertCountEqual([i['id'] for i in res['%ss' % resource]],
diff --git a/neutron/tests/unit/extensions/test_expose_port_forwarding_in_fip.py b/neutron/tests/unit/extensions/test_expose_port_forwarding_in_fip.py
index 7b2561b53b..f11d8c03bb 100644
--- a/neutron/tests/unit/extensions/test_expose_port_forwarding_in_fip.py
+++ b/neutron/tests/unit/extensions/test_expose_port_forwarding_in_fip.py
@@ -112,7 +112,8 @@ class TestExtendFipPortForwardingExtension(
ctx = context.get_admin_context()
kwargs = {'arg_list': (extnet_apidef.EXTERNAL,),
extnet_apidef.EXTERNAL: True}
- with self.network(**kwargs) as extnet, self.network() as innet:
+ with self.network(as_admin=True, **kwargs) as extnet, \
+ self.network() as innet:
with self.subnet(network=extnet, cidr='200.0.0.0/22'), \
self.subnet(network=innet, cidr='10.0.0.0/24') as insub, \
self.router() as router:
@@ -148,7 +149,8 @@ class TestExtendFipPortForwardingExtension(
ctx = context.get_admin_context()
kwargs = {'arg_list': (extnet_apidef.EXTERNAL,),
extnet_apidef.EXTERNAL: True}
- with self.network(**kwargs) as extnet, self.network() as innet:
+ with self.network(as_admin=True, **kwargs) as extnet,\
+ self.network() as innet:
with self.subnet(network=extnet, cidr='200.0.0.0/22'),\
self.subnet(network=innet, cidr='10.0.0.0/24') as insub,\
self.router() as router:
@@ -241,7 +243,8 @@ class TestExtendFipPortForwardingExtension(
ctx = context.get_admin_context()
kwargs = {'arg_list': (extnet_apidef.EXTERNAL,),
extnet_apidef.EXTERNAL: True}
- with self.network(**kwargs) as extnet, self.network() as innet:
+ with self.network(as_admin=True, **kwargs) as extnet,\
+ self.network() as innet:
with self.subnet(network=extnet, cidr='200.0.0.0/22'),\
self.subnet(network=innet, cidr='10.0.0.0/24') as insub,\
self.subnet(network=innet, cidr='10.0.8.0/24') as insub2,\
@@ -317,10 +320,11 @@ class TestExtendFipPortForwardingExtension(
ctx = context.get_admin_context()
kwargs = {'arg_list': (extnet_apidef.EXTERNAL,),
extnet_apidef.EXTERNAL: True}
- with self.network(**kwargs) as extnet, self.network() as innet:
+ with self.network(as_admin=True, **kwargs) as extnet,\
+ self.network() as innet:
with self.subnet(network=extnet, cidr='200.0.0.0/22'),\
self.subnet(network=innet, cidr='10.0.0.0/24') as insub,\
- self.router(distributed=True) as router:
+ self.router(distributed=True, as_admin=True) as router:
fip = self._make_floatingip(self.fmt, extnet['network']['id'])
# check the floatingip response contains port_forwarding field
self.assertIn(apidef.COLLECTION_NAME, fip['floatingip'])
diff --git a/neutron/tests/unit/extensions/test_external_net.py b/neutron/tests/unit/extensions/test_external_net.py
index edc76a2dbe..6e22ab658a 100644
--- a/neutron/tests/unit/extensions/test_external_net.py
+++ b/neutron/tests/unit/extensions/test_external_net.py
@@ -65,7 +65,8 @@ class ExtNetDBTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
def _set_net_external(self, net_id):
self._update('networks', net_id,
- {'network': {extnet_apidef.EXTERNAL: True}})
+ {'network': {extnet_apidef.EXTERNAL: True}},
+ as_admin=True)
def test_list_nets_external(self):
with self.network() as n1:
@@ -111,13 +112,14 @@ class ExtNetDBTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
data = {'network': {'router:external': True}}
req = self.new_update_request('networks',
data,
- network['network']['id'])
- req.environ['neutron.context'] = context.Context('', 'noadmin')
+ network['network']['id'],
+ tenant_id='noadmin')
res = req.get_response(self.api)
self.assertEqual(exc.HTTPForbidden.code, res.status_int)
def test_update_network_external_net_with_ports_set_not_shared(self):
- with self.network(router__external=True, shared=True) as ext_net,\
+ with self.network(router__external=True, shared=True,
+ as_admin=True) as ext_net,\
self.subnet(network=ext_net) as ext_subnet, \
self.port(subnet=ext_subnet,
tenant_id='',
@@ -125,7 +127,8 @@ class ExtNetDBTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
data = {'network': {'shared': False}}
req = self.new_update_request('networks',
data,
- ext_net['network']['id'])
+ ext_net['network']['id'],
+ as_admin=True)
res = req.get_response(self.api)
self.assertEqual(exc.HTTPOk.code, res.status_int)
ctx = context.Context(None, None, is_admin=True)
@@ -158,18 +161,18 @@ class ExtNetDBTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
self.assertEqual(conditions.__str__(), "%s OR %s" % (txt, txt2))
def test_create_port_external_network_non_admin_fails(self):
- with self.network(router__external=True) as ext_net:
+ with self.network(as_admin=True, router__external=True) as ext_net:
with self.subnet(network=ext_net) as ext_subnet:
with testtools.ExpectedException(
exc.HTTPClientError) as ctx_manager:
with self.port(subnet=ext_subnet,
- set_context='True',
+ is_admin=False,
tenant_id='noadmin'):
pass
self.assertEqual(403, ctx_manager.exception.code)
def test_create_port_external_network_admin_succeeds(self):
- with self.network(router__external=True) as ext_net:
+ with self.network(router__external=True, as_admin=True) as ext_net:
with self.subnet(network=ext_net) as ext_subnet:
with self.port(subnet=ext_subnet) as port:
self.assertEqual(port['port']['network_id'],
@@ -178,13 +181,13 @@ class ExtNetDBTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
def test_create_external_network_non_admin_fails(self):
with testtools.ExpectedException(exc.HTTPClientError) as ctx_manager:
with self.network(router__external=True,
- set_context='True',
+ as_admin=False,
tenant_id='noadmin'):
pass
self.assertEqual(403, ctx_manager.exception.code)
def test_create_external_network_admin_succeeds(self):
- with self.network(router__external=True) as ext_net:
+ with self.network(router__external=True, as_admin=True) as ext_net:
self.assertTrue(ext_net['network'][extnet_apidef.EXTERNAL])
def test_delete_network_check_disassociated_floatingips(self):
diff --git a/neutron/tests/unit/extensions/test_extraroute.py b/neutron/tests/unit/extensions/test_extraroute.py
index 4e6b948dbd..48866e2f42 100644
--- a/neutron/tests/unit/extensions/test_extraroute.py
+++ b/neutron/tests/unit/extensions/test_extraroute.py
@@ -17,7 +17,6 @@ from neutron_lib.api.definitions import external_net as enet_apidef
from neutron_lib.api.definitions import extraroute as xroute_apidef
from neutron_lib.api.definitions import l3 as l3_apidef
from neutron_lib import constants
-from neutron_lib import context
from neutron_lib.utils import helpers
from oslo_config import cfg
from oslo_utils import uuidutils
@@ -62,14 +61,15 @@ class TestExtraRouteL3NatServicePlugin(test_l3.TestL3NatServicePlugin,
class ExtraRouteDBTestCaseBase(object):
def _routes_update_prepare(
self, router_id, subnet_id,
- port_id, routes, skip_add=False, tenant_id=None):
+ port_id, routes, skip_add=False, tenant_id=None, as_admin=False):
if not skip_add:
self._router_interface_action(
- 'add', router_id, subnet_id, port_id, tenant_id=None)
- ctxt = context.Context('', tenant_id) if tenant_id else None
+ 'add', router_id, subnet_id, port_id, tenant_id=tenant_id,
+ as_admin=as_admin)
+ tenant_id = tenant_id or self._tenant_id
self._update('routers', router_id, {'router': {'routes': routes}},
- neutron_context=ctxt)
- return self._show('routers', router_id)
+ request_tenant_id=tenant_id, as_admin=as_admin)
+ return self._show('routers', router_id, tenant_id=tenant_id)
def _routes_update_cleanup(self, port_id, subnet_id, router_id, routes):
self._update('routers', router_id, {'router': {'routes': routes}})
@@ -91,7 +91,8 @@ class ExtraRouteDBTestCaseBase(object):
def test_route_update_with_external_route(self):
my_tenant = 'tenant1'
with self.subnet(cidr='10.0.1.0/24', tenant_id='notme') as ext_subnet,\
- self.port(subnet=ext_subnet) as nexthop_port:
+ self.port(subnet=ext_subnet,
+ tenant_id='notme') as nexthop_port:
nexthop_ip = nexthop_port['port']['fixed_ips'][0]['ip_address']
routes = [{'destination': '135.207.0.0/16',
'nexthop': nexthop_ip}]
@@ -107,14 +108,14 @@ class ExtraRouteDBTestCaseBase(object):
def test_route_update_with_route_via_another_tenant_subnet(self):
my_tenant = 'tenant1'
with self.subnet(cidr='10.0.1.0/24', tenant_id='notme') as subnet,\
- self.port(subnet=subnet) as nexthop_port:
+ self.port(subnet=subnet, tenant_id='notme') as nexthop_port:
nexthop_ip = nexthop_port['port']['fixed_ips'][0]['ip_address']
routes = [{'destination': '135.207.0.0/16',
'nexthop': nexthop_ip}]
with self.router(tenant_id=my_tenant) as r:
body = self._routes_update_prepare(
r['router']['id'], subnet['subnet']['id'], None, routes,
- tenant_id=my_tenant)
+ tenant_id=my_tenant, as_admin=True)
self.assertEqual(routes, body['router']['routes'])
def test_route_clear_routes_with_None(self):
diff --git a/neutron/tests/unit/extensions/test_flavors.py b/neutron/tests/unit/extensions/test_flavors.py
index cf8dadc707..5589a88356 100644
--- a/neutron/tests/unit/extensions/test_flavors.py
+++ b/neutron/tests/unit/extensions/test_flavors.py
@@ -198,7 +198,9 @@ class FlavorExtensionTestCase(extension.ExtensionTestCase):
'service_profiles': ['profile-1']}}
instance = self.plugin.return_value
instance.get_flavor.return_value = expected['flavor']
- res = self.api.get(_get_path('flavors', id=flavor_id, fmt=self.fmt))
+ res = self.api.get(
+ _get_path('flavors', id=flavor_id, fmt=self.fmt),
+ extra_environ=test_base._get_neutron_env(as_admin=True))
instance.get_flavor.assert_called_with(mock.ANY,
flavor_id,
fields=mock.ANY)
@@ -218,7 +220,9 @@ class FlavorExtensionTestCase(extension.ExtensionTestCase):
'service_profiles': ['profile-2', 'profile-1']}]}
instance = self.plugin.return_value
instance.get_flavors.return_value = data['flavors']
- res = self.api.get(_get_path('flavors', fmt=self.fmt))
+ res = self.api.get(
+ _get_path('flavors', fmt=self.fmt),
+ extra_environ=test_base._get_neutron_env(as_admin=True))
instance.get_flavors.assert_called_with(mock.ANY,
fields=mock.ANY,
filters=mock.ANY)
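Here the API is exercised through webtest, so the admin context travels in the WSGI environ via extra_environ instead. Assuming test_base._get_neutron_env returns an environ dict carrying a pre-built context (an assumption about its internals; only the call shape comes from the hunk above), a self-contained toy equivalent looks like:

    def _get_neutron_env(tenant_id='fake-tenant', as_admin=False):
        # Illustrative stand-in: the real helper builds a neutron_lib
        # Context; a plain dict marks the fields the tests care about.
        return {'neutron.context': {'tenant_id': tenant_id,
                                    'is_admin': as_admin}}

    env = _get_neutron_env(as_admin=True)
    assert env['neutron.context']['is_admin']
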
diff --git a/neutron/tests/unit/extensions/test_floating_ip_port_forwarding.py b/neutron/tests/unit/extensions/test_floating_ip_port_forwarding.py
index b4658a180b..b90b0c73d7 100644
--- a/neutron/tests/unit/extensions/test_floating_ip_port_forwarding.py
+++ b/neutron/tests/unit/extensions/test_floating_ip_port_forwarding.py
@@ -14,7 +14,6 @@
from unittest import mock
-from neutron_lib import context
from oslo_utils import uuidutils
from webob import exc
@@ -50,8 +49,9 @@ class FloatingIPPorForwardingTestCase(test_l3.L3BaseForIntTests,
tenant_id=None,
description=None,
external_port_range=None,
- internal_port_range=None):
- tenant_id = tenant_id or _uuid()
+ internal_port_range=None,
+ as_admin=False):
+ tenant_id = tenant_id or self._tenant_id
data = {'port_forwarding': {
"protocol": protocol,
"internal_ip_address": internal_ip_address,
@@ -69,28 +69,29 @@ class FloatingIPPorForwardingTestCase(test_l3.L3BaseForIntTests,
if description:
data['port_forwarding']['description'] = description
- fip_pf_req = self._req(
- 'POST', 'floatingips', data,
- fmt or self.fmt, id=floating_ip_id,
- subresource='port_forwardings')
-
- fip_pf_req.environ['neutron.context'] = context.Context(
- '', tenant_id, is_admin=True)
+ fip_pf_req = self.new_create_request(
+ 'floatingips', data, fmt or self.fmt, floating_ip_id,
+ subresource='port_forwardings',
+ tenant_id=tenant_id, as_admin=as_admin)
return fip_pf_req.get_response(self.ext_api)
def _update_fip_port_forwarding(self, fmt, floating_ip_id,
- port_forwarding_id, **kwargs):
+ port_forwarding_id,
+ req_tenant_id=None, as_admin=False,
+ **kwargs):
+ req_tenant_id = req_tenant_id or self._tenant_id
port_forwarding = {}
for k, v in kwargs.items():
port_forwarding[k] = v
data = {'port_forwarding': port_forwarding}
- fip_pf_req = self._req(
- 'PUT', 'floatingips', data,
- fmt or self.fmt, id=floating_ip_id,
+ fip_pf_req = self.new_update_request(
+ 'floatingips', data, floating_ip_id, fmt or self.fmt,
sub_id=port_forwarding_id,
- subresource='port_forwardings')
+ subresource='port_forwardings',
+ tenant_id=req_tenant_id,
+ as_admin=as_admin)
return fip_pf_req.get_response(self.ext_api)
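The same substitution applied to subresource requests: instead of self._req(...) plus a hand-injected environ, the new_create_request/new_update_request builders take subresource, sub_id, tenant_id and as_admin directly. A runnable sketch of how such a builder assembles the request path, with illustrative names:

    def build_path(resource, res_id=None, subresource=None, sub_id=None,
                   fmt='json'):
        # e.g. floatingips/<fip-id>/port_forwardings/<pf-id>.json
        parts = [resource]
        if res_id:
            parts.append(res_id)
        if subresource:
            parts.append(subresource)
        if sub_id:
            parts.append(sub_id)
        return '/'.join(parts) + '.' + fmt

    assert (build_path('floatingips', 'fip-1', 'port_forwardings', 'pf-1') ==
            'floatingips/fip-1/port_forwardings/pf-1.json')
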
diff --git a/neutron/tests/unit/extensions/test_l3.py b/neutron/tests/unit/extensions/test_l3.py
index ab6be0ad52..f07472fcbc 100644
--- a/neutron/tests/unit/extensions/test_l3.py
+++ b/neutron/tests/unit/extensions/test_l3.py
@@ -377,10 +377,10 @@ class TestL3NatAgentSchedulingServicePlugin(TestL3NatServicePlugin,
class L3NatTestCaseMixin(object):
- def _create_router(self, fmt, tenant_id, name=None,
- admin_state_up=None, set_context=False,
- arg_list=None, **kwargs):
- tenant_id = tenant_id or _uuid()
+ def _create_router(self, fmt, tenant_id=None, name=None,
+ admin_state_up=None, arg_list=None,
+ as_admin=False, **kwargs):
+ tenant_id = tenant_id or self._tenant_id
data = {'router': {'tenant_id': tenant_id}}
if name:
data['router']['name'] = name
@@ -400,29 +400,27 @@ class L3NatTestCaseMixin(object):
if 'enable_ndp_proxy' in kwargs:
data['router']['enable_ndp_proxy'] = \
bool(kwargs['enable_ndp_proxy'])
- router_req = self.new_create_request('routers', data, fmt)
- if set_context and tenant_id:
- # create a specific auth context for this request
- router_req.environ['neutron.context'] = context.Context(
- '', tenant_id)
+ router_req = self.new_create_request('routers', data, fmt,
+ tenant_id=tenant_id,
+ as_admin=as_admin)
return router_req.get_response(self.ext_api)
- def _make_router(self, fmt, tenant_id, name=None, admin_state_up=None,
- external_gateway_info=None, set_context=False,
- arg_list=None, **kwargs):
+ def _make_router(self, fmt, tenant_id=None, name=None, admin_state_up=None,
+ external_gateway_info=None,
+ arg_list=None, as_admin=False, **kwargs):
if external_gateway_info:
arg_list = ('external_gateway_info', ) + (arg_list or ())
res = self._create_router(fmt, tenant_id, name,
- admin_state_up, set_context,
+ admin_state_up,
arg_list=arg_list,
external_gateway_info=external_gateway_info,
- **kwargs)
+ as_admin=as_admin, **kwargs)
return self.deserialize(fmt, res)
def _add_external_gateway_to_router(self, router_id, network_id,
expected_code=exc.HTTPOk.code,
- neutron_context=None, ext_ips=None,
+ ext_ips=None, as_admin=False,
**kwargs):
ext_ips = ext_ips or []
body = {'router':
@@ -435,7 +433,7 @@ class L3NatTestCaseMixin(object):
'qos_policy_id'] = kwargs.get('policy_id')
return self._update('routers', router_id, body,
expected_code=expected_code,
- neutron_context=neutron_context)
+ as_admin=as_admin)
def _remove_external_gateway_from_router(self, router_id, network_id,
expected_code=exc.HTTPOk.code,
@@ -449,7 +447,8 @@ class L3NatTestCaseMixin(object):
expected_code=exc.HTTPOk.code,
expected_body=None,
tenant_id=None,
- msg=None):
+ msg=None,
+ as_admin=False):
interface_data = {}
if subnet_id is not None:
interface_data.update({'subnet_id': subnet_id})
@@ -457,11 +456,8 @@ class L3NatTestCaseMixin(object):
interface_data.update({'port_id': port_id})
req = self.new_action_request('routers', interface_data, router_id,
- "%s_router_interface" % action)
- # if tenant_id was specified, create a tenant context for this request
- if tenant_id:
- req.environ['neutron.context'] = context.Context(
- '', tenant_id)
+ "%s_router_interface" % action,
+ tenant_id=tenant_id, as_admin=as_admin)
res = req.get_response(self.ext_api)
self.assertEqual(expected_code, res.status_int, msg)
response = self.deserialize(self.fmt, res)
@@ -472,23 +468,23 @@ class L3NatTestCaseMixin(object):
@contextlib.contextmanager
def router(self, name='router1', admin_state_up=True,
fmt=None, project_id=None,
- external_gateway_info=None, set_context=False,
+ external_gateway_info=None, as_admin=False,
**kwargs):
tenant_id = project_id if project_id else kwargs.pop(
'tenant_id', None)
router = self._make_router(fmt or self.fmt, tenant_id, name,
admin_state_up, external_gateway_info,
- set_context, **kwargs)
+ as_admin=as_admin, **kwargs)
yield router
def _set_net_external(self, net_id):
self._update('networks', net_id,
- {'network': {extnet_apidef.EXTERNAL: True}})
+ {'network': {extnet_apidef.EXTERNAL: True}},
+ as_admin=True)
def _create_floatingip(self, fmt, network_id, port_id=None,
- fixed_ip=None, set_context=False,
- floating_ip=None, subnet_id=None,
- tenant_id=None, **kwargs):
+ fixed_ip=None, floating_ip=None, subnet_id=None,
+ tenant_id=None, as_admin=False, **kwargs):
tenant_id = tenant_id or self._tenant_id
data = {'floatingip': {'floating_network_id': network_id,
'tenant_id': tenant_id}}
@@ -505,20 +501,18 @@ class L3NatTestCaseMixin(object):
data['floatingip'].update(kwargs)
- floatingip_req = self.new_create_request('floatingips', data, fmt)
- if set_context and tenant_id:
- # create a specific auth context for this request
- floatingip_req.environ['neutron.context'] = context.Context(
- '', tenant_id)
+ floatingip_req = self.new_create_request(
+ 'floatingips', data, fmt, tenant_id=tenant_id, as_admin=as_admin)
return floatingip_req.get_response(self.ext_api)
def _make_floatingip(self, fmt, network_id, port_id=None,
- fixed_ip=None, set_context=False, tenant_id=None,
+ fixed_ip=None, tenant_id=None,
floating_ip=None, http_status=exc.HTTPCreated.code,
- **kwargs):
+ as_admin=False, **kwargs):
res = self._create_floatingip(fmt, network_id, port_id,
- fixed_ip, set_context, floating_ip,
- tenant_id=tenant_id, **kwargs)
+ fixed_ip, floating_ip,
+ tenant_id=tenant_id, as_admin=as_admin,
+ **kwargs)
self.assertEqual(http_status, res.status_int)
return self.deserialize(fmt, res)
@@ -534,16 +528,15 @@ class L3NatTestCaseMixin(object):
@contextlib.contextmanager
def floatingip_with_assoc(self, port_id=None, fmt=None, fixed_ip=None,
- public_cidr='11.0.0.0/24', set_context=False,
- project_id=None, flavor_id=None, **kwargs):
+ public_cidr='11.0.0.0/24', project_id=None,
+ flavor_id=None, as_admin=False, **kwargs):
tenant_id = project_id if project_id else kwargs.pop(
'tenant_id', None)
with self.subnet(cidr=public_cidr,
- set_context=set_context,
- tenant_id=tenant_id) as public_sub:
+ tenant_id=tenant_id,
+ as_admin=as_admin) as public_sub:
self._set_net_external(public_sub['subnet']['network_id'])
- args_list = {'set_context': set_context,
- 'tenant_id': tenant_id}
+ args_list = {'tenant_id': tenant_id}
if flavor_id:
args_list['flavor_id'] = flavor_id
private_port = None
@@ -551,8 +544,8 @@ class L3NatTestCaseMixin(object):
private_port = self._show('ports', port_id)
with test_db_base_plugin_v2.optional_ctx(
private_port, self.port,
- set_context=set_context,
- tenant_id=tenant_id) as private_port:
+ tenant_id=tenant_id,
+ is_admin=as_admin) as private_port:
with self.router(**args_list) as r:
sid = private_port['port']['fixed_ips'][0]['subnet_id']
private_sub = {'subnet': {'id': sid}}
@@ -571,7 +564,7 @@ class L3NatTestCaseMixin(object):
port_id=private_port['port']['id'],
fixed_ip=fixed_ip,
tenant_id=tenant_id,
- set_context=set_context,
+ as_admin=as_admin,
**kwargs)
yield floatingip
@@ -581,8 +574,8 @@ class L3NatTestCaseMixin(object):
@contextlib.contextmanager
def floatingip_no_assoc_with_public_sub(self, private_sub, fmt=None,
- set_context=False, public_sub=None,
- flavor_id=None, **kwargs):
+ public_sub=None, flavor_id=None,
+ as_admin=False, **kwargs):
if 'project_id' in kwargs:
kwargs['tenant_id'] = kwargs['project_id']
self._set_net_external(public_sub['subnet']['network_id'])
@@ -606,7 +599,7 @@ class L3NatTestCaseMixin(object):
floatingip = self._make_floatingip(
fmt or self.fmt,
public_sub['subnet']['network_id'],
- set_context=set_context,
+ as_admin=as_admin,
**kwargs)
yield floatingip, r
@@ -615,14 +608,14 @@ class L3NatTestCaseMixin(object):
floatingip['floatingip']['id'])
@contextlib.contextmanager
- def floatingip_no_assoc(self, private_sub, fmt=None,
- set_context=False, flavor_id=None, **kwargs):
+ def floatingip_no_assoc(self, private_sub, fmt=None, flavor_id=None,
+ as_admin=False, **kwargs):
if 'project_id' in kwargs:
kwargs['tenant_id'] = kwargs['project_id']
with self.subnet(cidr='12.0.0.0/24') as public_sub:
with self.floatingip_no_assoc_with_public_sub(
- private_sub, fmt, set_context, public_sub,
- flavor_id, **kwargs) as (f, r):
+ private_sub, fmt, public_sub, flavor_id,
+ as_admin=as_admin, **kwargs) as (f, r):
# Yield only the floating ip object
yield f
@@ -707,10 +700,10 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
def test_router_create_with_gwinfo(self):
with self.subnet() as s:
self._set_net_external(s['subnet']['network_id'])
- data = {'router': {'tenant_id': _uuid()}}
- data['router']['name'] = 'router1'
- data['router']['external_gateway_info'] = {
- 'network_id': s['subnet']['network_id']}
+ data = {'router': {
+ 'name': 'router1',
+ 'external_gateway_info': {
+ 'network_id': s['subnet']['network_id']}}}
router_req = self.new_create_request('routers', data, self.fmt)
res = router_req.get_response(self.ext_api)
router = self.deserialize(self.fmt, res)
@@ -726,8 +719,9 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
'external_fixed_ips': [{'ip_address': '10.0.0.99'}]
}
res = self._create_router(
- self.fmt, _uuid(), arg_list=('external_gateway_info',),
- external_gateway_info=ext_info
+ self.fmt, arg_list=('external_gateway_info',),
+ external_gateway_info=ext_info,
+ as_admin=True
)
router = self.deserialize(self.fmt, res)
self.assertEqual(
@@ -749,8 +743,10 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
{'subnet_id': s['subnet']['id']}]
}
res = self._create_router(
- self.fmt, _uuid(), arg_list=('external_gateway_info',),
- external_gateway_info=ext_info
+ self.fmt,
+ arg_list=('external_gateway_info',),
+ external_gateway_info=ext_info,
+ as_admin=True
)
router = self.deserialize(self.fmt, res)
ext_ips = router['router']['external_gateway_info'][
@@ -768,8 +764,8 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
'external_fixed_ips': [{'ip_address': '10.0.0.99'}]
}
res = self._create_router(
- self.fmt, _uuid(), arg_list=('external_gateway_info',),
- set_context=True, external_gateway_info=ext_info
+ self.fmt, arg_list=('external_gateway_info',),
+ external_gateway_info=ext_info
)
self.assertEqual(exc.HTTPForbidden.code, res.status_int)
@@ -873,7 +869,8 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
r['router']['id'],
s['subnet']['network_id'],
ext_ips=[{'ip_address': s['subnet']['gateway_ip']}],
- expected_code=exc.HTTPBadRequest.code)
+ expected_code=exc.HTTPBadRequest.code,
+ as_admin=True)
def test_router_update_gateway_with_invalid_external_ip(self):
with self.router() as r:
@@ -883,7 +880,8 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
r['router']['id'],
s['subnet']['network_id'],
ext_ips=[{'ip_address': '99.99.99.99'}],
- expected_code=exc.HTTPBadRequest.code)
+ expected_code=exc.HTTPBadRequest.code,
+ as_admin=True)
def test_router_update_gateway_with_invalid_external_subnet(self):
with self.subnet() as s1,\
@@ -895,7 +893,8 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
s1['subnet']['network_id'],
# this subnet is not on the same network so this should fail
ext_ips=[{'subnet_id': s2['subnet']['id']}],
- expected_code=exc.HTTPBadRequest.code)
+ expected_code=exc.HTTPBadRequest.code,
+ as_admin=True)
def test_router_update_gateway_with_different_external_subnet(self):
with self.network() as n:
@@ -906,11 +905,13 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
res1 = self._add_external_gateway_to_router(
r['router']['id'],
n['network']['id'],
- ext_ips=[{'subnet_id': s1['subnet']['id']}])
+ ext_ips=[{'subnet_id': s1['subnet']['id']}],
+ as_admin=True)
res2 = self._add_external_gateway_to_router(
r['router']['id'],
n['network']['id'],
- ext_ips=[{'subnet_id': s2['subnet']['id']}])
+ ext_ips=[{'subnet_id': s2['subnet']['id']}],
+ as_admin=True)
fip1 = res1['router']['external_gateway_info']['external_fixed_ips'][0]
fip2 = res2['router']['external_gateway_info']['external_fixed_ips'][0]
self.assertEqual(s1['subnet']['id'], fip1['subnet_id'])
@@ -944,7 +945,8 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
res1 = self._add_external_gateway_to_router(
r['router']['id'],
n['network']['id'],
- ext_ips=[{'subnet_id': s1['subnet']['id']}])
+ ext_ips=[{'subnet_id': s1['subnet']['id']}],
+ as_admin=True)
fip1 = (res1['router']['external_gateway_info']
['external_fixed_ips'][0])
self.assertEqual(s1['subnet']['id'], fip1['subnet_id'])
@@ -953,7 +955,8 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
n['network']['id'],
ext_ips=[{'ip_address': fip1['ip_address'],
'subnet_id': s1['subnet']['id']},
- {'subnet_id': s2['subnet']['id']}])
+ {'subnet_id': s2['subnet']['id']}],
+ as_admin=True)
self.assertEqual(fip1, res2['router']['external_gateway_info']
['external_fixed_ips'][0])
fip2 = (res2['router']['external_gateway_info']
@@ -971,7 +974,8 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
self._add_external_gateway_to_router(
r['router']['id'],
n['network']['id'],
- ext_ips=[{'subnet_id': s1['subnet']['id']}])
+ ext_ips=[{'subnet_id': s1['subnet']['id']}],
+ as_admin=True)
plugin = directory.get_plugin(plugin_constants.L3)
mock.patch.object(
plugin, 'update_router',
@@ -990,7 +994,8 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
res1 = self._add_external_gateway_to_router(
r['router']['id'],
n['network']['id'],
- ext_ips=[{'subnet_id': s1['subnet']['id']}])
+ ext_ips=[{'subnet_id': s1['subnet']['id']}],
+ as_admin=True)
fip1 = (res1['router']['external_gateway_info']
['external_fixed_ips'][0])
sres = self._create_subnet(self.fmt, net_id=n['network']['id'],
@@ -1028,7 +1033,8 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
n['network']['id'],
ext_ips=[{'subnet_id': s1['subnet']['id']},
{'subnet_id': s2['subnet']['id']}],
- expected_code=exc.HTTPOk.code)
+ expected_code=exc.HTTPOk.code,
+ as_admin=True)
res1 = self._show('routers', r['router']['id'])
original_fips = (res1['router']['external_gateway_info']
['external_fixed_ips'])
@@ -1309,9 +1315,9 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
def test_router_add_interface_subnet_with_bad_tenant_returns_404(self):
tenant_id = _uuid()
- with self.router(tenant_id=tenant_id, set_context=True) as r:
- with self.network(tenant_id=tenant_id, set_context=True) as n:
- with self.subnet(network=n, set_context=True) as s:
+ with self.router(tenant_id=tenant_id) as r:
+ with self.network(tenant_id=tenant_id) as n:
+ with self.subnet(network=n, tenant_id=tenant_id) as s:
err_code = exc.HTTPNotFound.code
self._router_interface_action('add',
r['router']['id'],
@@ -1322,7 +1328,8 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
body = self._router_interface_action('add',
r['router']['id'],
s['subnet']['id'],
- None)
+ None,
+ tenant_id=tenant_id)
self.assertIn('port_id', body)
self._router_interface_action('remove',
r['router']['id'],
@@ -1334,8 +1341,8 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
def test_router_add_interface_by_subnet_other_tenant_subnet_returns_400(
self):
router_tenant_id = _uuid()
- with self.router(tenant_id=router_tenant_id, set_context=True) as r:
- with self.network(shared=True) as n:
+ with self.router(tenant_id=router_tenant_id) as r:
+ with self.network(as_admin=True, shared=True) as n:
with self.subnet(network=n) as s:
err_code = exc.HTTPBadRequest.code
self._router_interface_action('add',
@@ -1350,10 +1357,8 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
):
router_tenant_id = _uuid()
with mock.patch.object(network_obj.NetworkRBAC, "get_projects") as g:
- with self.router(
- tenant_id=router_tenant_id, set_context=True
- ) as r:
- with self.network(shared=True) as n:
+ with self.router(tenant_id=router_tenant_id) as r:
+ with self.network(as_admin=True, shared=True) as n:
with self.subnet(network=n) as s:
g.return_value = [router_tenant_id]
self._router_interface_action(
@@ -1369,8 +1374,8 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
self, out_of_pool=False, router_action_as_admin=False,
expected_code=exc.HTTPOk.code):
router_tenant_id = _uuid()
- with self.router(tenant_id=router_tenant_id, set_context=True) as r:
- with self.network(shared=True) as n:
+ with self.router(tenant_id=router_tenant_id) as r:
+ with self.network(as_admin=True, shared=True) as n:
with self.subnet(network=n) as s1, (
self.subnet(network=n, cidr='fd00::/64',
ip_version=lib_constants.IP_VERSION_6)
@@ -1386,13 +1391,13 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
'ip_address':
s2['subnet']['gateway_ip']}
with self.port(subnet=s1, fixed_ips=fixed_ips,
- tenant_id=router_tenant_id) as p:
- kwargs = {'expected_code': expected_code}
- if not router_action_as_admin:
- kwargs['tenant_id'] = router_tenant_id
+ tenant_id=router_tenant_id,
+ is_admin=True) as p:
self._router_interface_action(
'add', r['router']['id'], None, p['port']['id'],
- **kwargs)
+ expected_code=expected_code,
+ tenant_id=router_tenant_id,
+ as_admin=router_action_as_admin)
def test_router_add_interface_by_port_other_tenant_address_in_pool(
self):
@@ -1414,13 +1419,17 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
with self.router(tenant_id=tenant_id) as r,\
self.network(tenant_id=tenant_id) as n1,\
self.network(tenant_id=other_tenant_id) as n2:
- with self.subnet(network=n1, cidr='10.0.0.0/24') as s1,\
- self.subnet(network=n2, cidr='10.1.0.0/24') as s2:
+ with self.subnet(network=n1, cidr='10.0.0.0/24',
+ tenant_id=tenant_id) as s1,\
+ self.subnet(network=n2, cidr='10.1.0.0/24',
+ tenant_id=other_tenant_id) as s2:
body = self._router_interface_action(
'add',
r['router']['id'],
s2['subnet']['id'],
- None)
+ None,
+ tenant_id=other_tenant_id,
+ as_admin=True)
self.assertIn('port_id', body)
self._router_interface_action(
'add',
@@ -1472,7 +1481,7 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
{'ip_address': '1.1.1.1'},
{'ip_address': '2.2.2.2'}]}}
self._update('ports', p['port']['id'], data,
- neutron_context=context.get_admin_context(),
+ as_admin=True,
expected_code=exc.HTTPBadRequest.code)
self._router_interface_action('remove',
@@ -1666,12 +1675,10 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
def test_router_add_interface_port_bad_tenant_returns_404(self):
tenant_id = _uuid()
- with self.router(tenant_id=tenant_id, set_context=True) as r:
- with self.network(tenant_id=tenant_id, set_context=True) as n:
- with self.subnet(tenant_id=tenant_id, network=n,
- set_context=True) as s:
- with self.port(tenant_id=tenant_id, subnet=s,
- set_context=True) as p:
+ with self.router(tenant_id=tenant_id) as r:
+ with self.network(tenant_id=tenant_id) as n:
+ with self.subnet(tenant_id=tenant_id, network=n) as s:
+ with self.port(tenant_id=tenant_id, subnet=s) as p:
err_code = exc.HTTPNotFound.code
self._router_interface_action('add',
r['router']['id'],
@@ -1837,7 +1844,8 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
res = self._add_external_gateway_to_router(
r['router']['id'], ext_net_id,
ext_ips=[{'subnet_id': s1['subnet']['id']}],
- expected_code=exc.HTTPBadRequest.code)
+ expected_code=exc.HTTPBadRequest.code,
+ as_admin=True)
expected_msg = (
"Bad router request: Cidr 10.0.2.0/24 of subnet "
"%(external_subnet_id)s overlaps with cidr 10.0.2.0/24 of "
@@ -1967,15 +1975,12 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
self.assertIsNone(gw_info)
def test_router_add_and_remove_gateway_tenant_ctx(self):
- with self.router(tenant_id='noadmin',
- set_context=True) as r:
+ with self.router() as r:
with self.subnet() as s:
self._set_net_external(s['subnet']['network_id'])
- ctx = context.Context('', 'noadmin')
self._add_external_gateway_to_router(
r['router']['id'],
- s['subnet']['network_id'],
- neutron_context=ctx)
+ s['subnet']['network_id'])
body = self._show('routers', r['router']['id'])
net_id = body['router']['external_gateway_info']['network_id']
self.assertEqual(net_id, s['subnet']['network_id'])
@@ -1988,8 +1993,7 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
def test_create_router_port_with_device_id_of_other_tenants_router(self):
with self.router() as admin_router:
- with self.network(tenant_id='tenant_a',
- set_context=True) as n:
+ with self.network(tenant_id='tenant_a') as n:
with self.subnet(network=n):
for device_owner in lib_constants.ROUTER_INTERFACE_OWNERS:
self._create_port(
@@ -1997,7 +2001,6 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
tenant_id='tenant_a',
device_id=admin_router['router']['id'],
device_owner=device_owner,
- set_context=True,
expected_res_status=exc.HTTPConflict.code)
def test_create_non_router_port_device_id_of_other_tenants_router_update(
@@ -2006,38 +2009,32 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
# port that matches the device_id of another tenants router and then
# we change the device_owner to be network:router_interface.
with self.router() as admin_router:
- with self.network(tenant_id='tenant_a',
- set_context=True) as n:
+ with self.network(tenant_id='tenant_a') as n:
with self.subnet(network=n):
for device_owner in lib_constants.ROUTER_INTERFACE_OWNERS:
port_res = self._create_port(
self.fmt, n['network']['id'],
tenant_id='tenant_a',
- device_id=admin_router['router']['id'],
- set_context=True)
+ device_id=admin_router['router']['id'])
port = self.deserialize(self.fmt, port_res)
- neutron_context = context.Context('', 'tenant_a')
data = {'port': {'device_owner': device_owner}}
self._update('ports', port['port']['id'], data,
- neutron_context=neutron_context,
- expected_code=exc.HTTPConflict.code)
+ expected_code=exc.HTTPConflict.code,
+ request_tenant_id='tenant_a')
def test_update_port_device_id_to_different_tenants_router(self):
with self.router() as admin_router:
- with self.router(tenant_id='tenant_a',
- set_context=True) as tenant_router:
- with self.network(tenant_id='tenant_a',
- set_context=True) as n:
+ with self.router(tenant_id='tenant_a') as tenant_router:
+ with self.network(tenant_id='tenant_a') as n:
with self.subnet(network=n) as s:
port = self._router_interface_action(
'add', tenant_router['router']['id'],
s['subnet']['id'], None, tenant_id='tenant_a')
- neutron_context = context.Context('', 'tenant_a')
data = {'port':
{'device_id': admin_router['router']['id']}}
self._update('ports', port['port_id'], data,
- neutron_context=neutron_context,
- expected_code=exc.HTTPConflict.code)
+ expected_code=exc.HTTPConflict.code,
+ request_tenant_id='tenant_a')
def test_router_add_gateway_invalid_network_returns_400(self):
with self.router() as r:
@@ -2122,7 +2119,8 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
res = self._add_external_gateway_to_router(
r['router']['id'], n['network']['id'],
ext_ips=[{'subnet_id': s['subnet']['id'],
- 'ip_address': '10.0.0.4'}])
+ 'ip_address': '10.0.0.4'}],
+ as_admin=True)
gw_info = res['router']['external_gateway_info']
ext_ips = gw_info['external_fixed_ips'][0]
expected_gw_ips = [ext_ips['ip_address']]
@@ -2314,7 +2312,7 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
def test_router_delete_with_port_existed_returns_409(self):
with self.subnet() as subnet:
- res = self._create_router(self.fmt, _uuid())
+ res = self._create_router(self.fmt)
router = self.deserialize(self.fmt, res)
self._router_interface_action('add',
router['router']['id'],
@@ -2329,7 +2327,7 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
p['port']['fixed_ips'][0]['subnet_id']}}
with self.subnet(cidr='12.0.0.0/24') as public_sub:
self._set_net_external(public_sub['subnet']['network_id'])
- res = self._create_router(self.fmt, _uuid())
+ res = self._create_router(self.fmt)
r = self.deserialize(self.fmt, res)
self._add_external_gateway_to_router(
r['router']['id'],
@@ -2346,12 +2344,10 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
def test_router_show(self):
name = 'router1'
- tenant_id = _uuid()
- expected_value = [('name', name), ('tenant_id', tenant_id),
+ expected_value = [('name', name), ('tenant_id', self._tenant_id),
('admin_state_up', True), ('status', 'ACTIVE'),
('external_gateway_info', None)]
- with self.router(name='router1', admin_state_up=True,
- tenant_id=tenant_id) as router:
+ with self.router(name='router1', admin_state_up=True) as router:
res = self._show('routers', router['router']['id'])
for k, v in expected_value:
self.assertEqual(res['router'][k], v)
@@ -2365,7 +2361,8 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
s1['subnet']['network_id'])
self._update('networks', s1['subnet']['network_id'],
{'network': {extnet_apidef.EXTERNAL: False}},
- expected_code=exc.HTTPConflict.code)
+ expected_code=exc.HTTPConflict.code,
+ as_admin=True)
def test_network_update_external(self):
with self.router() as r:
@@ -2377,7 +2374,8 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
r['router']['id'],
s1['subnet']['network_id'])
self._update('networks', testnet['network']['id'],
- {'network': {extnet_apidef.EXTERNAL: False}})
+ {'network': {extnet_apidef.EXTERNAL: False}},
+ as_admin=True)
def test_floatingip_crd_ops(self):
with self.floatingip_with_assoc() as fip:
@@ -2457,8 +2455,7 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
self._make_floatingip(
self.fmt,
public_sub['subnet']['network_id'],
- port_id=private_port['port']['id'],
- set_context=False)
+ port_id=private_port['port']['id'])
self.assertTrue(agent_notification.called)
def test_floating_port_status_not_applicable(self):
@@ -2903,23 +2900,23 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
with self.subnet(cidr='11.0.0.0/24') as public_sub:
self._set_net_external(public_sub['subnet']['network_id'])
with self.port() as private_port:
- with self.router(tenant_id='router-owner',
- set_context=True) as r:
+ with self.router(tenant_id='router-owner') as r:
sid = private_port['port']['fixed_ips'][0]['subnet_id']
private_sub = {'subnet': {'id': sid}}
self._add_external_gateway_to_router(
r['router']['id'],
- public_sub['subnet']['network_id'])
+ public_sub['subnet']['network_id'],
+ as_admin=True)
self._router_interface_action(
'add', r['router']['id'],
- private_sub['subnet']['id'], None)
+ private_sub['subnet']['id'], None,
+ as_admin=True)
self._make_floatingip(self.fmt,
public_sub['subnet']['network_id'],
port_id=private_port['port']['id'],
- fixed_ip=None,
- set_context=True)
+ fixed_ip=None)
def test_floatingip_update_different_router(self):
# Create subnet with different CIDRs to account for plugins which
@@ -2983,10 +2980,12 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
def test_floatingip_update_different_port_owner_as_admin(self):
with self.subnet() as private_sub:
with self.floatingip_no_assoc(private_sub) as fip:
- with self.port(subnet=private_sub, tenant_id='other') as p:
+ with self.port(subnet=private_sub, tenant_id='other',
+ is_admin=True) as p:
body = self._update('floatingips', fip['floatingip']['id'],
{'floatingip':
- {'port_id': p['port']['id']}})
+ {'port_id': p['port']['id']}},
+ as_admin=True)
self.assertEqual(p['port']['id'],
body['floatingip']['port_id'])
@@ -3032,7 +3031,7 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
p['port']['fixed_ips'][0]['subnet_id']}}
with self.subnet(cidr='12.0.0.0/24') as public_sub:
self._set_net_external(public_sub['subnet']['network_id'])
- res = self._create_router(self.fmt, _uuid())
+ res = self._create_router(self.fmt)
r = self.deserialize(self.fmt, res)
self._add_external_gateway_to_router(
r['router']['id'],
@@ -3060,8 +3059,7 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
res = self._create_floatingip(
self.fmt,
public_sub['subnet']['network_id'],
- subnet_id=public_sub['subnet']['id'],
- set_context=True)
+ subnet_id=public_sub['subnet']['id'])
self.assertEqual(exc.HTTPCreated.code, res.status_int)
def test_create_floatingip_with_subnet_id_and_fip_address(self):
@@ -3073,7 +3071,8 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
self.fmt,
ext_net['network']['id'],
subnet_id=ext_subnet['subnet']['id'],
- floating_ip='10.10.10.100')
+ floating_ip='10.10.10.100',
+ as_admin=True)
fip = self.deserialize(self.fmt, res)
self.assertEqual(exc.HTTPCreated.code, res.status_int)
self.assertEqual('10.10.10.100',
@@ -3088,7 +3087,8 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
self.fmt,
ext_net['network']['id'],
subnet_id=ext_subnet['subnet']['id'],
- floating_ip='20.20.20.200')
+ floating_ip='20.20.20.200',
+ as_admin=True)
data = self.deserialize(self.fmt, res)
self.assertEqual(exc.HTTPBadRequest.code, res.status_int)
msg = str(n_exc.InvalidIpForSubnet(ip_address='20.20.20.200'))
@@ -3472,7 +3472,8 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
network_id = s['subnet']['network_id']
self._set_net_external(network_id)
fp = self._make_floatingip(self.fmt, network_id,
- floating_ip='10.0.0.10')
+ floating_ip='10.0.0.10',
+ as_admin=True)
self.assertEqual('10.0.0.10',
fp['floatingip']['floating_ip_address'])
@@ -3484,18 +3485,17 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
network_id = s['subnet']['network_id']
self._set_net_external(network_id)
fp = self._make_floatingip(self.fmt, network_id,
- floating_ip='10.0.0.30')
+ floating_ip='10.0.0.30',
+ as_admin=True)
self.assertEqual('10.0.0.30',
fp['floatingip']['floating_ip_address'])
def test_create_floatingip_with_specific_ip_non_admin(self):
- ctx = context.Context('user_id', 'tenant_id')
-
with self.subnet(cidr='10.0.0.0/24') as s:
network_id = s['subnet']['network_id']
self._set_net_external(network_id)
self._make_floatingip(self.fmt, network_id,
- set_context=ctx,
+ tenant_id='tenant_id',
floating_ip='10.0.0.10',
http_status=exc.HTTPForbidden.code)
@@ -3506,7 +3506,8 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
self._set_net_external(network_id)
self._make_floatingip(self.fmt, network_id,
floating_ip='10.0.1.10',
- http_status=exc.HTTPBadRequest.code)
+ http_status=exc.HTTPBadRequest.code,
+ as_admin=True)
def test_create_floatingip_with_duplicated_specific_ip(self):
@@ -3514,11 +3515,13 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
network_id = s['subnet']['network_id']
self._set_net_external(network_id)
self._make_floatingip(self.fmt, network_id,
- floating_ip='10.0.0.10')
+ floating_ip='10.0.0.10',
+ as_admin=True)
self._make_floatingip(self.fmt, network_id,
floating_ip='10.0.0.10',
- http_status=exc.HTTPConflict.code)
+ http_status=exc.HTTPConflict.code,
+ as_admin=True)
def test_create_floatingips_native_quotas(self):
quota = 1
@@ -3711,7 +3714,8 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
{'port_id': port['port']['id']})
# fetch port and confirm device_id and device_owner
- body = self._show('ports', port['port']['id'])
+ body = self._show('ports', port['port']['id'],
+ tenant_id=tenant_id)
self.assertEqual('', body['port']['device_owner'])
self.assertEqual('', body['port']['device_id'])
@@ -3756,7 +3760,8 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
data = {'port': {'fixed_ips': [
{'ip_address': gw_ip}]}}
req = self.new_update_request('ports', data,
- gw_port_id)
+ gw_port_id,
+ as_admin=True)
res = self.deserialize(self.fmt,
req.get_response(self.api))
self.assertEqual(gw_ip_len, len(res['port']['fixed_ips']))
@@ -3833,9 +3838,8 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
'network_id': network['network']['id'],
'subnetpool_id': subnetpool_id,
'prefixlen': 24,
- 'ip_version': lib_constants.IP_VERSION_4,
- 'tenant_id': tenant_id}}
- req = self.new_create_request('subnets', data)
+ 'ip_version': lib_constants.IP_VERSION_4}}
+ req = self.new_create_request('subnets', data, tenant_id=tenant_id)
subnet = self.deserialize(self.fmt, req.get_response(self.api))
admin_ctx = context.get_admin_context()
@@ -3881,7 +3885,7 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
# simulate a failed update by just setting the device_id of
# the fip port back to PENDING
data = {'port': {'device_id': 'PENDING'}}
- self._update('ports', fip_port['id'], data)
+ self._update('ports', fip_port['id'], data, as_admin=True)
plugin._clean_garbage()
# first call just marks as candidate, so it shouldn't be changed
port = self._show('ports', fip_port['id'])
@@ -3925,7 +3929,7 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
events.BEFORE_DELETE)
with self.subnet():
- res = self._create_router(self.fmt, _uuid())
+ res = self._create_router(self.fmt)
router = self.deserialize(self.fmt, res)
self._delete('routers', router['router']['id'],
exc.HTTPForbidden.code)
@@ -4151,8 +4155,7 @@ class L3AgentDbTestCaseBase(L3NatTestCaseMixin):
f = self._make_floatingip(self.fmt,
public_sub['subnet']['network_id'],
port_id=None,
- fixed_ip=None,
- set_context=True)
+ fixed_ip=None)
self._delete('floatingips', f['floatingip']['id'])
fake_method.assert_called_once_with(
resources.FLOATING_IP, events.AFTER_DELETE, mock.ANY,
@@ -4194,7 +4197,7 @@ class L3AgentDbTestCaseBase(L3NatTestCaseMixin):
# converted into its API equivalent of 404
e404 = mock.Mock(side_effect=l3_exc.RouterNotFound(router_id='1'))
registry.subscribe(e404, resources.ROUTER, events.PRECOMMIT_CREATE)
- res = self._create_router(self.fmt, 'tenid')
+ res = self._create_router(self.fmt)
self.assertEqual(exc.HTTPNotFound.code, res.status_int)
# make sure nothing committed
body = self._list('routers')
@@ -4521,7 +4524,7 @@ class L3NatDBFloatingIpTestCaseWithDNS(L3BaseForSepTests, L3NatTestCaseMixin):
self.mock_admin_client.reset_mock()
def _create_network(self, fmt, name, admin_state_up,
- arg_list=None, set_context=False, tenant_id=None,
+ arg_list=None, tenant_id=None, as_admin=False,
**kwargs):
new_arg_list = ('dns_domain',)
if arg_list is not None:
@@ -4529,12 +4532,12 @@ class L3NatDBFloatingIpTestCaseWithDNS(L3BaseForSepTests, L3NatTestCaseMixin):
return super(L3NatDBFloatingIpTestCaseWithDNS,
self)._create_network(fmt, name, admin_state_up,
arg_list=new_arg_list,
- set_context=set_context,
tenant_id=tenant_id,
+ as_admin=as_admin,
**kwargs)
def _create_port(self, fmt, name, admin_state_up,
- arg_list=None, set_context=False, tenant_id=None,
+ arg_list=None, tenant_id=None, is_admin=False,
**kwargs):
new_arg_list = ('dns_name',)
if arg_list is not None:
@@ -4542,8 +4545,8 @@ class L3NatDBFloatingIpTestCaseWithDNS(L3BaseForSepTests, L3NatTestCaseMixin):
return super(L3NatDBFloatingIpTestCaseWithDNS,
self)._create_port(fmt, name, admin_state_up,
arg_list=new_arg_list,
- set_context=set_context,
tenant_id=tenant_id,
+ is_admin=is_admin,
**kwargs)
def _create_net_sub_port(self, dns_domain='', dns_name=''):
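Summarizing the L3 mixin changes above: set_context is gone, requests run as the default (non-admin) tenant unless as_admin=True is passed, and privileged setup steps such as _set_net_external now escalate explicitly. An illustrative test body under the new convention (method names taken from the mixin above; a sketch, not a complete test case):

    def test_interface_add_sketch(self):
        # Creating a shared network and flagging it external need admin;
        # the tenant's own router interface action does not.
        with self.network(as_admin=True, shared=True) as n, \
                self.subnet(network=n) as s, \
                self.router(tenant_id='tenant1') as r:
            self._set_net_external(s['subnet']['network_id'])
            self._router_interface_action('add', r['router']['id'],
                                          s['subnet']['id'], None,
                                          tenant_id='tenant1')
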
diff --git a/neutron/tests/unit/extensions/test_l3_conntrack_helper.py b/neutron/tests/unit/extensions/test_l3_conntrack_helper.py
index 12c1c273ab..e49125bb0a 100644
--- a/neutron/tests/unit/extensions/test_l3_conntrack_helper.py
+++ b/neutron/tests/unit/extensions/test_l3_conntrack_helper.py
@@ -19,7 +19,6 @@ from webob import exc
from neutron_lib.api.definitions import l3 as l3_apidef
from neutron_lib.api.definitions import l3_conntrack_helper as l3_ct
-from neutron_lib import context
from oslo_utils import uuidutils
from neutron.extensions import l3
@@ -67,19 +66,16 @@ class L3NConntrackHelperTestCase(test_l3.L3BaseForIntTests,
def _create_router_conntrack_helper(self, fmt, router_id,
protocol, port, helper):
- tenant_id = self.tenant_id or _uuid()
data = {'conntrack_helper': {
"protocol": protocol,
"port": port,
"helper": helper}
}
- router_ct_req = self._req(
- 'POST', 'routers', data,
+ router_ct_req = self.new_create_request(
+ 'routers', data,
fmt or self.fmt, id=router_id,
- subresource='conntrack_helpers')
-
- router_ct_req.environ['neutron.context'] = context.Context(
- '', tenant_id, is_admin=True)
+ subresource='conntrack_helpers',
+ as_admin=True)
return router_ct_req.get_response(self.ext_api)
@@ -90,11 +86,10 @@ class L3NConntrackHelperTestCase(test_l3.L3BaseForIntTests,
conntrack_helper[k] = v
data = {'conntrack_helper': conntrack_helper}
- router_ct_req = self._req(
- 'PUT', 'routers', data,
- fmt or self.fmt, id=router_id,
- sub_id=conntrack_helper_id,
- subresource='conntrack_helpers')
+ router_ct_req = self.new_update_request(
+ 'routers', data, router_id,
+ fmt or self.fmt, sub_id=conntrack_helper_id,
+ subresource='conntrack_helpers', as_admin=True)
return router_ct_req.get_response(self.ext_api)
def test_create_ct_with_duplicate_entry(self):
diff --git a/neutron/tests/unit/extensions/test_l3_ext_gw_mode.py b/neutron/tests/unit/extensions/test_l3_ext_gw_mode.py
index 9c941738ab..f8cc6a4bca 100644
--- a/neutron/tests/unit/extensions/test_l3_ext_gw_mode.py
+++ b/neutron/tests/unit/extensions/test_l3_ext_gw_mode.py
@@ -380,7 +380,7 @@ class ExtGwModeIntTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase,
def _set_router_external_gateway(self, router_id, network_id,
snat_enabled=None,
expected_code=exc.HTTPOk.code,
- neutron_context=None):
+ tenant_id=None, as_admin=False):
ext_gw_info = {'network_id': network_id}
# Need to set enable_snat also if snat_enabled == False
if snat_enabled is not None:
@@ -389,7 +389,8 @@ class ExtGwModeIntTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase,
{'router': {'external_gateway_info':
ext_gw_info}},
expected_code=expected_code,
- neutron_context=neutron_context)
+ request_tenant_id=tenant_id,
+ as_admin=as_admin)
def test_router_gateway_set_fail_after_port_create(self):
with self.router() as r, self.subnet() as s:
@@ -444,7 +445,8 @@ class ExtGwModeIntTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase,
('external_gateway_info', None)]
with self.router(name=name, admin_state_up=True,
tenant_id=tenant_id) as router:
- res = self._show('routers', router['router']['id'])
+ res = self._show('routers', router['router']['id'],
+ tenant_id=tenant_id)
for k, v in expected_value:
self.assertEqual(res['router'][k], v)
@@ -468,8 +470,10 @@ class ExtGwModeIntTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase,
'subnet_id': s['subnet']['id']}]})]
with self.router(name=name, admin_state_up=True,
tenant_id=tenant_id,
- external_gateway_info=input_value) as router:
- res = self._show('routers', router['router']['id'])
+ external_gateway_info=input_value,
+ as_admin=True) as router:
+ res = self._show('routers', router['router']['id'],
+ tenant_id=tenant_id)
for k, v in expected_value:
self.assertEqual(v, res['router'][k])
@@ -493,7 +497,8 @@ class ExtGwModeIntTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase,
self._set_router_external_gateway(
r['router']['id'], ext_net_id,
snat_enabled=snat_input_value,
- expected_code=expected_http_code)
+ expected_code=expected_http_code,
+ as_admin=True)
if expected_http_code != exc.HTTPOk.code:
return
body = self._show('routers', r['router']['id'])
diff --git a/neutron/tests/unit/extensions/test_l3_ndp_proxy.py b/neutron/tests/unit/extensions/test_l3_ndp_proxy.py
index d5ce2d728a..1836219cc7 100644
--- a/neutron/tests/unit/extensions/test_l3_ndp_proxy.py
+++ b/neutron/tests/unit/extensions/test_l3_ndp_proxy.py
@@ -22,7 +22,6 @@ from neutron_lib.api.definitions import external_net as enet_apidef
from neutron_lib.api.definitions import l3 as l3_apidef
from neutron_lib.api.definitions import l3_ext_gw_mode
from neutron_lib import constants
-from neutron_lib import context
from neutron_lib import fixture
from oslo_config import cfg
from oslo_utils import uuidutils
@@ -66,7 +65,7 @@ class L3NDPProxyTestCase(test_address_scope.AddressScopeTestCase,
test_l3.L3BaseForIntTests,
test_l3.L3NatTestCaseMixin):
fmt = 'json'
- tenant_id = _uuid()
+ _tenant_id = _uuid()
def setUp(self):
mock.patch('neutron.api.rpc.handlers.resources_rpc.'
@@ -81,11 +80,11 @@ class L3NDPProxyTestCase(test_address_scope.AddressScopeTestCase,
self.address_scope_id = self._make_address_scope(
self.fmt, constants.IP_VERSION_6,
- **{'tenant_id': self.tenant_id})['address_scope']['id']
+ **{'tenant_id': self._tenant_id})['address_scope']['id']
self.subnetpool_id = self._make_subnetpool(
self.fmt, ['2001::0/96'],
**{'address_scope_id': self.address_scope_id,
- 'default_prefixlen': 112, 'tenant_id': self.tenant_id,
+ 'default_prefixlen': 112,
'name': "test-ipv6-pool"})['subnetpool']['id']
self.ext_net = self._make_network(
self.fmt, 'ext-net', True)
@@ -103,7 +102,7 @@ class L3NDPProxyTestCase(test_address_scope.AddressScopeTestCase,
ipv6_ra_mode=constants.DHCPV6_STATEFUL,
ipv6_address_mode=constants.DHCPV6_STATEFUL)
self._ext_subnet_v6_id = self._ext_subnet_v6['subnet']['id']
- self.router1 = self._make_router(self.fmt, self.tenant_id)
+ self.router1 = self._make_router(self.fmt, self._tenant_id)
self.router1_id = self.router1['router']['id']
self.private_net = self._make_network(self.fmt, 'private-net', True)
self.private_subnet = self._make_subnet(
@@ -125,7 +124,7 @@ class L3NDPProxyTestCase(test_address_scope.AddressScopeTestCase,
description=None, fmt=None, tenant_id=None,
expected_code=exc.HTTPCreated.code,
expected_message=None):
- tenant_id = tenant_id or self.tenant_id
+ tenant_id = tenant_id or self._tenant_id
data = {'ndp_proxy': {
"port_id": port_id,
"router_id": router_id}
@@ -135,11 +134,9 @@ class L3NDPProxyTestCase(test_address_scope.AddressScopeTestCase,
if description:
data['ndp_proxy']['description'] = description
- req_res = self._req(
- 'POST', 'ndp-proxies', data,
- fmt or self.fmt)
- req_res.environ['neutron.context'] = context.Context(
- '', tenant_id, is_admin=True)
+ req_res = self.new_create_request(
+ 'ndp-proxies', data, fmt or self.fmt,
+ tenant_id=tenant_id, as_admin=True)
res = req_res.get_response(self.ext_api)
self.assertEqual(expected_code, res.status_int)
@@ -152,15 +149,14 @@ class L3NDPProxyTestCase(test_address_scope.AddressScopeTestCase,
tenant_id=None, fmt=None,
expected_code=exc.HTTPOk.code,
expected_message=None, **kwargs):
- tenant_id = tenant_id or self.tenant_id
+ tenant_id = tenant_id or self._tenant_id
data = {}
for k, v in kwargs.items():
data[k] = v
- req_res = self._req(
- 'PUT', 'ndp-proxies', {'ndp_proxy': data},
- fmt or self.fmt, id=ndp_proxy_id)
- req_res.environ['neutron.context'] = context.Context(
- '', tenant_id, is_admin=True)
+ req_res = self.new_update_request(
+ 'ndp-proxies', {'ndp_proxy': data},
+ ndp_proxy_id, fmt or self.fmt,
+ tenant_id=tenant_id, as_admin=True)
res = req_res.get_response(self.ext_api)
self.assertEqual(expected_code, res.status_int)
if expected_message:
@@ -208,13 +204,12 @@ class L3NDPProxyTestCase(test_address_scope.AddressScopeTestCase,
def _update_router(self, router_id, update_date, tenant_id=None,
fmt=None, expected_code=exc.HTTPOk.code,
expected_message=None):
- tenant_id = tenant_id or self.tenant_id
+ tenant_id = tenant_id or self._tenant_id
data = {'router': update_date}
router_req = self.new_update_request(
'routers', id=router_id, data=data,
- fmt=(fmt or self.fmt))
- router_req.environ['neutron.context'] = context.Context(
- '', tenant_id, is_admin=True)
+ fmt=(fmt or self.fmt),
+ tenant_id=tenant_id, as_admin=True)
res = router_req.get_response(self.ext_api)
self.assertEqual(expected_code, res.status_int)
if expected_message:
@@ -275,7 +270,7 @@ class L3NDPProxyTestCase(test_address_scope.AddressScopeTestCase,
ipv6_address_mode=constants.DHCPV6_STATEFUL):
self._set_net_external(ext_net['network']['id'])
res = self._make_router(
- self.fmt, self.tenant_id,
+ self.fmt, self._tenant_id,
external_gateway_info={'network_id': ext_net['network']['id']},
**{'enable_ndp_proxy': True})
expected_msg = (
@@ -284,7 +279,7 @@ class L3NDPProxyTestCase(test_address_scope.AddressScopeTestCase,
"scope.") % ext_net['network']['id']
self.assertTrue(expected_msg in res['NeutronError']['message'])
router = self._make_router(
- self.fmt, self.tenant_id,
+ self.fmt, self._tenant_id,
external_gateway_info={'network_id': ext_net['network']['id']})
expected_msg = (
"Can not enable ndp proxy on router %s, The router has no "
@@ -473,18 +468,18 @@ class L3NDPProxyTestCase(test_address_scope.AddressScopeTestCase,
def test_create_ndp_proxy_with_different_address_scope(self):
with self.address_scope(
ip_version=constants.IP_VERSION_6,
- tenant_id=self.tenant_id) as addr_scope, \
+ tenant_id=self._tenant_id) as addr_scope, \
self.subnetpool(['2001::100:0:0/100'],
**{'address_scope_id': addr_scope['address_scope']['id'],
'default_prefixlen': 112, 'name': 'test1',
- 'tenant_id': self.tenant_id}) as subnetpool, \
+ 'tenant_id': self._tenant_id}) as subnetpool, \
self.subnet(
cidr='2001::100:1:0/112',
ip_version=constants.IP_VERSION_6,
ipv6_ra_mode=constants.DHCPV6_STATEFUL,
ipv6_address_mode=constants.DHCPV6_STATEFUL,
subnetpool_id=subnetpool['subnetpool']['id'],
- tenant_id=self.tenant_id) as subnet, \
+ tenant_id=self._tenant_id) as subnet, \
self.port(subnet) as port:
subnet_id = subnet['subnet']['id']
port_id = port['port']['id']
@@ -503,9 +498,7 @@ class L3NDPProxyTestCase(test_address_scope.AddressScopeTestCase,
def _create_router(self, data, expected_code=exc.HTTPCreated.code,
expected_message=None):
router_req = self.new_create_request(
- 'routers', data, self.fmt)
- router_req.environ['neutron.context'] = context.Context(
- '', self.tenant_id, is_admin=True)
+ 'routers', data, self.fmt, as_admin=True)
res = router_req.get_response(self.ext_api)
self.assertEqual(expected_code, res.status_int)
if expected_message:
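One small but load-bearing rename in this file: the class attribute tenant_id becomes _tenant_id, matching the name the shared helpers fall back to (tenant_id = tenant_id or self._tenant_id), so every helper now defaults to the class's project without extra plumbing. A runnable sketch of why the rename matters, under that assumption:

    import uuid

    class _Base:
        _tenant_id = 'base-default'

    class L3NDPProxyTestCaseSketch(_Base):
        # was: tenant_id = _uuid(); the helpers read self._tenant_id, so
        # only the renamed attribute actually overrides the base default
        _tenant_id = str(uuid.uuid4())

    assert L3NDPProxyTestCaseSketch()._tenant_id != 'base-default'
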
diff --git a/neutron/tests/unit/extensions/test_local_ip.py b/neutron/tests/unit/extensions/test_local_ip.py
index 5de762d8b1..e15b31e821 100644
--- a/neutron/tests/unit/extensions/test_local_ip.py
+++ b/neutron/tests/unit/extensions/test_local_ip.py
@@ -19,7 +19,6 @@ from unittest import mock
import netaddr
from neutron_lib.api.definitions import local_ip as apidef
from neutron_lib import constants
-from neutron_lib import context
import webob.exc
from neutron.extensions import local_ip as lip_ext
@@ -46,10 +45,8 @@ class LocalIPTestBase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
for k, v in kwargs.items():
local_ip['local_ip'][k] = v
- req = self.new_create_request('local-ips', local_ip)
- neutron_context = context.Context(
- '', kwargs.get('project_id', self._tenant_id), is_admin=True)
- req.environ['neutron.context'] = neutron_context
+ req = self.new_create_request('local-ips', local_ip,
+ tenant_id=self._tenant_id, as_admin=True)
res = req.get_response(self.ext_api)
if res.status_int >= webob.exc.HTTPClientError.code:
raise webob.exc.HTTPClientError(code=res.status_int)
@@ -57,9 +54,7 @@ class LocalIPTestBase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
def _update_local_ip(self, lip_id, data):
update_req = self.new_update_request(
- 'local-ips', data, lip_id)
- update_req.environ['neutron.context'] = context.Context(
- '', self._tenant_id)
+ 'local-ips', data, lip_id, tenant_id=self._tenant_id)
res = update_req.get_response(self.ext_api)
if res.status_int >= webob.exc.HTTPClientError.code:
raise webob.exc.HTTPClientError(code=res.status_int)
@@ -73,9 +68,8 @@ class LocalIPTestBase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
req = self.new_create_request('local_ips',
data=local_ip_assoc,
id=local_ip_id,
- subresource='port_associations')
- neutron_context = context.Context('', self._tenant_id)
- req.environ['neutron.context'] = neutron_context
+ subresource='port_associations',
+ tenant_id=self._tenant_id)
res = req.get_response(self.ext_api)
if res.status_int >= webob.exc.HTTPClientError.code:
raise webob.exc.HTTPClientError(code=res.status_int)
diff --git a/neutron/tests/unit/extensions/test_network_ip_availability.py b/neutron/tests/unit/extensions/test_network_ip_availability.py
index 833ee89c95..f52d000d9a 100644
--- a/neutron/tests/unit/extensions/test_network_ip_availability.py
+++ b/neutron/tests/unit/extensions/test_network_ip_availability.py
@@ -65,7 +65,9 @@ class TestNetworkIPAvailabilityAPI(
with self.subnet(network=net):
# list by query fields: total_ips
params = 'fields=total_ips'
- request = self.new_list_request(API_RESOURCE, params=params)
+ request = self.new_list_request(API_RESOURCE,
+ params=params,
+ as_admin=True)
response = self.deserialize(self.fmt,
request.get_response(self.ext_api))
self.assertIn(IP_AVAILS_KEY, response)
@@ -83,7 +85,8 @@ class TestNetworkIPAvailabilityAPI(
params = ['total_ips']
request = self.new_show_request(API_RESOURCE,
network['id'],
- fields=params)
+ fields=params,
+ as_admin=True)
response = self.deserialize(
self.fmt, request.get_response(self.ext_api))
self.assertIn(IP_AVAIL_KEY, response)
@@ -103,7 +106,9 @@ class TestNetworkIPAvailabilityAPI(
with self.subnet(network=net):
network = net['network']
# Get ALL
- request = self.new_list_request(API_RESOURCE, self.fmt)
+ request = self.new_list_request(API_RESOURCE,
+ self.fmt,
+ as_admin=True)
response = self.deserialize(self.fmt,
request.get_response(self.ext_api))
self.assertIn(IP_AVAILS_KEY, response)
@@ -112,7 +117,8 @@ class TestNetworkIPAvailabilityAPI(
net, 0)
# Get single via id
- request = self.new_show_request(API_RESOURCE, network['id'])
+ request = self.new_show_request(API_RESOURCE, network['id'],
+ as_admin=True)
response = self.deserialize(
self.fmt, request.get_response(self.ext_api))
self.assertIn(IP_AVAIL_KEY, response)
@@ -134,7 +140,8 @@ class TestNetworkIPAvailabilityAPI(
self.port(subnet=subnet3_1):
# Test get ALL
- request = self.new_list_request(API_RESOURCE)
+ request = self.new_list_request(API_RESOURCE,
+ as_admin=True)
response = self.deserialize(
self.fmt, request.get_response(self.ext_api))
self.assertIn(IP_AVAILS_KEY, response)
@@ -148,7 +155,8 @@ class TestNetworkIPAvailabilityAPI(
# Test get single via network id
network = n1['network']
request = self.new_show_request(API_RESOURCE,
- network['id'])
+ network['id'],
+ as_admin=True)
response = self.deserialize(
self.fmt, request.get_response(self.ext_api))
self.assertIn(IP_AVAIL_KEY, response)
@@ -165,7 +173,8 @@ class TestNetworkIPAvailabilityAPI(
self.port(subnet=subnet1_2),\
self.port(subnet=subnet1_2):
# Get ALL
- request = self.new_list_request(API_RESOURCE)
+ request = self.new_list_request(API_RESOURCE,
+ as_admin=True)
response = self.deserialize(
self.fmt, request.get_response(self.ext_api))
self.assertIn(IP_AVAILS_KEY, response)
@@ -176,7 +185,8 @@ class TestNetworkIPAvailabilityAPI(
# Get single via network id
network = n1['network']
request = self.new_show_request(API_RESOURCE,
- network['id'])
+ network['id'],
+ as_admin=True)
response = self.deserialize(
self.fmt, request.get_response(self.ext_api))
self.assertIn(IP_AVAIL_KEY, response)
@@ -186,7 +196,8 @@ class TestNetworkIPAvailabilityAPI(
def test_usages_port_consumed_v4(self):
with self.network() as net:
with self.subnet(network=net) as subnet:
- request = self.new_list_request(API_RESOURCE)
+ request = self.new_list_request(API_RESOURCE,
+ as_admin=True)
# Consume 2 ports
with self.port(subnet=subnet), self.port(subnet=subnet):
response = self.deserialize(self.fmt,
@@ -200,7 +211,8 @@ class TestNetworkIPAvailabilityAPI(
with self.subnet(network=net):
# Get IPv4
params = 'ip_version=%s' % constants.IP_VERSION_4
- request = self.new_list_request(API_RESOURCE, params=params)
+ request = self.new_list_request(API_RESOURCE, params=params,
+ as_admin=True)
response = self.deserialize(self.fmt,
request.get_response(self.ext_api))
self.assertIn(IP_AVAILS_KEY, response)
@@ -210,7 +222,8 @@ class TestNetworkIPAvailabilityAPI(
# Get IPv6 should return empty array
params = 'ip_version=%s' % constants.IP_VERSION_6
- request = self.new_list_request(API_RESOURCE, params=params)
+ request = self.new_list_request(API_RESOURCE, params=params,
+ as_admin=True)
response = self.deserialize(self.fmt,
request.get_response(self.ext_api))
self.assertEqual(0, len(response[IP_AVAILS_KEY]))
@@ -225,7 +238,8 @@ class TestNetworkIPAvailabilityAPI(
ipv6_address_mode=constants.DHCPV6_STATELESS):
# Get IPv6
params = 'ip_version=%s' % constants.IP_VERSION_6
- request = self.new_list_request(API_RESOURCE, params=params)
+ request = self.new_list_request(API_RESOURCE, params=params,
+ as_admin=True)
response = self.deserialize(self.fmt,
request.get_response(self.ext_api))
self.assertEqual(1, len(response[IP_AVAILS_KEY]))
@@ -234,7 +248,8 @@ class TestNetworkIPAvailabilityAPI(
# Get IPv4 should return empty array
params = 'ip_version=%s' % constants.IP_VERSION_4
- request = self.new_list_request(API_RESOURCE, params=params)
+ request = self.new_list_request(API_RESOURCE, params=params,
+ as_admin=True)
response = self.deserialize(self.fmt,
request.get_response(self.ext_api))
self.assertEqual(0, len(response[IP_AVAILS_KEY]))
@@ -247,7 +262,8 @@ class TestNetworkIPAvailabilityAPI(
network=net, cidr=cidr_ipv6,
ip_version=constants.IP_VERSION_6,
ipv6_address_mode=constants.DHCPV6_STATELESS) as subnet:
- request = self.new_list_request(API_RESOURCE)
+ request = self.new_list_request(API_RESOURCE,
+ as_admin=True)
# Consume 3 ports
with self.port(subnet=subnet),\
self.port(subnet=subnet), \
@@ -266,7 +282,8 @@ class TestNetworkIPAvailabilityAPI(
test_id = network['id']
# Get by query param: network_id
params = 'network_id=%s' % test_id
- request = self.new_list_request(API_RESOURCE, params=params)
+ request = self.new_list_request(API_RESOURCE, params=params,
+ as_admin=True)
response = self.deserialize(self.fmt,
request.get_response(self.ext_api))
self.assertIn(IP_AVAILS_KEY, response)
@@ -276,7 +293,8 @@ class TestNetworkIPAvailabilityAPI(
# Get by NON-matching query param: network_id
params = 'network_id=clearlywontmatch'
- request = self.new_list_request(API_RESOURCE, params=params)
+ request = self.new_list_request(API_RESOURCE, params=params,
+ as_admin=True)
response = self.deserialize(self.fmt,
request.get_response(self.ext_api))
self.assertEqual(0, len(response[IP_AVAILS_KEY]))
@@ -287,7 +305,8 @@ class TestNetworkIPAvailabilityAPI(
with self.subnet(network=net):
# Get by query param: network_name
params = 'network_name=%s' % test_name
- request = self.new_list_request(API_RESOURCE, params=params)
+ request = self.new_list_request(API_RESOURCE, params=params,
+ as_admin=True)
response = self.deserialize(self.fmt,
request.get_response(self.ext_api))
self.assertIn(IP_AVAILS_KEY, response)
@@ -297,7 +316,8 @@ class TestNetworkIPAvailabilityAPI(
# Get by NON-matching query param: network_name
params = 'network_name=clearly-wont-match'
- request = self.new_list_request(API_RESOURCE, params=params)
+ request = self.new_list_request(API_RESOURCE, params=params,
+ as_admin=True)
response = self.deserialize(self.fmt,
request.get_response(self.ext_api))
self.assertEqual(0, len(response[IP_AVAILS_KEY]))
@@ -308,7 +328,8 @@ class TestNetworkIPAvailabilityAPI(
with self.subnet(network=net):
# Get by query param: tenant_id
params = 'tenant_id=%s' % test_tenant_id
- request = self.new_list_request(API_RESOURCE, params=params)
+ request = self.new_list_request(API_RESOURCE, params=params,
+ as_admin=True)
response = self.deserialize(self.fmt,
request.get_response(self.ext_api))
self.assertIn(IP_AVAILS_KEY, response)
@@ -320,7 +341,8 @@ class TestNetworkIPAvailabilityAPI(
# Get by NON-matching query param: tenant_id
params = 'tenant_id=clearly-wont-match'
- request = self.new_list_request(API_RESOURCE, params=params)
+ request = self.new_list_request(API_RESOURCE, params=params,
+ as_admin=True)
response = self.deserialize(self.fmt,
request.get_response(self.ext_api))
self.assertEqual(0, len(response[IP_AVAILS_KEY]))
@@ -331,7 +353,8 @@ class TestNetworkIPAvailabilityAPI(
with self.subnet(network=net):
# Get by query param: project_id
params = 'project_id=%s' % test_project_id
- request = self.new_list_request(API_RESOURCE, params=params)
+ request = self.new_list_request(API_RESOURCE, params=params,
+ as_admin=True)
response = self.deserialize(self.fmt,
request.get_response(self.ext_api))
self.assertIn(IP_AVAILS_KEY, response)
@@ -343,7 +366,8 @@ class TestNetworkIPAvailabilityAPI(
# Get by NON-matching query param: project_id
params = 'project_id=clearly-wont-match'
- request = self.new_list_request(API_RESOURCE, params=params)
+ request = self.new_list_request(API_RESOURCE, params=params,
+ as_admin=True)
response = self.deserialize(self.fmt,
request.get_response(self.ext_api))
self.assertEqual(0, len(response[IP_AVAILS_KEY]))
@@ -369,7 +393,8 @@ class TestNetworkIPAvailabilityAPI(
self.port(subnet=s42), self.port(subnet=s42):
# Verify consumption across all
- request = self.new_list_request(API_RESOURCE)
+ request = self.new_list_request(API_RESOURCE,
+ as_admin=True)
response = self.deserialize(
self.fmt, request.get_response(self.ext_api))
avails_list = response[IP_AVAILS_KEY]
@@ -387,7 +412,8 @@ class TestNetworkIPAvailabilityAPI(
constants.IP_VERSION_6]:
params = 'ip_version=%i' % ip_ver
request = self.new_list_request(API_RESOURCE,
- params=params)
+ params=params,
+ as_admin=True)
response = self.deserialize(
self.fmt, request.get_response(self.ext_api))
for net_avail in response[IP_AVAILS_KEY]:
@@ -399,7 +425,8 @@ class TestNetworkIPAvailabilityAPI(
API_RESOURCE,
params='network_id=%s&network_id=%s'
% (net_v4_2['network']['id'],
- net_v6_2['network']['id']))
+ net_v6_2['network']['id']),
+ as_admin=True)
response = self.deserialize(
self.fmt, request.get_response(self.ext_api))
avails_list = response[IP_AVAILS_KEY]
@@ -414,7 +441,8 @@ class TestNetworkIPAvailabilityAPI(
networks = (net1, net2, net3, net4)
for idx in range(1, len(networks) + 1):
params = 'limit=%s' % idx
- request = self.new_list_request(API_RESOURCE, params=params)
+ request = self.new_list_request(API_RESOURCE, params=params,
+ as_admin=True)
response = self.deserialize(self.fmt,
request.get_response(self.ext_api))
self.assertEqual(idx, len(response[IP_AVAILS_KEY]))
@@ -426,14 +454,16 @@ class TestNetworkIPAvailabilityAPI(
network_ids = sorted([net['network']['id'] for net in networks])
params = 'sort_key=network_id;sort_dir=asc'
- request = self.new_list_request(API_RESOURCE, params=params)
+ request = self.new_list_request(API_RESOURCE, params=params,
+ as_admin=True)
response = self.deserialize(self.fmt,
request.get_response(self.ext_api))
res = [net['network_id'] for net in response[IP_AVAILS_KEY]]
self.assertEqual(network_ids, res)
params = 'sort_key=network_id;sort_dir=desc'
- request = self.new_list_request(API_RESOURCE, params=params)
+ request = self.new_list_request(API_RESOURCE, params=params,
+ as_admin=True)
response = self.deserialize(self.fmt,
request.get_response(self.ext_api))
res = [net['network_id'] for net in response[IP_AVAILS_KEY]]
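Every hunk above makes the same mechanical change: network-ip-availability is an admin-only API under the default policies, so each `new_list_request` call now asks for an admin context explicitly via `as_admin=True` instead of inheriting an implicitly elevated one. As a rough sketch of what such a flag plausibly becomes at the WSGI-environ level (`FakeContext` and `make_env` are illustrative stand-ins, not the real test helpers):

    from dataclasses import dataclass
    from typing import List

    @dataclass
    class FakeContext:
        tenant_id: str
        roles: List[str]
        is_admin: bool = False

    def make_env(tenant_id='test-tenant', as_admin=False):
        # Project scope carries member/reader; as_admin adds the explicit
        # admin role that the new policy defaults actually check for.
        roles = ['member', 'reader'] + (['admin'] if as_admin else [])
        return {'neutron.context': FakeContext(tenant_id, roles, as_admin)}

    assert make_env(as_admin=True)['neutron.context'].roles[-1] == 'admin'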
diff --git a/neutron/tests/unit/extensions/test_network_segment_range.py b/neutron/tests/unit/extensions/test_network_segment_range.py
index af43081e6f..9bf4ca3660 100644
--- a/neutron/tests/unit/extensions/test_network_segment_range.py
+++ b/neutron/tests/unit/extensions/test_network_segment_range.py
@@ -19,6 +19,7 @@ from neutron_lib import context
from oslo_config import cfg
import webob.exc
+from neutron.common import config
from neutron.db import db_base_plugin_v2
from neutron.db import segments_db
from neutron.extensions import network_segment_range as ext_range
@@ -55,7 +56,8 @@ class NetworkSegmentRangeTestBase(test_db_base_plugin_v2.
network_segment_range['network_segment_range'][k] = str(v)
network_segment_range_req = self.new_create_request(
- 'network-segment-ranges', network_segment_range, fmt)
+ 'network-segment-ranges', network_segment_range, fmt,
+ as_admin=True)
network_segment_range_res = network_segment_range_req.get_response(
self.ext_api)
@@ -83,7 +85,7 @@ class NetworkSegmentRangeTestBase(test_db_base_plugin_v2.
def _test_update_network_segment_range(self, range_id,
data, expected=None):
update_req = self.new_update_request(
- 'network-segment-ranges', data, range_id)
+ 'network-segment-ranges', data, range_id, as_admin=True)
update_res = update_req.get_response(self.ext_api)
if expected:
@@ -112,6 +114,7 @@ class NetworkSegmentRangeTestPlugin(db_base_plugin_v2.NeutronDbPluginV2,
class TestNetworkSegmentRange(NetworkSegmentRangeTestBase):
def setUp(self, plugin=None):
+ config.register_common_config_options()
if not plugin:
plugin = TEST_PLUGIN_KLASS
service_plugins = {'network_segment_range_plugin_name':
@@ -262,7 +265,8 @@ class TestNetworkSegmentRange(NetworkSegmentRangeTestBase):
'network-segment-ranges',
network_segment_range['network_segment_range']['id'],
{'network_segment_range': {'name': 'foo-name'}},
- expected_code=webob.exc.HTTPOk.code)
+ expected_code=webob.exc.HTTPOk.code,
+ as_admin=True)
self.assertEqual('foo-name',
result['network_segment_range']['name'])
@@ -275,7 +279,8 @@ class TestNetworkSegmentRange(NetworkSegmentRangeTestBase):
'network-segment-ranges',
network_segment_range['network_segment_range']['id'],
{'network_segment_range': {'name': ''}},
- expected_code=webob.exc.HTTPOk.code)
+ expected_code=webob.exc.HTTPOk.code,
+ as_admin=True)
self.assertEqual('', result['network_segment_range']['name'])
def test_update_network_segment_range_min_max(self):
@@ -286,7 +291,8 @@ class TestNetworkSegmentRange(NetworkSegmentRangeTestBase):
'network-segment-ranges',
network_segment_range['network_segment_range']['id'],
{'network_segment_range': {'minimum': 1200, 'maximum': 1300}},
- expected_code=webob.exc.HTTPOk.code)
+ expected_code=webob.exc.HTTPOk.code,
+ as_admin=True)
self.assertEqual(1200, result['network_segment_range']['minimum'])
self.assertEqual(1300, result['network_segment_range']['maximum'])
@@ -294,7 +300,8 @@ class TestNetworkSegmentRange(NetworkSegmentRangeTestBase):
network_segment_range = self._test_create_network_segment_range()
req = self.new_show_request(
'network-segment-ranges',
- network_segment_range['network_segment_range']['id'])
+ network_segment_range['network_segment_range']['id'],
+ as_admin=True)
res = self.deserialize(self.fmt, req.get_response(self.ext_api))
self.assertEqual(
network_segment_range['network_segment_range']['id'],
@@ -304,7 +311,7 @@ class TestNetworkSegmentRange(NetworkSegmentRangeTestBase):
self._test_create_network_segment_range(name='foo-range1')
self._test_create_network_segment_range(
name='foo-range2', minimum=400, maximum=500)
- res = self._list('network-segment-ranges')
+ res = self._list('network-segment-ranges', as_admin=True)
self.assertEqual(2, len(res['network_segment_ranges']))
def test_list_network_segment_ranges_with_sort(self):
@@ -314,7 +321,8 @@ class TestNetworkSegmentRange(NetworkSegmentRangeTestBase):
name='foo-range2', physical_network='phys_net2')
self._test_list_with_sort('network-segment-range',
(range2, range1),
- [('name', 'desc')])
+ [('name', 'desc')],
+ as_admin=True)
def test_list_network_segment_ranges_with_pagination(self):
range1 = self._test_create_network_segment_range(
@@ -326,7 +334,8 @@ class TestNetworkSegmentRange(NetworkSegmentRangeTestBase):
self._test_list_with_pagination(
'network-segment-range',
(range1, range2, range3),
- ('name', 'asc'), 2, 2)
+ ('name', 'asc'), 2, 2,
+ as_admin=True)
def test_list_network_segment_ranges_with_pagination_reverse(self):
range1 = self._test_create_network_segment_range(
@@ -338,14 +347,17 @@ class TestNetworkSegmentRange(NetworkSegmentRangeTestBase):
self._test_list_with_pagination_reverse(
'network-segment-range',
(range1, range2, range3),
- ('name', 'asc'), 2, 2)
+ ('name', 'asc'), 2, 2,
+ as_admin=True)
def test_delete_network_segment_range(self):
network_segment_range = self._test_create_network_segment_range()
with mock.patch.object(segments_db, 'network_segments_exist_in_range',
return_value=False):
self._delete('network-segment-ranges',
- network_segment_range['network_segment_range']['id'])
+ network_segment_range['network_segment_range']['id'],
+ as_admin=True)
self._show('network-segment-ranges',
network_segment_range['network_segment_range']['id'],
- expected_code=webob.exc.HTTPNotFound.code)
+ expected_code=webob.exc.HTTPNotFound.code,
+ as_admin=True)
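Besides the usual `as_admin=True` churn, this file now calls `config.register_common_config_options()` at the top of `setUp()`. The likely reason is ordering: oslo.config options must be registered before any code path dereferences them, and re-registering identical options is a no-op, so doing it per test case is harmless. A minimal sketch of that constraint (`load_plugin` is a placeholder, not a real helper):

    import unittest

    from neutron.common import config

    class RangeTestBase(unittest.TestCase):
        def setUp(self):
            super().setUp()
            # Register shared options before anything reads cfg.CONF;
            # duplicate registration of the same opts is a no-op.
            config.register_common_config_options()
            # self.plugin = load_plugin()  # placeholder for plugin setup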
diff --git a/neutron/tests/unit/extensions/test_portsecurity.py b/neutron/tests/unit/extensions/test_portsecurity.py
index 027740c2f2..aedd75251d 100644
--- a/neutron/tests/unit/extensions/test_portsecurity.py
+++ b/neutron/tests/unit/extensions/test_portsecurity.py
@@ -18,7 +18,6 @@ from unittest import mock
from neutron_lib.api.definitions import port_security as psec
from neutron_lib.api import validators
-from neutron_lib import context
from neutron_lib.db import api as db_api
from neutron_lib.db import utils as db_utils
from neutron_lib.exceptions import port_security as psec_exc
@@ -311,11 +310,11 @@ class TestPortSecurity(PortSecurityDBTestCase):
self.skipTest("Plugin does not support security groups")
res = self._create_network('json', 'net1', True,
arg_list=('port_security_enabled',),
- set_context=True,
tenant_id='admin_tenant',
port_security_enabled=False)
net = self.deserialize('json', res)
- self._create_subnet('json', net['network']['id'], '10.0.0.0/24')
+ self._create_subnet('json', net['network']['id'], '10.0.0.0/24',
+ tenant_id='admin_tenant')
security_group = self.deserialize(
'json', self._create_security_group(self.fmt, 'asdf', 'asdf',
tenant_id='other_tenant'))
@@ -323,7 +322,6 @@ class TestPortSecurity(PortSecurityDBTestCase):
res = self._create_port('json', net['network']['id'],
arg_list=('security_groups',
'port_security_enabled'),
- set_context=True,
is_admin=True,
tenant_id='admin_tenant',
port_security_enabled=True,
@@ -331,19 +329,18 @@ class TestPortSecurity(PortSecurityDBTestCase):
port = self.deserialize('json', res)
self.assertTrue(port['port'][psec.PORTSECURITY])
self.assertEqual(port['port']['security_groups'], [security_group_id])
- self._delete('ports', port['port']['id'])
+ self._delete('ports', port['port']['id'], tenant_id='admin_tenant')
def test_create_port_with_no_admin_use_other_tenant_security_group(self):
if self._skip_security_group:
self.skipTest("Plugin does not support security groups")
res = self._create_network('json', 'net1', True,
arg_list=('port_security_enabled',),
- set_context=True,
tenant_id='demo_tenant',
port_security_enabled=False)
net = self.deserialize('json', res)
self._create_subnet('json', net['network']['id'], '10.0.0.0/24',
- set_context=True, tenant_id='demo_tenant')
+ tenant_id='demo_tenant')
security_group = self.deserialize(
'json', self._create_security_group(self.fmt, 'asdf', 'asdf',
tenant_id='other_tenant'))
@@ -351,7 +348,6 @@ class TestPortSecurity(PortSecurityDBTestCase):
res = self._create_port('json', net['network']['id'],
arg_list=('security_groups',
'port_security_enabled'),
- set_context=True,
tenant_id='demo_tenant',
port_security_enabled=True,
security_groups=[security_group_id])
@@ -396,7 +392,7 @@ class TestPortSecurity(PortSecurityDBTestCase):
with self.network() as net:
with self.subnet(network=net):
res = self._create_port('json', net['network']['id'],
- set_context=True, is_admin=True,
+ is_admin=True,
tenant_id='admin_tenant',)
port = self.deserialize('json', res)
self.assertTrue(port['port'][psec.PORTSECURITY])
@@ -408,7 +404,9 @@ class TestPortSecurity(PortSecurityDBTestCase):
update_port = {'port':
{'security_groups': [security_group_id]}}
req = self.new_update_request('ports', update_port,
- port['port']['id'])
+ port['port']['id'],
+ tenant_id='admin_tenant',
+ as_admin=True)
port = self.deserialize('json', req.get_response(self.api))
security_groups = port['port']['security_groups']
self.assertIn(security_group_id, security_groups)
@@ -420,7 +418,6 @@ class TestPortSecurity(PortSecurityDBTestCase):
with self.network(tenant_id='demo_tenant') as net:
with self.subnet(network=net, tenant_id='demo_tenant'):
res = self._create_port('json', net['network']['id'],
- set_context=True,
tenant_id='demo_tenant',)
port = self.deserialize('json', res)
self.assertTrue(port['port'][psec.PORTSECURITY])
@@ -432,9 +429,8 @@ class TestPortSecurity(PortSecurityDBTestCase):
update_port = {'port':
{'security_groups': [security_group_id]}}
req = self.new_update_request('ports', update_port,
- port['port']['id'])
- req.environ['neutron.context'] = context.Context(
- '', 'other_tenant')
+ port['port']['id'],
+ tenant_id='other_tenant')
res = req.get_response(self.api)
self.assertEqual(404, res.status_int)
@@ -490,29 +486,26 @@ class TestPortSecurity(PortSecurityDBTestCase):
self._delete('ports', port['port']['id'])
def test_create_port_security_off_shared_network(self):
- with self.network(shared=True) as net:
+ with self.network(as_admin=True, shared=True) as net:
with self.subnet(network=net):
res = self._create_port('json', net['network']['id'],
arg_list=('port_security_enabled',),
port_security_enabled=False,
- tenant_id='not_network_owner',
- set_context=True)
+ tenant_id='not_network_owner')
self.deserialize('json', res)
self.assertEqual(403, res.status_int)
def test_update_port_security_off_shared_network(self):
- with self.network(shared=True) as net:
+ with self.network(as_admin=True, shared=True) as net:
with self.subnet(network=net):
res = self._create_port('json', net['network']['id'],
- tenant_id='not_network_owner',
- set_context=True)
+ tenant_id='not_network_owner')
port = self.deserialize('json', res)
# remove security group on port
update_port = {'port': {ext_sg.SECURITYGROUPS: None,
psec.PORTSECURITY: False}}
req = self.new_update_request('ports', update_port,
- port['port']['id'])
- req.environ['neutron.context'] = context.Context(
- '', 'not_network_owner')
+ port['port']['id'],
+ tenant_id='not_network_owner')
res = req.get_response(self.api)
self.assertEqual(exc.HTTPForbidden.code, res.status_int)
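The port-security hunks drop the old pattern of injecting a `context.Context` straight into `req.environ` in favor of `tenant_id=` arguments on the request helpers. Note how the expected status codes encode the policy outcome: a foreign project updating a port it cannot even see gets 404, while an owner blocked by policy gets 403. A toy model of that distinction (real enforcement lives in neutron.policy; this only names the convention):

    def expected_status(requester, owner, allowed_by_policy):
        # Resources owned by another project are invisible (404); a
        # visible resource that policy forbids changing yields 403.
        if requester != owner:
            return 404
        return 200 if allowed_by_policy else 403

    assert expected_status('other_tenant', 'demo_tenant', False) == 404
    assert expected_status('not_network_owner', 'not_network_owner',
                           False) == 403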
diff --git a/neutron/tests/unit/extensions/test_providernet.py b/neutron/tests/unit/extensions/test_providernet.py
index 61e1422aa2..fc40dae6c1 100644
--- a/neutron/tests/unit/extensions/test_providernet.py
+++ b/neutron/tests/unit/extensions/test_providernet.py
@@ -92,21 +92,28 @@ class ProvidernetExtensionTestCase(testlib_api.WebTestCase):
def _put_network_with_provider_attrs(self, ctx, expect_errors=False):
data = self._prepare_net_data()
+ ctx.roles = ['member', 'reader']
+ if ctx.is_admin:
+ ctx.roles.append('admin')
env = {'neutron.context': ctx}
instance = self.plugin.return_value
- instance.get_network.return_value = {'tenant_id': ctx.tenant_id,
+ instance.get_network.return_value = {'project_id': ctx.tenant_id,
'shared': False}
net_id = uuidutils.generate_uuid()
res = self.api.put(test_base._get_path('networks',
id=net_id,
fmt=self.fmt),
self.serialize({'network': data}),
+ content_type='application/' + self.fmt,
extra_environ=env,
expect_errors=expect_errors)
return res, data, net_id
def _post_network_with_provider_attrs(self, ctx, expect_errors=False):
data = self._prepare_net_data()
+ ctx.roles = ['member', 'reader']
+ if ctx.is_admin:
+ ctx.roles.append('admin')
env = {'neutron.context': ctx}
res = self.api.post(test_base._get_path('networks', fmt=self.fmt),
self.serialize({'network': data}),
@@ -119,6 +126,9 @@ class ProvidernetExtensionTestCase(testlib_api.WebTestCase):
expect_errors=False):
data = self._prepare_net_data()
data.update(bad_data)
+ ctx.roles = ['member', 'reader']
+ if ctx.is_admin:
+ ctx.roles.append('admin')
env = {'neutron.context': ctx}
res = self.api.post(test_base._get_path('networks', fmt=self.fmt),
self.serialize({'network': data}),
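Two adjustments recur in the providernet case. First, the mocked context gets explicit roles, because the policy engine evaluates `ctx.roles` rather than trusting `is_admin` alone. Second, the PUT gains an explicit `content_type`, since the body is pre-serialized and webtest will not infer the media type from a plain string. The role logic, lifted from the hunks above into a runnable form (the `Ctx` class is a stand-in for the mocked context):

    class Ctx:
        def __init__(self, tenant_id, is_admin=False):
            self.tenant_id = tenant_id
            self.is_admin = is_admin
            self.roles = []

    def with_rbac_roles(ctx):
        # Same assignment the tests perform on the mocked context.
        ctx.roles = ['member', 'reader']
        if ctx.is_admin:
            ctx.roles.append('admin')
        return ctx

    assert with_rbac_roles(Ctx('t1', is_admin=True)).roles == \
        ['member', 'reader', 'admin']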
diff --git a/neutron/tests/unit/extensions/test_qos_gateway_ip.py b/neutron/tests/unit/extensions/test_qos_gateway_ip.py
index 246ed917b3..b8c6b73817 100644
--- a/neutron/tests/unit/extensions/test_qos_gateway_ip.py
+++ b/neutron/tests/unit/extensions/test_qos_gateway_ip.py
@@ -69,7 +69,7 @@ class GatewayIPQoSDBTestCaseBase(object):
ctx = context.get_admin_context()
policy_obj = policy.QosPolicy(ctx,
id=uuidutils.generate_uuid(),
- project_id='tenant', name='pol1',
+ project_id=self._tenant_id, name='pol1',
rules=[])
policy_obj.create()
with self.subnet(cidr='11.0.0.0/24') as public_sub,\
@@ -88,7 +88,7 @@ class GatewayIPQoSDBTestCaseBase(object):
ctx = context.get_admin_context()
policy_obj = policy.QosPolicy(ctx,
id=uuidutils.generate_uuid(),
- project_id='tenant', name='pol1',
+ project_id=self._tenant_id, name='pol1',
rules=[])
policy_obj.create()
with self.subnet(cidr='11.0.0.0/24') as public_sub,\
@@ -115,7 +115,7 @@ class GatewayIPQoSDBTestCaseBase(object):
ctx = context.get_admin_context()
policy_obj = policy.QosPolicy(ctx,
id=uuidutils.generate_uuid(),
- project_id='tenant', name='pol1',
+ project_id=self._tenant_id, name='pol1',
rules=[])
policy_obj.create()
with self.subnet(cidr='11.0.0.0/24') as public_sub,\
@@ -153,7 +153,7 @@ class GatewayIPQoSDBTestCaseBase(object):
ctx = context.get_admin_context()
policy_obj = policy.QosPolicy(ctx,
id=uuidutils.generate_uuid(),
- project_id='tenant', name='pol1',
+ project_id=self._tenant_id, name='pol1',
rules=[])
policy_obj.create()
with self.subnet(cidr='11.0.0.0/24') as public_sub,\
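Here the fix is ownership rather than privilege: the QoS policy fixtures were created under a literal 'tenant' project id, which no longer matches the project the requests run as once visibility is enforced, so they now use `self._tenant_id`. A toy visibility rule showing why the old fixture went dark (the rule shape is assumed, not quoted from neutron):

    def visible(policy_project, request_project, is_admin=False):
        # Non-admin requests only see QoS policies owned by their project.
        return is_admin or policy_project == request_project

    assert visible('test-tenant', 'test-tenant')
    assert not visible('tenant', 'test-tenant')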
diff --git a/neutron/tests/unit/extensions/test_quotasv2.py b/neutron/tests/unit/extensions/test_quotasv2.py
index fab52f8be6..c5d914a944 100644
--- a/neutron/tests/unit/extensions/test_quotasv2.py
+++ b/neutron/tests/unit/extensions/test_quotasv2.py
@@ -120,8 +120,7 @@ class QuotaExtensionDbTestCase(QuotaExtensionTestCase):
def test_show_default_quotas_with_admin(self):
project_id = 'project_id1'
- env = {'neutron.context': context.Context('', project_id + '2',
- is_admin=True)}
+ env = test_base._get_neutron_env(project_id + '2', as_admin=True)
res = self.api.get(_get_path('quotas', id=project_id,
action=DEFAULT_QUOTAS_ACTION,
fmt=self.fmt),
@@ -137,8 +136,7 @@ class QuotaExtensionDbTestCase(QuotaExtensionTestCase):
def test_show_default_quotas_with_owner_project(self):
project_id = 'project_id1'
- env = {'neutron.context': context.Context('', project_id,
- is_admin=False)}
+ env = test_base._get_neutron_env(project_id, as_admin=False)
res = self.api.get(_get_path('quotas', id=project_id,
action=DEFAULT_QUOTAS_ACTION,
fmt=self.fmt),
@@ -154,8 +152,7 @@ class QuotaExtensionDbTestCase(QuotaExtensionTestCase):
def test_show_default_quotas_without_admin_forbidden_returns_403(self):
project_id = 'project_id1'
- env = {'neutron.context': context.Context('', project_id + '2',
- is_admin=False)}
+ env = test_base._get_neutron_env(project_id + '2', as_admin=False)
res = self.api.get(_get_path('quotas', id=project_id,
action=DEFAULT_QUOTAS_ACTION,
fmt=self.fmt),
@@ -164,8 +161,7 @@ class QuotaExtensionDbTestCase(QuotaExtensionTestCase):
def test_show_quotas_with_admin(self):
project_id = 'project_id1'
- env = {'neutron.context': context.Context('', project_id + '2',
- is_admin=True)}
+ env = test_base._get_neutron_env(project_id + '2', as_admin=True)
res = self.api.get(_get_path('quotas', id=project_id, fmt=self.fmt),
extra_environ=env)
self.assertEqual(200, res.status_int)
@@ -179,16 +175,14 @@ class QuotaExtensionDbTestCase(QuotaExtensionTestCase):
def test_show_quotas_without_admin_forbidden_returns_403(self):
project_id = 'project_id1'
- env = {'neutron.context': context.Context('', project_id + '2',
- is_admin=False)}
+ env = test_base._get_neutron_env(project_id + '2', as_admin=False)
res = self.api.get(_get_path('quotas', id=project_id, fmt=self.fmt),
extra_environ=env, expect_errors=True)
self.assertEqual(403, res.status_int)
def test_show_quotas_with_owner_project(self):
project_id = 'project_id1'
- env = {'neutron.context': context.Context('', project_id,
- is_admin=False)}
+ env = test_base._get_neutron_env(project_id, as_admin=True)
res = self.api.get(_get_path('quotas', id=project_id, fmt=self.fmt),
extra_environ=env)
self.assertEqual(200, res.status_int)
@@ -202,8 +196,7 @@ class QuotaExtensionDbTestCase(QuotaExtensionTestCase):
def test_list_quotas_with_admin(self):
project_id = 'project_id1'
- env = {'neutron.context': context.Context('', project_id,
- is_admin=True)}
+ env = test_base._get_neutron_env(project_id, as_admin=True)
res = self.api.get(_get_path('quotas', fmt=self.fmt),
extra_environ=env)
self.assertEqual(200, res.status_int)
@@ -212,16 +205,14 @@ class QuotaExtensionDbTestCase(QuotaExtensionTestCase):
def test_list_quotas_without_admin_forbidden_returns_403(self):
project_id = 'project_id1'
- env = {'neutron.context': context.Context('', project_id,
- is_admin=False)}
+ env = test_base._get_neutron_env(project_id, as_admin=False)
res = self.api.get(_get_path('quotas', fmt=self.fmt),
extra_environ=env, expect_errors=True)
self.assertEqual(403, res.status_int)
def test_update_quotas_without_admin_forbidden_returns_403(self):
project_id = 'project_id1'
- env = {'neutron.context': context.Context('', project_id,
- is_admin=False)}
+ env = test_base._get_neutron_env(project_id, as_admin=False)
quotas = {'quota': {'network': 100}}
res = self.api.put(_get_path('quotas', id=project_id, fmt=self.fmt),
self.serialize(quotas), extra_environ=env,
@@ -230,8 +221,7 @@ class QuotaExtensionDbTestCase(QuotaExtensionTestCase):
def test_update_quotas_with_non_integer_returns_400(self):
project_id = 'project_id1'
- env = {'neutron.context': context.Context('', project_id,
- is_admin=True)}
+ env = test_base._get_neutron_env(project_id, as_admin=True)
quotas = {'quota': {'network': 'abc'}}
res = self.api.put(_get_path('quotas', id=project_id, fmt=self.fmt),
self.serialize(quotas), extra_environ=env,
@@ -240,8 +230,7 @@ class QuotaExtensionDbTestCase(QuotaExtensionTestCase):
def test_update_quotas_with_negative_integer_returns_400(self):
project_id = 'project_id1'
- env = {'neutron.context': context.Context('', project_id,
- is_admin=True)}
+ env = test_base._get_neutron_env(project_id, as_admin=True)
quotas = {'quota': {'network': -2}}
res = self.api.put(_get_path('quotas', id=project_id, fmt=self.fmt),
self.serialize(quotas), extra_environ=env,
@@ -250,8 +239,7 @@ class QuotaExtensionDbTestCase(QuotaExtensionTestCase):
def test_update_quotas_with_out_of_range_integer_returns_400(self):
project_id = 'project_id1'
- env = {'neutron.context': context.Context('', project_id,
- is_admin=True)}
+ env = test_base._get_neutron_env(project_id, as_admin=True)
quotas = {'quota': {'network': constants.DB_INTEGER_MAX_VALUE + 1}}
res = self.api.put(_get_path('quotas', id=project_id, fmt=self.fmt),
self.serialize(quotas), extra_environ=env,
@@ -260,8 +248,7 @@ class QuotaExtensionDbTestCase(QuotaExtensionTestCase):
def test_update_quotas_to_unlimited(self):
project_id = 'project_id1'
- env = {'neutron.context': context.Context('', project_id,
- is_admin=True)}
+ env = test_base._get_neutron_env(project_id, as_admin=True)
quotas = {'quota': {'network': -1}}
res = self.api.put(_get_path('quotas', id=project_id, fmt=self.fmt),
self.serialize(quotas), extra_environ=env,
@@ -270,8 +257,7 @@ class QuotaExtensionDbTestCase(QuotaExtensionTestCase):
def test_update_quotas_exceeding_current_limit(self):
project_id = 'project_id1'
- env = {'neutron.context': context.Context('', project_id,
- is_admin=True)}
+ env = test_base._get_neutron_env(project_id, as_admin=True)
quotas = {'quota': {'network': 120}}
res = self.api.put(_get_path('quotas', id=project_id, fmt=self.fmt),
self.serialize(quotas), extra_environ=env,
@@ -280,8 +266,7 @@ class QuotaExtensionDbTestCase(QuotaExtensionTestCase):
def test_update_quotas_with_non_support_resource_returns_400(self):
project_id = 'project_id1'
- env = {'neutron.context': context.Context('', project_id,
- is_admin=True)}
+ env = test_base._get_neutron_env(project_id, as_admin=True)
quotas = {'quota': {'abc': 100}}
res = self.api.put(_get_path('quotas', id=project_id, fmt=self.fmt),
self.serialize(quotas), extra_environ=env,
@@ -290,8 +275,7 @@ class QuotaExtensionDbTestCase(QuotaExtensionTestCase):
def test_update_quotas_with_admin(self):
project_id = 'project_id1'
- env = {'neutron.context': context.Context('', project_id + '2',
- is_admin=True)}
+ env = test_base._get_neutron_env(project_id + '2', as_admin=True)
quotas = {'quota': {'network': 100}}
res = self.api.put(_get_path('quotas', id=project_id, fmt=self.fmt),
self.serialize(quotas), extra_environ=env)
@@ -306,8 +290,7 @@ class QuotaExtensionDbTestCase(QuotaExtensionTestCase):
def test_update_attributes(self):
project_id = 'project_id1'
- env = {'neutron.context': context.Context('', project_id + '2',
- is_admin=True)}
+ env = test_base._get_neutron_env(project_id + '2', as_admin=True)
quotas = {'quota': {'extra1': 100}}
res = self.api.put(_get_path('quotas', id=project_id, fmt=self.fmt),
self.serialize(quotas), extra_environ=env)
@@ -321,8 +304,7 @@ class QuotaExtensionDbTestCase(QuotaExtensionTestCase):
@mock.patch.object(driver_nolock.DbQuotaNoLockDriver, 'get_resource_usage')
def test_update_quotas_check_limit(self, mock_get_resource_usage):
tenant_id = 'tenant_id1'
- env = {'neutron.context': context.Context('', tenant_id,
- is_admin=True)}
+ env = test_base._get_neutron_env(tenant_id, as_admin=True)
quotas = {'quota': {'network': 100, 'check_limit': False}}
res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt),
self.serialize(quotas), extra_environ=env,
@@ -338,8 +320,7 @@ class QuotaExtensionDbTestCase(QuotaExtensionTestCase):
def test_delete_quotas_with_admin(self):
project_id = 'project_id1'
- env = {'neutron.context': context.Context('', project_id + '2',
- is_admin=True)}
+ env = test_base._get_neutron_env(project_id + '2', as_admin=True)
# Create a quota to ensure we have something to delete
quotas = {'quota': {'network': 100}}
self.api.put(_get_path('quotas', id=project_id, fmt=self.fmt),
@@ -350,16 +331,14 @@ class QuotaExtensionDbTestCase(QuotaExtensionTestCase):
def test_delete_quotas_without_admin_forbidden_returns_403(self):
project_id = 'project_id1'
- env = {'neutron.context': context.Context('', project_id,
- is_admin=False)}
+ env = test_base._get_neutron_env(project_id, as_admin=False)
res = self.api.delete(_get_path('quotas', id=project_id, fmt=self.fmt),
extra_environ=env, expect_errors=True)
self.assertEqual(403, res.status_int)
def test_delete_quota_with_unknown_project_returns_404(self):
project_id = 'idnotexist'
- env = {'neutron.context': context.Context('', project_id + '2',
- is_admin=True)}
+ env = test_base._get_neutron_env(project_id + '2', as_admin=True)
res = self.api.delete(_get_path('quotas', id=project_id, fmt=self.fmt),
extra_environ=env, expect_errors=True)
self.assertEqual(exc.HTTPNotFound.code, res.status_int)
@@ -373,8 +352,7 @@ class QuotaExtensionDbTestCase(QuotaExtensionTestCase):
def test_quotas_limit_check(self):
project_id = 'project_id1'
- env = {'neutron.context': context.Context('', project_id,
- is_admin=True)}
+ env = test_base._get_neutron_env(project_id, as_admin=True)
quotas = {'quota': {'network': 5}}
res = self.api.put(_get_path('quotas', id=project_id,
fmt=self.fmt),
@@ -465,8 +443,7 @@ class QuotaExtensionCfgTestCase(QuotaExtensionTestCase):
def test_show_quotas_with_admin(self):
project_id = 'project_id1'
- env = {'neutron.context': context.Context('', project_id + '2',
- is_admin=True)}
+ env = test_base._get_neutron_env(project_id + '2', as_admin=True)
res = self.api.get(_get_path('quotas', id=project_id, fmt=self.fmt),
extra_environ=env)
self.assertEqual(200, res.status_int)
@@ -489,8 +466,7 @@ class QuotaExtensionCfgTestCase(QuotaExtensionTestCase):
def test_delete_quotas_forbidden(self):
project_id = 'project_id1'
- env = {'neutron.context': context.Context('', project_id,
- is_admin=False)}
+ env = test_base._get_neutron_env(project_id, as_admin=False)
res = self.api.delete(_get_path('quotas', id=project_id, fmt=self.fmt),
extra_environ=env, expect_errors=True)
self.assertEqual(403, res.status_int)
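The quota tests replace every inline `{'neutron.context': context.Context(...)}` environ with `test_base._get_neutron_env(...)`. Only the call sites appear in this diff; assuming the helper mirrors what it replaces plus the role handling seen in the providernet hunks, it would look roughly like this (signature and body are conjecture):

    from neutron_lib import context

    def _get_neutron_env(tenant_id='test-tenant', as_admin=False):
        # Conjectured shape: same context the old inline code built, plus
        # the role list the new policy defaults evaluate.
        roles = ['member', 'reader'] + (['admin'] if as_admin else [])
        ctx = context.Context(user_id='', tenant_id=tenant_id,
                              roles=roles, is_admin=as_admin)
        return {'neutron.context': ctx}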
diff --git a/neutron/tests/unit/extensions/test_securitygroup.py b/neutron/tests/unit/extensions/test_securitygroup.py
index d0ca88b81a..bbf752e4cb 100644
--- a/neutron/tests/unit/extensions/test_securitygroup.py
+++ b/neutron/tests/unit/extensions/test_securitygroup.py
@@ -92,41 +92,39 @@ class SecurityGroupTestExtensionManager(object):
class SecurityGroupsTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
- def _build_security_group(self, name, description, **kwargs):
+ def _build_security_group(self, name, description):
data = {
'security_group': {
'name': name,
- 'tenant_id': kwargs.get(
- 'tenant_id', test_db_base_plugin_v2.TEST_TENANT_ID),
'description': description}}
return data
- def _create_security_group_response(self, fmt, data, **kwargs):
- security_group_req = self.new_create_request('security-groups', data,
- fmt)
- if (kwargs.get('set_context') and 'tenant_id' in kwargs):
- # create a specific auth context for this request
- security_group_req.environ['neutron.context'] = (
- context.Context('', kwargs['tenant_id']))
+ def _create_security_group_response(self, fmt, data, tenant_id=None,
+ as_admin=False, **kwargs):
+ security_group_req = self.new_create_request(
+ 'security-groups', data, fmt, tenant_id=tenant_id,
+ as_admin=as_admin)
return security_group_req.get_response(self.ext_api)
- def _create_security_group(self, fmt, name, description, **kwargs):
- data = self._build_security_group(name, description, **kwargs)
- return self._create_security_group_response(fmt, data, **kwargs)
+ def _create_security_group(self, fmt, name, description, tenant_id=None,
+ as_admin=False, **kwargs):
+ data = self._build_security_group(name, description)
+ return self._create_security_group_response(
+ fmt, data, tenant_id=tenant_id, as_admin=as_admin, **kwargs)
def _build_security_group_rule(
self, security_group_id, direction, proto,
port_range_min=None, port_range_max=None,
remote_ip_prefix=None, remote_group_id=None,
remote_address_group_id=None,
- tenant_id=test_db_base_plugin_v2.TEST_TENANT_ID,
- ethertype=const.IPv4):
+ tenant_id=None,
+ ethertype=const.IPv4,
+ as_admin=False):
data = {'security_group_rule': {'security_group_id': security_group_id,
'direction': direction,
'protocol': proto,
- 'ethertype': ethertype,
- 'tenant_id': tenant_id}}
+ 'ethertype': ethertype}}
if port_range_min:
data['security_group_rule']['port_range_min'] = port_range_min
@@ -145,19 +143,13 @@ class SecurityGroupsTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
return data
- def _create_security_group_rule(self, fmt, rules, **kwargs):
+ def _create_security_group_rule(self, fmt, rules, tenant_id=None,
+ as_admin=False, **kwargs):
security_group_rule_req = self.new_create_request(
- 'security-group-rules', rules, fmt)
-
- if (kwargs.get('set_context') and 'tenant_id' in kwargs):
- # create a specific auth context for this request
- security_group_rule_req.environ['neutron.context'] = (
- context.Context('', kwargs['tenant_id']))
- elif kwargs.get('admin_context'):
- security_group_rule_req.environ['neutron.context'] = (
- context.Context(user_id='admin', tenant_id='admin-tenant',
- is_admin=True))
+ 'security-group-rules', rules, fmt, tenant_id=tenant_id,
+ as_admin=as_admin)
+
return security_group_rule_req.get_response(self.ext_api)
def _make_security_group(self, fmt, name, description, **kwargs):
@@ -166,8 +158,10 @@ class SecurityGroupsTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
raise webob.exc.HTTPClientError(code=res.status_int)
return self.deserialize(fmt, res)
- def _make_security_group_rule(self, fmt, rules, **kwargs):
- res = self._create_security_group_rule(self.fmt, rules)
+ def _make_security_group_rule(self, fmt, rules, tenant_id=None,
+ as_admin=False, **kwargs):
+ res = self._create_security_group_rule(
+ self.fmt, rules, tenant_id=tenant_id, as_admin=as_admin)
if res.status_int >= webob.exc.HTTPBadRequest.code:
raise webob.exc.HTTPClientError(code=res.status_int)
return self.deserialize(fmt, res)
@@ -819,9 +813,10 @@ class TestSecurityGroups(SecurityGroupDBTestCase):
sg['security_group']['id'], "ingress", const.PROTO_NAME_TCP,
port_range_min=22, port_range_max=22,
remote_ip_prefix="10.0.2.0/24",
- ethertype=const.IPv4,
- tenant_id='admin-tenant')
- self._make_security_group_rule(self.fmt, rule, admin_context=True)
+ ethertype=const.IPv4)
+ self._make_security_group_rule(self.fmt, rule,
+ tenant_id='admin-tenant',
+ as_admin=True)
# Now, let's make sure all the rules are there, with their odd
# tenant_id behavior.
@@ -878,23 +873,20 @@ class TestSecurityGroups(SecurityGroupDBTestCase):
res = self.new_list_request('security-groups')
sg = self.deserialize(self.fmt, res.get_response(self.ext_api))
self._delete('security-groups', sg['security_groups'][0]['id'],
- webob.exc.HTTPNoContent.code)
+ webob.exc.HTTPNoContent.code, as_admin=True)
def test_delete_default_security_group_nonadmin(self):
with self.network():
res = self.new_list_request('security-groups')
sg = self.deserialize(self.fmt, res.get_response(self.ext_api))
- neutron_context = context.Context(
- '', test_db_base_plugin_v2.TEST_TENANT_ID)
self._delete('security-groups', sg['security_groups'][0]['id'],
webob.exc.HTTPConflict.code,
- neutron_context=neutron_context)
+ tenant_id=test_db_base_plugin_v2.TEST_TENANT_ID)
def test_security_group_list_creates_default_security_group(self):
- neutron_context = context.Context(
- '', test_db_base_plugin_v2.TEST_TENANT_ID)
sg = self._list('security-groups',
- neutron_context=neutron_context).get('security_groups')
+ tenant_id=test_db_base_plugin_v2.TEST_TENANT_ID).get(
+ 'security_groups')
self.assertEqual(1, len(sg))
def test_security_group_port_create_creates_default_security_group(self):
@@ -2112,13 +2104,15 @@ class TestSecurityGroups(SecurityGroupDBTestCase):
with self.security_group() as sg:
rule = self._build_security_group_rule(
sg['security_group']['id'], 'ingress', const.PROTO_NUM_TCP)
- rule['security_group_rule'].update({'id': specified_id,
- 'port_range_min': None,
- 'port_range_max': None,
- 'remote_ip_prefix': None,
- 'remote_group_id': None,
- 'remote_address_group_id':
- None})
+ rule['security_group_rule'].update({
+ 'id': specified_id,
+ 'port_range_min': None,
+ 'port_range_max': None,
+ 'remote_ip_prefix': None,
+ 'remote_group_id': None,
+ 'tenant_id': test_db_base_plugin_v2.TEST_TENANT_ID,
+ 'remote_address_group_id':
+ None})
result = self.plugin.create_security_group_rule(
neutron_context, rule)
self.assertEqual(specified_id, result['id'])
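The security-group refactor moves tenancy out of the request body and into the request context: `_build_security_group` no longer embeds a `tenant_id`, and the create helpers thread `tenant_id`/`as_admin` through to `new_create_request` instead. The resulting payload shape, in miniature:

    def build_sg_body(name, description):
        # Tenant-agnostic body: the server derives ownership from the
        # request context, so tests only pass tenant_id to the request
        # helper when they need a project other than the default one.
        return {'security_group': {'name': name,
                                   'description': description}}

    body = build_sg_body('webservers', 'allow http')
    assert 'tenant_id' not in body['security_group']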
diff --git a/neutron/tests/unit/extensions/test_segment.py b/neutron/tests/unit/extensions/test_segment.py
index 049205497f..1a3e33e8cf 100644
--- a/neutron/tests/unit/extensions/test_segment.py
+++ b/neutron/tests/unit/extensions/test_segment.py
@@ -36,6 +36,7 @@ from oslo_config import cfg
from oslo_utils import uuidutils
import webob.exc
+from neutron.common import config
from neutron.conf import experimental as c_experimental
from neutron.conf.plugins.ml2 import config as ml2_config
from neutron.conf.plugins.ml2.drivers import driver_type
@@ -81,6 +82,7 @@ class SegmentTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
VLAN_MAX = 209
def setUp(self, plugin=None):
+ config.register_common_config_options()
# Remove MissingAuthPlugin exception from logs
self.patch_notifier = mock.patch(
'neutron.notifiers.batch_notifier.BatchNotifier._notify')
@@ -112,7 +114,7 @@ class SegmentTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
segment['segment'][k] = None if v is None else str(v)
segment_req = self.new_create_request(
- 'segments', segment, fmt)
+ 'segments', segment, fmt, as_admin=True)
segment_res = segment_req.get_response(self.ext_api)
if expected_res_status:
@@ -187,7 +189,8 @@ class TestSegmentNameDescription(SegmentTestCase):
result = self._update('segments',
segment['segment']['id'],
{'segment': {'name': 'Segment name'}},
- expected_code=webob.exc.HTTPOk.code)
+ expected_code=webob.exc.HTTPOk.code,
+ as_admin=True)
self.assertEqual('Segment name', result['segment']['name'])
def test_update_segment_set_description(self):
@@ -195,7 +198,8 @@ class TestSegmentNameDescription(SegmentTestCase):
result = self._update('segments',
segment['segment']['id'],
{'segment': {'description': 'Segment desc'}},
- expected_code=webob.exc.HTTPOk.code)
+ expected_code=webob.exc.HTTPOk.code,
+ as_admin=True)
self.assertEqual('Segment desc', result['segment']['description'])
def test_update_segment_set_name_to_none(self):
@@ -204,7 +208,8 @@ class TestSegmentNameDescription(SegmentTestCase):
result = self._update('segments',
segment['segment']['id'],
{'segment': {'name': None}},
- expected_code=webob.exc.HTTPOk.code)
+ expected_code=webob.exc.HTTPOk.code,
+ as_admin=True)
self.assertIsNone(result['segment']['name'])
def test_update_segment_set_description_to_none(self):
@@ -271,7 +276,8 @@ class TestSegment(SegmentTestCase):
with self.network() as network:
network = network['network']
- local_segment = self._list('segments')['segments'][0]
+ local_segment = self._list('segments',
+ as_admin=True)['segments'][0]
with mock.patch.object(registry, 'publish') as publish:
publish.side_effect = exceptions.CallbackFailure(errors=Exception)
self.assertRaises(webob.exc.HTTPClientError,
@@ -310,7 +316,7 @@ class TestSegment(SegmentTestCase):
physical_network='physnet0')
segment = self.segment(network_id=network['id'], segmentation_id=201,
physical_network='physnet1')
- self._delete('segments', segment['segment']['id'])
+ self._delete('segments', segment['segment']['id'], as_admin=True)
self._show('segments', segment['segment']['id'],
expected_code=webob.exc.HTTPNotFound.code)
@@ -324,8 +330,10 @@ class TestSegment(SegmentTestCase):
segment_id = segment['segment']['id']
with self.subnet(network=network, segment_id=segment_id):
self._delete('segments', segment_id,
- expected_code=webob.exc.HTTPConflict.code)
- exist_segment = self._show('segments', segment_id)
+ expected_code=webob.exc.HTTPConflict.code,
+ as_admin=True)
+ exist_segment = self._show('segments', segment_id,
+ as_admin=True)
self.assertEqual(segment_id, exist_segment['segment']['id'])
def test_get_segment(self):
@@ -334,7 +342,8 @@ class TestSegment(SegmentTestCase):
segment = self._test_create_segment(network_id=network['id'],
physical_network='physnet',
segmentation_id=200)
- req = self.new_show_request('segments', segment['segment']['id'])
+ req = self.new_show_request('segments', segment['segment']['id'],
+ as_admin=True)
res = self.deserialize(self.fmt, req.get_response(self.ext_api))
self.assertEqual(segment['segment']['id'], res['segment']['id'])
@@ -347,14 +356,15 @@ class TestSegment(SegmentTestCase):
self._test_create_segment(network_id=network['id'],
physical_network='physnet2',
segmentation_id=201)
- res = self._list('segments')
+ res = self._list('segments', as_admin=True)
self.assertEqual(3, len(res['segments']))
def test_list_segments_with_sort(self):
with self.network() as network:
network = network['network']
- local_segment = {'segment': self._list('segments')['segments'][0]}
+ local_segment = {'segment': self._list('segments',
+ as_admin=True)['segments'][0]}
s1 = self._test_create_segment(network_id=network['id'],
physical_network='physnet1',
segmentation_id=200)
@@ -364,13 +374,15 @@ class TestSegment(SegmentTestCase):
self._test_list_with_sort('segment',
(s2, s1, local_segment),
[('physical_network', 'desc')],
- query_params='network_id=%s' % network['id'])
+ query_params='network_id=%s' % network['id'],
+ as_admin=True)
def test_list_segments_with_pagination(self):
with self.network() as network:
network = network['network']
- local_segment = {'segment': self._list('segments')['segments'][0]}
+ local_segment = {'segment': self._list('segments',
+ as_admin=True)['segments'][0]}
s1 = self._test_create_segment(network_id=network['id'],
physical_network='physnet0',
segmentation_id=200)
@@ -384,7 +396,8 @@ class TestSegment(SegmentTestCase):
'segment',
(local_segment, s1, s2, s3),
('physical_network', 'asc'), 3, 2,
- query_params='network_id=%s' % network['id'])
+ query_params='network_id=%s' % network['id'],
+ as_admin=True)
def test_list_segments_with_pagination_reverse(self):
with self.network() as network:
@@ -403,7 +416,8 @@ class TestSegment(SegmentTestCase):
'segment',
(s1, s2, s3),
('physical_network', 'asc'), 2, 2,
- query_params='network_id=%s' % network['id'])
+ query_params='network_id=%s' % network['id'],
+ as_admin=True)
def test_update_segments(self):
with self.network() as network:
@@ -454,7 +468,8 @@ class TestSegmentSubnetAssociation(SegmentTestCase):
with self.subnet(network=network, segment_id=segment_id) as subnet:
subnet = subnet['subnet']
- request = self.new_show_request('subnets', subnet['id'])
+            request = self.new_show_request('subnets', subnet['id'],
+                                            as_admin=True)
response = request.get_response(self.api)
res = self.deserialize(self.fmt, response)
self.assertEqual(segment_id,
@@ -554,12 +568,14 @@ class TestSegmentSubnetAssociation(SegmentTestCase):
with self.network() as network:
pass
- segment_id = self._list('segments')['segments'][0]['id']
+ segment_id = self._list('segments',
+ as_admin=True)['segments'][0]['id']
with self.subnet(network=network, segment_id=None) as subnet:
subnet = subnet['subnet']
data = {'subnet': {'segment_id': segment_id}}
- request = self.new_update_request('subnets', data, subnet['id'])
+ request = self.new_update_request('subnets', data, subnet['id'],
+ as_admin=True)
response = request.get_response(self.api)
res = self.deserialize(self.fmt, response)
@@ -580,7 +596,8 @@ class TestSegmentSubnetAssociation(SegmentTestCase):
subnet = subnet['subnet']
data = {'subnet': {'segment_id': segment1['id']}}
- request = self.new_update_request('subnets', data, subnet['id'])
+ request = self.new_update_request('subnets', data, subnet['id'],
+ as_admin=True)
response = request.get_response(self.api)
res = self.deserialize(self.fmt, response)
@@ -602,7 +619,8 @@ class TestSegmentSubnetAssociation(SegmentTestCase):
subnet = subnet['subnet']
data = {'subnet': {'segment_id': segment1['id']}}
- request = self.new_update_request('subnets', data, subnet['id'])
+ request = self.new_update_request('subnets', data, subnet['id'],
+ as_admin=True)
response = request.get_response(self.api)
self.assertEqual(webob.exc.HTTPBadRequest.code, response.status_int)
@@ -625,7 +643,8 @@ class TestSegmentSubnetAssociation(SegmentTestCase):
subnet2 = subnet2['subnet']
data = {'subnet': {'segment_id': segment1['id']}}
- request = self.new_update_request('subnets', data, subnet1['id'])
+ request = self.new_update_request('subnets', data, subnet1['id'],
+ as_admin=True)
response = request.get_response(self.api)
self.assertEqual(webob.exc.HTTPBadRequest.code, response.status_int)
@@ -634,7 +653,7 @@ class TestSegmentSubnetAssociation(SegmentTestCase):
with self.network() as network:
net = network['network']
- segment_id = self._list('segments')['segments'][0]['id']
+ segment_id = self._list('segments', as_admin=True)['segments'][0]['id']
with self.subnet(network=network, segment_id=segment_id) as subnet:
subnet = subnet['subnet']
@@ -643,7 +662,8 @@ class TestSegmentSubnetAssociation(SegmentTestCase):
segmentation_id=202)['segment']
data = {'subnet': {'segment_id': segment2['id']}}
- request = self.new_update_request('subnets', data, subnet['id'])
+ request = self.new_update_request('subnets', data, subnet['id'],
+ as_admin=True)
response = request.get_response(self.api)
self.assertEqual(webob.exc.HTTPBadRequest.code, response.status_int)
@@ -853,7 +873,7 @@ class TestMl2HostSegmentMappingOVS(HostSegmentMappingTestCase):
def test_segment_deletion_removes_host_mapping(self):
host = 'host1'
segment = self._test_one_segment_one_host(host)
- self._delete('segments', segment['id'])
+ self._delete('segments', segment['id'], as_admin=True)
segments_host_db = self._get_segments_for_host(host)
self.assertFalse(segments_host_db)
@@ -1019,7 +1039,8 @@ class SegmentAwareIpamTestCase(SegmentTestCase):
segment_id=segment['segment']['id'],
ip_version=ip_version,
cidr=cidr,
- allocation_pools=allocation_pools) as subnet:
+ allocation_pools=allocation_pools,
+ as_admin=True) as subnet:
self._validate_l2_adjacency(network['network']['id'],
is_adjacent=False)
return subnet
@@ -1096,6 +1117,7 @@ class TestSegmentAwareIpam(SegmentAwareIpamTestCase):
response = self._create_port(self.fmt,
net_id=network['network']['id'],
tenant_id=network['network']['tenant_id'],
+ is_admin=True,
fixed_ips=[
{'subnet_id': subnet['subnet']['id']}
])
@@ -1123,6 +1145,7 @@ class TestSegmentAwareIpam(SegmentAwareIpamTestCase):
response = self._create_port(self.fmt,
net_id=network['network']['id'],
tenant_id=network['network']['tenant_id'],
+ is_admin=True,
arg_list=(portbindings.HOST_ID,),
**{portbindings.HOST_ID: 'fakehost'})
res = self.deserialize(self.fmt, response)
@@ -1145,6 +1168,7 @@ class TestSegmentAwareIpam(SegmentAwareIpamTestCase):
response = self._create_port(self.fmt,
net_id=network['network']['id'],
tenant_id=network['network']['tenant_id'],
+ is_admin=True,
arg_list=(portbindings.HOST_ID,),
**{portbindings.HOST_ID: 'fakehost'})
res = self.deserialize(self.fmt, response)
@@ -1171,6 +1195,7 @@ class TestSegmentAwareIpam(SegmentAwareIpamTestCase):
response = self._create_port(self.fmt,
net_id=network['network']['id'],
tenant_id=network['network']['tenant_id'],
+ is_admin=True,
arg_list=(portbindings.HOST_ID,),
**{portbindings.HOST_ID: 'fakehost'})
@@ -1186,6 +1211,7 @@ class TestSegmentAwareIpam(SegmentAwareIpamTestCase):
response = self._create_port(self.fmt,
net_id=network['network']['id'],
tenant_id=network['network']['tenant_id'],
+ is_admin=True,
arg_list=(portbindings.HOST_ID,),
**{portbindings.HOST_ID: 'fakehost'})
res = self.deserialize(self.fmt, response)
@@ -1199,6 +1225,7 @@ class TestSegmentAwareIpam(SegmentAwareIpamTestCase):
response = self._create_port(self.fmt,
net_id=network['network']['id'],
tenant_id=network['network']['tenant_id'],
+ is_admin=True,
arg_list=(portbindings.HOST_ID,),
**{portbindings.HOST_ID: 'fakehost'})
res = self.deserialize(self.fmt, response)
@@ -1218,6 +1245,7 @@ class TestSegmentAwareIpam(SegmentAwareIpamTestCase):
response = self._create_port(self.fmt,
net_id=network['network']['id'],
tenant_id=network['network']['tenant_id'],
+ is_admin=True,
arg_list=(portbindings.HOST_ID,),
**{portbindings.HOST_ID: 'fakehost'})
self.deserialize(self.fmt, response)
@@ -1280,6 +1308,7 @@ class TestSegmentAwareIpam(SegmentAwareIpamTestCase):
self.fmt,
net_id=network['network']['id'],
tenant_id=network['network']['tenant_id'],
+ is_admin=True,
**kwargs)
port = self.deserialize(self.fmt, response)
request = self.new_show_request('ports', port['port']['id'])
@@ -1324,6 +1353,7 @@ class TestSegmentAwareIpam(SegmentAwareIpamTestCase):
response = self._create_port(self.fmt,
net_id=network['network']['id'],
tenant_id=network['network']['tenant_id'],
+ is_admin=True,
arg_list=(portbindings.HOST_ID,),
**{portbindings.HOST_ID: 'fakehost'})
port = self.deserialize(self.fmt, response)
@@ -1360,6 +1390,7 @@ class TestSegmentAwareIpam(SegmentAwareIpamTestCase):
response = self._create_port(self.fmt,
net_id=network['network']['id'],
tenant_id=network['network']['tenant_id'],
+ is_admin=True,
arg_list=(portbindings.HOST_ID,),
**{portbindings.HOST_ID: 'fakehost'})
port = self.deserialize(self.fmt, response)
@@ -1401,7 +1432,8 @@ class TestSegmentAwareIpam(SegmentAwareIpamTestCase):
def _create_deferred_ip_port(self, network):
response = self._create_port(self.fmt,
net_id=network['network']['id'],
- tenant_id=network['network']['tenant_id'])
+ tenant_id=network['network']['tenant_id'],
+ is_admin=True)
port = self.deserialize(self.fmt, response)
ips = port['port']['fixed_ips']
self.assertEqual(0, len(ips))
@@ -1421,7 +1453,8 @@ class TestSegmentAwareIpam(SegmentAwareIpamTestCase):
# Try requesting an IP (but the only subnet is on a segment)
data = {'port': {portbindings.HOST_ID: 'fakehost'}}
port_id = port['port']['id']
- port_req = self.new_update_request('ports', data, port_id)
+ port_req = self.new_update_request('ports', data, port_id,
+ as_admin=True)
response = port_req.get_response(self.api)
# Port update succeeds and allocates a new IP address.
@@ -1439,7 +1472,8 @@ class TestSegmentAwareIpam(SegmentAwareIpamTestCase):
with self.subnet(network=network):
data = {'port': {portbindings.HOST_ID: 'fakehost'}}
port_id = port['port']['id']
- port_req = self.new_update_request('ports', data, port_id)
+ port_req = self.new_update_request('ports', data, port_id,
+ as_admin=True)
response = port_req.get_response(self.api)
self.assertEqual(webob.exc.HTTPOk.code, response.status_int)
@@ -1455,7 +1489,8 @@ class TestSegmentAwareIpam(SegmentAwareIpamTestCase):
response = self._create_port(self.fmt,
net_id=network['network']['id'],
tenant_id=network['network']['tenant_id'],
- fixed_ips=[])
+ fixed_ips=[],
+ is_admin=True)
port = self.deserialize(self.fmt, response)
ips = port['port']['fixed_ips']
self.assertEqual(0, len(ips))
@@ -1463,7 +1498,8 @@ class TestSegmentAwareIpam(SegmentAwareIpamTestCase):
# Create the subnet and try to update the port to get an IP
data = {'port': {portbindings.HOST_ID: 'fakehost'}}
port_id = port['port']['id']
- port_req = self.new_update_request('ports', data, port_id)
+ port_req = self.new_update_request('ports', data, port_id,
+ as_admin=True)
response = port_req.get_response(self.api)
self.assertEqual(webob.exc.HTTPOk.code, response.status_int)
@@ -1483,7 +1519,8 @@ class TestSegmentAwareIpam(SegmentAwareIpamTestCase):
portbindings.HOST_ID: 'fakehost',
'fixed_ips': [{'subnet_id': subnet['subnet']['id']}]}}
port_id = port['port']['id']
- port_req = self.new_update_request('ports', data, port_id)
+ port_req = self.new_update_request('ports', data, port_id,
+ as_admin=True)
response = port_req.get_response(self.api)
self.assertEqual(webob.exc.HTTPOk.code, response.status_int)
@@ -1508,7 +1545,8 @@ class TestSegmentAwareIpam(SegmentAwareIpamTestCase):
portbindings.HOST_ID: 'fakehost',
'fixed_ips': []}}
port_id = port['port']['id']
- port_req = self.new_update_request('ports', data, port_id)
+ port_req = self.new_update_request('ports', data, port_id,
+ as_admin=True)
response = port_req.get_response(self.api)
self.assertEqual(webob.exc.HTTPOk.code, response.status_int)
@@ -1526,7 +1564,8 @@ class TestSegmentAwareIpam(SegmentAwareIpamTestCase):
# Try requesting an IP (but the only subnet is on a segment)
data = {'port': {portbindings.HOST_ID: 'fakehost'}}
port_id = port['port']['id']
- port_req = self.new_update_request('ports', data, port_id)
+ port_req = self.new_update_request('ports', data, port_id,
+ as_admin=True)
response = port_req.get_response(self.api)
res = self.deserialize(self.fmt, response)
@@ -1549,7 +1588,8 @@ class TestSegmentAwareIpam(SegmentAwareIpamTestCase):
# Try requesting an IP (but the only subnet is on a segment)
data = {'port': {portbindings.HOST_ID: 'fakehost'}}
port_id = port['port']['id']
- port_req = self.new_update_request('ports', data, port_id)
+ port_req = self.new_update_request('ports', data, port_id,
+ as_admin=True)
response = port_req.get_response(self.api)
self.deserialize(self.fmt, response)
@@ -1597,7 +1637,8 @@ class TestSegmentAwareIpam(SegmentAwareIpamTestCase):
# Try requesting an IP (but the subnet ran out of ips)
data = {'port': {portbindings.HOST_ID: 'fakehost'}}
port_id = port['port']['id']
- port_req = self.new_update_request('ports', data, port_id)
+ port_req = self.new_update_request('ports', data, port_id,
+ as_admin=True)
response = port_req.get_response(self.api)
res = self.deserialize(self.fmt, response)
@@ -1617,6 +1658,7 @@ class TestSegmentAwareIpam(SegmentAwareIpamTestCase):
response = self._create_port(self.fmt,
net_id=network['network']['id'],
tenant_id=network['network']['tenant_id'],
+ is_admin=True,
arg_list=(portbindings.HOST_ID,),
**{portbindings.HOST_ID: 'fakehost'})
self._assert_one_ip_in_subnet(response, subnets[1]['subnet']['cidr'])
@@ -1624,7 +1666,8 @@ class TestSegmentAwareIpam(SegmentAwareIpamTestCase):
# Now, try to update binding to a host on the other segment
data = {'port': {portbindings.HOST_ID: 'fakehost2'}}
- port_req = self.new_update_request('ports', data, port['port']['id'])
+ port_req = self.new_update_request('ports', data, port['port']['id'],
+ as_admin=True)
response = port_req.get_response(self.api)
# It fails since the IP address isn't compatible with the new segment
@@ -1642,6 +1685,7 @@ class TestSegmentAwareIpam(SegmentAwareIpamTestCase):
response = self._create_port(self.fmt,
net_id=network['network']['id'],
tenant_id=network['network']['tenant_id'],
+ is_admin=True,
arg_list=(portbindings.HOST_ID,),
**{portbindings.HOST_ID: 'fakehost'})
self._assert_one_ip_in_subnet(response, subnets[1]['subnet']['cidr'])
@@ -1649,7 +1693,8 @@ class TestSegmentAwareIpam(SegmentAwareIpamTestCase):
# Now, try to update binding to another host in same segment
data = {'port': {portbindings.HOST_ID: 'fakehost1'}}
- port_req = self.new_update_request('ports', data, port['port']['id'])
+ port_req = self.new_update_request('ports', data, port['port']['id'],
+ as_admin=True)
response = port_req.get_response(self.api)
# Since the new host is in the same segment, it succeeds.
@@ -1669,7 +1714,8 @@ class TestSegmentAwareIpam(SegmentAwareIpamTestCase):
data = {'port': {portbindings.HOST_ID: 'fakehost',
port_apidef.PORT_MAC_ADDRESS: '00:00:00:00:00:01'}}
port_id = port['port']['id']
- port_req = self.new_update_request('ports', data, port_id)
+ port_req = self.new_update_request('ports', data, port_id,
+ as_admin=True)
response = port_req.get_response(self.api)
# Port update succeeds and allocates a new IP address.
@@ -1720,6 +1766,7 @@ class TestSegmentAwareIpam(SegmentAwareIpamTestCase):
response = self._create_port(self.fmt,
net_id=network['network']['id'],
tenant_id=network['network']['tenant_id'],
+ is_admin=True,
arg_list=(portbindings.HOST_ID,),
**{portbindings.HOST_ID: 'fakehost_a'})
res = self.deserialize(self.fmt, response)
@@ -1847,7 +1894,8 @@ class TestSegmentAwareIpamML2(TestSegmentAwareIpam):
network, segment, subnet = self._create_test_segment_with_subnet()
self.assertTrue(self.VLAN_MIN <=
segment['segment']['segmentation_id'] <= self.VLAN_MAX)
- retrieved_segment = self._show('segments', segment['segment']['id'])
+ retrieved_segment = self._show('segments', segment['segment']['id'],
+ as_admin=True)
self.assertEqual(segment['segment']['segmentation_id'],
retrieved_segment['segment']['segmentation_id'])
@@ -1973,7 +2021,8 @@ class TestNovaSegmentNotifier(SegmentAwareIpamTestCase):
def test_update_subnet_association_with_segment(self, cidr='10.0.0.0/24',
allocation_pools=None):
with self.network() as network:
- segment_id = self._list('segments')['segments'][0]['id']
+ segment_id = self._list('segments',
+ as_admin=True)['segments'][0]['id']
network_id = network['network']['id']
self._setup_host_mappings([(segment_id, 'fakehost')])
@@ -1991,9 +2040,11 @@ class TestNovaSegmentNotifier(SegmentAwareIpamTestCase):
segment_id=None) as subnet:
self._validate_l2_adjacency(network_id, is_adjacent=True)
data = {'subnet': {'segment_id': segment_id}}
- self.new_update_request('subnets', data, subnet['subnet']['id'])
+ self.new_update_request('subnets', data, subnet['subnet']['id'],
+ as_admin=True)
self.new_update_request(
- 'subnets', data, subnet['subnet']['id']).get_response(self.api)
+ 'subnets', data, subnet['subnet']['id'],
+ as_admin=True).get_response(self.api)
self._validate_l2_adjacency(network_id, is_adjacent=False)
self._assert_inventory_creation(segment_id, aggregate, subnet)
@@ -2285,7 +2336,8 @@ class TestNovaSegmentNotifier(SegmentAwareIpamTestCase):
def _create_test_port(self, network_id, tenant_id, subnet, **kwargs):
port = self._make_port(self.fmt, network_id, tenant_id=tenant_id,
- arg_list=(portbindings.HOST_ID,), **kwargs)
+ as_admin=True, arg_list=(portbindings.HOST_ID,),
+ **kwargs)
self.batch_notifier._notify()
return port
@@ -2401,7 +2453,7 @@ class TestNovaSegmentNotifier(SegmentAwareIpamTestCase):
if compute_owned:
port_data['port']['device_owner'] = (
constants.DEVICE_OWNER_COMPUTE_PREFIX)
- self._update('ports', port['port']['id'], port_data)
+ self._update('ports', port['port']['id'], port_data, as_admin=True)
self.batch_notifier._notify()
self._assert_inventory_update_port(
first_subnet['subnet']['segment_id'], original_inventory,
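Most of the segment changes are driven by two rules: segment CRUD itself is admin-only, and `binding:host_id` is an admin-only port attribute, which is why every port create or update that binds to a host now carries `is_admin=True`/`as_admin=True`. A toy check capturing the second rule (the enforcement shape is assumed; the attribute names come from the diff):

    ADMIN_ONLY_PORT_ATTRS = {'binding:host_id', 'binding:profile'}

    def may_update(port_attrs, roles):
        # Touching an admin-only binding attribute requires the admin role.
        if ADMIN_ONLY_PORT_ATTRS & set(port_attrs):
            return 'admin' in roles
        return 'member' in roles

    assert not may_update({'binding:host_id': 'fakehost'},
                          ['member', 'reader'])
    assert may_update({'binding:host_id': 'fakehost'},
                      ['member', 'reader', 'admin'])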
diff --git a/neutron/tests/unit/extensions/test_servicetype.py b/neutron/tests/unit/extensions/test_servicetype.py
index 7552b8a86a..5af9c14ac3 100644
--- a/neutron/tests/unit/extensions/test_servicetype.py
+++ b/neutron/tests/unit/extensions/test_servicetype.py
@@ -25,6 +25,7 @@ import webob.exc as webexc
import webtest
from neutron.api import extensions
+from neutron.common import config
from neutron.db import servicetype_db as st_db
from neutron.extensions import servicetype
from neutron.objects import servicetype as servicetype_obj
@@ -48,6 +49,7 @@ class ServiceTypeManagerTestCase(testlib_api.SqlTestCase):
provconf.NeutronModule, 'service_providers').start()
super(ServiceTypeManagerTestCase, self).setUp()
self.ctx = context.get_admin_context()
+ config.register_common_config_options()
self.setup_coreplugin(PLUGIN_NAME)
def _set_override(self, service_providers):
@@ -192,6 +194,7 @@ class ServiceTypeExtensionTestCaseBase(testlib_api.WebTestCase):
def setUp(self):
# This is needed because otherwise a failure will occur due to
# nonexisting core_plugin
+ config.register_common_config_options()
self.setup_coreplugin(test_db_base_plugin_v2.DB_PLUGIN_KLASS)
cfg.CONF.set_override('service_plugins',
@@ -248,7 +251,8 @@ class ServiceTypeManagerExtTestCase(ServiceTypeExtensionTestCaseBase):
super(ServiceTypeManagerExtTestCase, self).setUp()
def _list_service_providers(self):
- return self.api.get(_get_path('service-providers', fmt=self.fmt))
+ return self.api.get(_get_path('service-providers', fmt=self.fmt),
+ extra_environ=test_base._get_neutron_env())
def test_list_service_providers(self):
res = self._list_service_providers()
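Two independent fixes land in this file: the service-provider listing now sends the standard Neutron test environ so the request carries a real project context, and register_common_config_options() runs before the core plugin is set up. The ordering matters because these tests read options owned by neutron.common.config; without explicit registration a standalone run can fail with NoSuchOptError. The setup pattern in isolation (option value is illustrative):

    from oslo_config import cfg

    from neutron.common import config

    config.register_common_config_options()  # must precede option access
    cfg.CONF.set_override('service_plugins', ['dummy-plugin'])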
diff --git a/neutron/tests/unit/extensions/test_subnet_onboard.py b/neutron/tests/unit/extensions/test_subnet_onboard.py
index bc81600349..d0471ee57e 100644
--- a/neutron/tests/unit/extensions/test_subnet_onboard.py
+++ b/neutron/tests/unit/extensions/test_subnet_onboard.py
@@ -37,7 +37,7 @@ class SubnetOnboardTestsBase(object):
tenant_id = project_id if project_id else kwargs.get(
'tenant_id', None)
if not tenant_id:
- tenant_id = _uuid()
+ tenant_id = self._tenant_id
scope_data = {'tenant_id': tenant_id, 'ip_version': ip_version,
'shared': shared, 'name': name + '-scope'}
@@ -53,7 +53,7 @@ class SubnetOnboardTestsBase(object):
tenant_id = project_id if project_id else kwargs.get(
'tenant_id', None)
if not tenant_id:
- tenant_id = _uuid()
+ tenant_id = self._tenant_id
pool_data = {'tenant_id': tenant_id, 'shared': shared, 'name': name,
'address_scope_id': address_scope_id,
'prefixes': prefixes, 'is_default': is_default_pool}
diff --git a/neutron/tests/unit/extensions/test_subnet_service_types.py b/neutron/tests/unit/extensions/test_subnet_service_types.py
index a086ebef74..2d2e5c0c28 100644
--- a/neutron/tests/unit/extensions/test_subnet_service_types.py
+++ b/neutron/tests/unit/extensions/test_subnet_service_types.py
@@ -344,13 +344,14 @@ class SubnetServiceTypesExtensionTestCase(
tenant_id=network['tenant_id'],
device_owner=service_type,
arg_list=(portbindings.HOST_ID,),
- **{portbindings.HOST_ID: 'fakehost'})
+ **{portbindings.HOST_ID: 'fakehost'},
+ is_admin=True)
port = self.deserialize('json', port)['port']
# Update the port's host binding.
data = {'port': {portbindings.HOST_ID: 'fakehost2'}}
# self._update will fail with a MismatchError if the update cannot be
# applied
- port = self._update('ports', port['id'], data)
+ port = self._update('ports', port['id'], data, as_admin=True)
class SubnetServiceTypesExtensionTestCasev6(
diff --git a/neutron/tests/unit/extensions/test_subnetpool_prefix_ops.py b/neutron/tests/unit/extensions/test_subnetpool_prefix_ops.py
index 84e38073f5..01ef193be4 100644
--- a/neutron/tests/unit/extensions/test_subnetpool_prefix_ops.py
+++ b/neutron/tests/unit/extensions/test_subnetpool_prefix_ops.py
@@ -36,7 +36,7 @@ class SubnetpoolPrefixOpsTestBase(object):
tenant_id = project_id if project_id else kwargs.get(
'tenant_id', None)
if not tenant_id:
- tenant_id = _uuid()
+ tenant_id = self._tenant_id
scope_data = {'tenant_id': tenant_id, 'ip_version': ip_version,
'shared': shared, 'name': name + '-scope'}
@@ -52,7 +52,7 @@ class SubnetpoolPrefixOpsTestBase(object):
tenant_id = project_id if project_id else kwargs.get(
'tenant_id', None)
if not tenant_id:
- tenant_id = _uuid()
+ tenant_id = self._tenant_id
pool_data = {'tenant_id': tenant_id, 'shared': shared, 'name': name,
'address_scope_id': address_scope_id,
'prefixes': prefixes, 'is_default': is_default_pool}
diff --git a/neutron/tests/unit/fake_resources.py b/neutron/tests/unit/fake_resources.py
index 2e2737fe63..ca745f6b64 100644
--- a/neutron/tests/unit/fake_resources.py
+++ b/neutron/tests/unit/fake_resources.py
@@ -163,6 +163,7 @@ class FakeOvsdbNbOvnIdl(object):
self.ha_chassis_group_del = mock.Mock()
self.ha_chassis_group_add_chassis = mock.Mock()
self.ha_chassis_group_del_chassis = mock.Mock()
+ self.get_lrouter_gw_ports = mock.Mock()
class FakeOvsdbSbOvnIdl(object):
@@ -321,6 +322,7 @@ class FakeNetwork(object):
'availability_zone_hints': [],
'is_default': False,
'standard_attr_id': 1,
+ 'mtu': 1500,
}
# Overwrite default attributes.
diff --git a/neutron/tests/unit/objects/test_router.py b/neutron/tests/unit/objects/test_router.py
index 5c86aec2fc..f1886db0c8 100644
--- a/neutron/tests/unit/objects/test_router.py
+++ b/neutron/tests/unit/objects/test_router.py
@@ -17,6 +17,7 @@ from unittest import mock
import netaddr
+from neutron_lib import constants
from neutron_lib.db import api as db_api
from oslo_utils import uuidutils
@@ -206,6 +207,36 @@ class RouterPortDbObjectTestCase(obj_test_base.BaseDbObjectTestCase,
{'router_id': lambda: self._create_test_router_id(),
'port_id': lambda: self._create_test_port_id()})
+ def test_get_gw_port_ids_by_router_id(self):
+ router_id = self._create_test_router_id()
+ router_gws = [
+ self._make_object({
+ 'router_id': router_id,
+ 'port_id': self._create_test_port_id(
+ device_owner=constants.DEVICE_OWNER_ROUTER_GW),
+ 'port_type': constants.DEVICE_OWNER_ROUTER_GW}),
+ self._make_object({
+ 'router_id': router_id,
+ 'port_id': self._create_test_port_id(
+ device_owner=constants.DEVICE_OWNER_ROUTER_GW),
+ 'port_type': constants.DEVICE_OWNER_ROUTER_GW,
+ })
+ ]
+ for gw in router_gws:
+ gw.create()
+
+ other = self._make_object({
+ 'router_id': router_id,
+ 'port_id': self._create_test_port_id(
+ device_owner=constants.DEVICE_OWNER_ROUTER_INTF),
+ 'port_type': constants.DEVICE_OWNER_ROUTER_INTF,
+ })
+ other.create()
+
+ res_gws = self._test_class.get_gw_port_ids_by_router_id(self.context,
+ router_id)
+ self.assertCountEqual(res_gws, [rp.port_id for rp in router_gws])
+
class DVRMacAddressIfaceObjectTestCase(obj_test_base.BaseObjectIfaceTestCase):
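The new test pins down the contract of get_gw_port_ids_by_router_id(): given a router with two gateway ports and one interface port, only the gateway port IDs come back. A plausible sketch in the usual versioned-objects style; the real method lives in neutron/objects/router.py, so treat this body as an assumption:

    from neutron_lib import constants

    @classmethod
    def get_gw_port_ids_by_router_id(cls, context, router_id):
        # Filter RouterPort rows on port_type instead of joining Port.
        objs = cls.get_objects(context, router_id=router_id,
                               port_type=constants.DEVICE_OWNER_ROUTER_GW)
        return [obj.port_id for obj in objs]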
diff --git a/neutron/tests/unit/plugins/ml2/drivers/l2pop/test_mech_driver.py b/neutron/tests/unit/plugins/ml2/drivers/l2pop/test_mech_driver.py
index ef694a059e..b1be2da6f9 100644
--- a/neutron/tests/unit/plugins/ml2/drivers/l2pop/test_mech_driver.py
+++ b/neutron/tests/unit/plugins/ml2/drivers/l2pop/test_mech_driver.py
@@ -78,6 +78,7 @@ class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase):
net_arg = {pnet.NETWORK_TYPE: 'vxlan',
pnet.SEGMENTATION_ID: '1'}
self._network = self._make_network(self.fmt, 'net1', True,
+ as_admin=True,
arg_list=(pnet.NETWORK_TYPE,
pnet.SEGMENTATION_ID,),
**net_arg)
@@ -86,6 +87,7 @@ class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase):
pnet.PHYSICAL_NETWORK: 'physnet1',
pnet.SEGMENTATION_ID: '2'}
self._network2 = self._make_network(self.fmt, 'net2', True,
+ as_admin=True,
arg_list=(pnet.NETWORK_TYPE,
pnet.PHYSICAL_NETWORK,
pnet.SEGMENTATION_ID,),
@@ -94,6 +96,7 @@ class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase):
net_arg = {pnet.NETWORK_TYPE: 'flat',
pnet.PHYSICAL_NETWORK: 'noagent'}
self._network3 = self._make_network(self.fmt, 'net3', True,
+ as_admin=True,
arg_list=(pnet.NETWORK_TYPE,
pnet.PHYSICAL_NETWORK,),
**net_arg)
@@ -299,6 +302,7 @@ class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase):
host_arg = {portbindings.HOST_ID: HOST_4, 'admin_state_up': True}
with self.port(subnet=snet,
+ is_admin=True,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port1:
@@ -329,6 +333,7 @@ class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase):
with self.subnet(network=self._network, enable_dhcp=False) as snet:
host_arg = {portbindings.HOST_ID: HOST, 'admin_state_up': True}
with self.port(subnet=snet,
+ is_admin=True,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port1:
@@ -357,6 +362,7 @@ class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase):
with self.subnet(network=self._network,
enable_dhcp=False) as snet:
with self.port(
+ is_admin=True,
subnet=snet,
project_id=self.tenant,
device_owner=constants.DEVICE_OWNER_DVR_INTERFACE)\
@@ -365,8 +371,7 @@ class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase):
plugin.update_distributed_port_binding(self.adminContext,
port_id, {'port': {portbindings.HOST_ID: HOST_4,
'device_id': router['id']}})
- port = self._show('ports', port_id,
- neutron_context=self.adminContext)
+ port = self._show('ports', port_id, as_admin=True)
self.assertEqual(portbindings.VIF_TYPE_DISTRIBUTED,
port['port'][portbindings.VIF_TYPE])
self.callbacks.update_device_up(self.adminContext,
@@ -388,6 +393,7 @@ class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase):
with self.subnet(network=self._network, enable_dhcp=False) as snet:
host_arg = {portbindings.HOST_ID: HOST_4, 'admin_state_up': True}
with self.port(subnet=snet,
+ is_admin=True,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port1:
@@ -423,6 +429,7 @@ class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase):
with self.subnet(network=self._network, enable_dhcp=False) as snet:
host_arg = {portbindings.HOST_ID: HOST_4, 'admin_state_up': True}
with self.port(subnet=snet,
+ is_admin=True,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port1:
@@ -478,10 +485,12 @@ class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase):
with self.subnet(network=self._network) as subnet:
host_arg = {portbindings.HOST_ID: HOST}
with self.port(subnet=subnet,
+ is_admin=True,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port1:
with self.port(subnet=subnet,
+ is_admin=True,
arg_list=(portbindings.HOST_ID,),
**host_arg):
p1 = port1['port']
@@ -512,9 +521,11 @@ class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase):
with self.subnet(network=self._network) as subnet:
host_arg = {portbindings.HOST_ID: HOST + '_3'}
with self.port(subnet=subnet,
+ is_admin=True,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port1:
with self.port(subnet=subnet,
+ is_admin=True,
arg_list=(portbindings.HOST_ID,),
**host_arg):
p1 = port1['port']
@@ -535,10 +546,12 @@ class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase):
with self.subnet(network=self._network2) as subnet:
host_arg = {portbindings.HOST_ID: host}
with self.port(subnet=subnet,
+ is_admin=True,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port1:
with self.port(subnet=subnet,
+ is_admin=True,
arg_list=(portbindings.HOST_ID,),
**host_arg):
p1 = port1['port']
@@ -569,11 +582,13 @@ class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase):
with self.subnet(network=self._network) as subnet:
host_arg = {portbindings.HOST_ID: HOST}
with self.port(subnet=subnet,
+ is_admin=True,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port1:
host_arg = {portbindings.HOST_ID: HOST + '_2'}
with self.port(subnet=subnet,
+ is_admin=True,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
**host_arg):
@@ -610,12 +625,14 @@ class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase):
host_arg = {portbindings.HOST_ID: HOST,
'admin_state_up': True}
with self.port(subnet=subnet,
+ is_admin=True,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID, 'admin_state_up',),
**host_arg) as port1:
host_arg = {portbindings.HOST_ID: HOST + '_2',
'admin_state_up': True}
with self.port(subnet=subnet,
+ is_admin=True,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,
'admin_state_up',),
@@ -669,16 +686,19 @@ class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase):
with self.subnet(network=self._network) as subnet:
host_arg = {portbindings.HOST_ID: HOST + '_2'}
with self.port(subnet=subnet,
+ is_admin=True,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port1:
with self.subnet(cidr='10.1.0.0/24') as subnet2:
with self.port(subnet=subnet2,
+ is_admin=True,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
**host_arg):
host_arg = {portbindings.HOST_ID: HOST}
with self.port(subnet=subnet,
+ is_admin=True,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port3:
@@ -742,6 +762,7 @@ class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase):
ipv6_address_mode=constants.IPV6_SLAAC) as subnet2:
with self.port(
subnet,
+ is_admin=True,
fixed_ips=[{'subnet_id': subnet['subnet']['id']},
{'subnet_id': subnet2['subnet']['id']}],
device_owner=DEVICE_OWNER_COMPUTE,
@@ -783,10 +804,12 @@ class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase):
host_arg = {portbindings.HOST_ID: HOST}
# 2 ports on host 1
with self.port(subnet=subnet,
+ is_admin=True,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port1:
with self.port(subnet=subnet,
+ is_admin=True,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port2:
@@ -794,6 +817,7 @@ class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase):
# agent on host 1
host_arg = {portbindings.HOST_ID: HOST + '_2'}
with self.port(subnet=subnet,
+ is_admin=True,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port3:
@@ -833,10 +857,12 @@ class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase):
with self.subnet(network=self._network) as subnet:
host_arg = {portbindings.HOST_ID: HOST}
with self.port(subnet=subnet,
+ is_admin=True,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port1:
with self.port(subnet=subnet,
+ is_admin=True,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port2:
@@ -877,10 +903,12 @@ class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase):
with self.subnet(network=self._network) as subnet:
host_arg = {portbindings.HOST_ID: HOST}
with self.port(subnet=subnet,
+ is_admin=True,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
**host_arg):
with self.port(subnet=subnet,
+ is_admin=True,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port2:
@@ -919,6 +947,7 @@ class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase):
host_arg = {portbindings.HOST_ID: HOST_4, 'admin_state_up': True}
with self.port(subnet=snet,
+ is_admin=True,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port1:
@@ -954,6 +983,7 @@ class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase):
with self.subnet(network=self._network) as subnet:
host_arg = {portbindings.HOST_ID: HOST}
with self.port(subnet=subnet,
+ is_admin=True,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port:
@@ -966,6 +996,7 @@ class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase):
device=device)
with self.port(subnet=subnet,
+ is_admin=True,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port2:
@@ -995,10 +1026,12 @@ class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase):
with self.subnet(network=self._network) as subnet:
host_arg = {portbindings.HOST_ID: HOST}
with self.port(subnet=subnet,
+ is_admin=True,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
**host_arg):
with self.port(subnet=subnet,
+ is_admin=True,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port:
@@ -1029,6 +1062,7 @@ class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase):
with self.subnet(network=self._network) as subnet:
host_arg = {portbindings.HOST_ID: HOST + '_5'}
with self.port(subnet=subnet,
+ is_admin=True,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port1:
@@ -1043,7 +1077,8 @@ class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase):
new_mac = ':'.join(mac)
data = {'port': {'mac_address': new_mac,
portbindings.HOST_ID: HOST}}
- req = self.new_update_request('ports', data, p1['id'])
+ req = self.new_update_request('ports', data, p1['id'],
+ as_admin=True)
res = self.deserialize(self.fmt, req.get_response(self.api))
self.assertIn('port', res)
self.assertEqual(new_mac, res['port']['mac_address'])
@@ -1080,6 +1115,7 @@ class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase):
fixed_ips = [{'subnet_id': subnet['subnet']['id'],
'ip_address': '10.0.0.2'}]
with self.port(subnet=subnet, cidr='10.0.0.0/24',
+ is_admin=True,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
fixed_ips=fixed_ips,
@@ -1094,7 +1130,8 @@ class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase):
data = {'port': {'fixed_ips': [{'ip_address': '10.0.0.2'},
{'ip_address': '10.0.0.10'}]}}
- self.new_update_request('ports', data, p['id'])
+ self.new_update_request('ports', data, p['id'],
+ as_admin=True)
l2pop_mech = l2pop_mech_driver.L2populationMechanismDriver()
l2pop_mech.L2PopulationAgentNotify = mock.Mock()
l2notify = l2pop_mech.L2PopulationAgentNotify
@@ -1109,6 +1146,7 @@ class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase):
fixed_ips = [{'subnet_id': subnet['subnet']['id'],
'ip_address': '10.0.0.2'}]
with self.port(subnet=subnet, cidr='10.0.0.0/24',
+ is_admin=True,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
fixed_ips=fixed_ips,
@@ -1125,7 +1163,8 @@ class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase):
data = {'port': {'fixed_ips': [{'ip_address': '10.0.0.2'},
{'ip_address': '10.0.0.10'}]}}
- req = self.new_update_request('ports', data, p1['id'])
+ req = self.new_update_request('ports', data, p1['id'],
+ as_admin=True)
res = self.deserialize(self.fmt, req.get_response(self.api))
ips = res['port']['fixed_ips']
self.assertEqual(2, len(ips))
@@ -1143,7 +1182,8 @@ class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase):
data = {'port': {'fixed_ips': [{'ip_address': '10.0.0.2'},
{'ip_address': '10.0.0.16'}]}}
- req = self.new_update_request('ports', data, p1['id'])
+ req = self.new_update_request('ports', data, p1['id'],
+ as_admin=True)
res = self.deserialize(self.fmt, req.get_response(self.api))
ips = res['port']['fixed_ips']
self.assertEqual(2, len(ips))
@@ -1162,7 +1202,8 @@ class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase):
self.mock_fanout.reset_mock()
data = {'port': {'fixed_ips': [{'ip_address': '10.0.0.16'}]}}
- req = self.new_update_request('ports', data, p1['id'])
+ req = self.new_update_request('ports', data, p1['id'],
+ as_admin=True)
res = self.deserialize(self.fmt, req.get_response(self.api))
ips = res['port']['fixed_ips']
self.assertEqual(1, len(ips))
@@ -1182,6 +1223,7 @@ class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase):
with self.subnet(network=self._network) as subnet:
host_arg = {portbindings.HOST_ID: HOST}
with self.port(subnet=subnet, cidr='10.0.0.0/24',
+ is_admin=True,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port1:
@@ -1204,6 +1246,7 @@ class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase):
self._register_ml2_agents()
host_arg = {portbindings.HOST_ID: HOST}
with self.port(arg_list=(portbindings.HOST_ID,),
+ is_admin=True,
**host_arg) as port:
port_id = port['port']['id']
# ensure various formats all result in correct port_id
@@ -1217,7 +1260,8 @@ class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase):
def _update_and_check_portbinding(self, port_id, host_id):
data = {'port': {portbindings.HOST_ID: host_id}}
- req = self.new_update_request('ports', data, port_id)
+ req = self.new_update_request('ports', data, port_id,
+ as_admin=True)
res = self.deserialize(self.fmt,
req.get_response(self.api))
self.assertEqual(host_id, res['port'][portbindings.HOST_ID])
@@ -1227,6 +1271,7 @@ class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase):
with self.subnet(network=self._network) as subnet:
host_arg = {portbindings.HOST_ID: HOST}
with self.port(subnet=subnet, cidr='10.0.0.0/24',
+ is_admin=True,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port1:
@@ -1326,6 +1371,7 @@ class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase):
with self.subnet(network=self._network, enable_dhcp=False) as snet:
host_arg = {portbindings.HOST_ID: HOST, 'admin_state_up': True}
with self.port(subnet=snet,
+ is_admin=True,
device_owner=constants.DEVICE_OWNER_ROUTER_SNAT,
arg_list=(portbindings.HOST_ID,),
**host_arg) as p:
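Every port fixture in this file now binds with is_admin=True because binding:host_id is an admin-only attribute under the default port policy; a plain project-scoped request would have it rejected. The round trip that _update_and_check_portbinding performs, in isolation (host value illustrative):

    data = {'port': {portbindings.HOST_ID: 'new-host'}}
    req = self.new_update_request('ports', data, port_id, as_admin=True)
    res = self.deserialize(self.fmt, req.get_response(self.api))
    assert res['port'][portbindings.HOST_ID] == 'new-host'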
diff --git a/neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_commands.py b/neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_commands.py
index 39bf2a2334..864099f3b8 100644
--- a/neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_commands.py
+++ b/neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_commands.py
@@ -1214,6 +1214,7 @@ class TestDeleteLRouterExtGwCommand(TestBaseCommand):
fake_lrouter = fakes.FakeOvsdbRow.create_one_ovsdb_row(
attrs={'static_routes': [fake_route_1, fake_route_2],
'nat': []})
+ self.ovn_api.get_lrouter_gw_ports.return_value = []
with mock.patch.object(self.ovn_api, "is_col_present",
return_value=True):
with mock.patch.object(idlutils, 'row_by_value',
@@ -1234,6 +1235,7 @@ class TestDeleteLRouterExtGwCommand(TestBaseCommand):
fake_lrouter = fakes.FakeOvsdbRow.create_one_ovsdb_row(
attrs={'nat': [fake_nat_1, fake_nat_2],
'static_routes': []})
+ self.ovn_api.get_lrouter_gw_ports.return_value = []
with mock.patch.object(self.ovn_api, "is_col_present",
return_value=True):
with mock.patch.object(idlutils, 'row_by_value',
@@ -1246,10 +1248,11 @@ class TestDeleteLRouterExtGwCommand(TestBaseCommand):
def test_delete_lrouter_extgw_ports(self):
port_id = 'fake-port-id'
+ fake_lrp = fakes.FakeOvsdbRow.create_one_ovsdb_row(
+ attrs={'gateway_chassis': ['fake_gwc']})
fake_lrouter = fakes.FakeOvsdbRow.create_one_ovsdb_row(
- attrs={'external_ids':
- {ovn_const.OVN_GW_PORT_EXT_ID_KEY: port_id},
- 'static_routes': [], 'nat': []})
+ attrs={'ports': [fake_lrp], 'static_routes': [], 'nat': []})
+ self.ovn_api.get_lrouter_gw_ports.return_value = [fake_lrp]
with mock.patch.object(self.ovn_api, "is_col_present",
return_value=True):
with mock.patch.object(idlutils, 'row_by_value',
@@ -1258,22 +1261,21 @@ class TestDeleteLRouterExtGwCommand(TestBaseCommand):
self.ovn_api, fake_lrouter.name, False)
cmd.run_idl(self.transaction)
fake_lrouter.delvalue.assert_called_once_with(
- 'ports', port_id)
+ 'ports', fake_lrp)
def test_delete_lrouter_extgw_ports_not_found(self):
- port_id = 'fake-port-id'
fake_lrouter = fakes.FakeOvsdbRow.create_one_ovsdb_row(
- attrs={'external_ids':
- {ovn_const.OVN_GW_PORT_EXT_ID_KEY: port_id},
- 'static_routes': [], 'nat': []})
+ attrs={'static_routes': [], 'nat': []})
+ self.ovn_api.get_lrouter_gw_ports.return_value = []
with mock.patch.object(self.ovn_api, "is_col_present",
return_value=True):
with mock.patch.object(idlutils, 'row_by_value',
- side_effect=[fake_lrouter,
- idlutils.RowNotFound]):
+ side_effect=[fake_lrouter]):
cmd = commands.DeleteLRouterExtGwCommand(
self.ovn_api, fake_lrouter.name, False)
cmd.run_idl(self.transaction)
+ self.ovn_api.get_lrouter_gw_ports.assert_called_once_with(
+ fake_lrouter.name)
fake_lrouter.delvalue.assert_not_called()
def _test_delete_lrouter_no_lrouter_exist(self, if_exists=True):
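These updates track a behavioural change in DeleteLRouterExtGwCommand: the gateway port is no longer recovered from the router's external_ids entry; the command asks the NB API for the gateway ports and removes the returned LRP rows from the router directly. A hedged sketch of the new flow, with the static-route and NAT cleanup elided:

    from ovsdbapp.backend.ovs_idl import idlutils

    def run_idl(self, txn):
        lrouter = idlutils.row_by_value(self.api.idl, 'Logical_Router',
                                        'name', self.lrouter_name)
        # static_routes and nat cleanup elided for brevity
        for lrp in self.api.get_lrouter_gw_ports(lrouter.name):
            lrouter.delvalue('ports', lrp)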
diff --git a/neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_impl_idl_ovn.py b/neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_impl_idl_ovn.py
index 209a41bf98..2854672389 100644
--- a/neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_impl_idl_ovn.py
+++ b/neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_impl_idl_ovn.py
@@ -161,7 +161,10 @@ class TestNBImplIdlOvn(TestDBImplIdlOvn):
'lr-name-d'}},
{'name': utils.ovn_name('lr-id-e'),
'external_ids': {ovn_const.OVN_ROUTER_NAME_EXT_ID_KEY:
- 'lr-name-e'}}],
+ 'lr-name-e'}},
+ {'name': utils.ovn_name('lr-id-f'),
+ 'external_ids': {ovn_const.OVN_ROUTER_NAME_EXT_ID_KEY:
+ 'lr-name-f'}}],
'lrouter_ports': [
{'name': utils.ovn_lrouter_port_name('orp-id-a1'),
'external_ids': {ovn_const.OVN_ROUTER_NAME_EXT_ID_KEY:
@@ -169,10 +172,14 @@ class TestNBImplIdlOvn(TestDBImplIdlOvn):
'networks': ['10.0.1.0/24'],
'options': {ovn_const.OVN_GATEWAY_CHASSIS_KEY: 'host-1'}},
{'name': utils.ovn_lrouter_port_name('orp-id-a2'),
- 'external_ids': {}, 'networks': ['10.0.2.0/24'],
+ 'external_ids': {ovn_const.OVN_ROUTER_NAME_EXT_ID_KEY:
+ 'lr-id-a'},
+ 'networks': ['10.0.2.0/24'],
'options': {ovn_const.OVN_GATEWAY_CHASSIS_KEY: 'host-1'}},
{'name': utils.ovn_lrouter_port_name('orp-id-a3'),
- 'external_ids': {}, 'networks': ['10.0.3.0/24'],
+ 'external_ids': {ovn_const.OVN_ROUTER_NAME_EXT_ID_KEY:
+ 'lr-id-a'},
+ 'networks': ['10.0.3.0/24'],
'options': {ovn_const.OVN_GATEWAY_CHASSIS_KEY:
ovn_const.OVN_GATEWAY_INVALID_CHASSIS}},
{'name': 'xrp-id-b1',
@@ -182,7 +189,15 @@ class TestNBImplIdlOvn(TestDBImplIdlOvn):
'options': {ovn_const.OVN_GATEWAY_CHASSIS_KEY: 'host-2'}},
{'name': utils.ovn_lrouter_port_name('orp-id-b3'),
'external_ids': {}, 'networks': ['20.0.3.0/24'],
+ 'options': {}},
+ {'name': utils.ovn_lrouter_port_name('gwc'),
+ 'external_ids': {ovn_const.OVN_ROUTER_NAME_EXT_ID_KEY:
+ 'lr-id-f'},
+ 'networks': ['10.0.4.0/24'],
'options': {}}],
+ 'gateway_chassis': [
+ {'chassis_name': 'fake-chassis',
+ 'name': utils.ovn_lrouter_port_name('gwc') + '_fake-chassis'}],
'static_routes': [{'ip_prefix': '20.0.0.0/16',
'nexthop': '10.0.3.253'},
{'ip_prefix': '10.0.0.0/16',
@@ -317,7 +332,12 @@ class TestNBImplIdlOvn(TestDBImplIdlOvn):
utils.ovn_lrouter_port_name('orp-id-a3')],
utils.ovn_name('lr-id-b'): [
'xrp-id-b1',
- utils.ovn_lrouter_port_name('orp-id-b2')]},
+ utils.ovn_lrouter_port_name('orp-id-b2')],
+ utils.ovn_name('lr-id-f'): [
+ utils.ovn_lrouter_port_name('gwc')]},
+ 'lrptogwc': {
+ utils.ovn_lrouter_port_name('gwc'): [
+ utils.ovn_lrouter_port_name('gwc') + '_fake-chassis']},
'lrtosroute': {
utils.ovn_name('lr-id-a'): ['20.0.0.0/16'],
utils.ovn_name('lr-id-b'): ['10.0.0.0/16']
@@ -346,6 +366,7 @@ class TestNBImplIdlOvn(TestDBImplIdlOvn):
self.dhcp_table = fakes.FakeOvsdbTable.create_one_ovsdb_table()
self.address_set_table = fakes.FakeOvsdbTable.create_one_ovsdb_table()
self.lb_table = fakes.FakeOvsdbTable.create_one_ovsdb_table()
+ self.gwc_table = fakes.FakeOvsdbTable.create_one_ovsdb_table()
self._tables = {}
self._tables['Logical_Switch'] = self.lswitch_table
@@ -358,6 +379,7 @@ class TestNBImplIdlOvn(TestDBImplIdlOvn):
self._tables['Address_Set'] = self.address_set_table
self._tables['Load_Balancer'] = self.lb_table
self._tables['NAT'] = self.nat_table
+ self._tables['Gateway_Chassis'] = self.gwc_table
with mock.patch.object(impl_idl_ovn.OvsdbNbOvnIdl, 'from_worker',
return_value=mock.Mock()):
@@ -379,16 +401,23 @@ class TestNBImplIdlOvn(TestDBImplIdlOvn):
TestNBImplIdlOvn.fake_associations['lstolsp'],
self.lswitch_table, self.lsp_table,
'name', 'name', 'ports')
- # Load Routers and Router Ports
+ # Load Routers, Router Ports and Gateway Chassis
fake_lrouters = TestNBImplIdlOvn.fake_set['lrouters']
self._load_ovsdb_fake_rows(self.lrouter_table, fake_lrouters)
fake_lrps = TestNBImplIdlOvn.fake_set['lrouter_ports']
self._load_ovsdb_fake_rows(self.lrp_table, fake_lrps)
+ fake_gwc = TestNBImplIdlOvn.fake_set['gateway_chassis']
+ self._load_ovsdb_fake_rows(self.gwc_table, fake_gwc)
# Associate routers and router ports
self._construct_ovsdb_references(
TestNBImplIdlOvn.fake_associations['lrtolrp'],
self.lrouter_table, self.lrp_table,
'name', 'name', 'ports')
+ # Associate router ports and gateway chassis
+ self._construct_ovsdb_references(
+ TestNBImplIdlOvn.fake_associations['lrptogwc'],
+ self.lrp_table, self.gwc_table,
+ 'name', 'name', 'gateway_chassis')
# Load static routes
fake_sroutes = TestNBImplIdlOvn.fake_set['static_routes']
self._load_ovsdb_fake_rows(self.sroute_table, fake_sroutes)
@@ -484,6 +513,9 @@ class TestNBImplIdlOvn(TestDBImplIdlOvn):
{'name': 'lr-id-d', 'ports': {}, 'static_routes': [],
'snats': [], 'dnat_and_snats': []},
{'name': 'lr-id-e', 'ports': {}, 'static_routes': [],
+ 'snats': [], 'dnat_and_snats': []},
+ {'name': 'lr-id-f', 'static_routes': [],
+ 'ports': {'gwc': ['10.0.4.0/24']},
'snats': [], 'dnat_and_snats': []}]
self.assertCountEqual(mapping, expected)
@@ -556,6 +588,11 @@ class TestNBImplIdlOvn(TestDBImplIdlOvn):
def test_get_all_chassis_gateway_bindings(self):
self._load_nb_db()
+
+        # NOTE(fnordahl): When the `Gateway_Chassis` table is present
+        # without proper associations it fools the test; remove it for now.
+        del self._tables['Gateway_Chassis']
+
bindings = self.nb_ovn_idl.get_all_chassis_gateway_bindings()
expected = {'host-1': [utils.ovn_lrouter_port_name('orp-id-a1'),
utils.ovn_lrouter_port_name('orp-id-a2')],
@@ -574,6 +611,11 @@ class TestNBImplIdlOvn(TestDBImplIdlOvn):
def test_get_gateway_chassis_binding(self):
self._load_nb_db()
+
+        # NOTE(fnordahl): When the `Gateway_Chassis` table is present
+        # without proper associations it fools the test; remove it for now.
+        del self._tables['Gateway_Chassis']
+
chassis = self.nb_ovn_idl.get_gateway_chassis_binding(
utils.ovn_lrouter_port_name('orp-id-a1'))
self.assertEqual(chassis, ['host-1'])
@@ -591,6 +633,11 @@ class TestNBImplIdlOvn(TestDBImplIdlOvn):
def test_get_unhosted_gateways(self):
self._load_nb_db()
+
+        # NOTE(fnordahl): When the `Gateway_Chassis` table is present
+        # without proper associations it fools the test; remove it for now.
+        del self._tables['Gateway_Chassis']
+
# Port physnet-dict
port_physnet_dict = {
'orp-id-a1': 'physnet1', # scheduled
@@ -626,6 +673,11 @@ class TestNBImplIdlOvn(TestDBImplIdlOvn):
def test_get_unhosted_gateways_deleted_physnet(self):
self._load_nb_db()
+
+        # NOTE(fnordahl): When the `Gateway_Chassis` table is present
+        # without proper associations it fools the test; remove it for now.
+        del self._tables['Gateway_Chassis']
+
# The LRP is on host-2 now
router_row = self._find_ovsdb_fake_row(self.lrp_table,
'name', 'lrp-orp-id-a1')
@@ -813,6 +865,29 @@ class TestNBImplIdlOvn(TestDBImplIdlOvn):
lb = self.nb_ovn_idl.get_floatingip_in_nat_or_lb(fip_id)
self.assertEqual(lb['_uuid'], lb_row.uuid)
+ def test_get_lrouter_gw_ports_legacy_option(self):
+ self._load_nb_db()
+
+ gw1_row = self._find_ovsdb_fake_row(
+ self.lrp_table, 'name', utils.ovn_lrouter_port_name('orp-id-a1'))
+ gw2_row = self._find_ovsdb_fake_row(
+ self.lrp_table, 'name', utils.ovn_lrouter_port_name('orp-id-a2'))
+ gw3_row = self._find_ovsdb_fake_row(
+ self.lrp_table, 'name', utils.ovn_lrouter_port_name('orp-id-a3'))
+
+ gw_ports = self.nb_ovn_idl.get_lrouter_gw_ports(
+ utils.ovn_name('lr-id-a'))
+ self.assertEqual([gw1_row, gw2_row, gw3_row], gw_ports)
+
+ def test_get_lrouter_gw_ports_gwc(self):
+ self._load_nb_db()
+ gw1_row = self._find_ovsdb_fake_row(
+ self.lrp_table, 'name', utils.ovn_lrouter_port_name('gwc'))
+
+ gw_ports = self.nb_ovn_idl.get_lrouter_gw_ports(
+ utils.ovn_name('lr-id-f'))
+ self.assertEqual([gw1_row], gw_ports)
+
class TestSBImplIdlOvnBase(TestDBImplIdlOvn):
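The two new tests document both lookup paths of get_lrouter_gw_ports(): the legacy scheme, where the gateway chassis is recorded as an option on the LRP, and the current scheme, where the LRP references Gateway_Chassis rows. An illustrative sketch of that dual check; the real implementation is in impl_idl_ovn.py, so this body is an assumption:

    from neutron.common.ovn import constants as ovn_const

    def get_lrouter_gw_ports(self, lrouter_name):
        lrouter = self.get_lrouter(lrouter_name)
        return [lrp for lrp in lrouter.ports
                if lrp.gateway_chassis or
                lrp.options.get(ovn_const.OVN_GATEWAY_CHASSIS_KEY)]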
diff --git a/neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_maintenance.py b/neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_maintenance.py
index b22e0a5470..e05f324deb 100644
--- a/neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_maintenance.py
+++ b/neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_maintenance.py
@@ -586,6 +586,102 @@ class TestDBInconsistenciesPeriodics(testlib_api.SqlTestCaseLight,
nb_idl.lsp_set_options.assert_has_calls(expected_calls)
+ def test_check_localnet_port_has_learn_fdb(self):
+ cfg.CONF.set_override('localnet_learn_fdb', 'True',
+ group='ovn')
+ nb_idl = self.fake_ovn_client._nb_idl
+
+ # Already has the learn fdb option enabled
+ lsp0 = fakes.FakeOvsdbRow.create_one_ovsdb_row(
+ attrs={
+ "name": "lsp0",
+ "options": {
+ constants.LSP_OPTIONS_LOCALNET_LEARN_FDB: "true",
+ },
+ }
+ )
+
+ # learn fdb option missing, needs update
+ lsp1 = fakes.FakeOvsdbRow.create_one_ovsdb_row(
+ attrs={
+ "name": "lsp1",
+ "options": {},
+ }
+ )
+
+ # learn fdb option set to false, needs update
+ lsp2 = fakes.FakeOvsdbRow.create_one_ovsdb_row(
+ attrs={
+ "name": "lsp2",
+ "options": {
+ constants.LSP_OPTIONS_LOCALNET_LEARN_FDB: "false",
+ },
+ }
+ )
+
+ nb_idl.db_find_rows.return_value.execute.return_value = [
+ lsp0,
+ lsp1,
+ lsp2,
+ ]
+
+ self.assertRaises(
+ periodics.NeverAgain,
+ self.periodic.check_localnet_port_has_learn_fdb)
+
+ options = {constants.LSP_OPTIONS_LOCALNET_LEARN_FDB: 'true'}
+ expected_calls = [mock.call('Logical_Switch_Port', 'lsp1',
+ ('options', options)),
+ mock.call('Logical_Switch_Port', 'lsp2',
+ ('options', options))]
+ nb_idl.db_set.assert_has_calls(expected_calls)
+
+ def test_check_localnet_port_has_learn_fdb_disabled(self):
+ nb_idl = self.fake_ovn_client._nb_idl
+
+ # learn fdb option enabled, needs update
+ lsp0 = fakes.FakeOvsdbRow.create_one_ovsdb_row(
+ attrs={
+ "name": "lsp0",
+ "options": {
+ constants.LSP_OPTIONS_LOCALNET_LEARN_FDB: "true",
+ },
+ }
+ )
+
+ # learn fdb option missing, no update needed
+ lsp1 = fakes.FakeOvsdbRow.create_one_ovsdb_row(
+ attrs={
+ "name": "lsp1",
+ "options": {},
+ }
+ )
+
+ # learn fdb option set to false, no update needed
+ lsp2 = fakes.FakeOvsdbRow.create_one_ovsdb_row(
+ attrs={
+ "name": "lsp2",
+ "options": {
+ constants.LSP_OPTIONS_LOCALNET_LEARN_FDB: "false",
+ },
+ }
+ )
+
+ nb_idl.db_find_rows.return_value.execute.return_value = [
+ lsp0,
+ lsp1,
+ lsp2,
+ ]
+
+ self.assertRaises(
+ periodics.NeverAgain,
+ self.periodic.check_localnet_port_has_learn_fdb)
+
+ options = {constants.LSP_OPTIONS_LOCALNET_LEARN_FDB: 'false'}
+ expected_calls = [mock.call('Logical_Switch_Port', 'lsp0',
+ ('options', options))]
+ nb_idl.db_set.assert_has_calls(expected_calls)
+
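Both tests exercise the standard maintenance-task shape: scan the relevant rows once, converge them on the configured value, then raise NeverAgain so the periodic worker stops rescheduling. A condensed sketch; the db_find_rows filter and the config accessor name are assumptions:

    from futurist import periodics

    from neutron.common.ovn import constants
    from neutron.conf.plugins.ml2.drivers.ovn import ovn_conf

    def check_localnet_port_has_learn_fdb(self):
        # get_localnet_learn_fdb() is an assumed accessor name.
        value = 'true' if ovn_conf.get_localnet_learn_fdb() else 'false'
        ports = self._nb_idl.db_find_rows(
            'Logical_Switch_Port', ('type', '=', 'localnet')).execute()
        for port in ports:
            current = port.options.get(
                constants.LSP_OPTIONS_LOCALNET_LEARN_FDB, 'false')
            if current != value:
                options = {constants.LSP_OPTIONS_LOCALNET_LEARN_FDB: value}
                self._nb_idl.db_set('Logical_Switch_Port', port.name,
                                    ('options', options)).execute()
        raise periodics.NeverAgain()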
def test_check_router_mac_binding_options(self):
nb_idl = self.fake_ovn_client._nb_idl
lr0 = fakes.FakeOvsdbRow.create_one_ovsdb_row(
@@ -716,18 +812,18 @@ class TestDBInconsistenciesPeriodics(testlib_api.SqlTestCaseLight,
self.fake_ovn_client._nb_idl.db_set.assert_has_calls(
expected_calls)
- def test_update_logical_router_with_gateway_network_id(self):
+ def test_remove_gw_ext_ids_from_logical_router(self):
nb_idl = self.fake_ovn_client._nb_idl
- # lr0: GW port ID, not GW network ID --> we need to add network ID.
+ # lr0: GW port ID, not GW network ID --> we need to remove port ID.
lr0 = fakes.FakeOvsdbRow.create_one_ovsdb_row(attrs={
'name': 'lr0',
'external_ids': {constants.OVN_GW_PORT_EXT_ID_KEY: 'port0'}})
- # lr1: GW port ID and not GW network ID --> register already updated.
+ # lr1: GW port ID and GW network ID --> we need to remove both.
lr1 = fakes.FakeOvsdbRow.create_one_ovsdb_row(attrs={
'name': 'lr1',
'external_ids': {constants.OVN_GW_PORT_EXT_ID_KEY: 'port1',
constants.OVN_GW_NETWORK_EXT_ID_KEY: 'net1'}})
- # lr2: no GW port ID (nor GW network ID) --> no QoS.
+ # lr2: no GW port ID (nor GW network ID) --> no action needed.
lr2 = fakes.FakeOvsdbRow.create_one_ovsdb_row(attrs={
'name': 'lr2', 'external_ids': {}})
nb_idl.lr_list.return_value.execute.return_value = (lr0, lr1, lr2)
@@ -736,10 +832,11 @@ class TestDBInconsistenciesPeriodics(testlib_api.SqlTestCaseLight,
self.assertRaises(
periodics.NeverAgain,
- self.periodic.update_logical_router_with_gateway_network_id)
- ext_ids = {constants.OVN_GW_NETWORK_EXT_ID_KEY: 'net0'}
+ self.periodic.remove_gw_ext_ids_from_logical_router)
expected_calls = [mock.call('Logical_Router', lr0.uuid,
- ('external_ids', ext_ids))]
+ ('external_ids', {})),
+ mock.call('Logical_Router', lr1.uuid,
+ ('external_ids', {}))]
nb_idl.db_set.assert_has_calls(expected_calls)
def _test_check_baremetal_ports_dhcp_options(self, dhcp_disabled=False):
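The renamed task inverts its predecessor's purpose: instead of backfilling the gateway network ID into Logical_Router.external_ids, it strips both gateway-related keys, since that metadata is no longer cached on the router row. A hedged sketch of the convergence the assertions describe:

    from futurist import periodics

    from neutron.common.ovn import constants

    def remove_gw_ext_ids_from_logical_router(self):
        gw_keys = (constants.OVN_GW_PORT_EXT_ID_KEY,
                   constants.OVN_GW_NETWORK_EXT_ID_KEY)
        for lr in self._nb_idl.lr_list().execute(check_error=True):
            if not any(key in lr.external_ids for key in gw_keys):
                continue
            external_ids = {k: v for k, v in lr.external_ids.items()
                            if k not in gw_keys}
            self._nb_idl.db_set('Logical_Router', lr.uuid,
                                ('external_ids', external_ids)).execute()
        raise periodics.NeverAgain()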
diff --git a/neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/test_mech_driver.py b/neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/test_mech_driver.py
index f768bf7635..fb8a747919 100644
--- a/neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/test_mech_driver.py
+++ b/neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/test_mech_driver.py
@@ -34,6 +34,7 @@ from neutron_lib import context
from neutron_lib import exceptions as n_exc
from neutron_lib.placement import utils as place_utils
from neutron_lib.plugins import directory
+from neutron_lib.plugins import utils as p_utils
from neutron_lib.tests import tools
from neutron_lib.utils import net as n_net
from oslo_concurrency import processutils
@@ -448,11 +449,10 @@ class TestOVNMechanismDriver(TestOVNMechanismDriverBase):
{'vtep-physical-switch': 'psw1', 'vtep-logical-switch': 'lsw1',
'tag': 1024, 'parent_name': 'fakename'},
]
- with self.network(set_context=True, tenant_id='test') as net1:
+ with self.network() as net1:
with self.subnet(network=net1) as subnet1:
# succeed without binding:profile
- with self.port(subnet=subnet1,
- set_context=True, tenant_id='test'):
+ with self.port(subnet=subnet1):
pass
# fail with invalid binding profiles
for invalid_profile in invalid_binding_profiles:
@@ -464,7 +464,6 @@ class TestOVNMechanismDriver(TestOVNMechanismDriverBase):
expected_res_status=403,
arg_list=(
ovn_const.OVN_PORT_BINDING_PROFILE,),
- set_context=True, tenant_id='test',
**kwargs):
pass
except exc.HTTPClientError:
@@ -533,10 +532,9 @@ class TestOVNMechanismDriver(TestOVNMechanismDriverBase):
'opt_value': 'apple'},
{'ip_version': 6, 'opt_name': 'grape',
'opt_value': 'grape'}]}}
- with self.network(set_context=True, tenant_id='test') as net:
+ with self.network() as net:
with self.subnet(network=net) as subnet:
- with self.port(subnet=subnet,
- set_context=True, tenant_id='test') as port:
+ with self.port(subnet=subnet) as port:
port_id = port['port']['id']
self._update('ports', port_id, data)
@@ -547,11 +545,12 @@ class TestOVNMechanismDriver(TestOVNMechanismDriverBase):
mock_log.assert_has_calls([expected_call])
def test_create_and_update_ignored_fip_port(self):
- with self.network(set_context=True, tenant_id='test') as net1:
+ with self.network() as net1:
with self.subnet(network=net1) as subnet1:
- with self.port(subnet=subnet1,
- device_owner=const.DEVICE_OWNER_FLOATINGIP,
- set_context=True, tenant_id='test') as port:
+ with self.port(
+ subnet=subnet1,
+ is_admin=True,
+ device_owner=const.DEVICE_OWNER_FLOATINGIP) as port:
self.nb_ovn.create_lswitch_port.assert_not_called()
data = {'port': {'name': 'new'}}
req = self.new_update_request('ports', data,
@@ -561,15 +560,17 @@ class TestOVNMechanismDriver(TestOVNMechanismDriverBase):
self.nb_ovn.set_lswitch_port.assert_not_called()
def test_update_ignored_port_from_fip_device_owner(self):
- with self.network(set_context=True, tenant_id='test') as net1:
+ with self.network() as net1:
with self.subnet(network=net1) as subnet1:
- with self.port(subnet=subnet1,
- device_owner=const.DEVICE_OWNER_FLOATINGIP,
- set_context=True, tenant_id='test') as port:
+ with self.port(
+ subnet=subnet1,
+ is_admin=True,
+ device_owner=const.DEVICE_OWNER_FLOATINGIP) as port:
self.nb_ovn.create_lswitch_port.assert_not_called()
data = {'port': {'device_owner': 'test'}}
req = self.new_update_request('ports', data,
- port['port']['id'])
+ port['port']['id'],
+ as_admin=True)
res = req.get_response(self.api)
self.assertEqual(exc.HTTPBadRequest.code, res.status_int)
msg = jsonutils.loads(res.body)['NeutronError']['message']
@@ -580,17 +581,18 @@ class TestOVNMechanismDriver(TestOVNMechanismDriverBase):
self.nb_ovn.set_lswitch_port.assert_not_called()
def test_update_ignored_port_to_fip_device_owner(self):
- with self.network(set_context=True, tenant_id='test') as net1:
+ with self.network() as net1:
with self.subnet(network=net1) as subnet1:
with self.port(subnet=subnet1,
- device_owner='test',
- set_context=True, tenant_id='test') as port:
+ is_admin=True,
+ device_owner='test') as port:
self.assertEqual(
1, self.nb_ovn.create_lswitch_port.call_count)
data = {'port': {'device_owner':
const.DEVICE_OWNER_FLOATINGIP}}
req = self.new_update_request('ports', data,
- port['port']['id'])
+ port['port']['id'],
+ as_admin=True)
res = req.get_response(self.api)
self.assertEqual(exc.HTTPBadRequest.code, res.status_int)
msg = jsonutils.loads(res.body)['NeutronError']['message']
@@ -604,11 +606,11 @@ class TestOVNMechanismDriver(TestOVNMechanismDriverBase):
kwargs = {'mac_address': '00:00:00:00:00:01',
'fixed_ips': [{'ip_address': '10.0.0.2'},
{'ip_address': '10.0.0.4'}]}
- with self.network(set_context=True, tenant_id='test') as net1:
+ with self.network() as net1:
with self.subnet(network=net1) as subnet1:
with self.port(subnet=subnet1,
+ is_admin=True,
arg_list=('mac_address', 'fixed_ips'),
- set_context=True, tenant_id='test',
**kwargs) as port:
self.assertTrue(self.nb_ovn.create_lswitch_port.called)
called_args_dict = (
@@ -620,7 +622,8 @@ class TestOVNMechanismDriver(TestOVNMechanismDriverBase):
data = {'port': {'mac_address': '00:00:00:00:00:02'}}
req = self.new_update_request(
'ports',
- data, port['port']['id'])
+ data, port['port']['id'],
+ as_admin=True)
req.get_response(self.api)
self.assertTrue(self.nb_ovn.set_lswitch_port.called)
called_args_dict = (
@@ -634,11 +637,10 @@ class TestOVNMechanismDriver(TestOVNMechanismDriverBase):
# be treated as VIP.
kwargs = {'port_security_enabled': False,
'device_owner': 'compute:nova'}
- with self.network(set_context=True, tenant_id='test') as net1:
+ with self.network() as net1:
with self.subnet(network=net1) as subnet1:
with self.port(subnet=subnet1,
arg_list=('port_security_enabled',),
- set_context=True, tenant_id='test',
**kwargs) as port:
self.assertTrue(self.nb_ovn.create_lswitch_port.called)
called_args_dict = (
@@ -652,7 +654,8 @@ class TestOVNMechanismDriver(TestOVNMechanismDriverBase):
data = {'port': {'mac_address': '00:00:00:00:00:01'}}
req = self.new_update_request(
'ports',
- data, port['port']['id'])
+ data, port['port']['id'],
+ as_admin=True)
req.get_response(self.api)
self.assertTrue(self.nb_ovn.set_lswitch_port.called)
called_args_dict = (
@@ -686,11 +689,11 @@ class TestOVNMechanismDriver(TestOVNMechanismDriverBase):
{"ip_address": "2.2.2.2",
"mac_address": "22:22:22:22:22:22"}],
'device_owner': 'compute:nova'}
- with self.network(set_context=True, tenant_id='test') as net1:
+ with self.network() as net1:
with self.subnet(network=net1) as subnet1:
with self.port(subnet=subnet1,
+ is_admin=True,
arg_list=('allowed_address_pairs',),
- set_context=True, tenant_id='test',
**kwargs) as port:
port_ip = port['port'].get('fixed_ips')[0]['ip_address']
self.assertTrue(self.nb_ovn.create_lswitch_port.called)
@@ -717,7 +720,8 @@ class TestOVNMechanismDriver(TestOVNMechanismDriverBase):
data = {'port': {'mac_address': '00:00:00:00:00:01'}}
req = self.new_update_request(
'ports',
- data, port['port']['id'])
+ data, port['port']['id'],
+ as_admin=True)
req.get_response(self.api)
self.assertTrue(self.nb_ovn.set_lswitch_port.called)
called_args_dict = (
@@ -736,10 +740,10 @@ class TestOVNMechanismDriver(TestOVNMechanismDriverBase):
called_args_dict.get('addresses'))
def test_create_port_ovn_octavia_vip(self):
- with (self.network(set_context=True, tenant_id='test')) as net1, (
- self.subnet(network=net1)) as subnet1, (
+ with self.network() as net1,\
+ self.subnet(network=net1) as subnet1,\
self.port(name=ovn_const.LB_VIP_PORT_PREFIX + 'foo',
- subnet=subnet1, set_context=True, tenant_id='test')):
+ subnet=subnet1):
self.assertTrue(self.nb_ovn.create_lswitch_port.called)
called_args_dict = (
@@ -864,6 +868,7 @@ class TestOVNMechanismDriver(TestOVNMechanismDriverBase):
pnet.PHYSICAL_NETWORK: 'physnet1',
pnet.SEGMENTATION_ID: '2'}
net = self._make_network(self.fmt, 'net1', True,
+ as_admin=True,
arg_list=(pnet.NETWORK_TYPE,
pnet.PHYSICAL_NETWORK,
pnet.SEGMENTATION_ID,),
@@ -877,17 +882,17 @@ class TestOVNMechanismDriver(TestOVNMechanismDriverBase):
lswitch_name=ovn_utils.ovn_name(net['id']),
options={'network_name': 'physnet1',
ovn_const.LSP_OPTIONS_MCAST_FLOOD_REPORTS: 'true',
- ovn_const.LSP_OPTIONS_MCAST_FLOOD: 'false'},
+ ovn_const.LSP_OPTIONS_MCAST_FLOOD: 'false',
+ ovn_const.LSP_OPTIONS_LOCALNET_LEARN_FDB: 'false'},
tag=2,
type='localnet')
def test_create_port_without_security_groups(self):
kwargs = {'security_groups': []}
- with self.network(set_context=True, tenant_id='test') as net1:
+ with self.network() as net1:
with self.subnet(network=net1) as subnet1:
with self.port(subnet=subnet1,
arg_list=('security_groups',),
- set_context=True, tenant_id='test',
**kwargs):
self.assertEqual(
1, self.nb_ovn.create_lswitch_port.call_count)
@@ -895,22 +900,20 @@ class TestOVNMechanismDriver(TestOVNMechanismDriverBase):
def test_create_port_without_security_groups_no_ps(self):
kwargs = {'security_groups': [], 'port_security_enabled': False}
- with self.network(set_context=True, tenant_id='test') as net1:
+ with self.network() as net1:
with self.subnet(network=net1) as subnet1:
with self.port(subnet=subnet1,
arg_list=('security_groups',
'port_security_enabled'),
- set_context=True, tenant_id='test',
**kwargs):
self.assertEqual(
1, self.nb_ovn.create_lswitch_port.call_count)
self.nb_ovn.add_acl.assert_not_called()
def test_update_port_changed_security_groups(self):
- with self.network(set_context=True, tenant_id='test') as net1:
+ with self.network() as net1:
with self.subnet(network=net1) as subnet1:
- with self.port(subnet=subnet1,
- set_context=True, tenant_id='test') as port1:
+ with self.port(subnet=subnet1) as port1:
sg_id = port1['port']['security_groups'][0]
fake_lsp = (
fakes.FakeOVNPort.from_neutron_port(
@@ -937,10 +940,9 @@ class TestOVNMechanismDriver(TestOVNMechanismDriverBase):
self.assertTrue(self.nb_ovn.pg_add_ports.called)
def test_update_port_unchanged_security_groups(self):
- with self.network(set_context=True, tenant_id='test') as net1:
+ with self.network() as net1:
with self.subnet(network=net1) as subnet1:
- with self.port(subnet=subnet1,
- set_context=True, tenant_id='test') as port1:
+ with self.port(subnet=subnet1) as port1:
fake_lsp = (
fakes.FakeOVNPort.from_neutron_port(
port1['port']))
@@ -966,11 +968,9 @@ class TestOVNMechanismDriver(TestOVNMechanismDriverBase):
def _test_update_port_vip(self, is_vip=True):
kwargs = {}
- with (
- self.network(set_context=True, tenant_id='test')) as net1, (
- self.subnet(network=net1)) as subnet1, (
- self.port(subnet=subnet1, set_context=True,
- tenant_id='test', **kwargs)) as port1:
+ with self.network() as net1, \
+ self.subnet(network=net1) as subnet1, \
+ self.port(subnet=subnet1, **kwargs) as port1:
fake_lsp = (
fakes.FakeOVNPort.from_neutron_port(
port1['port']))
@@ -1000,11 +1000,10 @@ class TestOVNMechanismDriver(TestOVNMechanismDriverBase):
def test_delete_port_without_security_groups(self):
kwargs = {'security_groups': []}
- with self.network(set_context=True, tenant_id='test') as net1:
+ with self.network() as net1:
with self.subnet(network=net1) as subnet1:
with self.port(subnet=subnet1,
arg_list=('security_groups',),
- set_context=True, tenant_id='test',
**kwargs) as port1:
fake_lsp = (
fakes.FakeOVNPort.from_neutron_port(
@@ -1021,10 +1020,9 @@ class TestOVNMechanismDriver(TestOVNMechanismDriverBase):
def test_delete_port_exception_delete_revision(self, mock_del_port,
mock_del_rev):
mock_del_port.side_effect = Exception('BoOoOoOoOmmmmm!!!')
- with self.network(set_context=True, tenant_id='test') as net:
+ with self.network() as net:
with self.subnet(network=net) as subnet:
- with self.port(subnet=subnet,
- set_context=True, tenant_id='test') as port:
+ with self.port(subnet=subnet) as port:
self._delete('ports', port['port']['id'])
# Assert that delete_revision wasn't invoked
mock_del_rev.assert_not_called()
@@ -1034,10 +1032,9 @@ class TestOVNMechanismDriver(TestOVNMechanismDriverBase):
def test_delete_port_not_exist_in_ovn(self, mock_del_port,
mock_del_rev):
mock_del_port.side_effect = idlutils.RowNotFound
- with self.network(set_context=True, tenant_id='test') as net:
+ with self.network() as net:
with self.subnet(network=net) as subnet:
- with self.port(subnet=subnet,
- set_context=True, tenant_id='test') as port:
+ with self.port(subnet=subnet) as port:
self._delete('ports', port['port']['id'])
# Assert that delete_revision wasn't invoked
mock_del_rev.assert_not_called()
@@ -1049,14 +1046,13 @@ class TestOVNMechanismDriver(TestOVNMechanismDriverBase):
created_at = timeutils.utcnow() - datetime.timedelta(
seconds=ovn_const.DB_CONSISTENCY_CHECK_INTERVAL * 2)
mock_del_port.side_effect = idlutils.RowNotFound
- with self.network(set_context=True, tenant_id='test') as net:
+ with self.network() as net:
with self.subnet(network=net) as subnet:
- with self.port(subnet=subnet,
- set_context=True, tenant_id='test') as port, \
- mock.patch.object(ovn_revision_numbers_db,
- 'get_revision_row',
- return_value=OvnRevNumberRow(
- created_at=created_at)):
+ with self.port(subnet=subnet) as port, \
+ mock.patch.object(ovn_revision_numbers_db,
+ 'get_revision_row',
+ return_value=OvnRevNumberRow(
+ created_at=created_at)):
self._delete('ports', port['port']['id'])
# Assert that delete_revision was invoked
mock_del_rev.assert_called_once_with(mock.ANY,
@@ -1066,10 +1062,9 @@ class TestOVNMechanismDriver(TestOVNMechanismDriverBase):
def _test_set_port_status_up(self, is_compute_port=False):
port_device_owner = 'compute:nova' if is_compute_port else ''
self.mech_driver._plugin.nova_notifier = mock.Mock()
- with self.network(set_context=True, tenant_id='test') as net1, \
+ with self.network() as net1, \
self.subnet(network=net1) as subnet1, \
- self.port(subnet=subnet1, set_context=True,
- tenant_id='test',
+ self.port(subnet=subnet1, is_admin=True,
device_owner=port_device_owner) as port1, \
mock.patch.object(provisioning_blocks,
'provisioning_complete') as pc, \
@@ -1105,10 +1100,9 @@ class TestOVNMechanismDriver(TestOVNMechanismDriverBase):
def _test_set_port_status_down(self, is_compute_port=False):
port_device_owner = 'compute:nova' if is_compute_port else ''
self.mech_driver._plugin.nova_notifier = mock.Mock()
- with self.network(set_context=True, tenant_id='test') as net1, \
+ with self.network() as net1, \
self.subnet(network=net1) as subnet1, \
- self.port(subnet=subnet1, set_context=True,
- tenant_id='test',
+ self.port(subnet=subnet1, is_admin=True,
device_owner=port_device_owner) as port1, \
mock.patch.object(provisioning_blocks,
'add_provisioning_component') as apc, \
@@ -1157,10 +1151,9 @@ class TestOVNMechanismDriver(TestOVNMechanismDriverBase):
def test_set_port_status_concurrent_delete(self):
exc = os_db_exc.DBReferenceError('', '', '', '')
- with self.network(set_context=True, tenant_id='test') as net1, \
+ with self.network() as net1, \
self.subnet(network=net1) as subnet1, \
- self.port(subnet=subnet1, set_context=True,
- tenant_id='test') as port1, \
+ self.port(subnet=subnet1) as port1, \
mock.patch.object(provisioning_blocks,
'add_provisioning_component',
side_effect=exc) as apc, \
@@ -1250,6 +1243,28 @@ class TestOVNMechanismDriver(TestOVNMechanismDriverBase):
portbindings.VIF_TYPE_OVS,
self.mech_driver.vif_details[portbindings.VIF_TYPE_OVS])
+ def _test_bind_port_virtio_forwarder(self, fake_segments):
+ fake_port = fakes.FakePort.create_one_port(
+ attrs={'binding:vnic_type': 'virtio-forwarder'}).info()
+ fake_host = 'host'
+ fake_port_context = fakes.FakePortContext(
+ fake_port, fake_host, fake_segments)
+ self.mech_driver.bind_port(fake_port_context)
+
+        vif_details = self.mech_driver.vif_details[
+            portbindings.VIF_TYPE_AGILIO_OVS]
+ vif_details.update({"vhostuser_socket": ovn_utils.ovn_vhu_sockpath(
+ ovn_conf.get_ovn_vhost_sock_dir(), fake_port['id'])})
+ vif_details.update({"vhostuser_mode": "client"})
+
+ neutron_agent.AgentCache().get_agents.assert_called_once_with(
+ {'host': fake_host,
+ 'agent_type': ovn_const.OVN_CONTROLLER_TYPES})
+ fake_port_context.set_binding.assert_called_once_with(
+ fake_segments[0]['id'],
+ portbindings.VIF_TYPE_AGILIO_OVS,
+ vif_details)
+
def _test_bind_port_remote_managed(self, fake_segments):
fake_serial = 'fake-serial'
fake_port = fakes.FakePort.create_one_port(
@@ -1329,6 +1344,15 @@ class TestOVNMechanismDriver(TestOVNMechanismDriverBase):
[fakes.FakeSegment.create_one_segment(attrs=segment_attrs).info()]
self._test_bind_port_remote_managed(fake_segments)
+ def test_bind_virtio_forwarder_port_geneve(self):
+ """Test binding a VIRTIO_FORWARDER port to a geneve segment."""
+ segment_attrs = {'network_type': 'geneve',
+ 'physical_network': None,
+ 'segmentation_id': 1023}
+ fake_segments = \
+ [fakes.FakeSegment.create_one_segment(attrs=segment_attrs).info()]
+ self._test_bind_port_virtio_forwarder(fake_segments)
+
def test_bind_remote_managed_port_vlan(self):
"""Test binding a REMOTE_MANAGED port to a geneve segment."""
segment_attrs = {'network_type': 'vlan',
@@ -1362,6 +1386,15 @@ class TestOVNMechanismDriver(TestOVNMechanismDriverBase):
[fakes.FakeSegment.create_one_segment(attrs=segment_attrs).info()]
self._test_bind_port(fake_segments)
+ def test_bind_virtio_forwarder_port_vxlan(self):
+ """Test binding a VIRTIO_FORWARDER port to a vxlan segment."""
+ segment_attrs = {'network_type': 'vxlan',
+ 'physical_network': None,
+ 'segmentation_id': 1024}
+ fake_segments = \
+ [fakes.FakeSegment.create_one_segment(attrs=segment_attrs).info()]
+ self._test_bind_port_virtio_forwarder(fake_segments)
+
def test__is_port_provisioning_required(self):
fake_port = fakes.FakePort.create_one_port(
attrs={'binding:vnic_type': 'normal',
@@ -1754,7 +1787,8 @@ class TestOVNMechanismDriver(TestOVNMechanismDriverBase):
self.mech_driver.update_subnet_postcommit(context)
esd.assert_called_once_with(
context.current, context.network.current, mock.ANY)
- umd.assert_called_once_with(mock.ANY, 'id', subnet=subnet)
+ umd.assert_called_once_with(mock.ANY, context.network.current,
+ subnet=subnet)
def test_update_subnet_postcommit_disable_dhcp(self):
self.mech_driver.nb_ovn.get_subnet_dhcp_options.return_value = {
@@ -1770,7 +1804,8 @@ class TestOVNMechanismDriver(TestOVNMechanismDriverBase):
'update_metadata_port') as umd:
self.mech_driver.update_subnet_postcommit(context)
dsd.assert_called_once_with(context.current['id'], mock.ANY)
- umd.assert_called_once_with(mock.ANY, 'id', subnet=subnet)
+ umd.assert_called_once_with(mock.ANY, context.network.current,
+ subnet=subnet)
def test_update_subnet_postcommit_update_dhcp(self):
self.mech_driver.nb_ovn.get_subnet_dhcp_options.return_value = {
@@ -1787,7 +1822,8 @@ class TestOVNMechanismDriver(TestOVNMechanismDriverBase):
self.mech_driver.update_subnet_postcommit(context)
usd.assert_called_once_with(
context.current, context.network.current, mock.ANY)
- umd.assert_called_once_with(mock.ANY, 'id', subnet=subnet)
+ umd.assert_called_once_with(mock.ANY, context.network.current,
+ subnet=subnet)
def test__get_port_options(self):
with mock.patch.object(self.mech_driver._plugin, 'get_subnets') as \
@@ -1979,9 +2015,10 @@ class TestOVNMechanismDriver(TestOVNMechanismDriverBase):
mock_metaport.return_value = {'fixed_ips': fixed_ips,
'id': 'metadata_id'}
mock_get_subnets.return_value = [{'id': 'subnet1'}]
+ network = {'id': 'net_id'}
subnet = {'id': 'subnet1', 'enable_dhcp': True}
self.mech_driver._ovn_client.update_metadata_port(
- self.context, 'net_id', subnet=subnet)
+ self.context, network, subnet=subnet)
mock_update_port.assert_not_called()
# Subnet without DHCP, present in port.
@@ -1991,7 +2028,7 @@ class TestOVNMechanismDriver(TestOVNMechanismDriverBase):
mock_get_subnets.return_value = [{'id': 'subnet1'}]
subnet = {'id': 'subnet1', 'enable_dhcp': False}
self.mech_driver._ovn_client.update_metadata_port(
- self.context, 'net_id', subnet=subnet)
+ self.context, network, subnet=subnet)
port = {'id': 'metadata_id',
'port': {'network_id': 'net_id', 'fixed_ips': []}}
mock_update_port.assert_called_once_with(mock.ANY, 'metadata_id',
@@ -2004,7 +2041,7 @@ class TestOVNMechanismDriver(TestOVNMechanismDriverBase):
mock_get_subnets.return_value = []
subnet = {'id': 'subnet1', 'enable_dhcp': True}
self.mech_driver._ovn_client.update_metadata_port(
- self.context, 'net_id', subnet=subnet)
+ self.context, network, subnet=subnet)
fixed_ips = [{'subnet_id': 'subnet1'}]
port = {'id': 'metadata_id',
'port': {'network_id': 'net_id', 'fixed_ips': fixed_ips}}
@@ -2018,7 +2055,7 @@ class TestOVNMechanismDriver(TestOVNMechanismDriverBase):
mock_get_subnets.return_value = []
subnet = {'id': 'subnet1', 'enable_dhcp': False}
self.mech_driver._ovn_client.update_metadata_port(
- self.context, 'net_id', subnet=subnet)
+ self.context, network, subnet=subnet)
mock_update_port.assert_not_called()
def test_update_metadata_port_no_subnet(self):
@@ -2035,10 +2072,11 @@ class TestOVNMechanismDriver(TestOVNMechanismDriverBase):
mock_get_subnets.return_value = [{'id': 'subnet1'},
{'id': 'subnet2'}]
fixed_ips = [{'subnet_id': 'subnet1', 'ip_address': 'ip_add1'}]
+ network = {'id': 'net_id'}
mock_metaport.return_value = {'fixed_ips': fixed_ips,
'id': 'metadata_id'}
self.mech_driver._ovn_client.update_metadata_port(self.context,
- 'net_id')
+ network)
port = {'id': 'metadata_id',
'port': {'network_id': 'net_id', 'fixed_ips': fixed_ips}}
fixed_ips.append({'subnet_id': 'subnet2'})
@@ -2049,10 +2087,11 @@ class TestOVNMechanismDriver(TestOVNMechanismDriverBase):
# Port with IP in subnet1; subnet1 with DHCP, subnet2 without DHCP.
mock_get_subnets.return_value = [{'id': 'subnet1'}]
fixed_ips = [{'subnet_id': 'subnet1', 'ip_address': 'ip_add1'}]
+ network = {'id': 'net_id'}
mock_metaport.return_value = {'fixed_ips': fixed_ips,
'id': 'metadata_id'}
self.mech_driver._ovn_client.update_metadata_port(self.context,
- 'net_id')
+ network)
mock_update_port.assert_not_called()
# Port with IP in subnet1; subnet1 without DHCP.
@@ -2061,13 +2100,51 @@ class TestOVNMechanismDriver(TestOVNMechanismDriverBase):
mock_metaport.return_value = {'fixed_ips': fixed_ips,
'id': 'metadata_id'}
self.mech_driver._ovn_client.update_metadata_port(self.context,
- 'net_id')
+ network)
port = {'id': 'metadata_id',
'port': {'network_id': 'net_id', 'fixed_ips': []}}
mock_update_port.assert_called_once_with(
mock.ANY, 'metadata_id', port)
mock_update_port.reset_mock()
+ def test_update_metadata_port_no_port(self):
+ ovn_conf.cfg.CONF.set_override('ovn_metadata_enabled', True,
+ group='ovn')
+
+ with mock.patch.object(
+ self.mech_driver._ovn_client, '_find_metadata_port') as \
+ mock_find_metaport, \
+ mock.patch.object(self.mech_driver._plugin, 'get_subnets') as \
+ mock_get_subnets, \
+ mock.patch.object(p_utils, 'create_port') as \
+ mock_create_port:
+ # Subnet with DHCP, no port, port created.
+ network = {'id': 'net_id', 'project_id': 'project_id-foo'}
+ subnet = {'id': 'subnet1', 'enable_dhcp': True}
+ fixed_ips = [{'subnet_id': 'subnet1', 'ip_address': 'ip_add1'}]
+ port = {'id': 'metadata_id',
+ 'network_id': 'net_id',
+ 'device_owner': const.DEVICE_OWNER_DISTRIBUTED,
+ 'device_id': 'ovnmeta-%s' % 'net_id',
+ 'fixed_ips': fixed_ips}
+ mock_get_subnets.return_value = [subnet]
+ mock_find_metaport.return_value = None
+
+ # Subnet with DHCP, no port, port create failure.
+ mock_create_port.return_value = None
+ ret_status = self.mech_driver._ovn_client.update_metadata_port(
+ self.context, network, subnet=subnet)
+ self.assertFalse(ret_status)
+ mock_create_port.assert_called_once()
+
+ # Subnet with DHCP, no port, port created successfully.
+ mock_create_port.reset_mock()
+ mock_create_port.return_value = port
+ ret_status = self.mech_driver._ovn_client.update_metadata_port(
+ self.context, network, subnet=subnet)
+ self.assertTrue(ret_status)
+ mock_create_port.assert_called_once()
+
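# Editor's note: a minimal sketch (plain Python, hypothetical helper names)
# of the no-port branch that test_update_metadata_port_no_port exercises
# above: update_metadata_port() now receives the network dict and reports
# success as a boolean instead of assuming the metadata port already exists.
def update_metadata_port_sketch(network, find_metadata_port, create_port):
    port = find_metadata_port(network['id'])
    if port is None:
        # No metadata port yet: try to create one for this network.
        port = create_port({'network_id': network['id'],
                            'project_id': network.get('project_id'),
                            'device_id': 'ovnmeta-%s' % network['id']})
        if port is None:
            return False    # creation failed; caller sees a False status
    return True             # a metadata port exists (or was just created)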
@mock.patch.object(provisioning_blocks, 'is_object_blocked')
@mock.patch.object(provisioning_blocks, 'provisioning_complete')
def test_notify_dhcp_updated(self, mock_prov_complete, mock_is_obj_block):
@@ -2366,7 +2443,8 @@ class TestOVNMechanismDriver(TestOVNMechanismDriverBase):
def _test_update_network_fragmentation(self, new_mtu, expected_opts, grps):
network_attrs = {external_net.EXTERNAL: True}
network = self._make_network(
- self.fmt, 'net1', True, arg_list=(external_net.EXTERNAL,),
+ self.fmt, 'net1', True, as_admin=True,
+ arg_list=(external_net.EXTERNAL,),
**network_attrs)
with self.subnet(network=network) as subnet:
@@ -2667,6 +2745,7 @@ class TestOVNMechanismDriver(TestOVNMechanismDriverBase):
pnet.PHYSICAL_NETWORK: 'physnet1',
pnet.SEGMENTATION_ID: '1'}
net = self._make_network(self.fmt, 'net1', True,
+ as_admin=True,
arg_list=(pnet.NETWORK_TYPE,
pnet.PHYSICAL_NETWORK,
pnet.SEGMENTATION_ID,),
@@ -2679,7 +2758,8 @@ class TestOVNMechanismDriver(TestOVNMechanismDriverBase):
# Issue an update to the network changing the segmentation_id
data = {'network': {pnet.SEGMENTATION_ID: new_vlan_tag}}
- req = self.new_update_request('networks', data, net['id'])
+ req = self.new_update_request('networks', data, net['id'],
+ as_admin=True)
res = self.deserialize(self.fmt, req.get_response(self.api))
self.assertEqual(new_vlan_tag, res['network'][pnet.SEGMENTATION_ID])
@@ -2809,6 +2889,7 @@ class TestOVNMechanismDriverSubnetsV2(test_plugin.TestMl2SubnetsV2,
net_arg = {pnet.NETWORK_TYPE: 'geneve',
pnet.SEGMENTATION_ID: '1'}
network = self._make_network(self.fmt, 'net1', True,
+ as_admin=True,
arg_list=(pnet.NETWORK_TYPE,
pnet.SEGMENTATION_ID,),
**net_arg)
@@ -2949,7 +3030,8 @@ class TestOVNMechanismDriverSegment(MechDriverSetupBase,
lswitch_name=ovn_utils.ovn_name(net['id']),
options={'network_name': 'physnet1',
ovn_const.LSP_OPTIONS_MCAST_FLOOD_REPORTS: 'true',
- ovn_const.LSP_OPTIONS_MCAST_FLOOD: 'false'},
+ ovn_const.LSP_OPTIONS_MCAST_FLOOD: 'false',
+ ovn_const.LSP_OPTIONS_LOCALNET_LEARN_FDB: 'false'},
tag=200,
type='localnet')
ovn_nb_api.create_lswitch_port.reset_mock()
@@ -2963,7 +3045,8 @@ class TestOVNMechanismDriverSegment(MechDriverSetupBase,
lswitch_name=ovn_utils.ovn_name(net['id']),
options={'network_name': 'physnet2',
ovn_const.LSP_OPTIONS_MCAST_FLOOD_REPORTS: 'true',
- ovn_const.LSP_OPTIONS_MCAST_FLOOD: 'false'},
+ ovn_const.LSP_OPTIONS_MCAST_FLOOD: 'false',
+ ovn_const.LSP_OPTIONS_LOCALNET_LEARN_FDB: 'false'},
tag=300,
type='localnet')
segments = segments_db.get_network_segments(
@@ -2977,7 +3060,7 @@ class TestOVNMechanismDriverSegment(MechDriverSetupBase,
segment = self._test_create_segment(
network_id=net['id'], physical_network='physnet1',
segmentation_id=200, network_type='vlan')['segment']
- self._delete('segments', segment['id'])
+ self._delete('segments', segment['id'], as_admin=True)
ovn_nb_api.delete_lswitch_port.assert_called_once_with(
lport_name=ovn_utils.ovn_provnet_port_name(segment['id']),
lswitch_name=ovn_utils.ovn_name(net['id']))
@@ -3005,12 +3088,12 @@ class TestOVNMechanismDriverSegment(MechDriverSetupBase,
'options': {'network_name': 'physnet2'},
'tag': 300,
'name': ovn_utils.ovn_provnet_port_name(seg_2['id'])})]
- self._delete('segments', seg_1['id'])
+ self._delete('segments', seg_1['id'], as_admin=True)
ovn_nb_api.delete_lswitch_port.assert_called_once_with(
lport_name=ovn_utils.ovn_provnet_port_name(net['id']),
lswitch_name=ovn_utils.ovn_name(net['id']))
ovn_nb_api.delete_lswitch_port.reset_mock()
- self._delete('segments', seg_2['id'])
+ self._delete('segments', seg_2['id'], as_admin=True)
ovn_nb_api.delete_lswitch_port.assert_called_once_with(
lport_name=ovn_utils.ovn_provnet_port_name(seg_2['id']),
lswitch_name=ovn_utils.ovn_name(net['id']))
@@ -3114,8 +3197,8 @@ class TestOVNMechanismDriverSegment(MechDriverSetupBase,
ovn_nb_api.delete_lswitch_port.assert_not_called()
# Delete both segments
- self._delete('segments', self.seg_2['id'])
- self._delete('segments', self.seg_1['id'])
+ self._delete('segments', self.seg_2['id'], as_admin=True)
+ self._delete('segments', self.seg_1['id'], as_admin=True)
# Make sure that the metadata port wasn't deleted.
deleted_ports = [
@@ -3929,6 +4012,10 @@ class TestOVNMechanismDriverSecurityGroup(MechDriverSetupBase,
self._test_create_port_with_vnic_type(
portbindings.VNIC_BAREMETAL)
+ def test_create_port_with_vnic_virtio_forwarder(self):
+ self._test_create_port_with_vnic_type(
+ portbindings.VNIC_VIRTIO_FORWARDER)
+
def test_update_port_with_sgs(self):
with self.network() as n, self.subnet(n):
sg1 = self._create_empty_sg('sg1')
@@ -4051,7 +4138,7 @@ class TestOVNMechanismDriverSecurityGroup(MechDriverSetupBase,
1, self.mech_driver.nb_ovn.pg_acl_del.call_count)
def test_delete_port_with_security_groups_port_doesnt_remove_pg(self):
- with self.network(set_context=True, tenant_id='test') as net1:
+ with self.network() as net1:
with self.subnet(network=net1):
sg = self._create_sg('sg')
port = self._make_port(
@@ -4129,21 +4216,15 @@ class TestOVNMechanismDriverMetadataPort(MechDriverSetupBase,
"""
self.mech_driver.nb_ovn.get_subnet_dhcp_options.return_value = {
'subnet': {}, 'ports': {}}
- with self.network(set_context=True, tenant_id='test') as net1:
+ with self.network() as net1:
with self.subnet(network=net1, cidr='10.0.0.0/24') as subnet1:
- # Create a network:dhcp owner port just as how Neutron DHCP
- # agent would do.
- with self.port(subnet=subnet1,
- device_owner=const.DEVICE_OWNER_DISTRIBUTED,
- device_id='dhcpxxxx',
- set_context=True, tenant_id='test'):
- with self.subnet(network=net1,
- cidr='20.0.0.0/24') as subnet2:
- self.assertEqual(
- 2, self.nb_ovn.set_lswitch_port.call_count)
- args, kwargs = self.nb_ovn.set_lswitch_port.call_args
- self.assertEqual(ovn_const.LSP_TYPE_LOCALPORT,
- kwargs['type'])
+ with self.subnet(network=net1,
+ cidr='20.0.0.0/24') as subnet2:
+ self.assertEqual(
+ 2, self.nb_ovn.set_lswitch_port.call_count)
+ args, kwargs = self.nb_ovn.set_lswitch_port.call_args
+ self.assertEqual(ovn_const.LSP_TYPE_LOCALPORT,
+ kwargs['type'])
port_ips = kwargs['external_ids'].get(
ovn_const.OVN_CIDRS_EXT_ID_KEY, '').split()
port_cidrs = [str(netaddr.IPNetwork(cidr).cidr) for cidr in port_ips]
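# Editor's note: a self-contained example of the netaddr idiom used just
# above, assuming the external_ids entry is a space-separated list of
# "address/prefix" strings; .cidr masks the host bits off each address.
import netaddr

ext_id_value = '10.0.0.2/24 20.0.0.2/24'
cidrs = [str(netaddr.IPNetwork(ip).cidr) for ip in ext_id_value.split()]
assert cidrs == ['10.0.0.0/24', '20.0.0.0/24']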
@@ -4176,6 +4257,7 @@ class TestOVNParentTagPortBinding(OVNMechanismDriverTestCase):
self._create_port(
self.fmt, n['network']['id'],
expected_res_status=404,
+ is_admin=True,
arg_list=(OVN_PROFILE,),
**binding)
@@ -4187,6 +4269,7 @@ class TestOVNParentTagPortBinding(OVNMechanismDriverTestCase):
with self.port(s) as p:
binding[OVN_PROFILE]['parent_name'] = p['port']['id']
res = self._create_port(self.fmt, n['network']['id'],
+ is_admin=True,
arg_list=(OVN_PROFILE,),
**binding)
port = self.deserialize(self.fmt, res)
@@ -4201,6 +4284,7 @@ class TestOVNParentTagPortBinding(OVNMechanismDriverTestCase):
with self.port(s) as p:
binding[OVN_PROFILE]['parent_name'] = p['port']['id']
self._create_port(self.fmt, n['network']['id'],
+ is_admin=True,
arg_list=(OVN_PROFILE,),
expected_res_status=400,
**binding)
@@ -4214,6 +4298,7 @@ class TestOVNVtepPortBinding(OVNMechanismDriverTestCase):
with self.network() as n:
with self.subnet(n):
res = self._create_port(self.fmt, n['network']['id'],
+ is_admin=True,
arg_list=(OVN_PROFILE,),
**binding)
port = self.deserialize(self.fmt, res)
@@ -4225,6 +4310,7 @@ class TestOVNVtepPortBinding(OVNMechanismDriverTestCase):
with self.network() as n:
with self.subnet(n):
self._create_port(self.fmt, n['network']['id'],
+ is_admin=True,
arg_list=(OVN_PROFILE,),
expected_res_status=400,
**binding)
@@ -4234,6 +4320,7 @@ class TestOVNVtepPortBinding(OVNMechanismDriverTestCase):
with self.network() as n:
with self.subnet(n):
self._create_port(self.fmt, n['network']['id'],
+ is_admin=True,
arg_list=(OVN_PROFILE,),
expected_res_status=400,
**binding)
@@ -4244,6 +4331,7 @@ class TestOVNVtepPortBinding(OVNMechanismDriverTestCase):
with self.network() as n:
with self.subnet(n):
self._create_port(self.fmt, n['network']['id'],
+ is_admin=True,
arg_list=(OVN_PROFILE,),
expected_res_status=400,
**binding)
@@ -4255,6 +4343,7 @@ class TestOVNVtepPortBinding(OVNMechanismDriverTestCase):
with self.network() as n:
with self.subnet(n):
self._create_port(self.fmt, n['network']['id'],
+ is_admin=True,
arg_list=(OVN_PROFILE,),
expected_res_status=404,
**binding)
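# Editor's note: the TestOVNVtepPortBinding cases above all create a port
# carrying an OVN binding profile and assert only on the HTTP status: a
# well-formed vtep profile is accepted, a malformed one is rejected with
# 400, and a missing referenced resource yields 404. Sketch of a plausible
# profile body (the key names are illustrative, not taken from this diff):
binding = {OVN_PROFILE: {'vtep-physical-switch': 'psw-1',
                         'vtep-logical-switch': 'lsw-1'}}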
diff --git a/neutron/tests/unit/plugins/ml2/drivers/ovn/test_db_migration.py b/neutron/tests/unit/plugins/ml2/drivers/ovn/test_db_migration.py
index ad7d56a39e..a14f3feb29 100644
--- a/neutron/tests/unit/plugins/ml2/drivers/ovn/test_db_migration.py
+++ b/neutron/tests/unit/plugins/ml2/drivers/ovn/test_db_migration.py
@@ -39,10 +39,12 @@ class TestMigrateNeutronDatabaseToOvn(
for sid in range(1, 6):
net_arg = {pnet.NETWORK_TYPE: 'vxlan',
pnet.SEGMENTATION_ID: sid}
- network_id = self._make_network(self.fmt, 'net%d' % sid, True,
+ network_id = self._make_network(
+ self.fmt, 'net%d' % sid, True, as_admin=True,
arg_list=(pnet.NETWORK_TYPE,
pnet.SEGMENTATION_ID,),
- **net_arg)['network']['id']
+ **net_arg
+ )['network']['id']
for vif_details in vif_details_list:
port = self._make_port(self.fmt, network_id)['port']
diff --git a/neutron/tests/unit/plugins/ml2/extensions/test_dns_domain_keywords.py b/neutron/tests/unit/plugins/ml2/extensions/test_dns_domain_keywords.py
index e656e250f5..b658d96c1c 100644
--- a/neutron/tests/unit/plugins/ml2/extensions/test_dns_domain_keywords.py
+++ b/neutron/tests/unit/plugins/ml2/extensions/test_dns_domain_keywords.py
@@ -50,7 +50,7 @@ class DNSDomainKeywordsTestCase(
net_kwargs.get('arg_list', ()) + (dns_apidef.DNSDOMAIN,)
net_kwargs['shared'] = True
res = self._create_network(self.fmt, 'test_network', True,
- **net_kwargs)
+ as_admin=True, **net_kwargs)
network = self.deserialize(self.fmt, res)
if ipv4:
cidr = '10.0.0.0/24'
@@ -108,8 +108,8 @@ class DNSDomainKeywordsTestCase(
# NOTE(slaweq): Admin context is required here to be able to update
# fixed_ips of the port as by default it is not possible for non-admin
# users
- ctx = context.Context(project_id=PROJECT_ID, is_admin=True)
- req = self.new_update_request('ports', data, port['id'], context=ctx)
+ req = self.new_update_request('ports', data, port['id'],
+ tenant_id=PROJECT_ID, as_admin=True)
res = req.get_response(self.api)
self.assertEqual(200, res.status_int)
port = self.deserialize(self.fmt, res)['port']
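# Editor's note: a recurring change in this patch replaces explicitly
# built admin contexts with as_admin=True / is_admin=True keywords on the
# test helpers. A hedged sketch of what such a helper presumably does
# internally (names are illustrative, not the real helper code):
def new_update_request_sketch(resource, data, res_id,
                              tenant_id=None, as_admin=False):
    ctx = context.Context(project_id=tenant_id, is_admin=as_admin)
    return build_api_request(resource, data, res_id, context=ctx)  # hypothetical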
diff --git a/neutron/tests/unit/plugins/ml2/extensions/test_dns_integration.py b/neutron/tests/unit/plugins/ml2/extensions/test_dns_integration.py
index 75783ad0b4..702f2b612b 100644
--- a/neutron/tests/unit/plugins/ml2/extensions/test_dns_integration.py
+++ b/neutron/tests/unit/plugins/ml2/extensions/test_dns_integration.py
@@ -27,6 +27,7 @@ from oslo_config import cfg
from oslo_utils import uuidutils
import testtools
+from neutron.common import config
from neutron.objects import ports as port_obj
from neutron.plugins.ml2.extensions import dns_integration
from neutron.services.externaldns.drivers.designate import driver
@@ -53,6 +54,7 @@ class DNSIntegrationTestCase(test_plugin.Ml2PluginV2TestCase):
_domain = DNSDOMAIN
def setUp(self):
+ config.register_common_config_options()
cfg.CONF.set_override('extension_drivers',
self._extension_drivers,
group='ml2')
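# Editor's note: register_common_config_options() is added before the
# set_override() calls because oslo.config refuses to override an option
# that has not been registered yet (it raises NoSuchOptError). Minimal
# illustration of the general pattern (option name chosen for the example):
from oslo_config import cfg

cfg.CONF.register_opts([cfg.ListOpt('extension_drivers', default=[])],
                       group='ml2')
cfg.CONF.set_override('extension_drivers', ['dns'], group='ml2')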
@@ -80,7 +82,7 @@ class DNSIntegrationTestCase(test_plugin.Ml2PluginV2TestCase):
net_kwargs['arg_list'] = \
net_kwargs.get('arg_list', ()) + (dns_apidef.DNSDOMAIN,)
res = self._create_network(self.fmt, 'test_network', True,
- **net_kwargs)
+ as_admin=True, **net_kwargs)
network = self.deserialize(self.fmt, res)
if ipv4:
cidr = '10.0.0.0/24'
diff --git a/neutron/tests/unit/plugins/ml2/extensions/test_tag_ports_during_bulk_creation.py b/neutron/tests/unit/plugins/ml2/extensions/test_tag_ports_during_bulk_creation.py
index 6131f3611f..daa6e72542 100644
--- a/neutron/tests/unit/plugins/ml2/extensions/test_tag_ports_during_bulk_creation.py
+++ b/neutron/tests/unit/plugins/ml2/extensions/test_tag_ports_during_bulk_creation.py
@@ -50,12 +50,10 @@ class TagPortsDuringBulkCreationTestCase(test_plugin.Ml2PluginV2TestCase):
def test_create_ports_bulk_with_tags(self):
num_ports = 3
- tenant_id = 'some_tenant'
- with self.network(tenant_id=tenant_id) as network_to_use:
+ with self.network() as network_to_use:
net_id = network_to_use['network']['id']
port = {'port': {'network_id': net_id,
- 'admin_state_up': True,
- 'tenant_id': tenant_id}}
+ 'admin_state_up': True}}
ports = [copy.deepcopy(port) for x in range(num_ports)]
ports_tags_map = {}
for port, tags in zip(ports, TAGS):
@@ -73,13 +71,11 @@ class TagPortsDuringBulkCreationTestCase(test_plugin.Ml2PluginV2TestCase):
def test_create_ports_bulk_no_tags(self):
num_ports = 2
- tenant_id = 'some_tenant'
- with self.network(tenant_id=tenant_id) as network_to_use:
+ with self.network() as network_to_use:
net_id = network_to_use['network']['id']
port = {'port': {'name': 'port',
'network_id': net_id,
- 'admin_state_up': True,
- 'tenant_id': tenant_id}}
+ 'admin_state_up': True}}
ports = [copy.deepcopy(port) for x in range(num_ports)]
req_body = {'ports': ports}
ports_req = self.new_create_request('ports', req_body)
@@ -90,13 +86,11 @@ class TagPortsDuringBulkCreationTestCase(test_plugin.Ml2PluginV2TestCase):
self.assertFalse(port['tags'])
def test_create_port_with_tags(self):
- tenant_id = 'some_tenant'
- with self.network(tenant_id=tenant_id) as network_to_use:
+ with self.network() as network_to_use:
net_id = network_to_use['network']['id']
req_body = {'port': {'name': 'port',
'network_id': net_id,
'admin_state_up': True,
- 'tenant_id': tenant_id,
'tags': TAGS[0]}}
port_req = self.new_create_request('ports', req_body)
res = port_req.get_response(self.api)
@@ -106,16 +100,14 @@ class TagPortsDuringBulkCreationTestCase(test_plugin.Ml2PluginV2TestCase):
def test_type_args_passed_to_extension(self):
num_ports = 2
- tenant_id = 'some_tenant'
extension = tag_ports_during_bulk_creation
with mock.patch.object(
extension.TagPortsDuringBulkCreationExtensionDriver,
'process_create_port') as patched_method:
- with self.network(tenant_id=tenant_id) as network_to_use:
+ with self.network() as network_to_use:
net_id = network_to_use['network']['id']
port = {'port': {'network_id': net_id,
- 'admin_state_up': True,
- 'tenant_id': tenant_id}}
+ 'admin_state_up': True}}
ports = [copy.deepcopy(port) for x in range(num_ports)]
ports[0]['port']['tags'] = TAGS[0]
ports[1]['port']['tags'] = TAGS[1]
diff --git a/neutron/tests/unit/plugins/ml2/test_extension_driver_api.py b/neutron/tests/unit/plugins/ml2/test_extension_driver_api.py
index d51a264527..2b673cf499 100644
--- a/neutron/tests/unit/plugins/ml2/test_extension_driver_api.py
+++ b/neutron/tests/unit/plugins/ml2/test_extension_driver_api.py
@@ -16,7 +16,6 @@ from neutron_lib import constants
from neutron_lib import context
from neutron_lib.plugins import directory
from oslo_config import cfg
-from oslo_utils import uuidutils
from neutron.tests.unit.plugins.ml2.drivers import ext_test
from neutron.tests.unit.plugins.ml2 import test_plugin
@@ -35,9 +34,7 @@ class ExtensionDriverTestCase(test_plugin.Ml2PluginV2TestCase):
self._ctxt = context.get_admin_context()
def _verify_network_create(self, code, exc_reason):
- tenant_id = uuidutils.generate_uuid()
- data = {'network': {'name': 'net1',
- 'tenant_id': tenant_id}}
+ data = {'network': {'name': 'net1'}}
req = self.new_create_request('networks', data)
res = req.get_response(self.api)
self.assertEqual(code, res.status_int)
@@ -47,7 +44,7 @@ class ExtensionDriverTestCase(test_plugin.Ml2PluginV2TestCase):
self.assertEqual(exc_reason,
network['NeutronError']['type'])
- return (network, tenant_id)
+ return network
def _verify_network_update(self, network, code, exc_reason):
net_id = network['network']['id']
@@ -64,10 +61,9 @@ class ExtensionDriverTestCase(test_plugin.Ml2PluginV2TestCase):
with mock.patch.object(ext_test.TestExtensionDriver,
'process_create_network',
side_effect=TypeError):
- net, tenant_id = self._verify_network_create(500,
- 'HTTPInternalServerError')
+ self._verify_network_create(500, 'HTTPInternalServerError')
# Verify the operation is rolled back
- query_params = "tenant_id=%s" % tenant_id
+ query_params = "tenant_id=%s" % self._tenant_id
nets = self._list('networks', query_params=query_params)
self.assertFalse(nets['networks'])
@@ -75,7 +71,7 @@ class ExtensionDriverTestCase(test_plugin.Ml2PluginV2TestCase):
with mock.patch.object(ext_test.TestExtensionDriver,
'process_update_network',
side_effect=TypeError):
- network, tid = self._verify_network_create(201, None)
+ network = self._verify_network_create(201, None)
self._verify_network_update(network, 500,
'HTTPInternalServerError')
@@ -83,7 +79,7 @@ class ExtensionDriverTestCase(test_plugin.Ml2PluginV2TestCase):
with mock.patch.object(ext_test.TestExtensionDriver,
'extend_network_dict',
side_effect=[None, None, TypeError]):
- network, tid = self._verify_network_create(201, None)
+ network = self._verify_network_create(201, None)
self._verify_network_update(network, 400, 'ExtensionDriverError')
def test_network_attr(self):
diff --git a/neutron/tests/unit/plugins/ml2/test_plugin.py b/neutron/tests/unit/plugins/ml2/test_plugin.py
index ccc72464f8..cd72574f0d 100644
--- a/neutron/tests/unit/plugins/ml2/test_plugin.py
+++ b/neutron/tests/unit/plugins/ml2/test_plugin.py
@@ -49,6 +49,7 @@ import webob
from neutron._i18n import _
from neutron.agent import rpc as agent_rpc
+from neutron.common import config
from neutron.common import utils
from neutron.db import agents_db
from neutron.db import ipam_pluggable_backend
@@ -381,7 +382,8 @@ class TestMl2NetworksV2(test_plugin.TestNetworksV2,
for net_idx, net in enumerate(networks):
# create
req = self.new_create_request('networks',
- {'network': net})
+ {'network': net},
+ as_admin=True)
# verify
network = self.deserialize(self.fmt,
req.get_response(self.api))['network']
@@ -399,7 +401,8 @@ class TestMl2NetworksV2(test_plugin.TestNetworksV2,
def _lookup_network_by_segmentation_id(self, seg_id, num_expected_nets):
params_str = "%s=%s" % (pnet.SEGMENTATION_ID, seg_id)
net_req = self.new_list_request('networks', None,
- params=params_str)
+ params=params_str,
+ as_admin=True)
networks = self.deserialize(self.fmt, net_req.get_response(self.api))
if num_expected_nets:
self.assertIsNotNone(networks)
@@ -446,9 +449,9 @@ class TestMl2NetworksV2(test_plugin.TestNetworksV2,
plugin.type_manager, 'create_network_segments',
side_effect=db_exc.RetryRequest(ValueError())
) as f:
- data = {'network': {'tenant_id': 'sometenant', 'name': 'dummy',
+ data = {'network': {'name': 'dummy',
'admin_state_up': True, 'shared': False}}
- req = self.new_create_request('networks', data)
+ req = self.new_create_request('networks', data, as_admin=True)
res = req.get_response(self.api)
self.assertEqual(500, res.status_int)
# 1 + retry count
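# Editor's note: the call-count assertion above relies on neutron's DB
# retry machinery: raising oslo_db's RetryRequest makes the API layer
# re-invoke the operation up to its retry budget before surfacing a 500,
# hence "1 + retry count" calls. Rough sketch of the control flow:
from oslo_db.exception import RetryRequest

def call_with_retries(fn, max_retries):
    for attempt in range(1 + max_retries):    # first try + retries
        try:
            return fn()
        except RetryRequest:
            if attempt == max_retries:
                raise                          # budget exhausted, error out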
@@ -459,7 +462,7 @@ class TestMl2NetworksV2(test_plugin.TestNetworksV2,
plugin = directory.get_plugin()
kwargs = {'arg_list': (pnet.NETWORK_TYPE, ),
pnet.NETWORK_TYPE: 'vlan'}
- with self.network(**kwargs) as net:
+ with self.network(as_admin=True, **kwargs) as net:
for attribute in set(pnet.ATTRIBUTES) - {pnet.SEGMENTATION_ID}:
net_data = {attribute: net['network'][attribute]}
self.assertIsNone(
@@ -491,7 +494,8 @@ class TestMl2NetworksV2(test_plugin.TestNetworksV2,
{pnet.NETWORK_TYPE: 'vlan',
pnet.PHYSICAL_NETWORK: 'physnet2',
pnet.SEGMENTATION_ID: 2}]
- with self.network(**{'arg_list': (mpnet_apidef.SEGMENTS, ),
+ with self.network(as_admin=True,
+ **{'arg_list': (mpnet_apidef.SEGMENTS, ),
mpnet_apidef.SEGMENTS: segments}) as net:
self.assertRaises(
exc.InvalidInput, plugin._update_segmentation_id, self.context,
@@ -518,7 +522,8 @@ class TestMl2NetworksV2(test_plugin.TestNetworksV2,
mock.patch.object(type(mech_driver), 'agent_type',
new_callable=mock.PropertyMock(return_value=None)).start()
- with self.network(**{'arg_list': (mpnet_apidef.SEGMENTS, ),
+ with self.network(as_admin=True,
+ **{'arg_list': (mpnet_apidef.SEGMENTS, ),
mpnet_apidef.SEGMENTS: segments}) as net, \
mock.patch.object(
port_obj.Port, 'check_network_ports_by_binding_types',
@@ -598,7 +603,8 @@ class TestMl2NetworksV2AgentMechDrivers(Ml2PluginV2TestCase):
segments = [{pnet.NETWORK_TYPE: 'vlan',
pnet.PHYSICAL_NETWORK: 'physnet1',
pnet.SEGMENTATION_ID: 1}]
- with self.network(**{'arg_list': (mpnet_apidef.SEGMENTS, ),
+ with self.network(as_admin=True,
+ **{'arg_list': (mpnet_apidef.SEGMENTS, ),
mpnet_apidef.SEGMENTS: segments}) as net, \
mock.patch.object(
port_obj.Port, 'check_network_ports_by_binding_types',
@@ -623,9 +629,8 @@ class TestExternalNetwork(Ml2PluginV2TestCase):
def _create_external_network(self):
data = {'network': {'name': 'net1',
- 'router:external': 'True',
- 'tenant_id': 'tenant_one'}}
- network_req = self.new_create_request('networks', data)
+ 'router:external': 'True'}}
+ network_req = self.new_create_request('networks', data, as_admin=True)
network = self.deserialize(self.fmt,
network_req.get_response(self.api))
return network
@@ -669,10 +674,10 @@ class TestMl2NetworksWithVlanTransparencyBase(TestMl2NetworksV2):
mpnet_apidef.SEGMENTS:
[{pnet.NETWORK_TYPE: 'vlan',
pnet.PHYSICAL_NETWORK: 'physnet1'}],
- 'tenant_id': 'tenant_one',
'vlan_transparent': 'True'}}
def setUp(self, plugin=None):
+ config.register_common_config_options()
cfg.CONF.set_override('vlan_transparent', True)
super(TestMl2NetworksWithVlanTransparencyBase, self).setUp(plugin)
@@ -685,7 +690,8 @@ class TestMl2NetworksWithVlanTransparency(
with mock.patch.object(mech_test.TestMechanismDriver,
'check_vlan_transparency',
return_value=False):
- network_req = self.new_create_request('networks', self.data)
+ network_req = self.new_create_request(
+ 'networks', self.data, as_admin=True)
res = network_req.get_response(self.api)
self.assertEqual(500, res.status_int)
error_result = self.deserialize(self.fmt, res)['NeutronError']
@@ -696,7 +702,8 @@ class TestMl2NetworksWithVlanTransparency(
with mock.patch.object(mech_test.TestMechanismDriver,
'check_vlan_transparency',
return_value=True):
- network_req = self.new_create_request('networks', self.data)
+ network_req = self.new_create_request(
+ 'networks', self.data, as_admin=True)
res = network_req.get_response(self.api)
self.assertEqual(201, res.status_int)
network = self.deserialize(self.fmt, res)['network']
@@ -713,7 +720,8 @@ class TestMl2NetworksWithVlanTransparencyAndMTU(
return_value=True):
cfg.CONF.set_override('path_mtu', 1000, group='ml2')
cfg.CONF.set_override('global_physnet_mtu', 1000)
- network_req = self.new_create_request('networks', self.data)
+ network_req = self.new_create_request(
+ 'networks', self.data, as_admin=True)
res = network_req.get_response(self.api)
self.assertEqual(201, res.status_int)
network = self.deserialize(self.fmt, res)['network']
@@ -727,8 +735,7 @@ class TestMl2NetworksWithAvailabilityZone(TestMl2NetworksV2):
def test_create_network_availability_zone(self):
az_hints = ['az1', 'az2']
data = {'network': {'name': 'net1',
- az_def.AZ_HINTS: az_hints,
- 'tenant_id': 'tenant_one'}}
+ az_def.AZ_HINTS: az_hints}}
with mock.patch.object(agents_db.AgentAvailabilityZoneMixin,
'validate_availability_zones'):
network_req = self.new_create_request('networks', data)
@@ -879,6 +886,7 @@ class TestMl2SubnetsV2(test_plugin.TestSubnetsV2,
net_arg = {pnet.NETWORK_TYPE: 'vxlan',
pnet.SEGMENTATION_ID: '1'}
network = self._make_network(self.fmt, 'net1', True,
+ as_admin=True,
arg_list=(pnet.NETWORK_TYPE,
pnet.SEGMENTATION_ID,),
**net_arg)
@@ -903,6 +911,9 @@ class TestMl2DbOperationBounds(test_plugin.DbOperationBoundMixin,
def setUp(self):
super(TestMl2DbOperationBounds, self).setUp()
self.kwargs = self.get_api_kwargs()
+        # NOTE(slaweq): In this class we are not testing any operations
+        # related to the policy module, so we don't need to check policies
+ mock.patch('neutron.policy.check').start()
def make_network(self):
return self._make_network(self.fmt, 'name', True, **self.kwargs)
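# Editor's note: a minimal illustration of the mock.patch idiom used in
# setUp() above; a patcher started with .start() stays active until it is
# stopped (neutron's test base presumably stops all started patchers on
# cleanup, which is why no explicit stop appears in setUp):
from unittest import mock

patcher = mock.patch('neutron.policy.check')
mocked_check = patcher.start()    # neutron.policy.check is now a MagicMock
# ... exercise code that would otherwise run policy enforcement ...
patcher.stop()                    # restore the real function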
@@ -1277,7 +1288,7 @@ class TestMl2PortsV2(test_plugin.TestPortsV2, Ml2PluginV2TestCase):
def test_update_port_with_empty_data(self):
ctx = context.get_admin_context()
plugin = directory.get_plugin()
- with self.port() as port:
+ with self.port(is_admin=True) as port:
port_id = port['port']['id']
new_port = plugin.update_port(ctx, port_id, {"port": {}})
new_port.pop('standard_attr_id')
@@ -1419,7 +1430,8 @@ class TestMl2PortsV2(test_plugin.TestPortsV2, Ml2PluginV2TestCase):
data = {'port': {'mac_address': None}}
with self.port() as port:
current_mac = port['port']['mac_address']
- req = self.new_update_request('ports', data, port['port']['id'])
+ req = self.new_update_request(
+ 'ports', data, port['port']['id'], as_admin=True)
self.assertEqual(200, req.get_response(self.api).status_int)
new_mac = plugin.get_port(ctx, port['port']['id'])['mac_address']
self.assertNotEqual(current_mac, new_mac)
@@ -1455,7 +1467,7 @@ class TestMl2PortsV2(test_plugin.TestPortsV2, Ml2PluginV2TestCase):
l3plugin = directory.get_plugin(plugin_constants.L3)
kwargs = {'arg_list': (extnet_apidef.EXTERNAL,),
extnet_apidef.EXTERNAL: True}
- with self.network(**kwargs) as n:
+ with self.network(as_admin=True, **kwargs) as n:
with self.subnet(network=n, cidr='200.0.0.0/22'):
l3plugin.create_floatingip(
context.get_admin_context(),
@@ -1485,24 +1497,23 @@ class TestMl2PortsV2(test_plugin.TestPortsV2, Ml2PluginV2TestCase):
res, 'ports', webob.exc.HTTPServerError.code)
def test_create_ports_bulk_with_sec_grp(self):
- ctx = context.get_admin_context()
plugin = directory.get_plugin()
with self.network() as net,\
mock.patch.object(plugin.notifier,
'security_groups_member_updated') as m_upd:
res = self._create_port_bulk(self.fmt, 3, net['network']['id'],
- 'test', True, context=ctx)
+ 'test', True)
ports = self.deserialize(self.fmt, res)
if 'ports' in ports:
used_sg = ports['ports'][0]['security_groups']
m_upd.assert_has_calls(
- [mock.call(ctx, [sg]) for sg in used_sg], any_order=True)
+ [mock.call(mock.ANY, [sg]) for sg in used_sg],
+ any_order=True)
else:
self.assertTrue('ports' in ports)
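# Editor's note: the assertions above switch from a captured context object
# to mock.ANY because the request context is now built inside the bulk
# helper instead of being passed in by the test. mock.ANY compares equal
# to anything in that argument position:
from unittest import mock

m = mock.Mock()
m(object(), ['sg-1'])
m.assert_called_with(mock.ANY, ['sg-1'])   # passes regardless of first arg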
def test_create_ports_bulk_with_portbinding_attrs(self):
- ctx = context.get_admin_context()
with self.network() as net:
overrides = {0: {portbindings.HOST_ID: 'host1',
portbindings.VNIC_TYPE: 'direct',
@@ -1511,7 +1522,7 @@ class TestMl2PortsV2(test_plugin.TestPortsV2, Ml2PluginV2TestCase):
portbindings.VNIC_TYPE: 'macvtap',
portbindings.PROFILE: {'bar': 'bar'}}}
res = self._create_port_bulk(self.fmt, 2, net['network']['id'],
- 'test', True, context=ctx,
+ 'test', True, as_admin=True,
override=overrides)
ports = self.deserialize(self.fmt, res)['ports']
self.assertCountEqual(['direct', 'macvtap'],
@@ -1522,7 +1533,6 @@ class TestMl2PortsV2(test_plugin.TestPortsV2, Ml2PluginV2TestCase):
[p[portbindings.HOST_ID] for p in ports])
def test_create_ports_bulk_with_sec_grp_member_provider_update(self):
- ctx = context.get_admin_context()
plugin = directory.get_plugin()
bulk_mock_name = "security_groups_member_updated"
with self.network() as net,\
@@ -1531,28 +1541,25 @@ class TestMl2PortsV2(test_plugin.TestPortsV2, Ml2PluginV2TestCase):
net_id = net['network']['id']
data = [{
'network_id': net_id,
- 'tenant_id': self._tenant_id
},
{
'network_id': net_id,
- 'tenant_id': self._tenant_id,
'device_owner': constants.DEVICE_OWNER_DHCP
}
]
- res = self._create_bulk_from_list(self.fmt, 'port',
- data, context=ctx)
+ res = self._create_bulk_from_list(self.fmt, 'port', data,
+ as_admin=True)
ports = self.deserialize(self.fmt, res)
used_sg = ports['ports'][0]['security_groups']
- m_upd.assert_called_with(ctx, used_sg)
+ m_upd.assert_called_with(mock.ANY, used_sg)
m_upd.reset_mock()
data[0]['device_owner'] = constants.DEVICE_OWNER_DHCP
self._create_bulk_from_list(self.fmt, 'port',
- data, context=ctx)
+ data, as_admin=True)
self.assertFalse(m_upd.called)
def test_create_ports_bulk_with_sec_grp_provider_update_ipv6(self):
- ctx = context.get_admin_context()
plugin = directory.get_plugin()
fake_prefix = '2001:db8::/64'
fake_gateway = 'fe80::1'
@@ -1568,13 +1575,12 @@ class TestMl2PortsV2(test_plugin.TestPortsV2, Ml2PluginV2TestCase):
net_id = net['network']['id']
data = [{
'network_id': net_id,
- 'tenant_id': self._tenant_id,
'fixed_ips': [{'subnet_id': snet_v6['subnet']['id']}],
'device_owner': constants.DEVICE_OWNER_ROUTER_INTF
}
]
self._create_bulk_from_list(self.fmt, 'port',
- data, context=ctx)
+ data, as_admin=True)
self.assertFalse(m_upd.called)
def test_create_ports_bulk_ip_allocation_reverted_in_case_of_error(self):
@@ -1839,7 +1845,8 @@ class TestMl2PortsV2(test_plugin.TestPortsV2, Ml2PluginV2TestCase):
port_kwargs = {portbindings.HOST_ID: 'host1',
'subnet': subnet,
'device_id': 'deadlocktest'}
- with self.port(arg_list=(portbindings.HOST_ID,),
+ with self.port(is_admin=True,
+ arg_list=(portbindings.HOST_ID,),
**port_kwargs) as port:
self.assertTrue(port['port']['id'])
self.assertTrue(get_port_mock.called)
@@ -2034,7 +2041,8 @@ class TestMl2PortsV2WithRevisionPlugin(Ml2PluginV2TestCase):
ctx = context.get_admin_context()
plugin = directory.get_plugin()
host_arg = {portbindings.HOST_ID: HOST}
- with self.port(arg_list=(portbindings.HOST_ID,),
+ with self.port(is_admin=True,
+ arg_list=(portbindings.HOST_ID,),
**host_arg) as port:
port = plugin.get_port(ctx, port['port']['id'])
updated_ports = []
@@ -2063,7 +2071,8 @@ class TestMl2PortsV2WithRevisionPlugin(Ml2PluginV2TestCase):
registry.subscribe(creceiver, resources.PORT,
events.AFTER_CREATE)
host_arg = {portbindings.HOST_ID: HOST}
- with self.port(arg_list=(portbindings.HOST_ID,),
+ with self.port(is_admin=True,
+ arg_list=(portbindings.HOST_ID,),
**host_arg):
self.assertGreater(updated_ports[0]['revision_number'],
created_ports[0]['revision_number'])
@@ -2076,7 +2085,8 @@ class TestMl2PortsV2WithRevisionPlugin(Ml2PluginV2TestCase):
registry.subscribe(p_update_receiver, resources.PORT,
events.AFTER_UPDATE)
host_arg = {portbindings.HOST_ID: HOST}
- with self.port(device_owner=constants.DEVICE_OWNER_DVR_INTERFACE,
+ with self.port(is_admin=True,
+ device_owner=constants.DEVICE_OWNER_DVR_INTERFACE,
device_id=TEST_ROUTER_ID,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port:
@@ -2113,7 +2123,8 @@ class TestMl2PortsV2WithL3(test_plugin.TestPortsV2, Ml2PluginV2TestCase):
host_arg = {portbindings.HOST_ID: HOST}
with mock.patch.object(l3plugin.l3_rpc_notifier,
'routers_updated_on_host') as mock_updated:
- with self.port(device_owner=constants.DEVICE_OWNER_ROUTER_HA_INTF,
+ with self.port(is_admin=True,
+ device_owner=constants.DEVICE_OWNER_ROUTER_HA_INTF,
device_id=TEST_ROUTER_ID,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port:
@@ -2396,7 +2407,7 @@ class TestMl2DvrPortsV2(TestMl2PortsV2):
if floating_ip:
router_ids.add(ns_to_delete['router_id'])
- with self.port() as port, \
+ with self.port(is_admin=True) as port, \
mock.patch.object(registry, 'publish') as publish, \
mock.patch.object(self.l3plugin,
'disassociate_floatingips',
@@ -2439,7 +2450,8 @@ class TestMl2DvrPortsV2(TestMl2PortsV2):
def test_delete_port_with_floatingip_create_precommit_event(self):
fake_method = mock.Mock()
- with self.port(device_owner='network:floatingip') as port:
+ with self.port(is_admin=True,
+ device_owner='network:floatingip') as port:
try:
registry.subscribe(fake_method, resources.FLOATING_IP,
events.PRECOMMIT_DELETE)
@@ -2531,6 +2543,7 @@ class TestMl2PortBinding(Ml2PluginV2TestCase,
profile_arg = {portbindings.PROFILE: {'d': s}}
try:
with self.port(expected_res_status=400,
+ is_admin=True,
arg_list=(portbindings.PROFILE,),
**profile_arg):
pass
@@ -2540,15 +2553,17 @@ class TestMl2PortBinding(Ml2PluginV2TestCase,
def test_remove_port_binding_profile(self):
profile = {'e': 5}
profile_arg = {portbindings.PROFILE: profile}
- with self.port(arg_list=(portbindings.PROFILE,),
+ with self.port(is_admin=True,
+ arg_list=(portbindings.PROFILE,),
**profile_arg) as port:
self._check_port_binding_profile(port['port'], profile)
port_id = port['port']['id']
profile_arg = {portbindings.PROFILE: None}
port = self._update('ports', port_id,
- {'port': profile_arg})['port']
+ {'port': profile_arg},
+ as_admin=True)['port']
self._check_port_binding_profile(port)
- port = self._show('ports', port_id)['port']
+ port = self._show('ports', port_id, as_admin=True)['port']
self._check_port_binding_profile(port)
def test_return_on_concurrent_delete_and_binding(self):
@@ -2741,15 +2756,17 @@ class TestMl2PortBinding(Ml2PluginV2TestCase,
def test_port_binding_profile_not_changed(self):
profile = {'e': 5}
profile_arg = {portbindings.PROFILE: profile}
- with self.port(arg_list=(portbindings.PROFILE,),
+ with self.port(is_admin=True,
+ arg_list=(portbindings.PROFILE,),
**profile_arg) as port:
self._check_port_binding_profile(port['port'], profile)
port_id = port['port']['id']
state_arg = {'admin_state_up': True}
port = self._update('ports', port_id,
- {'port': state_arg})['port']
+ {'port': state_arg},
+ as_admin=True)['port']
self._check_port_binding_profile(port, profile)
- port = self._show('ports', port_id)['port']
+ port = self._show('ports', port_id, as_admin=True)['port']
self._check_port_binding_profile(port, profile)
def test_update_port_binding_host_id_none(self):
@@ -2882,8 +2899,7 @@ class TestMultiSegmentNetworks(Ml2PluginV2TestCase):
super(TestMultiSegmentNetworks, self).setUp()
def test_allocate_dynamic_segment(self):
- data = {'network': {'name': 'net1',
- 'tenant_id': 'tenant_one'}}
+ data = {'network': {'name': 'net1'}}
network_req = self.new_create_request('networks', data)
network = self.deserialize(self.fmt,
network_req.get_response(self.api))
@@ -2911,8 +2927,7 @@ class TestMultiSegmentNetworks(Ml2PluginV2TestCase):
self.assertEqual(dynamic_segment[driver_api.SEGMENTATION_ID], 1234)
def test_allocate_dynamic_segment_multiple_physnets(self):
- data = {'network': {'name': 'net1',
- 'tenant_id': 'tenant_one'}}
+ data = {'network': {'name': 'net1'}}
network_req = self.new_create_request('networks', data)
network = self.deserialize(self.fmt,
network_req.get_response(self.api))
@@ -2947,8 +2962,7 @@ class TestMultiSegmentNetworks(Ml2PluginV2TestCase):
segment = {driver_api.NETWORK_TYPE: 'vlan',
driver_api.PHYSICAL_NETWORK: physnet_name}
- data = {'network': {'name': 'net1',
- 'tenant_id': 'tenant_one'}}
+ data = {'network': {'name': 'net1'}}
network_req = self.new_create_request('networks', data)
network = self.deserialize(self.fmt,
network_req.get_response(self.api))
@@ -2997,8 +3011,7 @@ class TestMultiSegmentNetworks(Ml2PluginV2TestCase):
self.assertEqual(1, len(allocs))
def test_allocate_release_dynamic_segment(self):
- data = {'network': {'name': 'net1',
- 'tenant_id': 'tenant_one'}}
+ data = {'network': {'name': 'net1'}}
network_req = self.new_create_request('networks', data)
network = self.deserialize(self.fmt,
network_req.get_response(self.api))
@@ -3023,9 +3036,8 @@ class TestMultiSegmentNetworks(Ml2PluginV2TestCase):
data = {'network': {'name': 'net1',
pnet.NETWORK_TYPE: 'vlan',
pnet.PHYSICAL_NETWORK: 'physnet1',
- pnet.SEGMENTATION_ID: 1,
- 'tenant_id': 'tenant_one'}}
- network_req = self.new_create_request('networks', data)
+ pnet.SEGMENTATION_ID: 1}}
+ network_req = self.new_create_request('networks', data, as_admin=True)
network = self.deserialize(self.fmt,
network_req.get_response(self.api))
self.assertEqual('vlan', network['network'][pnet.NETWORK_TYPE])
@@ -3036,9 +3048,8 @@ class TestMultiSegmentNetworks(Ml2PluginV2TestCase):
def test_fail_update_network_provider_attr(self):
data = {'network': {'name': 'net1',
pnet.NETWORK_TYPE: 'flat',
- pnet.PHYSICAL_NETWORK: 'physnet1',
- 'tenant_id': 'tenant_one'}}
- network_req = self.new_create_request('networks', data)
+ pnet.PHYSICAL_NETWORK: 'physnet1'}}
+ network_req = self.new_create_request('networks', data, as_admin=True)
network = self.deserialize(self.fmt,
network_req.get_response(self.api))
self.assertEqual('flat', network['network'][pnet.NETWORK_TYPE])
@@ -3048,7 +3059,8 @@ class TestMultiSegmentNetworks(Ml2PluginV2TestCase):
pnet.NETWORK_TYPE: 'flat',
pnet.PHYSICAL_NETWORK: 'update_physnet1'}}
network_req = self.new_update_request('networks', data,
- network['network']['id'])
+ network['network']['id'],
+ as_admin=True)
network = self.deserialize(self.fmt,
network_req.get_response(self.api))
self.assertIn('NeutronError', network)
@@ -3060,9 +3072,8 @@ class TestMultiSegmentNetworks(Ml2PluginV2TestCase):
def test_update_network_provider_attr_no_change(self):
data = {'network': {'name': 'net1',
pnet.NETWORK_TYPE: 'flat',
- pnet.PHYSICAL_NETWORK: 'physnet1',
- 'tenant_id': 'tenant_one'}}
- network_req = self.new_create_request('networks', data)
+ pnet.PHYSICAL_NETWORK: 'physnet1'}}
+ network_req = self.new_create_request('networks', data, as_admin=True)
network = self.deserialize(self.fmt,
network_req.get_response(self.api))
self.assertEqual('flat', network['network'][pnet.NETWORK_TYPE])
@@ -3072,7 +3083,8 @@ class TestMultiSegmentNetworks(Ml2PluginV2TestCase):
pnet.NETWORK_TYPE: 'flat',
pnet.PHYSICAL_NETWORK: 'physnet1'}}
network_req = self.new_update_request('networks', data,
- network['network']['id'])
+ network['network']['id'],
+ as_admin=True)
network = self.deserialize(self.fmt,
network_req.get_response(self.api))
self.assertEqual('updated-net1', network['network']['name'])
@@ -3082,9 +3094,8 @@ class TestMultiSegmentNetworks(Ml2PluginV2TestCase):
mpnet_apidef.SEGMENTS:
[{pnet.NETWORK_TYPE: 'vlan',
pnet.PHYSICAL_NETWORK: 'physnet1',
- pnet.SEGMENTATION_ID: 1}],
- 'tenant_id': 'tenant_one'}}
- net_req = self.new_create_request('networks', data)
+ pnet.SEGMENTATION_ID: 1}]}}
+ net_req = self.new_create_request('networks', data, as_admin=True)
network = self.deserialize(self.fmt, net_req.get_response(self.api))
self.assertEqual('vlan', network['network'][pnet.NETWORK_TYPE])
self.assertEqual('physnet1', network['network'][pnet.PHYSICAL_NETWORK])
@@ -3092,7 +3103,8 @@ class TestMultiSegmentNetworks(Ml2PluginV2TestCase):
self.assertNotIn(mpnet_apidef.SEGMENTS, network['network'])
# Tests get_network()
- net_req = self.new_show_request('networks', network['network']['id'])
+ net_req = self.new_show_request('networks', network['network']['id'],
+ as_admin=True)
network = self.deserialize(self.fmt, net_req.get_response(self.api))
self.assertEqual('vlan', network['network'][pnet.NETWORK_TYPE])
self.assertEqual('physnet1', network['network'][pnet.PHYSICAL_NETWORK])
@@ -3107,9 +3119,8 @@ class TestMultiSegmentNetworks(Ml2PluginV2TestCase):
pnet.SEGMENTATION_ID: 1},
{pnet.NETWORK_TYPE: 'vlan',
pnet.PHYSICAL_NETWORK: 'physnet2',
- pnet.SEGMENTATION_ID: 2}],
- 'tenant_id': 'tenant_one'}}
- network_req = self.new_create_request('networks', data)
+ pnet.SEGMENTATION_ID: 2}]}}
+ network_req = self.new_create_request('networks', data, as_admin=True)
network = self.deserialize(self.fmt,
network_req.get_response(self.api))
segments = network['network'][mpnet_apidef.SEGMENTS]
@@ -3121,7 +3132,8 @@ class TestMultiSegmentNetworks(Ml2PluginV2TestCase):
segments[segment_index][field])
# Tests get_network()
- net_req = self.new_show_request('networks', network['network']['id'])
+ net_req = self.new_show_request('networks', network['network']['id'],
+ as_admin=True)
network = self.deserialize(self.fmt, net_req.get_response(self.api))
segments = network['network'][mpnet_apidef.SEGMENTS]
for segment_index, segment in enumerate(data['network']
@@ -3154,9 +3166,8 @@ class TestMultiSegmentNetworks(Ml2PluginV2TestCase):
pnet.SEGMENTATION_ID: 1},
{pnet.NETWORK_TYPE: 'vlan',
pnet.PHYSICAL_NETWORK: 'physnet1',
- pnet.SEGMENTATION_ID: 1}],
- 'tenant_id': 'tenant_one'}}
- network_req = self.new_create_request('networks', data)
+ pnet.SEGMENTATION_ID: 1}]}}
+ network_req = self.new_create_request('networks', data, as_admin=True)
res = network_req.get_response(self.api)
self.assertEqual(400, res.status_int)
@@ -3166,11 +3177,10 @@ class TestMultiSegmentNetworks(Ml2PluginV2TestCase):
[{pnet.NETWORK_TYPE: 'vlan',
pnet.PHYSICAL_NETWORK: 'physnet1'},
{pnet.NETWORK_TYPE: 'vlan',
- pnet.PHYSICAL_NETWORK: 'physnet1'}],
- 'tenant_id': 'tenant_one'}}
+ pnet.PHYSICAL_NETWORK: 'physnet1'}]}}
retry_fixture = fixture.DBRetryErrorsFixture(max_retries=2)
retry_fixture.setUp()
- network_req = self.new_create_request('networks', data)
+ network_req = self.new_create_request('networks', data, as_admin=True)
res = network_req.get_response(self.api)
self.assertEqual(201, res.status_int)
@@ -3180,9 +3190,8 @@ class TestMultiSegmentNetworks(Ml2PluginV2TestCase):
'shared': False,
pnet.NETWORK_TYPE: 'vlan',
pnet.PHYSICAL_NETWORK: 'physnet1',
- pnet.SEGMENTATION_ID: 1,
- 'tenant_id': 'tenant_one'}}
- network_req = self.new_create_request('networks', data)
+ pnet.SEGMENTATION_ID: 1}}
+ network_req = self.new_create_request('networks', data, as_admin=True)
res = network_req.get_response(self.api)
network = self.deserialize(self.fmt, res)
network_id = network['network']['id']
@@ -3214,9 +3223,8 @@ class TestMultiSegmentNetworks(Ml2PluginV2TestCase):
'shared': False,
pnet.NETWORK_TYPE: 'vlan',
pnet.PHYSICAL_NETWORK: 'physnet1',
- pnet.SEGMENTATION_ID: 1,
- 'tenant_id': 'tenant_one'}}
- network_req = self.new_create_request('networks', data)
+ pnet.SEGMENTATION_ID: 1}}
+ network_req = self.new_create_request('networks', data, as_admin=True)
res = network_req.get_response(self.api)
network = self.deserialize(self.fmt, res)
network_id = network['network']['id']
@@ -3391,9 +3399,7 @@ class TestFaultyMechansimDriver(Ml2PluginV2FaultyDriverTestCase):
'create_network_postcommit',
side_effect=(exc.InvalidInput(
error_message=err_msg))):
- tenant_id = uuidutils.generate_uuid()
- data = {'network': {'name': 'net1',
- 'tenant_id': tenant_id}}
+ data = {'network': {'name': 'net1'}}
req = self.new_create_request('networks', data)
res = req.get_response(self.api)
self.assertEqual(400, res.status_int)
@@ -3402,7 +3408,7 @@ class TestFaultyMechansimDriver(Ml2PluginV2FaultyDriverTestCase):
error['NeutronError']['type'])
# Check the client can see the root cause of error.
self.assertIn(err_msg, error['NeutronError']['message'])
- query_params = "tenant_id=%s" % tenant_id
+ query_params = "tenant_id=%s" % self._tenant_id
nets = self._list('networks', query_params=query_params)
self.assertFalse(nets['networks'])
@@ -3414,8 +3420,7 @@ class TestFaultyMechansimDriver(Ml2PluginV2FaultyDriverTestCase):
with mock.patch.object(mech_logger.LoggerMechanismDriver,
'delete_network_postcommit') as dnp:
- data = {'network': {'name': 'net1',
- 'tenant_id': 'tenant_one'}}
+ data = {'network': {'name': 'net1'}}
network_req = self.new_create_request('networks', data)
network_res = network_req.get_response(self.api)
self.assertEqual(201, network_res.status_int)
@@ -3439,8 +3444,7 @@ class TestFaultyMechansimDriver(Ml2PluginV2FaultyDriverTestCase):
with mock.patch.object(mech_logger.LoggerMechanismDriver,
'update_network_postcommit') as unp:
- data = {'network': {'name': 'net1',
- 'tenant_id': 'tenant_one'}}
+ data = {'network': {'name': 'net1'}}
network_req = self.new_create_request('networks', data)
network_res = network_req.get_response(self.api)
self.assertEqual(201, network_res.status_int)
@@ -3478,8 +3482,6 @@ class TestFaultyMechansimDriver(Ml2PluginV2FaultyDriverTestCase):
'cidr': '10.0.20.0/24',
'ip_version': constants.IP_VERSION_4,
'name': 'subnet1',
- 'tenant_id':
- network['network']['tenant_id'],
'gateway_ip': '10.0.20.1'}}
req = self.new_create_request('subnets', data)
res = req.get_response(self.api)
@@ -3507,8 +3509,6 @@ class TestFaultyMechansimDriver(Ml2PluginV2FaultyDriverTestCase):
'cidr': '10.0.20.0/24',
'ip_version': constants.IP_VERSION_4,
'name': 'subnet1',
- 'tenant_id':
- network['network']['tenant_id'],
'gateway_ip': '10.0.20.1'}}
subnet_req = self.new_create_request('subnets', data)
subnet_res = subnet_req.get_response(self.api)
@@ -3540,8 +3540,6 @@ class TestFaultyMechansimDriver(Ml2PluginV2FaultyDriverTestCase):
'cidr': '10.0.20.0/24',
'ip_version': constants.IP_VERSION_4,
'name': 'subnet1',
- 'tenant_id':
- network['network']['tenant_id'],
'gateway_ip': '10.0.20.1'}}
subnet_req = self.new_create_request('subnets', data)
subnet_res = subnet_req.get_response(self.api)
@@ -3576,8 +3574,6 @@ class TestFaultyMechansimDriver(Ml2PluginV2FaultyDriverTestCase):
with self.network() as network:
net_id = network['network']['id']
data = {'port': {'network_id': net_id,
- 'tenant_id':
- network['network']['tenant_id'],
'name': 'port1',
'admin_state_up': 1,
'fixed_ips': []}}
@@ -3603,8 +3599,6 @@ class TestFaultyMechansimDriver(Ml2PluginV2FaultyDriverTestCase):
with self.network() as network:
data = {'port': {'network_id': network['network']['id'],
- 'tenant_id':
- network['network']['tenant_id'],
'name': 'port1',
'admin_state_up': 1,
'fixed_ips': []}}
@@ -3652,8 +3646,6 @@ class TestFaultyMechansimDriver(Ml2PluginV2FaultyDriverTestCase):
subnet_id = subnet['subnet']['id']
data = {'port': {
'network_id': network['network']['id'],
- 'tenant_id':
- network['network']['tenant_id'],
'name': 'port1',
'device_owner':
constants.DEVICE_OWNER_DVR_INTERFACE,
@@ -3688,7 +3680,7 @@ class TestML2PluggableIPAM(test_ipam.UseIpamMixin, TestMl2SubnetsV2):
request.subnet_cidr = netaddr.IPNetwork(cidr)
request.allocation_pools = []
request.gateway_ip = netaddr.IPAddress(gateway_ip)
- request.tenant_id = uuidutils.generate_uuid()
+ request.tenant_id = self._tenant_id
ipam_subnet = mock.Mock()
ipam_subnet.get_details.return_value = request
@@ -3907,7 +3899,8 @@ class TestML2Segments(Ml2PluginV2TestCase):
driver_api.PHYSICAL_NETWORK: physical_network,
driver_api.SEGMENTATION_ID: segmentation_id}
- with self.network(**{'arg_list': (mpnet_apidef.SEGMENTS, ),
+ with self.network(as_admin=True,
+ **{'arg_list': (mpnet_apidef.SEGMENTS, ),
mpnet_apidef.SEGMENTS: network_segments})\
as test_network:
multisegment_network = test_network['network']
@@ -3939,7 +3932,8 @@ class TestML2Segments(Ml2PluginV2TestCase):
driver_api.PHYSICAL_NETWORK: physical_network,
driver_api.SEGMENTATION_ID: segmentation_id}
- with self.network(**{'arg_list': (mpnet_apidef.SEGMENTS, ),
+ with self.network(as_admin=True,
+ **{'arg_list': (mpnet_apidef.SEGMENTS, ),
mpnet_apidef.SEGMENTS: network_segments})\
as test_network:
multisegment_network = test_network['network']
@@ -3965,7 +3959,7 @@ class TestML2Segments(Ml2PluginV2TestCase):
pnet.PHYSICAL_NETWORK: physical_network,
pnet.SEGMENTATION_ID: segmentation_id}
- with self.network() as test_network:
+ with self.network(as_admin=True) as test_network:
# network() implicitly creates a single segment
single_segment_network = test_network['network']
observed_network = self.driver._build_original_network(
diff --git a/neutron/tests/unit/plugins/ml2/test_port_binding.py b/neutron/tests/unit/plugins/ml2/test_port_binding.py
index cf5db88eff..dcca1b6f62 100644
--- a/neutron/tests/unit/plugins/ml2/test_port_binding.py
+++ b/neutron/tests/unit/plugins/ml2/test_port_binding.py
@@ -78,7 +78,8 @@ class PortBindingTestCase(test_plugin.NeutronDbPluginV2TestCase):
mac_address = 'aa:aa:aa:aa:aa:aa'
host_arg = {portbindings.HOST_ID: host,
'mac_address': mac_address}
- with self.port(name='name', arg_list=(portbindings.HOST_ID,),
+ with self.port(name='name', is_admin=True,
+ arg_list=(portbindings.HOST_ID,),
**host_arg) as port:
self._check_response(port['port'], vif_type, has_port_filter,
bound, status)
@@ -152,12 +153,12 @@ class PortBindingTestCase(test_plugin.NeutronDbPluginV2TestCase):
update_body = {'name': 'test_update'}
if new_host is not None:
update_body[portbindings.HOST_ID] = new_host
- with self.port(name='name', arg_list=(portbindings.HOST_ID,),
+ with self.port(name='name', is_admin=True,
+ arg_list=(portbindings.HOST_ID,),
**host_arg) as port:
- neutron_context = context.get_admin_context()
updated_port = self._update('ports', port['port']['id'],
{'port': update_body},
- neutron_context=neutron_context)
+ as_admin=True)
port_data = updated_port['port']
if new_host is not None:
self.assertEqual(new_host,
@@ -190,7 +191,7 @@ class PortBindingTestCase(test_plugin.NeutronDbPluginV2TestCase):
ctx = context.get_admin_context()
plugin = directory.get_plugin()
host_id = {portbindings.HOST_ID: 'host1'}
- with self.port(**host_id) as port:
+ with self.port(is_admin=True, **host_id) as port:
# Since the port is DOWN at first
# It's necessary to make its status ACTIVE for this test
plugin.update_port_status(ctx, port['port']['id'],
@@ -221,7 +222,8 @@ class PortBindingTestCase(test_plugin.NeutronDbPluginV2TestCase):
def test_distributed_binding(self):
ctx = context.get_admin_context()
- with self.port(device_owner=const.DEVICE_OWNER_DVR_INTERFACE) as port:
+ with self.port(is_admin=True,
+ device_owner=const.DEVICE_OWNER_DVR_INTERFACE) as port:
port_id = port['port']['id']
# Verify port's VIF type and status.
@@ -235,7 +237,7 @@ class PortBindingTestCase(test_plugin.NeutronDbPluginV2TestCase):
'device_id': 'router1'}})
# Get port and verify VIF type and status unchanged.
- port = self._show('ports', port_id)
+ port = self._show('ports', port_id, as_admin=True)
self.assertEqual(portbindings.VIF_TYPE_DISTRIBUTED,
port['port'][portbindings.VIF_TYPE])
self.assertEqual('DOWN', port['port']['status'])
@@ -247,7 +249,7 @@ class PortBindingTestCase(test_plugin.NeutronDbPluginV2TestCase):
self.assertEqual('local', details['network_type'])
# Get port and verify VIF type and changed status.
- port = self._show('ports', port_id)
+ port = self._show('ports', port_id, as_admin=True)
self.assertEqual(portbindings.VIF_TYPE_DISTRIBUTED,
port['port'][portbindings.VIF_TYPE])
self.assertEqual('BUILD', port['port']['status'])
@@ -258,7 +260,7 @@ class PortBindingTestCase(test_plugin.NeutronDbPluginV2TestCase):
host='host-ovs-no_filter')
# Get port and verify VIF type and changed status.
- port = self._show('ports', port_id)
+ port = self._show('ports', port_id, as_admin=True)
self.assertEqual(portbindings.VIF_TYPE_DISTRIBUTED,
port['port'][portbindings.VIF_TYPE])
self.assertEqual('ACTIVE', port['port']['status'])
@@ -269,7 +271,7 @@ class PortBindingTestCase(test_plugin.NeutronDbPluginV2TestCase):
host='host-ovs-no_filter')
# Get port and verify VIF type and changed status.
- port = self._show('ports', port_id)
+ port = self._show('ports', port_id, as_admin=True)
self.assertEqual(portbindings.VIF_TYPE_DISTRIBUTED,
port['port'][portbindings.VIF_TYPE])
self.assertEqual('DOWN', port['port']['status'])
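# Editor's note: test_distributed_binding above asserts this status
# sequence for a DVR port as the test drives it through binding, device
# fetch, activation on a host, and deactivation (statuses copied from the
# assertions, in order):
DVR_PORT_STATUS_SEQUENCE = ['DOWN', 'BUILD', 'ACTIVE', 'DOWN']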
@@ -382,7 +384,8 @@ class ExtendedPortBindingTestCase(test_plugin.NeutronDbPluginV2TestCase):
data = {'binding': kwargs}
binding_req = self.new_update_request('ports', data, port_id, fmt,
subresource='bindings',
- sub_id=host)
+ sub_id=host,
+ as_admin=True)
return binding_req.get_response(self.api)
def _do_update_port_binding(self, fmt, port_id, host, **kwargs):
@@ -457,7 +460,8 @@ class ExtendedPortBindingTestCase(test_plugin.NeutronDbPluginV2TestCase):
def test_create_duplicate_port_binding(self):
device_owner = '%s%s' % (const.DEVICE_OWNER_COMPUTE_PREFIX, 'nova')
host_arg = {portbindings.HOST_ID: self.host}
- with self.port(device_owner=device_owner,
+ with self.port(is_admin=True,
+ device_owner=device_owner,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port:
response = self._create_port_binding(self.fmt, port['port']['id'],
@@ -540,7 +544,7 @@ class ExtendedPortBindingTestCase(test_plugin.NeutronDbPluginV2TestCase):
active_binding = self._activate_port_binding(
port['id'], self.host, raw_response=False)
self._assert_bound_port_binding(active_binding)
- updated_port = self._show('ports', port['id'])['port']
+ updated_port = self._show('ports', port['id'], as_admin=True)['port']
updated_bound_drivers = updated_port[portbindings.VIF_DETAILS].pop(
portbindings.VIF_DETAILS_BOUND_DRIVERS)
self.assertEqual({'0': 'test'}, updated_bound_drivers)
@@ -711,7 +715,8 @@ class ExtendedPortBindingTestCase(test_plugin.NeutronDbPluginV2TestCase):
with mock.patch.object(
mechanism_test.TestMechanismDriver, '_check_port_context'
):
- req = self.new_update_request('ports', update_body, port_id)
+ req = self.new_update_request('ports', update_body, port_id,
+ as_admin=True)
self.assertEqual(200, req.get_response(self.api).status_int)
def test_bind_non_pf_port_with_mac_port_not_updated(self):
@@ -851,7 +856,8 @@ class ExtendedPortBindingTestCase(test_plugin.NeutronDbPluginV2TestCase):
with mock.patch.object(
mechanism_test.TestMechanismDriver, '_check_port_context'
):
- req = self.new_update_request('ports', update_body, port['id'])
+ req = self.new_update_request('ports', update_body, port['id'],
+ as_admin=True)
self.assertEqual(200, req.get_response(self.api).status_int)
# Neutron expected to reset the MAC to a generated one so that the
diff --git a/neutron/tests/unit/plugins/ml2/test_security_group.py b/neutron/tests/unit/plugins/ml2/test_security_group.py
index 3de28b6294..494059abe5 100644
--- a/neutron/tests/unit/plugins/ml2/test_security_group.py
+++ b/neutron/tests/unit/plugins/ml2/test_security_group.py
@@ -159,7 +159,8 @@ class TestMl2SecurityGroups(Ml2SecurityGroupsTestCase,
self.assertFalse(self.was_active)
self._delete(
'security-groups',
- self._list('security-groups')['security_groups'][0]['id'])
+ self._list('security-groups')['security_groups'][0]['id'],
+ as_admin=True)
with self.port(subnet=s):
self.assertFalse(self.was_active)
diff --git a/neutron/tests/unit/plugins/ml2/test_tracked_resources.py b/neutron/tests/unit/plugins/ml2/test_tracked_resources.py
index 264dd8e908..5d5d1c1dd5 100644
--- a/neutron/tests/unit/plugins/ml2/test_tracked_resources.py
+++ b/neutron/tests/unit/plugins/ml2/test_tracked_resources.py
@@ -233,9 +233,8 @@ class TestTrackedResources(BaseTestTrackedResources):
def test_list_networks_clears_dirty(self):
self._test_init('network')
- net = self._make_network('json', 'meh', True)['network']
- self.ctx.project_id = net['project_id']
- self._list('networks', neutron_context=self.ctx)
+ self._make_network('json', 'meh', True)['network']
+ self._list('networks', as_admin=True)
self._verify_dirty_bit('network', expected_value=False)
def test_create_delete_port_marks_dirty(self):
@@ -252,9 +251,8 @@ class TestTrackedResources(BaseTestTrackedResources):
def test_list_ports_clears_dirty(self):
self._test_init('port')
net = self._make_network('json', 'meh', True)['network']
- port = self._make_port('json', net['id'])['port']
- self.ctx.project_id = port['project_id']
- self._list('ports', neutron_context=self.ctx)
+ self._make_port('json', net['id'])['port']
+ self._list('ports', as_admin=True)
self._verify_dirty_bit('port', expected_value=False)
def test_create_delete_subnet_marks_dirty(self):
@@ -286,17 +284,14 @@ class TestTrackedResources(BaseTestTrackedResources):
def test_list_subnets_clears_dirty(self):
self._test_init('subnet')
net = self._make_network('json', 'meh', True)
- subnet = self._make_subnet('json', net, '10.0.0.1',
- '10.0.0.0/24')['subnet']
- self.ctx.project_id = subnet['project_id']
- self._list('subnets', neutron_context=self.ctx)
+ self._make_subnet('json', net, '10.0.0.1', '10.0.0.0/24')['subnet']
+ self._list('subnets', as_admin=True)
self._verify_dirty_bit('subnet', expected_value=False)
def test_create_delete_subnetpool_marks_dirty(self):
self._test_init('subnetpool')
pool = self._make_subnetpool('json', ['10.0.0.0/8'],
- name='meh',
- tenant_id=self._project_id)['subnetpool']
+ name='meh')['subnetpool']
self._verify_dirty_bit('subnetpool')
# Clear the dirty bit
quota_db_api.set_quota_usage_dirty(
@@ -306,17 +301,14 @@ class TestTrackedResources(BaseTestTrackedResources):
def test_list_subnetpools_clears_dirty(self):
self._test_init('subnetpool')
- pool = self._make_subnetpool('json', ['10.0.0.0/8'],
- name='meh',
- tenant_id=self._project_id)['subnetpool']
- self.ctx.project_id = pool['project_id']
- self._list('subnetpools', neutron_context=self.ctx)
+ self._make_subnetpool('json', ['10.0.0.0/8'], name='meh')['subnetpool']
+ self._list('subnetpools', as_admin=True)
self._verify_dirty_bit('subnetpool', expected_value=False)
def test_create_delete_securitygroup_marks_dirty(self):
self._test_init('security_group')
sec_group = self._make_security_group(
- 'json', 'meh', 'meh', tenant_id=self._project_id)['security_group']
+ 'json', 'meh', 'meh')['security_group']
self._verify_dirty_bit('security_group')
# Clear the dirty bit
quota_db_api.set_quota_usage_dirty(
@@ -327,17 +319,16 @@ class TestTrackedResources(BaseTestTrackedResources):
def test_list_securitygroups_clears_dirty(self):
self._test_init('security_group')
self._make_security_group(
- 'json', 'meh', 'meh', tenant_id=self._project_id)['security_group']
- self.ctx.project_id = self._project_id
- self._list('security-groups', neutron_context=self.ctx)
+            'json', 'meh', 'meh')['security_group']
+ self._list('security-groups', as_admin=True)
self._verify_dirty_bit('security_group', expected_value=False)
def test_create_delete_securitygrouprule_marks_dirty(self):
self._test_init('security_group_rule')
sec_group = self._make_security_group(
- 'json', 'meh', 'meh', tenant_id=self._project_id)['security_group']
+ 'json', 'meh', 'meh')['security_group']
rule_req = self._build_security_group_rule(
- sec_group['id'], 'ingress', 'TCP', tenant_id=self._project_id)
+ sec_group['id'], 'ingress', 'TCP')
sec_group_rule = self._make_security_group_rule(
'json', rule_req)['security_group_rule']
self._verify_dirty_bit('security_group_rule')
@@ -349,10 +340,8 @@ class TestTrackedResources(BaseTestTrackedResources):
def test_list_securitygrouprules_clears_dirty(self):
self._test_init('security_group_rule')
- self._make_security_group(
- 'json', 'meh', 'meh', tenant_id=self._project_id)['security_group']
+ self._make_security_group('json', 'meh', 'meh')['security_group']
# As the security group create operation also creates 2 security group
# rules there is no need to explicitly create any rule
- self.ctx.project_id = self._project_id
- self._list('security-group-rules', neutron_context=self.ctx)
+ self._list('security-group-rules', as_admin=True)
self._verify_dirty_bit('security_group_rule', expected_value=False)
diff --git a/neutron/tests/unit/scheduler/test_l3_agent_scheduler.py b/neutron/tests/unit/scheduler/test_l3_agent_scheduler.py
index d5788adf5b..db41501ef9 100644
--- a/neutron/tests/unit/scheduler/test_l3_agent_scheduler.py
+++ b/neutron/tests/unit/scheduler/test_l3_agent_scheduler.py
@@ -209,13 +209,13 @@ class L3SchedulerBaseMixin(object):
@contextlib.contextmanager
def router_with_ext_gw(self, name='router1', admin_state_up=True,
- fmt=None, tenant_id=uuidutils.generate_uuid(),
+ fmt=None, tenant_id=None,
external_gateway_info=None,
- subnet=None, set_context=False,
- **kwargs):
+ subnet=None, **kwargs):
+ tenant_id = tenant_id or self._tenant_id
router = self._make_router(fmt or self.fmt, tenant_id, name,
admin_state_up, external_gateway_info,
- set_context, **kwargs)
+ **kwargs)
self._add_external_gateway_to_router(
router['router']['id'],
subnet['subnet']['network_id'])
@@ -1380,6 +1380,7 @@ class L3DvrSchedulerTestCase(L3SchedulerBaseMixin,
subnet_ids = []
subnet_ids.append(subnet['subnet']['id'])
with self.port(subnet=subnet,
+ is_admin=True,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=('admin_state_up',
portbindings.PROFILE,), **host_args):
@@ -1640,6 +1641,6 @@ class L3_HA_scheduler_db_mixinTestCase(L3HATestCaseMixin):
router2 = self._create_ha_router()
router3 = self._create_ha_router(ha=False)
result = self.plugin.get_routers_l3_agents_count(self.adminContext)
self.assertEqual(3, len(result))
check_result = [(router['id'], agents) for router, agents in result]
@@ -1647,6 +1650,29 @@ class L3_HA_scheduler_db_mixinTestCase(L3HATestCaseMixin):
self.assertIn((router2['id'], 2), check_result)
self.assertIn((router3['id'], 0), check_result)
+ result = self.plugin.get_routers_l3_agents_count(self.adminContext,
+ ha=True, less_than=3)
+ check_result = [(router['id'], agents) for router, agents in result]
+ self.assertIn((router2['id'], 2), check_result)
+
+ def test_get_routers_not_ha_l3_agents_count(self):
+ router1 = self._create_ha_router(ha=False)
+ router2 = self._create_ha_router(ha=False)
+ self.plugin.schedule_router(self.adminContext, router1['id'],
+ candidates=[self.agent1])
+ result = self.plugin.get_routers_l3_agents_count(self.adminContext)
+
+ self.assertEqual(2, len(result))
+ check_result = [(router['id'], agents) for router, agents in result]
+ self.assertIn((router1['id'], 1), check_result)
+ self.assertIn((router2['id'], 0), check_result)
+
+ result = self.plugin.get_routers_l3_agents_count(self.adminContext,
+ less_than=1)
+ check_result = [(router['id'], agents) for router, agents in result]
+ self.assertIn((router2['id'], 0), check_result)
+ self.assertNotIn((router1['id'], 1), check_result)
+
def test_get_ordered_l3_agents_by_num_routers(self):
# Mock scheduling so that the test can control it explicitly
mock.patch.object(l3_hamode_db.L3_HA_NAT_db_mixin,
diff --git a/neutron/tests/unit/services/metering/test_metering_plugin.py b/neutron/tests/unit/services/metering/test_metering_plugin.py
index c83d21a94e..33961a20c6 100644
--- a/neutron/tests/unit/services/metering/test_metering_plugin.py
+++ b/neutron/tests/unit/services/metering/test_metering_plugin.py
@@ -17,7 +17,6 @@ from unittest import mock
from neutron_lib.agent import topics
from neutron_lib.api.definitions import metering as metering_apidef
from neutron_lib import context
-from neutron_lib.db import api as db_api
from neutron_lib.plugins import constants
from neutron_lib.plugins import directory
from neutron_lib.tests import tools
@@ -60,20 +59,6 @@ class MeteringTestExtensionManager(object):
return []
-# TODO(akamyshnikova):we need this temporary FakeContext class while Context
-# checking for existence of session attribute.
-class FakeContext(context.ContextBaseWithSession):
- def __init__(self, *args, **kwargs):
- super(FakeContext, self).__init__(*args, **kwargs)
- self._session = None
-
- @property
- def session(self):
- if self._session is None:
- self._session = db_api.get_writer_session()
- return self._session
-
-
class TestMeteringPlugin(test_db_base_plugin_v2.NeutronDbPluginV2TestCase,
test_l3.L3NatTestCaseMixin,
test_metering_db.MeteringPluginDbTestCaseMixin):
@@ -97,11 +82,7 @@ class TestMeteringPlugin(test_db_base_plugin_v2.NeutronDbPluginV2TestCase,
self.uuid_patch = mock.patch(uuid, return_value=self.uuid)
self.mock_uuid = self.uuid_patch.start()
- self.project_id = 'a7e61382-47b8-4d40-bae3-f95981b5637b'
- self.ctx = FakeContext('', self.project_id, is_admin=True)
- self.context_patch = mock.patch('neutron_lib.context.Context',
- return_value=self.ctx)
- self.mock_context = self.context_patch.start()
+ self.ctx = context.Context('', self._tenant_id).elevated()
self.topic = topics.METERING_AGENT
@@ -159,7 +140,7 @@ class TestMeteringPlugin(test_db_base_plugin_v2.NeutronDbPluginV2TestCase,
'gw_port_id': None,
'admin_state_up': True,
'distributed': False,
- 'project_id': self.project_id,
+ 'project_id': self._tenant_id,
'_metering_labels': [
{'rules': [],
'id': self.uuid, 'shared': False,
@@ -171,11 +152,9 @@ class TestMeteringPlugin(test_db_base_plugin_v2.NeutronDbPluginV2TestCase,
with self.router(name='router2', tenant_id=tenant_id_2,
set_context=True):
self.mock_uuid.return_value = self.uuid
- with self.router(name='router1', tenant_id=self.project_id,
- set_context=True):
- with self.metering_label(tenant_id=self.project_id,
- set_context=True):
- self.mock_add.assert_called_with(self.ctx, expected)
+ with self.router(name='router1'):
+ with self.metering_label():
+ self.mock_add.assert_called_with(mock.ANY, expected)
def test_add_metering_label_shared_rpc_call(self):
second_uuid = 'e27fe2df-376e-4ac7-ae13-92f050a21f84'
@@ -184,7 +163,7 @@ class TestMeteringPlugin(test_db_base_plugin_v2.NeutronDbPluginV2TestCase,
'gw_port_id': None,
'admin_state_up': True,
'distributed': False,
- 'project_id': self.project_id,
+ 'project_id': self._tenant_id,
'_metering_labels': [
{'rules': [],
'id': self.uuid, 'shared': False,
@@ -195,14 +174,11 @@ class TestMeteringPlugin(test_db_base_plugin_v2.NeutronDbPluginV2TestCase,
'id': self.uuid}]
tenant_id_2 = '8a268a58-1610-4890-87e0-07abb8231206'
- with self.router(name='router1', tenant_id=self.project_id,
- shared=True, set_context=True):
- with self.metering_label(tenant_id=self.project_id,
- set_context=True):
+ with self.router(name='router1', shared=True):
+ with self.metering_label():
self.mock_uuid.return_value = second_uuid
- with self.metering_label(tenant_id=tenant_id_2, shared=True,
- set_context=True):
- self.mock_add.assert_called_with(self.ctx, expected)
+ with self.metering_label(tenant_id=tenant_id_2, shared=True):
+ self.mock_add.assert_called_with(mock.ANY, expected)
def test_remove_metering_label_rpc_call(self):
expected = [{'status': 'ACTIVE',
@@ -210,20 +186,20 @@ class TestMeteringPlugin(test_db_base_plugin_v2.NeutronDbPluginV2TestCase,
'gw_port_id': None,
'admin_state_up': True,
'distributed': False,
- 'project_id': self.project_id,
+ 'project_id': self._tenant_id,
'_metering_labels': [
{'rules': [],
'id': self.uuid, 'shared': False,
'name': 'label'}],
'id': self.uuid}]
- with self.router(tenant_id=self.project_id, set_context=True):
- with self.metering_label(tenant_id=self.project_id,
- set_context=True) as label:
- self.mock_add.assert_called_with(self.ctx, expected)
+ with self.router():
+ with self.metering_label() as label:
+ self.mock_add.assert_called_with(mock.ANY, expected)
self._delete('metering-labels',
- label['metering_label']['id'])
- self.mock_remove.assert_called_with(self.ctx, expected)
+ label['metering_label']['id'],
+ as_admin=True)
+ self.mock_remove.assert_called_with(mock.ANY, expected)
def test_remove_one_metering_label_rpc_call(self):
second_uuid = 'e27fe2df-376e-4ac7-ae13-92f050a21f84'
@@ -232,7 +208,7 @@ class TestMeteringPlugin(test_db_base_plugin_v2.NeutronDbPluginV2TestCase,
'gw_port_id': None,
'admin_state_up': True,
'distributed': False,
- 'project_id': self.project_id,
+ 'project_id': self._tenant_id,
'_metering_labels': [
{'rules': [],
'id': self.uuid, 'shared': False,
@@ -246,23 +222,22 @@ class TestMeteringPlugin(test_db_base_plugin_v2.NeutronDbPluginV2TestCase,
'gw_port_id': None,
'admin_state_up': True,
'distributed': False,
- 'project_id': self.project_id,
+ 'project_id': self._tenant_id,
'_metering_labels': [
{'rules': [],
'id': second_uuid, 'shared': False,
'name': 'label'}],
'id': self.uuid}]
- with self.router(tenant_id=self.project_id, set_context=True):
- with self.metering_label(tenant_id=self.project_id,
- set_context=True):
+ with self.router():
+ with self.metering_label():
self.mock_uuid.return_value = second_uuid
- with self.metering_label(tenant_id=self.project_id,
- set_context=True) as label:
- self.mock_add.assert_called_with(self.ctx, expected_add)
+ with self.metering_label() as label:
+ self.mock_add.assert_called_with(mock.ANY, expected_add)
self._delete('metering-labels',
- label['metering_label']['id'])
- self.mock_remove.assert_called_with(self.ctx, expected_remove)
+ label['metering_label']['id'],
+ as_admin=True)
+ self.mock_remove.assert_called_with(mock.ANY, expected_remove)
def test_add_and_remove_metering_label_rule_rpc_call(self):
second_uuid = 'e27fe2df-376e-4ac7-ae13-92f050a21f84'
@@ -271,7 +246,7 @@ class TestMeteringPlugin(test_db_base_plugin_v2.NeutronDbPluginV2TestCase,
'gw_port_id': None,
'admin_state_up': True,
'distributed': False,
- 'project_id': self.project_id,
+ 'project_id': self._tenant_id,
'_metering_labels': [
{'rule': {
'remote_ip_prefix':
@@ -291,7 +266,7 @@ class TestMeteringPlugin(test_db_base_plugin_v2.NeutronDbPluginV2TestCase,
'gw_port_id': None,
'admin_state_up': True,
'distributed': False,
- 'project_id': self.project_id,
+ 'project_id': self._tenant_id,
'_metering_labels': [
{'rule': {
'remote_ip_prefix':
@@ -307,16 +282,16 @@ class TestMeteringPlugin(test_db_base_plugin_v2.NeutronDbPluginV2TestCase,
'id': self.uuid}]
remote_ip_prefix = {'remote_ip_prefix': '10.0.0.0/24'}
- with self.router(tenant_id=self.project_id, set_context=True):
- with self.metering_label(tenant_id=self.project_id,
- set_context=True) as label:
+ with self.router():
+ with self.metering_label() as label:
la = label['metering_label']
self.mock_uuid.return_value = second_uuid
with self.metering_label_rule(la['id'], **remote_ip_prefix):
- self.mock_add_rule.assert_called_with(self.ctx,
+ self.mock_add_rule.assert_called_with(mock.ANY,
expected_add)
- self._delete('metering-label-rules', second_uuid)
- self.mock_remove_rule.assert_called_with(self.ctx,
+ self._delete('metering-label-rules', second_uuid,
+ as_admin=True)
+ self.mock_remove_rule.assert_called_with(mock.ANY,
expected_del)
def test_add_and_remove_metering_label_rule_source_ip_only(self):
@@ -326,7 +301,7 @@ class TestMeteringPlugin(test_db_base_plugin_v2.NeutronDbPluginV2TestCase,
'gw_port_id': None,
'admin_state_up': True,
'distributed': False,
- 'project_id': self.project_id,
+ 'project_id': self._tenant_id,
'_metering_labels': [
{'rule': {
'source_ip_prefix':
@@ -346,7 +321,7 @@ class TestMeteringPlugin(test_db_base_plugin_v2.NeutronDbPluginV2TestCase,
'gw_port_id': None,
'admin_state_up': True,
'distributed': False,
- 'project_id': self.project_id,
+ 'project_id': self._tenant_id,
'_metering_labels': [
{'rule': {
'source_ip_prefix':
@@ -362,17 +337,17 @@ class TestMeteringPlugin(test_db_base_plugin_v2.NeutronDbPluginV2TestCase,
'id': self.uuid}]
source_ip_prefix = {'source_ip_prefix': '10.0.0.0/24'}
- with self.router(tenant_id=self.project_id, set_context=True):
- with self.metering_label(tenant_id=self.project_id,
- set_context=True) as label:
+ with self.router():
+ with self.metering_label() as label:
la = label['metering_label']
self.mock_uuid.return_value = second_uuid
with self.metering_label_rule(la['id'],
**source_ip_prefix):
- self.mock_add_rule.assert_called_with(self.ctx,
+ self.mock_add_rule.assert_called_with(mock.ANY,
expected_add)
- self._delete('metering-label-rules', second_uuid)
- self.mock_remove_rule.assert_called_with(self.ctx,
+ self._delete('metering-label-rules', second_uuid,
+ as_admin=True)
+ self.mock_remove_rule.assert_called_with(mock.ANY,
expected_del)
def test_add_and_remove_metering_label_rule_dest_ip_only(self):
@@ -382,7 +357,7 @@ class TestMeteringPlugin(test_db_base_plugin_v2.NeutronDbPluginV2TestCase,
'gw_port_id': None,
'admin_state_up': True,
'distributed': False,
- 'project_id': self.project_id,
+ 'project_id': self._tenant_id,
'_metering_labels': [
{'rule': {
'destination_ip_prefix':
@@ -402,7 +377,7 @@ class TestMeteringPlugin(test_db_base_plugin_v2.NeutronDbPluginV2TestCase,
'gw_port_id': None,
'admin_state_up': True,
'distributed': False,
- 'project_id': self.project_id,
+ 'project_id': self._tenant_id,
'_metering_labels': [
{'rule': {
'destination_ip_prefix':
@@ -418,17 +393,17 @@ class TestMeteringPlugin(test_db_base_plugin_v2.NeutronDbPluginV2TestCase,
'id': self.uuid}]
source_ip_prefix = {'destination_ip_prefix': '10.0.0.0/24'}
- with self.router(tenant_id=self.project_id, set_context=True):
- with self.metering_label(tenant_id=self.project_id,
- set_context=True) as label:
+ with self.router():
+ with self.metering_label() as label:
la = label['metering_label']
self.mock_uuid.return_value = second_uuid
with self.metering_label_rule(la['id'],
**source_ip_prefix):
- self.mock_add_rule.assert_called_with(self.ctx,
+ self.mock_add_rule.assert_called_with(mock.ANY,
expected_add)
- self._delete('metering-label-rules', second_uuid)
- self.mock_remove_rule.assert_called_with(self.ctx,
+ self._delete('metering-label-rules', second_uuid,
+ as_admin=True)
+ self.mock_remove_rule.assert_called_with(mock.ANY,
expected_del)
def test_add_and_remove_metering_label_rule_src_and_dest_ip_only(self):
@@ -438,7 +413,7 @@ class TestMeteringPlugin(test_db_base_plugin_v2.NeutronDbPluginV2TestCase,
'gw_port_id': None,
'admin_state_up': True,
'distributed': False,
- 'project_id': self.project_id,
+ 'project_id': self._tenant_id,
'_metering_labels': [
{'rule': {
'destination_ip_prefix':
@@ -459,7 +434,7 @@ class TestMeteringPlugin(test_db_base_plugin_v2.NeutronDbPluginV2TestCase,
'gw_port_id': None,
'admin_state_up': True,
'distributed': False,
- 'project_id': self.project_id,
+ 'project_id': self._tenant_id,
'_metering_labels': [
{'rule': {
'destination_ip_prefix':
@@ -477,23 +452,22 @@ class TestMeteringPlugin(test_db_base_plugin_v2.NeutronDbPluginV2TestCase,
ip_prefixes = {'source_ip_prefix': '10.0.0.0/24',
'destination_ip_prefix': '0.0.0.0/0'}
- with self.router(tenant_id=self.project_id, set_context=True):
- with self.metering_label(tenant_id=self.project_id,
- set_context=True) as label:
+ with self.router():
+ with self.metering_label() as label:
la = label['metering_label']
self.mock_uuid.return_value = second_uuid
with self.metering_label_rule(la['id'],
**ip_prefixes):
- self.mock_add_rule.assert_called_with(self.ctx,
+ self.mock_add_rule.assert_called_with(mock.ANY,
expected_add)
- self._delete('metering-label-rules', second_uuid)
- self.mock_remove_rule.assert_called_with(self.ctx,
+ self._delete('metering-label-rules', second_uuid,
+ as_admin=True)
+ self.mock_remove_rule.assert_called_with(mock.ANY,
expected_del)
def test_add_and_remove_metering_label_rule_src_and_remote_ip(self):
- with self.router(tenant_id=self.project_id, set_context=True):
- with self.metering_label(tenant_id=self.project_id,
- set_context=True) as label:
+ with self.router():
+ with self.metering_label() as label:
la = label['metering_label']
res = self._create_metering_label_rule(
@@ -514,9 +488,8 @@ class TestMeteringPlugin(test_db_base_plugin_v2.NeutronDbPluginV2TestCase,
"NeutronError"]["message"])
def test_add_and_remove_metering_label_rule_dest_and_remote_ip(self):
- with self.router(tenant_id=self.project_id, set_context=True):
- with self.metering_label(tenant_id=self.project_id,
- set_context=True) as label:
+ with self.router():
+ with self.metering_label() as label:
la = label['metering_label']
res = self._create_metering_label_rule(
@@ -537,9 +510,8 @@ class TestMeteringPlugin(test_db_base_plugin_v2.NeutronDbPluginV2TestCase,
"NeutronError"]["message"])
def test_add_and_remove_metering_label_rule_no_ip_prefix_entered(self):
- with self.router(tenant_id=self.project_id, set_context=True):
- with self.metering_label(tenant_id=self.project_id,
- set_context=True) as label:
+ with self.router():
+ with self.metering_label() as label:
la = label['metering_label']
res = self._create_metering_label_rule(
@@ -567,12 +539,15 @@ class TestMeteringPlugin(test_db_base_plugin_v2.NeutronDbPluginV2TestCase,
# 1b9e9a6c2ccf7f9bc06429f53e5126f356ae3d4a/neutron/api/v2/base.py#L563
self.ctx.GUARD_TRANSACTION = False
with self.metering_label(tenant_id=tenant_id) as metering_label:
- with self.router(tenant_id=tenant_id, set_context=True) as r:
- router = self._show('routers', r['router']['id'])
+ with self.router(tenant_id=tenant_id) as r:
+ router = self._show('routers', r['router']['id'],
+ tenant_id=tenant_id)
self.assertEqual(tenant_id, router['router']['tenant_id'])
metering_label_id = metering_label['metering_label']['id']
- self._delete('metering-labels', metering_label_id, 204)
- router = self._show('routers', r['router']['id'])
+ self._delete('metering-labels', metering_label_id, 204,
+ as_admin=True)
+ router = self._show('routers', r['router']['id'],
+ tenant_id=tenant_id)
self.assertEqual(tenant_id, router['router']['tenant_id'])
@@ -609,11 +584,7 @@ class TestMeteringPluginL3AgentScheduler(
self.uuid_patch = mock.patch(uuid, return_value=self.uuid)
self.mock_uuid = self.uuid_patch.start()
- self.project_id = 'a7e61382-47b8-4d40-bae3-f95981b5637b'
- self.ctx = FakeContext('', self.project_id, is_admin=True)
- self.context_patch = mock.patch('neutron_lib.context.Context',
- return_value=self.ctx)
- self.mock_context = self.context_patch.start()
+ self.ctx = context.Context('', self._tenant_id).elevated()
self.l3routers_patch = mock.patch(scheduler +
'.get_l3_agents_hosting_routers')
@@ -640,7 +611,7 @@ class TestMeteringPluginL3AgentScheduler(
'gw_port_id': None,
'admin_state_up': True,
'distributed': False,
- 'project_id': self.project_id,
+ 'project_id': self._tenant_id,
'_metering_labels': [
{'rules': [],
'id': second_uuid, 'shared': False,
@@ -651,7 +622,7 @@ class TestMeteringPluginL3AgentScheduler(
'gw_port_id': None,
'admin_state_up': True,
'distributed': False,
- 'project_id': self.project_id,
+ 'project_id': self._tenant_id,
'_metering_labels': [
{'rules': [],
'id': second_uuid, 'shared': False,
@@ -670,15 +641,12 @@ class TestMeteringPluginL3AgentScheduler(
self.l3routers_mock.side_effect = side_effect
- with self.router(name='router1', tenant_id=self.project_id,
- set_context=True):
+ with self.router(name='router1'):
self.mock_uuid.return_value = second_uuid
- with self.router(name='router2', tenant_id=self.project_id,
- set_context=True):
- with self.metering_label(tenant_id=self.project_id,
- set_context=True):
+ with self.router(name='router2'):
+ with self.metering_label():
self.mock_add.assert_called_with(
- self.ctx, tools.UnorderedList(expected))
+ mock.ANY, tools.UnorderedList(expected))
class TestMeteringPluginL3AgentSchedulerServicePlugin(
@@ -727,7 +695,6 @@ class TestMeteringPluginRpcFromL3Agent(
self.meter_plugin = directory.get_plugin(constants.METERING)
- self.tenant_id = 'admin_tenant_id'
self.tenant_id_1 = 'tenant_id_1'
self.tenant_id_2 = 'tenant_id_2'
@@ -759,8 +726,7 @@ class TestMeteringPluginRpcFromL3Agent(
def test_get_sync_data_metering_shared(self):
with self.router(name='router1', tenant_id=self.tenant_id_1):
with self.router(name='router2', tenant_id=self.tenant_id_2):
- with self.metering_label(tenant_id=self.tenant_id,
- shared=True):
+ with self.metering_label(shared=True):
callbacks = metering_rpc.MeteringRpcCallbacks(
self.meter_plugin)
data = callbacks.get_sync_data_metering(self.adminContext)
@@ -773,7 +739,7 @@ class TestMeteringPluginRpcFromL3Agent(
def test_get_sync_data_metering_not_shared(self):
with self.router(name='router1', tenant_id=self.tenant_id_1):
with self.router(name='router2', tenant_id=self.tenant_id_2):
- with self.metering_label(tenant_id=self.tenant_id):
+ with self.metering_label():
callbacks = metering_rpc.MeteringRpcCallbacks(
self.meter_plugin)
data = callbacks.get_sync_data_metering(self.adminContext)
@@ -786,13 +752,11 @@ class TestMeteringPluginRpcFromL3Agent(
with self.subnet() as subnet:
s = subnet['subnet']
self._set_net_external(s['network_id'])
- with self.router(
- name='router1', tenant_id=self.tenant_id
- ) as router1:
+ with self.router(name='router1') as router1:
self._add_external_gateway_to_router(
router1['router']['id'], s['network_id'])
- with self.router(name='router2', tenant_id=self.tenant_id):
- with self.metering_label(tenant_id=self.tenant_id):
+ with self.router(name='router2'):
+ with self.metering_label():
callbacks = metering_rpc.MeteringRpcCallbacks(
self.meter_plugin)
data = callbacks.get_sync_data_metering(
@@ -807,18 +771,15 @@ class TestMeteringPluginRpcFromL3Agent(
with self.subnet() as subnet:
s = subnet['subnet']
self._set_net_external(s['network_id'])
- with self.router(
- name='router1', tenant_id=self.tenant_id
- ) as router1:
+ with self.router(name='router1') as router1:
self._add_external_gateway_to_router(
router1['router']['id'], s['network_id'])
with self.router(
- name='router2', tenant_id=self.tenant_id,
- admin_state_up=False
+ name='router2', admin_state_up=False
) as router2:
self._add_external_gateway_to_router(
router2['router']['id'], s['network_id'])
- with self.metering_label(tenant_id=self.tenant_id):
+ with self.metering_label():
callbacks = metering_rpc.MeteringRpcCallbacks(
self.meter_plugin)
data = callbacks.get_sync_data_metering(
diff --git a/neutron/tests/unit/services/ovn_l3/test_plugin.py b/neutron/tests/unit/services/ovn_l3/test_plugin.py
index 29a81f22d7..a4f1512555 100644
--- a/neutron/tests/unit/services/ovn_l3/test_plugin.py
+++ b/neutron/tests/unit/services/ovn_l3/test_plugin.py
@@ -434,8 +434,6 @@ class TestOVNL3RouterPlugin(test_mech_driver.Ml2PluginV2TestCase):
{'router': updated_data})
self.l3_inst._nb_ovn.update_lrouter.assert_called_once_with(
'neutron-router-id', enabled=True, external_ids={
- ovn_const.OVN_GW_PORT_EXT_ID_KEY: '',
- ovn_const.OVN_GW_NETWORK_EXT_ID_KEY: '',
ovn_const.OVN_REV_NUM_EXT_ID_KEY: '1',
ovn_const.OVN_ROUTER_NAME_EXT_ID_KEY: 'router',
ovn_const.OVN_AZ_HINTS_EXT_ID_KEY: ''})
@@ -456,8 +454,6 @@ class TestOVNL3RouterPlugin(test_mech_driver.Ml2PluginV2TestCase):
'neutron-router-id', enabled=False,
external_ids={ovn_const.OVN_ROUTER_NAME_EXT_ID_KEY: 'test',
ovn_const.OVN_REV_NUM_EXT_ID_KEY: '1',
- ovn_const.OVN_GW_PORT_EXT_ID_KEY: '',
- ovn_const.OVN_GW_NETWORK_EXT_ID_KEY: '',
ovn_const.OVN_AZ_HINTS_EXT_ID_KEY: ''})
@mock.patch.object(utils, 'get_lrouter_non_gw_routes')
@@ -551,8 +547,6 @@ class TestOVNL3RouterPlugin(test_mech_driver.Ml2PluginV2TestCase):
external_ids = {ovn_const.OVN_ROUTER_NAME_EXT_ID_KEY: 'router',
ovn_const.OVN_REV_NUM_EXT_ID_KEY: '1',
- ovn_const.OVN_GW_PORT_EXT_ID_KEY: 'gw-port-id',
- ovn_const.OVN_GW_NETWORK_EXT_ID_KEY: 'ext-network-id',
ovn_const.OVN_AZ_HINTS_EXT_ID_KEY: ''}
self.l3_inst._nb_ovn.create_lrouter.assert_called_once_with(
'neutron-router-id', external_ids=external_ids,
diff --git a/neutron/tests/unit/services/qos/test_qos_plugin.py b/neutron/tests/unit/services/qos/test_qos_plugin.py
index 875712f172..ecdea9987f 100644
--- a/neutron/tests/unit/services/qos/test_qos_plugin.py
+++ b/neutron/tests/unit/services/qos/test_qos_plugin.py
@@ -1927,7 +1927,8 @@ class TestQoSRuleAlias(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
data = {'alias_%s_rule' % rule_type: kwargs}
resource = '%s/alias-%s-rules' % (qos.ALIAS,
rule_type.replace('_', '-'))
- request = self.new_update_request(resource, data, rule_id, self.fmt)
+ request = self.new_update_request(resource, data, rule_id, self.fmt,
+ as_admin=True)
res = request.get_response(self.ext_api)
if res.status_int >= webob.exc.HTTPClientError.code:
raise webob.exc.HTTPClientError(code=res.status_int)
@@ -1936,7 +1937,8 @@ class TestQoSRuleAlias(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
def _show_rule(self, rule_type, rule_id):
resource = '%s/alias-%s-rules' % (qos.ALIAS,
rule_type.replace('_', '-'))
- request = self.new_show_request(resource, rule_id, self.fmt)
+ request = self.new_show_request(resource, rule_id, self.fmt,
+ as_admin=True)
res = request.get_response(self.ext_api)
if res.status_int >= webob.exc.HTTPClientError.code:
raise webob.exc.HTTPClientError(code=res.status_int)
@@ -1945,7 +1947,8 @@ class TestQoSRuleAlias(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
def _delete_rule(self, rule_type, rule_id):
resource = '%s/alias-%s-rules' % (qos.ALIAS,
rule_type.replace('_', '-'))
- request = self.new_delete_request(resource, rule_id, self.fmt)
+ request = self.new_delete_request(resource, rule_id, self.fmt,
+ as_admin=True)
res = request.get_response(self.ext_api)
if res.status_int >= webob.exc.HTTPClientError.code:
raise webob.exc.HTTPClientError(code=res.status_int)
@@ -2014,7 +2017,8 @@ class TestQoSRuleAlias(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
return_value=None):
resource = '%s/alias-%s-rules' % (qos.ALIAS,
rule_type.replace('_', '-'))
- request = self.new_show_request(resource, rule_id, self.fmt)
+ request = self.new_show_request(resource, rule_id, self.fmt,
+ as_admin=True)
res = request.get_response(self.ext_api)
self.assertEqual(webob.exc.HTTPNotFound.code, res.status_int)
diff --git a/neutron/tests/unit/services/revisions/test_revision_plugin.py b/neutron/tests/unit/services/revisions/test_revision_plugin.py
index 77bd869ae9..8f5ae05916 100644
--- a/neutron/tests/unit/services/revisions/test_revision_plugin.py
+++ b/neutron/tests/unit/services/revisions/test_revision_plugin.py
@@ -97,7 +97,7 @@ class TestRevisionPlugin(test_plugin.Ml2PluginV2TestCase):
# with the flush process that occurs with these two connected objects,
# creating two copies of the Network object in the Session and putting
# it into an invalid state.
- with self.network(shared=True):
+ with self.network(shared=True, as_admin=True):
pass
def test_port_name_update_revises(self):
@@ -279,7 +279,8 @@ class TestRevisionPlugin(test_plugin.Ml2PluginV2TestCase):
'project_id': uuidutils.generate_uuid()}}
qos_obj = qos_plugin.create_policy(self.ctx, qos_policy)
data = {'port': {'qos_policy_id': qos_obj['id']}}
- response = self._update('ports', port['port']['id'], data)
+ response = self._update('ports', port['port']['id'], data,
+ as_admin=True)
new_rev = response['port']['revision_number']
self.assertGreater(new_rev, rev)
@@ -292,7 +293,8 @@ class TestRevisionPlugin(test_plugin.Ml2PluginV2TestCase):
'project_id': uuidutils.generate_uuid()}}
qos_obj = qos_plugin.create_policy(self.ctx, qos_policy)
data = {'network': {'qos_policy_id': qos_obj['id']}}
- response = self._update('networks', network['network']['id'], data)
+ response = self._update('networks', network['network']['id'], data,
+ as_admin=True)
new_rev = response['network']['revision_number']
self.assertGreater(new_rev, rev)
diff --git a/plugin.spec b/plugin.spec
index 2892869ab0..8459f4da87 100644
--- a/plugin.spec
+++ b/plugin.spec
@@ -59,7 +59,7 @@ subparsers:
- train
install_from_package:
type: Bool
- help: Install python-neutron-ovn-migration-tool rpm
+ help: Install openstack-neutron-ovn-migration-tool rpm
default: True
dvr:
diff --git a/releasenotes/notes/bug-1953165-6e848ea2c0398f56.yaml b/releasenotes/notes/bug-1953165-6e848ea2c0398f56.yaml
new file mode 100644
index 0000000000..6c79c0daef
--- /dev/null
+++ b/releasenotes/notes/bug-1953165-6e848ea2c0398f56.yaml
@@ -0,0 +1,16 @@
+---
+issues:
+ - |
+    The high availability of the metadata service on isolated networks is
+    limited or non-existent. IPv4 metadata is redundant when the DHCP agent
+    managing it is redundant, but recovery is tied to renewal of the DHCP
+    lease, making most recoveries very slow. IPv6 metadata is not redundant
+    at all: because the IPv6 metadata address is link-local, it can only be
+    configured in a single place at a time. Multiple agents trying to
+    configure it will generate an IPv6 duplicate address detection failure.
+
+    For this reason, administrators may observe the IPv6 metadata address in
+    the "dadfailed" state in the DHCP namespace; this only indicates that it
+    is not highly available. Until the isolated metadata service is
+    redesigned, there is no better deployment option. See `bug 1953165
+    <https://bugs.launchpad.net/neutron/+bug/1953165>`_ for more information.
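
As a rough illustration of the symptom described in this note (assuming the
conventional ``qdhcp-<network-id>`` DHCP namespace naming and the link-local
IPv6 metadata address ``fe80::a9fe:a9fe``), an operator could check whether
duplicate address detection failed with::

    # A "dadfailed" flag on the metadata address only means another agent
    # configured it first; it is not by itself a sign of a broken network.
    ip netns exec qdhcp-<network-id> ip -6 address show | grep dadfailed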
diff --git a/releasenotes/notes/enable-enforce-scope-and-new-defaults-1f82a9eb71125f5d.yaml b/releasenotes/notes/enable-enforce-scope-and-new-defaults-1f82a9eb71125f5d.yaml
new file mode 100644
index 0000000000..5ca899343e
--- /dev/null
+++ b/releasenotes/notes/enable-enforce-scope-and-new-defaults-1f82a9eb71125f5d.yaml
@@ -0,0 +1,25 @@
+---
+upgrade:
+ - |
+    The Neutron service now enables the new API policy (RBAC) defaults and
+    scope enforcement by default. The default values of the config options
+    ``[oslo_policy] enforce_scope`` and
+    ``[oslo_policy] enforce_new_defaults`` have been changed
+    to ``True``.
+
+    This means that if you use a system-scoped token to access the Neutron
+    API, the request will fail with a 403 error code. The new policy
+    defaults are also enforced. To learn the new defaults of each policy
+    rule, refer to `Policy New Defaults`_. For more detail about
+    the Neutron API policy changes, refer to `Policy Concepts`_.
+
+    If you want to disable them, set the following config options in the
+    ``neutron.conf`` file::
+
+ [oslo_policy]
+ enforce_new_defaults=False
+ enforce_scope=False
+
+ .. _`Policy New Defaults`: https://docs.openstack.org/neutron/latest/configuration/policy.html
+ .. _`Policy Concepts`: https://docs.openstack.org/neutron/latest/contributor/internals/policy.html
+
diff --git a/releasenotes/notes/localnet-learn-fdb-22469280b49701fc.yaml b/releasenotes/notes/localnet-learn-fdb-22469280b49701fc.yaml
new file mode 100644
index 0000000000..9af9ca48be
--- /dev/null
+++ b/releasenotes/notes/localnet-learn-fdb-22469280b49701fc.yaml
@@ -0,0 +1,23 @@
+---
+issues:
+ - |
+    In OVN 22.09 the option "localnet_learn_fdb" was added, enabling localnet
+    ports to learn MAC addresses and store them in the FDB table.
+    There is no aging mechanism for those MACs (which is the reason this
+    option is not enabled by default), so it needs to be used with care,
+    especially when provider networks are big. It is recommended to perform
+    periodic manual cleanups of the FDB table to avoid scalability
+    issues, until OVN implements an aging mechanism for this, tracked at
+    https://bugzilla.redhat.com/show_bug.cgi?id=2179942.
+fixes:
+ - |
+    By default, localnet ports do not learn MAC addresses, so those MACs are
+    not stored in the FDB table of the OVN southbound database. This leads
+    to flooding issues when traffic is destined to an IP address unknown to
+    OpenStack. In OVN 22.09 the option "localnet_learn_fdb" was added,
+    enabling those ports to learn MAC addresses and store them in the FDB
+    table. Note there is no aging mechanism for those MACs, so the option is
+    not enabled by default and needs to be used carefully, especially when
+    provider networks are big, with periodic manual cleanups of the FDB
+    table to avoid scalability issues, until OVN implements aging, tracked
+    at https://bugzilla.redhat.com/show_bug.cgi?id=2179942.
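
A minimal sketch of such a periodic cleanup, assuming ovn-sbctl's generic
database commands can reach the southbound database (inspect the table
before destroying anything in a production deployment)::

    # Inspect the MAC entries learned into the southbound FDB table
    ovn-sbctl list FDB

    # Flush all learned entries; active ports re-learn them from traffic
    ovn-sbctl --all destroy FDB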
diff --git a/releasenotes/notes/ovn-recreate-metadata-port-76e2c0e651267aa0.yaml b/releasenotes/notes/ovn-recreate-metadata-port-76e2c0e651267aa0.yaml
new file mode 100644
index 0000000000..dfe077945b
--- /dev/null
+++ b/releasenotes/notes/ovn-recreate-metadata-port-76e2c0e651267aa0.yaml
@@ -0,0 +1,11 @@
+---
+fixes:
+ - |
+    Fix an issue in the OVN driver where network metadata could
+    become unavailable if the metadata port was ever deleted, even
+    accidentally. To re-create the port, a user can now disable and
+    then re-enable DHCP for one of the subnets associated with the
+    network using the Neutron API. This will attempt to re-create
+    the port, similar to what the DHCP agent does for ML2/OVS.
+ For more information, see bug `2015377
+ <https://bugs.launchpad.net/ubuntu/+source/neutron/+bug/2015377>`_.
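
With the openstack CLI, the toggle described above would look roughly like
this (the subnet name is illustrative)::

    # Disabling and re-enabling DHCP on one subnet of the affected network
    # triggers re-creation of the missing metadata port.
    openstack subnet set --no-dhcp private-subnet
    openstack subnet set --dhcp private-subnet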
diff --git a/releasenotes/releasenotes/notes/network_subnet_mtu_validation-c221f22efcfae927.yaml b/releasenotes/releasenotes/notes/network_subnet_mtu_validation-c221f22efcfae927.yaml
new file mode 100644
index 0000000000..f30421c61a
--- /dev/null
+++ b/releasenotes/releasenotes/notes/network_subnet_mtu_validation-c221f22efcfae927.yaml
@@ -0,0 +1,22 @@
+---
+fixes:
+ - |
+    The Neutron API has been changed to validate network MTU minimums.
+    A network's MTU is now only valid if it is at least the minimum
+    value allowed based on the IP version of the associated subnets:
+    68 for IPv4 and 1280 for IPv6.
+
+ This minimum is now enforced in the following ways:
+
+    * When a subnet is associated with a network, validate that
+      the MTU is large enough for the IP version. Not only
+      would the subnet be unusable if it were allowed, but the
+      Linux kernel can fail when adding addresses and configuring
+      network settings such as the MTU.
+
+    * When a network's MTU is changed, validate that the MTU is
+      large enough for any currently associated subnets. Allowing
+      a smaller MTU would render any existing subnets unusable.
+
+ See bug `1988069 <https://bugs.launchpad.net/neutron/+bug/1988069>`_
+ for more information.
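
As a sketch of the new validation (names are illustrative and the exact
error text may differ), lowering the MTU of a network below 1280 once an
IPv6 subnet is attached is now rejected::

    openstack network create demo-net
    openstack subnet create --network demo-net --ip-version 6 \
        --subnet-range fd00:dead:beef::/64 demo-v6-subnet
    # Fails validation: IPv6 subnets require an MTU of at least 1280
    openstack network set --mtu 1200 demo-net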
diff --git a/tools/ovn_migration/infrared/tripleo-ovn-migration/README.rst b/tools/ovn_migration/infrared/tripleo-ovn-migration/README.rst
index 1f1d04dbf5..ebbd875605 100644
--- a/tools/ovn_migration/infrared/tripleo-ovn-migration/README.rst
+++ b/tools/ovn_migration/infrared/tripleo-ovn-migration/README.rst
@@ -7,11 +7,11 @@ See http://infrared.readthedocs.io/en/stable/index.html for more information.
Before using this plugin, first deploy an ML2/OVS overcloud and then:
-1. On your undercloud, install python-neutron-ovn-migration-tool package (https://trunk.rdoproject.org/centos7-master/current/)
- You also need to install python-neutron and python3-openvswitch packages.
+1. On your undercloud, install openstack-neutron-ovn-migration-tool package (https://trunk.rdoproject.org/centos9-master/component/network/current/)
+ You also need to install python3-neutron and python3-openvswitch packages.
2. Run ::
- $infrared plugin add "https://github.com/openstack/neutron.git"
+ $infrared plugin add "https://opendev.org/openstack/neutron.git"
3. Start migration by running::
diff --git a/tools/ovn_migration/infrared/tripleo-ovn-migration/main.yml b/tools/ovn_migration/infrared/tripleo-ovn-migration/main.yml
index e4424529ee..881ae6d22c 100644
--- a/tools/ovn_migration/infrared/tripleo-ovn-migration/main.yml
+++ b/tools/ovn_migration/infrared/tripleo-ovn-migration/main.yml
@@ -7,7 +7,7 @@
yum:
name:
- python3-virtualenv
- - python3-neutron-ovn-migration-tool
+ - openstack-neutron-ovn-migration-tool
state: present
- name: Set host_key_checking to False in ansible.cfg
diff --git a/tox.ini b/tox.ini
index 34eed72955..7029bfc238 100644
--- a/tox.ini
+++ b/tox.ini
@@ -79,8 +79,8 @@ setenv = {[testenv:dsvm-functional]setenv}
deps = {[testenv:dsvm-functional]deps}
commands =
bash {toxinidir}/tools/deploy_rootwrap.sh {toxinidir} {envdir}/etc {envdir}/bin
- stestr run --exclude-regex (.*MySQL\.|.*PostgreSQL\.|.*test_get_all_devices|.*TestMetadataAgent\.) {posargs}
- stestr run --combine --concurrency 1 (.*MySQL\.|.*PostgreSQL\.|.*test_get_all_devices|.*TestMetadataAgent\.) {posargs}
+ stestr run --slowest --exclude-regex (.*MySQL\.|.*PostgreSQL\.|.*test_get_all_devices|.*TestMetadataAgent\.) {posargs}
+ stestr run --slowest --combine --concurrency 1 (.*MySQL\.|.*PostgreSQL\.|.*test_get_all_devices|.*TestMetadataAgent\.) {posargs}
[testenv:dsvm-fullstack]
setenv = {[testenv]setenv}
@@ -105,8 +105,8 @@ deps = {[testenv:dsvm-fullstack]deps}
commands =
bash {toxinidir}/tools/generate_dhclient_script_for_fullstack.sh {envdir}
bash {toxinidir}/tools/deploy_rootwrap.sh {toxinidir} {envdir}/etc {envdir}/bin
- stestr run --concurrency 2 --exclude-regex neutron.tests.fullstack.test_securitygroup.TestSecurityGroupsSameNetwork.test_securitygroup {posargs}
- stestr run --combine --concurrency 1 neutron.tests.fullstack.test_securitygroup.TestSecurityGroupsSameNetwork.test_securitygroup {posargs}
+ stestr run --slowest --concurrency 2 --exclude-regex neutron.tests.fullstack.test_securitygroup.TestSecurityGroupsSameNetwork.test_securitygroup {posargs}
+ stestr run --slowest --combine --concurrency 1 neutron.tests.fullstack.test_securitygroup.TestSecurityGroupsSameNetwork.test_securitygroup {posargs}
[testenv:releasenotes]
envdir = {toxworkdir}/docs
diff --git a/zuul.d/base.yaml b/zuul.d/base.yaml
index 63d486645a..073ea3218f 100644
--- a/zuul.d/base.yaml
+++ b/zuul.d/base.yaml
@@ -117,12 +117,14 @@
- job:
name: neutron-fullstack-with-uwsgi-with-neutron-lib-master
+ branches: ^master$
parent: neutron-fullstack-with-uwsgi
required-projects:
- openstack/neutron-lib
- job:
name: neutron-fullstack-with-pyroute2-master
+ branches: ^master$
parent: neutron-fullstack
required-projects:
- name: github.com/svinota/pyroute2
@@ -142,6 +144,7 @@
- job:
name: neutron-functional-with-uwsgi-with-neutron-lib-master
+ branches: ^master$
parent: neutron-functional-with-uwsgi
required-projects:
- openstack/neutron-lib
@@ -166,12 +169,14 @@
- job:
name: neutron-functional-with-pyroute2-master
+ branches: ^master$
parent: neutron-functional
required-projects:
- name: github.com/svinota/pyroute2
- job:
name: neutron-functional-with-oslo-master
+ branches: ^master$
parent: neutron-functional
description: |
This job installs all oslo libraries from source and executes the
@@ -212,12 +217,15 @@
- job:
name: neutron-functional-with-sqlalchemy-master
+ branches: ^master$
parent: neutron-functional
required-projects:
- name: github.com/sqlalchemy/sqlalchemy
override-checkout: main
- openstack/oslo.db
- openstack/neutron-lib
+ - name: github.com/sqlalchemy/alembic
+ override-checkout: main
- job:
name: neutron-fullstack-with-uwsgi-fips
parent: neutron-fullstack-with-uwsgi
diff --git a/zuul.d/grenade.yaml b/zuul.d/grenade.yaml
index 921726724f..0c35f846c3 100644
--- a/zuul.d/grenade.yaml
+++ b/zuul.d/grenade.yaml
@@ -293,6 +293,8 @@
zuul_copy_output:
'{{ devstack_base_dir }}/data/ovs': 'logs'
'{{ devstack_base_dir }}/data/ovn': 'logs'
+ '/opt/stack/old/logs': 'logs'
+ '/opt/stack/new/logs': 'logs'
extensions_to_txt:
db: true
devstack_services:
diff --git a/zuul.d/job-templates.yaml b/zuul.d/job-templates.yaml
index dbea01ec9e..c092cabc4b 100644
--- a/zuul.d/job-templates.yaml
+++ b/zuul.d/job-templates.yaml
@@ -23,6 +23,9 @@
- ^roles/.*$
- ^rally-jobs/.*$
- ^zuul.d/(?!(job-templates)).*\.yaml
+ - openstack-tox-py39: # from openstack-python3-jobs template
+ timeout: 3600
+ irrelevant-files: *irrelevant-files
- openstack-tox-py310: # from openstack-python3-jobs template
timeout: 3600
irrelevant-files: *irrelevant-files
@@ -40,6 +43,9 @@
- openstack-tox-py38-arm64: # from openstack-python3-jobs-arm64 template
timeout: 4800
irrelevant-files: *irrelevant-files
+ - openstack-tox-py39-arm64: # from openstack-python3-jobs-arm64 template
+ timeout: 4800
+ irrelevant-files: *irrelevant-files
- openstack-tox-py310-arm64: # from openstack-python3-jobs-arm64 template
timeout: 4800
irrelevant-files: *irrelevant-files
@@ -48,6 +54,9 @@
- openstack-tox-py38: # from openstack-python3-jobs template
timeout: 3600
irrelevant-files: *irrelevant-files
+ - openstack-tox-py39: # from openstack-python3-jobs template
+ timeout: 3600
+ irrelevant-files: *irrelevant-files
- openstack-tox-py310: # from openstack-python3-jobs template
timeout: 3600
irrelevant-files: *irrelevant-files
@@ -56,51 +65,19 @@
name: neutron-experimental-jobs
experimental:
jobs:
- - neutron-functional
- - neutron-functional-with-uwsgi-fips
- neutron-functional-with-uwsgi-with-neutron-lib-master
- - neutron-functional-with-pyroute2-master
- - neutron-functional-with-sqlalchemy-master
- - neutron-fullstack
- - neutron-fullstack-with-uwsgi-fips
- neutron-fullstack-with-uwsgi-with-neutron-lib-master
- - neutron-fullstack-with-pyroute2-master
- - neutron-ovn-grenade-multinode
- - neutron-ovn-tempest-with-uwsgi-loki
- neutron-ovn-tempest-full-multinode-ovs-master
+ - neutron-ovn-grenade-multinode
- neutron-ovn-tempest-ovs-master
- - neutron-ovn-tempest-with-neutron-lib-master
- - neutron-ovs-tempest-with-neutron-lib-master
- - neutron-ovs-tempest-slow
- - neutron-ovn-tempest-slow
- - neutron-ovs-tempest-with-os-ken-master
- - neutron-ovn-tempest-postgres-full
- - neutron-ovn-tempest-mariadb-full
- neutron-ovn-tempest-ovs-release
- - neutron-ovn-tempest-ipv6-only-ovs-master
- - neutron-ovn-tempest-ovs-master-centos-9-stream
- - neutron-ovn-tempest-with-sqlalchemy-master
- - neutron-ovs-tempest-with-sqlalchemy-master
- - neutron-ovs-tempest-fips
- - neutron-ovn-tempest-ovs-release-fips
- - devstack-tobiko-neutron:
- voting: true
- - ironic-tempest-ipa-wholedisk-bios-agent_ipmitool-tinyipa
- - openstacksdk-functional-devstack-networking
- - neutron-linuxbridge-tempest-plugin-scenario-nftables
- - neutron-ovs-tempest-plugin-scenario-iptables_hybrid-nftables
- - devstack-enforce-scope
- - openstack-tox-py39-with-oslo-master:
- timeout: 3600
- irrelevant-files: *irrelevant-files
- - neutron-functional-with-oslo-master
- - neutron-ovs-tempest-with-oslo-master
- - neutron-ovn-tempest-ovs-release-with-oslo-master
+ - neutron-ovs-tempest-with-neutron-lib-master
+ - neutron-ovn-tempest-with-uwsgi-loki
- project-template:
name: neutron-periodic-jobs
periodic:
- jobs:
+ jobs: &neutron-periodic-jobs
# NOTE(ralonsoh): to be removed when "openstack-tox-py311" is defined
# and added to "openstack-python3-jobs" template.
- tox-py311:
@@ -137,6 +114,8 @@
- neutron-functional-with-oslo-master
- neutron-ovs-tempest-with-oslo-master
- neutron-ovn-tempest-ovs-release-with-oslo-master
+ experimental:
+ jobs: *neutron-periodic-jobs
- project-template:
name: neutron-skip-level-jobs
diff --git a/zuul.d/project.yaml b/zuul.d/project.yaml
index ec93017b5c..032064e1d1 100644
--- a/zuul.d/project.yaml
+++ b/zuul.d/project.yaml
@@ -63,8 +63,6 @@
- ^roles/.*functional.*$
- ^playbooks/.*functional.*$
- ^zuul.d/(?!(project)).*\.yaml
- - neutron-ovn-tempest-ovs-release-ubuntu-old:
- irrelevant-files: *ovn-irrelevant-files
gate:
jobs:
@@ -76,5 +74,3 @@
- neutron-ovn-tempest-ipv6-only-ovs-release
- tempest-integrated-networking:
irrelevant-files: *ovn-irrelevant-files
- - neutron-ovn-tempest-ovs-release-ubuntu-old:
- irrelevant-files: *ovn-irrelevant-files
diff --git a/zuul.d/tempest-multinode.yaml b/zuul.d/tempest-multinode.yaml
index e92b2ac6bc..d86914482a 100644
--- a/zuul.d/tempest-multinode.yaml
+++ b/zuul.d/tempest-multinode.yaml
@@ -528,6 +528,7 @@
# TODO(slaweq): propose job with ovs-release and move -master one to
# experimental queue
name: neutron-ovn-tempest-full-multinode-ovs-master
+ branches: ^master$
parent: neutron-ovn-multinode-base
run: playbooks/multinode-devstack-custom.yaml
vars:
@@ -569,10 +570,14 @@
devstack_localrc:
OVN_BUILD_FROM_SOURCE: True
OVN_BRANCH: main
- OVS_BRANCH: master
+ # TODO(ykarel) move to master once OVN main branch is fixed to
+ # work with https://github.com/openvswitch/ovs/commit/07cf5810de
+ OVS_BRANCH: e90a0727f17f6ad915a32735a8c0b282f2c8cd6f
group-vars:
subnode:
devstack_localrc:
OVN_BUILD_FROM_SOURCE: True
OVN_BRANCH: main
- OVS_BRANCH: master
+ # TODO(ykarel) move to master once OVN main branch is fixed to
+ # work with https://github.com/openvswitch/ovs/commit/07cf5810de
+ OVS_BRANCH: e90a0727f17f6ad915a32735a8c0b282f2c8cd6f
diff --git a/zuul.d/tempest-singlenode.yaml b/zuul.d/tempest-singlenode.yaml
index 2130e9133d..6c191e60b0 100644
--- a/zuul.d/tempest-singlenode.yaml
+++ b/zuul.d/tempest-singlenode.yaml
@@ -295,6 +295,7 @@
- job:
name: neutron-ovs-tempest-with-os-ken-master
+ branches: ^master$
parent: neutron-ovs-tempest-base
timeout: 10800
required-projects:
@@ -343,6 +344,7 @@
- job:
name: neutron-ovn-tempest-with-neutron-lib-master
+ branches: ^master$
parent: tempest-integrated-networking
timeout: 10800
required-projects:
@@ -373,12 +375,14 @@
- job:
name: neutron-ovs-tempest-with-neutron-lib-master
+ branches: ^master$
parent: neutron-ovs-tempest-base
required-projects:
- openstack/neutron-lib
- job:
name: neutron-ovs-tempest-with-oslo-master
+ branches: ^master$
parent: neutron-ovs-tempest-base
description: |
Job testing for devstack/tempest testing Neutron with OVS driver.
@@ -420,7 +424,9 @@
timeout: 10800
vars:
devstack_localrc:
- NEUTRON_DEPLOY_MOD_WSGI: true
+ # TODO(ykarel) can be enabled once
+ # https://bugs.launchpad.net/neutron/+bug/1912359 fixed
+ NEUTRON_DEPLOY_MOD_WSGI: false
MYSQL_REDUCE_MEMORY: true
devstack_plugins:
neutron: https://opendev.org/openstack/neutron.git
@@ -468,7 +474,7 @@
- job:
name: neutron-ovn-tempest-with-uwsgi-loki
- parent: neutron-tempest-with-uwsgi
+ parent: neutron-ovn-tempest-with-uwsgi
timeout: 10800
vars:
devstack_services:
@@ -615,23 +621,21 @@
parent: neutron-ovn-base
- job:
- name: neutron-ovn-tempest-ovs-release-ubuntu-old
- description: Job testing for devstack/tempest testing Neutron with ovn driver and previously supported (old) version of the Ubuntu operating system
- parent: neutron-ovn-base
- nodeset: openstack-single-node-focal
-
-- job:
name: neutron-ovn-tempest-ovs-master
+ branches: ^master$
description: Job testing for devstack/tempest testing Neutron with ovn driver and OVN master branch
parent: neutron-ovn-base
vars:
devstack_localrc:
OVN_BUILD_FROM_SOURCE: True
OVN_BRANCH: main
- OVS_BRANCH: master
+ # TODO(ykarel) move to master once OVN main branch is fixed to
+ # work with https://github.com/openvswitch/ovs/commit/07cf5810de
+ OVS_BRANCH: e90a0727f17f6ad915a32735a8c0b282f2c8cd6f
- job:
name: neutron-ovn-tempest-ovs-release-with-oslo-master
+ branches: ^master$
description: |
Job testing for devstack/tempest testing Neutron with OVN driver.
This job installs all oslo libraries from source.
@@ -766,15 +770,19 @@
- job:
name: neutron-ovn-tempest-ipv6-only-ovs-master
+ branches: ^master$
parent: neutron-ovn-tempest-ipv6-only-base
vars:
devstack_localrc:
OVN_BUILD_FROM_SOURCE: True
OVN_BRANCH: "main"
- OVS_BRANCH: master
+ # TODO(ykarel) move to master once OVN main branch is fixed to
+ # work with https://github.com/openvswitch/ovs/commit/07cf5810de
+ OVS_BRANCH: e90a0727f17f6ad915a32735a8c0b282f2c8cd6f
- job:
name: neutron-ovn-tempest-with-sqlalchemy-master
+ branches: ^master$
parent: tempest-integrated-networking
timeout: 10800
required-projects:
@@ -784,6 +792,8 @@
- openstack/neutron-lib
- name: github.com/sqlalchemy/sqlalchemy
override-checkout: main
+ - name: github.com/sqlalchemy/alembic
+ override-checkout: main
vars:
devstack_plugins:
neutron: https://opendev.org/openstack/neutron.git
@@ -808,9 +818,12 @@
- job:
name: neutron-ovs-tempest-with-sqlalchemy-master
+ branches: ^master$
parent: neutron-ovs-tempest-base
required-projects:
- name: github.com/sqlalchemy/sqlalchemy
override-checkout: main
- openstack/oslo.db
- openstack/neutron-lib
+ - name: github.com/sqlalchemy/alembic
+ override-checkout: main