-rw-r--r--  .coveragerc  1
-rw-r--r--  .zuul.yaml  41
-rw-r--r--  doc/requirements.txt  1
-rw-r--r--  doc/source/cli/command-objects/block-storage-cleanup.rst  8
-rw-r--r--  doc/source/cli/command-objects/block-storage-log-level.rst  8
-rw-r--r--  doc/source/cli/command-objects/block-storage-manage.rst  11
-rw-r--r--  doc/source/cli/command-objects/network-trunk.rst  16
-rw-r--r--  doc/source/cli/command-objects/volume.rst  8
-rw-r--r--  doc/source/cli/data/cinder.csv  16
-rw-r--r--  doc/source/cli/data/glance.csv  8
-rw-r--r--  doc/source/cli/data/nova.csv  14
-rw-r--r--  doc/source/cli/plugin-commands/cyborg.rst  4
-rw-r--r--  doc/source/cli/plugin-commands/index.rst  1
-rw-r--r--  doc/source/contributor/humaninterfaceguide.rst  4
-rw-r--r--  doc/source/contributor/plugins.rst  1
-rw-r--r--  openstackclient/common/project_cleanup.py  35
-rw-r--r--  openstackclient/common/quota.py  10
-rw-r--r--  openstackclient/compute/v2/host.py  60
-rw-r--r--  openstackclient/compute/v2/server.py  42
-rw-r--r--  openstackclient/compute/v2/server_migration.py  82
-rw-r--r--  openstackclient/compute/v2/server_volume.py  73
-rw-r--r--  openstackclient/compute/v2/service.py  10
-rw-r--r--  openstackclient/image/v2/image.py  76
-rw-r--r--  openstackclient/network/v2/floating_ip_port_forwarding.py  102
-rw-r--r--  openstackclient/network/v2/network_qos_policy.py  13
-rw-r--r--  openstackclient/network/v2/network_qos_rule_type.py  25
-rw-r--r--  openstackclient/network/v2/network_trunk.py  402
-rw-r--r--  openstackclient/tests/functional/compute/v2/test_server.py  3
-rw-r--r--  openstackclient/tests/functional/network/v2/test_network_qos_rule_type.py  29
-rw-r--r--  openstackclient/tests/functional/network/v2/test_network_trunk.py  149
-rw-r--r--  openstackclient/tests/unit/common/test_project_cleanup.py  26
-rw-r--r--  openstackclient/tests/unit/compute/v2/fakes.py  329
-rw-r--r--  openstackclient/tests/unit/compute/v2/test_host.py  105
-rw-r--r--  openstackclient/tests/unit/compute/v2/test_server.py  3
-rw-r--r--  openstackclient/tests/unit/compute/v2/test_server_migration.py  187
-rw-r--r--  openstackclient/tests/unit/compute/v2/test_server_volume.py  186
-rw-r--r--  openstackclient/tests/unit/image/v2/test_image.py  25
-rw-r--r--  openstackclient/tests/unit/network/v2/fakes.py  107
-rw-r--r--  openstackclient/tests/unit/network/v2/test_floating_ip_port_forwarding.py  231
-rw-r--r--  openstackclient/tests/unit/network/v2/test_network_qos_policy.py  2
-rw-r--r--  openstackclient/tests/unit/network/v2/test_network_qos_rule_type.py  34
-rw-r--r--  openstackclient/tests/unit/network/v2/test_network_trunk.py  851
-rw-r--r--  openstackclient/tests/unit/volume/v1/test_volume.py  51
-rw-r--r--  openstackclient/tests/unit/volume/v2/test_consistency_group.py  4
-rw-r--r--  openstackclient/tests/unit/volume/v2/test_volume.py  53
-rw-r--r--  openstackclient/tests/unit/volume/v3/fakes.py  89
-rw-r--r--  openstackclient/tests/unit/volume/v3/test_block_storage_cleanup.py  178
-rw-r--r--  openstackclient/tests/unit/volume/v3/test_block_storage_log_level.py  233
-rw-r--r--  openstackclient/tests/unit/volume/v3/test_block_storage_manage.py  411
-rw-r--r--  openstackclient/tests/unit/volume/v3/test_volume.py  179
-rw-r--r--  openstackclient/tests/unit/volume/v3/test_volume_group.py  180
-rw-r--r--  openstackclient/volume/v1/volume.py  37
-rw-r--r--  openstackclient/volume/v2/backup_record.py  14
-rw-r--r--  openstackclient/volume/v2/consistency_group.py  45
-rw-r--r--  openstackclient/volume/v2/volume.py  37
-rw-r--r--  openstackclient/volume/v3/block_storage_cleanup.py  146
-rw-r--r--  openstackclient/volume/v3/block_storage_log_level.py  147
-rw-r--r--  openstackclient/volume/v3/block_storage_manage.py  258
-rw-r--r--  openstackclient/volume/v3/volume.py  114
-rw-r--r--  openstackclient/volume/v3/volume_group.py  182
-rw-r--r--  releasenotes/notes/add-auto-approve-cleanup-a2d225faa42dfdcb.yaml  6
-rw-r--r--  releasenotes/notes/add-block-storage-manage-commands-6ebf029bd7a67bb3.yaml  7
-rw-r--r--  releasenotes/notes/add-create-group-from-src-options-6fcb0c87f617ca91.yaml  6
-rw-r--r--  releasenotes/notes/add-port-ranges-in-port-forwarding-command-8c6ee05cf625578a.yaml  4
-rw-r--r--  releasenotes/notes/add-vol-service-get-set-log-commands-f9420e5061d994b5.yaml  6
-rw-r--r--  releasenotes/notes/add-volume-revert-command-1c8f695420acbe7e.yaml  5
-rw-r--r--  releasenotes/notes/add-volume-summary-command-b2175b48af3ccab1.yaml  5
-rw-r--r--  releasenotes/notes/add-workers-cleanup-command-720573c0f642efe9.yaml  6
-rw-r--r--  releasenotes/notes/consistency-group-create-opts-aliases-e1c2f1498e9b1d3d.yaml  7
-rw-r--r--  releasenotes/notes/deprecate-volume-group-create-positional-arguments-89f6b886c0f1f2b5.yaml  10
-rw-r--r--  releasenotes/notes/migrate-host-list-show-to-sdk-9b80cd9b4196ab01.yaml  4
-rw-r--r--  releasenotes/notes/migrate-server-volume-list-update-to-sdk-95b1d3063e46f813.yaml  3
-rw-r--r--  releasenotes/notes/network-qos-rule-type-filters-47f4911a02011501.yaml  8
-rw-r--r--  releasenotes/notes/rename-server-volume-update-to-server-volume-set-833f1730a9bf6169.yaml  6
-rw-r--r--  releasenotes/notes/switch-server-migration-show-to-sdk-4adb88a0f1f03f3b.yaml  3
-rw-r--r--  releasenotes/source/2023.1.rst  6
-rw-r--r--  releasenotes/source/index.rst  1
-rw-r--r--  setup.cfg  17
-rw-r--r--  tox.ini  35
79 files changed, 5053 insertions, 789 deletions
diff --git a/.coveragerc b/.coveragerc
index 3685187b..8dc03265 100644
--- a/.coveragerc
+++ b/.coveragerc
@@ -1,6 +1,7 @@
[run]
branch = True
source = openstackclient
+omit = openstackclient/tests/*
[report]
ignore_errors = True
diff --git a/.zuul.yaml b/.zuul.yaml
index 95ab34ab..2c66c74a 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -38,11 +38,11 @@
zuul_work_dir: src/opendev.org/openstack/python-openstackclient
- job:
- name: osc-functional-devstack-base
+ name: osc-functional-devstack
parent: devstack-tox-functional
description: |
- Base job for devstack-based functional tests
- timeout: 9000
+ Run functional tests for OpenStackClient.
+ timeout: 7800
irrelevant-files:
- ^.*\.rst$
- ^doc/.*$
@@ -54,6 +54,12 @@
LIBS_FROM_GIT: python-openstackclient
# NOTE(dtroyer): Functional tests need a bit more volume headroom
VOLUME_BACKING_FILE_SIZE: 20G
+ Q_AGENT: openvswitch
+ Q_ML2_TENANT_NETWORK_TYPE: vxlan
+ Q_ML2_PLUGIN_MECHANISM_DRIVERS: openvswitch
+ devstack_plugins:
+ # NOTE(amotoki): Some neutron features are enabled by devstack plugin
+ neutron: https://opendev.org/openstack/neutron
devstack_services:
ceilometer-acentral: false
ceilometer-acompute: false
@@ -66,22 +72,6 @@
s-container: true
s-object: true
s-proxy: true
- osc_environment:
- PYTHONUNBUFFERED: 'true'
- OS_CLOUD: devstack-admin
- zuul_work_dir: src/opendev.org/openstack/python-openstackclient
-
-# The Neutron bits are here rather than in osc-functional-devstack-base to
-# simplify removing Neutron in the osc-functional-devstack-n-net job.
-- job:
- name: osc-functional-devstack
- parent: osc-functional-devstack-base
- timeout: 7800
- vars:
- devstack_plugins:
- # NOTE(amotoki): Some neutron features are enabled by devstack plugin
- neutron: https://opendev.org/openstack/neutron
- devstack_services:
# Disable OVN services
br-ex-tcpdump: false
br-int-flows: false
@@ -102,15 +92,20 @@
neutron-tag-ports-during-bulk-creation: true
neutron-conntrack-helper: true
neutron-ndp-proxy: true
- devstack_localrc:
- Q_AGENT: openvswitch
- Q_ML2_TENANT_NETWORK_TYPE: vxlan
- Q_ML2_PLUGIN_MECHANISM_DRIVERS: openvswitch
+ q-trunk: true
+ osc_environment:
+ PYTHONUNBUFFERED: 'true'
+ OS_CLOUD: devstack-admin
tox_envlist: functional
+ zuul_work_dir: src/opendev.org/openstack/python-openstackclient
- job:
name: osc-functional-devstack-tips
parent: osc-functional-devstack
+ description: |
+ Run functional tests for OpenStackClient with master branch of important libs.
+
+ Takes advantage of the base tox job's install-siblings feature.
timeout: 7800
required-projects:
- openstack/cliff
diff --git a/doc/requirements.txt b/doc/requirements.txt
index 8b4202be..93e4f046 100644
--- a/doc/requirements.txt
+++ b/doc/requirements.txt
@@ -11,6 +11,7 @@ aodhclient>=0.9.0 # Apache-2.0
gnocchiclient>=3.3.1 # Apache-2.0
osc-placement>=1.7.0 # Apache-2.0
python-barbicanclient>=4.5.2 # Apache-2.0
+python-cyborgclient>=1.2.1 # Apache-2.0
python-designateclient>=2.7.0 # Apache-2.0
python-heatclient>=1.10.0 # Apache-2.0
python-ironicclient>=2.3.0 # Apache-2.0
diff --git a/doc/source/cli/command-objects/block-storage-cleanup.rst b/doc/source/cli/command-objects/block-storage-cleanup.rst
new file mode 100644
index 00000000..6a593c11
--- /dev/null
+++ b/doc/source/cli/command-objects/block-storage-cleanup.rst
@@ -0,0 +1,8 @@
+=============
+block storage
+=============
+
+Block Storage v3
+
+.. autoprogram-cliff:: openstack.volume.v3
+ :command: block storage cleanup
diff --git a/doc/source/cli/command-objects/block-storage-log-level.rst b/doc/source/cli/command-objects/block-storage-log-level.rst
new file mode 100644
index 00000000..17241a0e
--- /dev/null
+++ b/doc/source/cli/command-objects/block-storage-log-level.rst
@@ -0,0 +1,8 @@
+=======================
+Block Storage Log Level
+=======================
+
+Block Storage v3
+
+.. autoprogram-cliff:: openstack.volume.v3
+ :command: block storage log level *
diff --git a/doc/source/cli/command-objects/block-storage-manage.rst b/doc/source/cli/command-objects/block-storage-manage.rst
new file mode 100644
index 00000000..a1cff1ad
--- /dev/null
+++ b/doc/source/cli/command-objects/block-storage-manage.rst
@@ -0,0 +1,11 @@
+====================
+Block Storage Manage
+====================
+
+Block Storage v3
+
+.. autoprogram-cliff:: openstack.volume.v3
+ :command: block storage volume manageable list
+
+.. autoprogram-cliff:: openstack.volume.v3
+ :command: block storage snapshot manageable list
diff --git a/doc/source/cli/command-objects/network-trunk.rst b/doc/source/cli/command-objects/network-trunk.rst
new file mode 100644
index 00000000..98fd4b0c
--- /dev/null
+++ b/doc/source/cli/command-objects/network-trunk.rst
@@ -0,0 +1,16 @@
+=============
+network trunk
+=============
+
+A **network trunk** is a container to group logical ports from different
+networks and provide a single trunked vNIC for servers. It consists of
+one parent port which is a regular VIF and multiple subports which allow
+the server to connect to more networks.
+
+Network v2
+
+.. autoprogram-cliff:: openstack.network.v2
+ :command: network subport list
+
+.. autoprogram-cliff:: openstack.network.v2
+ :command: network trunk *
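For illustration only (not part of the patch): a minimal openstacksdk sketch of the trunk workflow the new commands expose. The cloud name and port identifiers are placeholders.

    import openstack

    conn = openstack.connect(cloud='devstack-admin')  # assumed clouds.yaml entry

    # A trunk groups subports behind one parent port (the server's vNIC).
    parent = conn.network.find_port('parent-port', ignore_missing=False)
    trunk = conn.network.create_trunk(
        name='trunk0',
        port_id=parent.id,
        sub_ports=[
            # Each subport carries traffic for an additional network segment.
            {'port_id': '<child-port-id>',   # placeholder ID
             'segmentation_type': 'vlan',
             'segmentation_id': 100},
        ],
    )
    print(trunk.id, trunk.status)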
diff --git a/doc/source/cli/command-objects/volume.rst b/doc/source/cli/command-objects/volume.rst
index ac414110..9b491772 100644
--- a/doc/source/cli/command-objects/volume.rst
+++ b/doc/source/cli/command-objects/volume.rst
@@ -388,3 +388,11 @@ Unset volume properties
.. describe:: <volume>
Volume to modify (name or ID)
+
+Block Storage v3
+
+ .. autoprogram-cliff:: openstack.volume.v3
+ :command: volume summary
+
+ .. autoprogram-cliff:: openstack.volume.v3
+ :command: volume revert
diff --git a/doc/source/cli/data/cinder.csv b/doc/source/cli/data/cinder.csv
index 8b25d3fd..84ea409e 100644
--- a/doc/source/cli/data/cinder.csv
+++ b/doc/source/cli/data/cinder.csv
@@ -45,7 +45,7 @@ freeze-host,volume host set --disable,Freeze and disable the specified cinder-vo
get-capabilities,volume backend capability show,Show capabilities of a volume backend. Admin only.
get-pools,volume backend pool list,Show pool information for backends. Admin only.
group-create,volume group create,Creates a group. (Supported by API versions 3.13 - 3.latest)
-group-create-from-src,,Creates a group from a group snapshot or a source group. (Supported by API versions 3.14 - 3.latest)
+group-create-from-src,volume group create [--source-group|--group-snapshot],Creates a group from a group snapshot or a source group. (Supported by API versions 3.14 - 3.latest)
group-delete,volume group delete,Removes one or more groups. (Supported by API versions 3.13 - 3.latest)
group-disable-replication,volume group set --disable-replication,Disables replication for group. (Supported by API versions 3.38 - 3.latest)
group-enable-replication,volume group set --enable-replication,Enables replication for group. (Supported by API versions 3.38 - 3.latest)
@@ -71,7 +71,7 @@ image-metadata-show,volume show,Shows volume image metadata.
list,volume list,Lists all volumes.
list-filters,block storage resource filter list,List enabled filters. (Supported by API versions 3.33 - 3.latest)
manage,volume create --remote-source k=v,Manage an existing volume.
-manageable-list,,Lists all manageable volumes. (Supported by API versions 3.8 - 3.latest)
+manageable-list,block storage volume manageable list,Lists all manageable volumes. (Supported by API versions 3.8 - 3.latest)
message-delete,volume message delete,Removes one or more messages. (Supported by API versions 3.3 - 3.latest)
message-list,volume message list,Lists all messages. (Supported by API versions 3.3 - 3.latest)
message-show,volume message show,Shows message details. (Supported by API versions 3.3 - 3.latest)
@@ -100,19 +100,19 @@ readonly-mode-update,volume set --read-only-mode | --read-write-mode,Updates vol
rename,volume set --name,Renames a volume.
reset-state,volume set --state,Explicitly updates the volume state.
retype,volume type set --type,Changes the volume type for a volume.
-revert-to-snapshot,,Revert a volume to the specified snapshot. (Supported by API versions 3.40 - 3.latest)
+revert-to-snapshot,volume revert,Revert a volume to the specified snapshot. (Supported by API versions 3.40 - 3.latest)
service-disable,volume service set --disable,Disables the service.
service-enable,volume service set --enable,Enables the service.
-service-get-log,,(Supported by API versions 3.32 - 3.latest)
+service-get-log,block storage log level list,(Supported by API versions 3.32 - 3.latest)
service-list,volume service list,Lists all services. Filter by host and service binary.
-service-set-log,,(Supported by API versions 3.32 - 3.latest)
+service-set-log,block storage log level set,(Supported by API versions 3.32 - 3.latest)
set-bootable,volume set --bootable / --not-bootable,Update bootable status of a volume.
show,volume show,Shows volume details.
snapshot-create,snapshot create,Creates a snapshot.
snapshot-delete,snapshot delete,Remove one or more snapshots.
snapshot-list,snapshot list,Lists all snapshots.
snapshot-manage,volume snapshot create --remote-source <key=value>,Manage an existing snapshot.
-snapshot-manageable-list,,Lists all manageable snapshots. (Supported by API versions 3.8 - 3.latest)
+snapshot-manageable-list,block storage snapshot manageable list,Lists all manageable snapshots. (Supported by API versions 3.8 - 3.latest)
snapshot-metadata,snapshot set --property k=v / snapshot unset --property k,Sets or deletes snapshot metadata.
snapshot-metadata-show,snapshot show,Shows snapshot metadata.
snapshot-metadata-update-all,snapshot set --property k=v,Updates snapshot metadata.
@@ -120,7 +120,7 @@ snapshot-rename,snapshot set --name,Renames a snapshot.
snapshot-reset-state,snapshot set --state,Explicitly updates the snapshot state.
snapshot-show,snapshot show,Shows snapshot details.
snapshot-unmanage,volume snapshot delete --remote,Stop managing a snapshot.
-summary,,Get volumes summary. (Supported by API versions 3.12 - 3.latest)
+summary,volume summary,Get volumes summary. (Supported by API versions 3.12 - 3.latest)
thaw-host,volume host set --enable,Thaw and enable the specified cinder-volume host.
transfer-accept,volume transfer accept,Accepts a volume transfer.
transfer-create,volume transfer create,Creates a volume transfer.
@@ -140,7 +140,7 @@ type-update,volume type set,"Updates volume type name description and/or is_publ
unmanage,volume delete --remote,Stop managing a volume.
upload-to-image,image create --volume,Uploads volume to Image Service as an image.
version-list,versions show --service block-storage,List all API versions. (Supported by API versions 3.0 - 3.latest)
-work-cleanup,,Request cleanup of services with optional filtering. (Supported by API versions 3.24 - 3.latest)
+work-cleanup,block storage cleanup,Request cleanup of services with optional filtering. (Supported by API versions 3.24 - 3.latest)
bash-completion,complete,Prints arguments for bash_completion.
help,help,Shows help about this program or one of its subcommands.
list-extensions,extension list --volume,Lists all available os-api extensions.
diff --git a/doc/source/cli/data/glance.csv b/doc/source/cli/data/glance.csv
index adca8c0e..9d37509b 100644
--- a/doc/source/cli/data/glance.csv
+++ b/doc/source/cli/data/glance.csv
@@ -22,14 +22,14 @@ import-info,,Print import methods available from Glance.
location-add,,Add a location (and related metadata) to an image.
location-delete,,Remove locations (and related metadata) from an image.
location-update,,Update metadata of an image's location.
-md-namespace-create,,Create a new metadata definitions namespace.
-md-namespace-delete,,Delete specified metadata definitions namespace with its contents.
+md-namespace-create,image metadef namespace create,Create a new metadata definitions namespace.
+md-namespace-delete,image metadef namespace delete,Delete specified metadata definitions namespace with its contents.
md-namespace-import,,Import a metadata definitions namespace from file or standard input.
-md-namespace-list,,List metadata definitions namespaces.
+md-namespace-list,image metadef namespace list,List metadata definitions namespaces.
md-namespace-objects-delete,,Delete all metadata definitions objects inside a specific namespace.
md-namespace-properties-delete,,Delete all metadata definitions property inside a specific namespace.
md-namespace-resource-type-list,,List resource types associated to specific namespace.
-md-namespace-show,,Describe a specific metadata definitions namespace.
+md-namespace-show,image metadef namespace show,Describe a specific metadata definitions namespace.
md-namespace-tags-delete,,Delete all metadata definitions tags inside a specific namespace.
md-namespace-update,,Update an existing metadata definitions namespace.
md-object-create,,Create a new metadata definitions object inside a namespace.
diff --git a/doc/source/cli/data/nova.csv b/doc/source/cli/data/nova.csv
index e494ce28..ff691a51 100644
--- a/doc/source/cli/data/nova.csv
+++ b/doc/source/cli/data/nova.csv
@@ -4,7 +4,7 @@ agent-delete,compute agent delete,Delete existing agent build.
agent-list,compute agent list,List all builds.
agent-modify,compute agent set,Modify existing agent build.
aggregate-add-host,aggregate add host,Add the host to the specified aggregate.
-aggregate-cache-images,WONTFIX,Request images be cached. (Supported by API versions '2.81' - '2.latest') [hint: use '-- os-compute-api-version' flag to show help message for proper version]
+aggregate-cache-images,aggregate cache image,Request images be cached. (Supported by API versions '2.81' - '2.latest') [hint: use '-- os-compute-api-version' flag to show help message for proper version]
aggregate-create,aggregate create,Create a new aggregate with the specified details.
aggregate-delete,aggregate delete,Delete the aggregate.
aggregate-list,aggregate list,Print a list of all aggregates.
@@ -36,19 +36,19 @@ get-rdp-console,console url show --rdp,Get a rdp console to a server.
get-serial-console,console url show --serial,Get a serial console to a server.
get-spice-console,console url show --spice,Get a spice console to a server.
get-vnc-console,console url show --novnc,Get a vnc console to a server.
-host-evacuate,,Evacuate all instances from failed host.
-host-evacuate-live,,Live migrate all instances off the specified host to other available hosts.
-host-meta,,Set or Delete metadata on all instances of a host.
-host-servers-migrate,,Cold migrate all instances off the specified host to other available hosts.
+host-evacuate,WONTFIX,Evacuate all instances from failed host.
+host-evacuate-live,WONTFIX,Live migrate all instances off the specified host to other available hosts.
+host-meta,WONTFIX,Set or Delete metadata on all instances of a host.
+host-servers-migrate,WONTFIX,Cold migrate all instances off the specified host to other available hosts.
hypervisor-list,hypervisor list,List hypervisors. (Supported by API versions '2.0' - '2.latest')
-hypervisor-servers,,List servers belonging to specific hypervisors.
+hypervisor-servers,server list --host,List servers belonging to specific hypervisors.
hypervisor-show,hypervisor show,Display the details of the specified hypervisor.
hypervisor-stats,hypervisor stats show,Get hypervisor statistics over all compute nodes.
hypervisor-uptime,hypervisor show,Display the uptime of the specified hypervisor.
image-create,server image create,Create a new image by taking a snapshot of a running server.
instance-action,server event show,Show an action.
instance-action-list,server event list,List actions on a server.
-instance-usage-audit-log,,List/Get server usage audits.
+instance-usage-audit-log,WONTFIX,List/Get server usage audits.
interface-attach,server add port / server add floating ip / server add fixed ip,Attach a network interface to a server.
interface-detach,server remove port,Detach a network interface from a server.
interface-list,port list --server,List interfaces attached to a server.
diff --git a/doc/source/cli/plugin-commands/cyborg.rst b/doc/source/cli/plugin-commands/cyborg.rst
new file mode 100644
index 00000000..aedaa6b5
--- /dev/null
+++ b/doc/source/cli/plugin-commands/cyborg.rst
@@ -0,0 +1,4 @@
+cyborg
+------
+
+.. autoprogram-cliff:: openstack.accelerator.v2
diff --git a/doc/source/cli/plugin-commands/index.rst b/doc/source/cli/plugin-commands/index.rst
index 638dcbe5..e2e0dfa4 100644
--- a/doc/source/cli/plugin-commands/index.rst
+++ b/doc/source/cli/plugin-commands/index.rst
@@ -8,6 +8,7 @@ Plugin Commands
:maxdepth: 1
barbican
+ cyborg
designate
gnocchi
heat
diff --git a/doc/source/contributor/humaninterfaceguide.rst b/doc/source/contributor/humaninterfaceguide.rst
index 5987b86f..db6d7987 100644
--- a/doc/source/contributor/humaninterfaceguide.rst
+++ b/doc/source/contributor/humaninterfaceguide.rst
@@ -246,7 +246,7 @@ Objects and Actions
Commands consist of an object, described by one or more words, followed by an
action. ::
- <object> <action> [<name-or-id>]
+ <object> <action>
For example:
@@ -411,7 +411,7 @@ For example:
* ``server show <name-or-id>`` (compute servers have names or IDs and can be
referenced by both)
* ``consumer show <id>`` (identity consumers only have IDs, not names)
-* ``server show --toplogy <name-or-id>`` (additional information should be
+* ``server show --topology <name-or-id>`` (additional information should be
provided as options)
``list``
diff --git a/doc/source/contributor/plugins.rst b/doc/source/contributor/plugins.rst
index c2a08c5d..35d8d207 100644
--- a/doc/source/contributor/plugins.rst
+++ b/doc/source/contributor/plugins.rst
@@ -26,6 +26,7 @@ The following is a list of projects that are an OpenStackClient plugin.
- gnocchiclient
- osc-placement
- python-barbicanclient
+- python-cyborgclient
- python-designateclient
- python-heatclient
- python-ironicclient
diff --git a/openstackclient/common/project_cleanup.py b/openstackclient/common/project_cleanup.py
index f2536354..1193051a 100644
--- a/openstackclient/common/project_cleanup.py
+++ b/openstackclient/common/project_cleanup.py
@@ -28,16 +28,15 @@ from openstackclient.identity import common as identity_common
LOG = logging.getLogger(__name__)
-def ask_user_yesno(msg, default=True):
+def ask_user_yesno(msg):
"""Ask user Y/N question
:param str msg: question text
- :param bool default: default value
:return bool: User choice
"""
while True:
answer = getpass._raw_input(
- '{} [{}]: '.format(msg, 'y/N' if not default else 'Y/n'))
+ '{} [{}]: '.format(msg, 'y/n'))
if answer in ('y', 'Y', 'yes'):
return True
elif answer in ('n', 'N', 'no'):
@@ -49,10 +48,16 @@ class ProjectCleanup(command.Command):
def get_parser(self, prog_name):
parser = super(ProjectCleanup, self).get_parser(prog_name)
- parser.add_argument(
+ action_group = parser.add_mutually_exclusive_group()
+ action_group.add_argument(
'--dry-run',
action='store_true',
- help=_("List a project's resources")
+ help=_("List a project's resources but do not delete them")
+ )
+ action_group.add_argument(
+ '--auto-approve',
+ action='store_true',
+ help=_("Delete resources without asking for confirmation")
)
project_group = parser.add_mutually_exclusive_group(required=True)
project_group.add_argument(
@@ -68,12 +73,12 @@ class ProjectCleanup(command.Command):
parser.add_argument(
'--created-before',
metavar='<YYYY-MM-DDTHH24:MI:SS>',
- help=_('Drop resources created before the given time')
+ help=_('Only delete resources created before the given time')
)
parser.add_argument(
'--updated-before',
metavar='<YYYY-MM-DDTHH24:MI:SS>',
- help=_('Drop resources updated before the given time')
+ help=_('Only delete resources updated before the given time')
)
identity_common.add_project_domain_option_to_parser(parser)
return parser
@@ -128,13 +133,13 @@ class ProjectCleanup(command.Command):
if parsed_args.dry_run:
return
- confirm = ask_user_yesno(
- _("These resources will be deleted. Are you sure"),
- default=False)
+ if not parsed_args.auto_approve:
+ if not ask_user_yesno(
+ _("These resources will be deleted. Are you sure")):
+ return
- if confirm:
- self.log.warning(_('Deleting resources'))
+ self.log.warning(_('Deleting resources'))
- project_connect.project_cleanup(dry_run=False,
- status_queue=status_queue,
- filters=filters)
+ project_connect.project_cleanup(dry_run=False,
+ status_queue=status_queue,
+ filters=filters)
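As a standalone illustration of the new option handling (not part of the patch): the two flags live in one argparse mutually exclusive group, so combining them is rejected by the parser itself; the program name below is hypothetical.

    import argparse

    parser = argparse.ArgumentParser(prog='project cleanup')  # hypothetical stand-in
    action_group = parser.add_mutually_exclusive_group()
    action_group.add_argument(
        '--dry-run', action='store_true',
        help="List a project's resources but do not delete them")
    action_group.add_argument(
        '--auto-approve', action='store_true',
        help="Delete resources without asking for confirmation")

    print(parser.parse_args(['--auto-approve']))
    # Namespace(dry_run=False, auto_approve=True)
    parser.parse_args(['--dry-run', '--auto-approve'])
    # exits: argument --auto-approve: not allowed with argument --dry-run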
diff --git a/openstackclient/common/quota.py b/openstackclient/common/quota.py
index 246e44b3..670451e2 100644
--- a/openstackclient/common/quota.py
+++ b/openstackclient/common/quota.py
@@ -749,12 +749,10 @@ class SetQuota(common.NetDetectionMixin, command.Command):
class ShowQuota(command.Lister):
- _description = _(
- "Show quotas for project or class. "
- "Specify ``--os-compute-api-version 2.50`` or higher to see "
- "``server-groups`` and ``server-group-members`` output for a given "
- "quota class."
- )
+ _description = _("""Show quotas for project or class.
+
+Specify ``--os-compute-api-version 2.50`` or higher to see ``server-groups``
+and ``server-group-members`` output for a given quota class.""")
def get_parser(self, prog_name):
parser = super().get_parser(prog_name)
diff --git a/openstackclient/compute/v2/host.py b/openstackclient/compute/v2/host.py
index 07c92a8c..e6dd3a6f 100644
--- a/openstackclient/compute/v2/host.py
+++ b/openstackclient/compute/v2/host.py
@@ -22,10 +22,10 @@ from openstackclient.i18n import _
class ListHost(command.Lister):
- _description = _("List hosts")
+ _description = _("DEPRECATED: List hosts")
def get_parser(self, prog_name):
- parser = super(ListHost, self).get_parser(prog_name)
+ parser = super().get_parser(prog_name)
parser.add_argument(
"--zone",
metavar="<zone>",
@@ -34,17 +34,33 @@ class ListHost(command.Lister):
return parser
def take_action(self, parsed_args):
- compute_client = self.app.client_manager.compute
+ compute_client = self.app.client_manager.sdk_connection.compute
columns = (
"Host Name",
"Service",
"Zone"
)
- data = compute_client.api.host_list(parsed_args.zone)
- return (columns,
- (utils.get_dict_properties(
- s, columns,
- ) for s in data))
+
+ self.log.warning(
+ "API has been deprecated. "
+ "Please consider using 'hypervisor list' instead."
+ )
+
+ # doing this since openstacksdk has decided not to support this
+ # deprecated command
+ hosts = compute_client.get(
+ '/os-hosts', microversion='2.1'
+ ).json().get('hosts')
+
+ if parsed_args.zone is not None:
+ filtered_hosts = []
+ for host in hosts:
+ if host['zone'] == parsed_args.zone:
+ filtered_hosts.append(host)
+
+ hosts = filtered_hosts
+
+ return columns, (utils.get_dict_properties(s, columns) for s in hosts)
class SetHost(command.Command):
@@ -102,10 +118,10 @@ class SetHost(command.Command):
class ShowHost(command.Lister):
- _description = _("Display host details")
+ _description = _("DEPRECATED: Display host details")
def get_parser(self, prog_name):
- parser = super(ShowHost, self).get_parser(prog_name)
+ parser = super().get_parser(prog_name)
parser.add_argument(
"host",
metavar="<host>",
@@ -114,7 +130,7 @@ class ShowHost(command.Lister):
return parser
def take_action(self, parsed_args):
- compute_client = self.app.client_manager.compute
+ compute_client = self.app.client_manager.sdk_connection.compute
columns = (
"Host",
"Project",
@@ -123,9 +139,21 @@ class ShowHost(command.Lister):
"Disk GB"
)
- data = compute_client.api.host_show(parsed_args.host)
+ self.log.warning(
+ "API has been deprecated. "
+ "Please consider using 'hypervisor show' instead."
+ )
+
+ # doing this since openstacksdk has decided not to support this
+ # deprecated command
+ resources = compute_client.get(
+ '/os-hosts/' + parsed_args.host,
+ microversion='2.1'
+ ).json().get('host')
+
+ data = []
+ if resources is not None:
+ for resource in resources:
+ data.append(resource['resource'])
- return (columns,
- (utils.get_dict_properties(
- s, columns,
- ) for s in data))
+ return columns, (utils.get_dict_properties(s, columns) for s in data)
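A minimal sketch (assuming an openstack.connect() connection and a deployment that still exposes the deprecated os-hosts API) of the raw GET pattern the rewritten host commands rely on, since openstacksdk does not wrap this endpoint:

    import openstack

    conn = openstack.connect(cloud='devstack-admin')  # assumed clouds.yaml entry
    compute = conn.compute

    # The compute proxy doubles as a keystoneauth adapter, so the deprecated
    # os-hosts endpoint is reachable with a plain GET.
    hosts = compute.get('/os-hosts', microversion='2.1').json().get('hosts', [])

    # Optional zone filter, mirroring `host list --zone <zone>`.
    zone = 'nova'
    hosts = [h for h in hosts if h['zone'] == zone]
    for h in hosts:
        print(h['host_name'], h['service'], h['zone'])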
diff --git a/openstackclient/compute/v2/server.py b/openstackclient/compute/v2/server.py
index 156f855f..0c062a9e 100644
--- a/openstackclient/compute/v2/server.py
+++ b/openstackclient/compute/v2/server.py
@@ -608,10 +608,10 @@ class AddServerSecurityGroup(command.Command):
class AddServerVolume(command.ShowOne):
- _description = _(
- "Add volume to server. "
- "Specify ``--os-compute-api-version 2.20`` or higher to add a volume "
- "to a server with status ``SHELVED`` or ``SHELVED_OFFLOADED``.")
+ _description = _("""Add volume to server.
+
+Specify ``--os-compute-api-version 2.20`` or higher to add a volume to a server
+with status ``SHELVED`` or ``SHELVED_OFFLOADED``.""")
def get_parser(self, prog_name):
parser = super(AddServerVolume, self).get_parser(prog_name)
@@ -3766,11 +3766,10 @@ class RemoveServerSecurityGroup(command.Command):
class RemoveServerVolume(command.Command):
- _description = _(
- "Remove volume from server. "
- "Specify ``--os-compute-api-version 2.20`` or higher to remove a "
- "volume from a server with status ``SHELVED`` or "
- "``SHELVED_OFFLOADED``.")
+ _description = _("""Remove volume from server.
+
+Specify ``--os-compute-api-version 2.20`` or higher to remove a
+volume from a server with status ``SHELVED`` or ``SHELVED_OFFLOADED``.""")
def get_parser(self, prog_name):
parser = super(RemoveServerVolume, self).get_parser(prog_name)
@@ -3807,11 +3806,10 @@ class RemoveServerVolume(command.Command):
class RescueServer(command.Command):
- _description = _(
- "Put server in rescue mode. "
- "Specify ``--os-compute-api-version 2.87`` or higher to rescue a "
- "server booted from a volume."
- )
+ _description = _("""Put server in rescue mode.
+
+Specify ``--os-compute-api-version 2.87`` or higher to rescue a
+server booted from a volume.""")
def get_parser(self, prog_name):
parser = super(RescueServer, self).get_parser(prog_name)
@@ -3967,9 +3965,7 @@ Confirm (verify) success of resize operation and release the old server.""")
# TODO(stephenfin): Remove in OSC 7.0
class MigrateConfirm(ResizeConfirm):
- _description = _("""DEPRECATED: Confirm server migration.
-
-Use 'server migration confirm' instead.""")
+ _description = _("DEPRECATED: Use 'server migration confirm' instead.")
def take_action(self, parsed_args):
msg = _(
@@ -4015,9 +4011,7 @@ one.""")
# TODO(stephenfin): Remove in OSC 7.0
class MigrateRevert(ResizeRevert):
- _description = _("""Revert server migration.
-
-Use 'server migration revert' instead.""")
+ _description = _("DEPRECATED: Use 'server migration revert' instead.")
def take_action(self, parsed_args):
msg = _(
@@ -4362,10 +4356,10 @@ class ShelveServer(command.Command):
class ShowServer(command.ShowOne):
- _description = _(
- "Show server details. Specify ``--os-compute-api-version 2.47`` "
- "or higher to see the embedded flavor information for the server."
- )
+ _description = _("""Show server details.
+
+Specify ``--os-compute-api-version 2.47`` or higher to see the embedded flavor
+information for the server.""")
def get_parser(self, prog_name):
parser = super(ShowServer, self).get_parser(prog_name)
diff --git a/openstackclient/compute/v2/server_migration.py b/openstackclient/compute/v2/server_migration.py
index 016d15d7..91575c1e 100644
--- a/openstackclient/compute/v2/server_migration.py
+++ b/openstackclient/compute/v2/server_migration.py
@@ -14,7 +14,6 @@
import uuid
-from novaclient import api_versions
from openstack import utils as sdk_utils
from osc_lib.command import command
from osc_lib import exceptions
@@ -256,7 +255,7 @@ class ListMigration(command.Lister):
def _get_migration_by_uuid(compute_client, server_id, migration_uuid):
- for migration in compute_client.server_migrations.list(server_id):
+ for migration in compute_client.server_migrations(server_id):
if migration.uuid == migration_uuid:
return migration
break
@@ -290,9 +289,9 @@ class ShowMigration(command.ShowOne):
return parser
def take_action(self, parsed_args):
- compute_client = self.app.client_manager.compute
+ compute_client = self.app.client_manager.sdk_connection.compute
- if compute_client.api_version < api_versions.APIVersion('2.24'):
+ if not sdk_utils.supports_microversion(compute_client, '2.24'):
msg = _(
'--os-compute-api-version 2.24 or greater is required to '
'support the server migration show command'
@@ -308,16 +307,16 @@ class ShowMigration(command.ShowOne):
)
raise exceptions.CommandError(msg)
- if compute_client.api_version < api_versions.APIVersion('2.59'):
+ if not sdk_utils.supports_microversion(compute_client, '2.59'):
msg = _(
'--os-compute-api-version 2.59 or greater is required to '
'retrieve server migrations by UUID'
)
raise exceptions.CommandError(msg)
- server = utils.find_resource(
- compute_client.servers,
+ server = compute_client.find_server(
parsed_args.server,
+ ignore_missing=False,
)
# the nova API doesn't currently allow retrieval by UUID but it's a
@@ -328,11 +327,13 @@ class ShowMigration(command.ShowOne):
compute_client, server.id, parsed_args.migration,
)
else:
- server_migration = compute_client.server_migrations.get(
- server.id, parsed_args.migration,
+ server_migration = compute_client.get_server_migration(
+ server.id,
+ parsed_args.migration,
+ ignore_missing=False,
)
- columns = (
+ column_headers = (
'ID',
'Server UUID',
'Status',
@@ -351,14 +352,35 @@ class ShowMigration(command.ShowOne):
'Updated At',
)
- if compute_client.api_version >= api_versions.APIVersion('2.59'):
- columns += ('UUID',)
+ columns = (
+ 'id',
+ 'server_id',
+ 'status',
+ 'source_compute',
+ 'source_node',
+ 'dest_compute',
+ 'dest_host',
+ 'dest_node',
+ 'memory_total_bytes',
+ 'memory_processed_bytes',
+ 'memory_remaining_bytes',
+ 'disk_total_bytes',
+ 'disk_processed_bytes',
+ 'disk_remaining_bytes',
+ 'created_at',
+ 'updated_at',
+ )
- if compute_client.api_version >= api_versions.APIVersion('2.80'):
- columns += ('User ID', 'Project ID')
+ if sdk_utils.supports_microversion(compute_client, '2.59'):
+ column_headers += ('UUID',)
+ columns += ('uuid',)
+
+ if sdk_utils.supports_microversion(compute_client, '2.80'):
+ column_headers += ('User ID', 'Project ID')
+ columns += ('user_id', 'project_id')
data = utils.get_item_properties(server_migration, columns)
- return columns, data
+ return column_headers, data
class AbortMigration(command.Command):
@@ -382,9 +404,9 @@ class AbortMigration(command.Command):
return parser
def take_action(self, parsed_args):
- compute_client = self.app.client_manager.compute
+ compute_client = self.app.client_manager.sdk_connection.compute
- if compute_client.api_version < api_versions.APIVersion('2.24'):
+ if not sdk_utils.supports_microversion(compute_client, '2.24'):
msg = _(
'--os-compute-api-version 2.24 or greater is required to '
'support the server migration abort command'
@@ -400,16 +422,16 @@ class AbortMigration(command.Command):
)
raise exceptions.CommandError(msg)
- if compute_client.api_version < api_versions.APIVersion('2.59'):
+ if not sdk_utils.supports_microversion(compute_client, '2.59'):
msg = _(
'--os-compute-api-version 2.59 or greater is required to '
'abort server migrations by UUID'
)
raise exceptions.CommandError(msg)
- server = utils.find_resource(
- compute_client.servers,
+ server = compute_client.find_server(
parsed_args.server,
+ ignore_missing=False,
)
# the nova API doesn't currently allow retrieval by UUID but it's a
@@ -421,8 +443,10 @@ class AbortMigration(command.Command):
compute_client, server.id, parsed_args.migration,
).id
- compute_client.server_migrations.live_migration_abort(
- server.id, migration_id,
+ compute_client.abort_server_migration(
+ migration_id,
+ server.id,
+ ignore_missing=False,
)
@@ -447,9 +471,9 @@ class ForceCompleteMigration(command.Command):
return parser
def take_action(self, parsed_args):
- compute_client = self.app.client_manager.compute
+ compute_client = self.app.client_manager.sdk_connection.compute
- if compute_client.api_version < api_versions.APIVersion('2.22'):
+ if not sdk_utils.supports_microversion(compute_client, '2.22'):
msg = _(
'--os-compute-api-version 2.22 or greater is required to '
'support the server migration force complete command'
@@ -465,16 +489,16 @@ class ForceCompleteMigration(command.Command):
)
raise exceptions.CommandError(msg)
- if compute_client.api_version < api_versions.APIVersion('2.59'):
+ if not sdk_utils.supports_microversion(compute_client, '2.59'):
msg = _(
'--os-compute-api-version 2.59 or greater is required to '
'abort server migrations by UUID'
)
raise exceptions.CommandError(msg)
- server = utils.find_resource(
- compute_client.servers,
+ server = compute_client.find_server(
parsed_args.server,
+ ignore_missing=False,
)
# the nova API doesn't currently allow retrieval by UUID but it's a
@@ -486,6 +510,6 @@ class ForceCompleteMigration(command.Command):
compute_client, server.id, parsed_args.migration,
).id
- compute_client.server_migrations.live_migrate_force_complete(
- server.id, migration_id,
+ compute_client.force_complete_server_migration(
+ migration_id, server.id
)
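The microversion gating above follows one pattern throughout the patch; a minimal sketch of it, assuming a compute proxy obtained from openstack.connect():

    from openstack import utils as sdk_utils

    def migration_columns(compute_client):
        """Build (headers, fields) that grow with the negotiated microversion."""
        column_headers = ('ID', 'Server UUID', 'Status')
        columns = ('id', 'server_id', 'status')
        if sdk_utils.supports_microversion(compute_client, '2.59'):
            column_headers += ('UUID',)
            columns += ('uuid',)
        if sdk_utils.supports_microversion(compute_client, '2.80'):
            column_headers += ('User ID', 'Project ID')
            columns += ('user_id', 'project_id')
        return column_headers, columns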
diff --git a/openstackclient/compute/v2/server_volume.py b/openstackclient/compute/v2/server_volume.py
index d53cec93..b4322c0b 100644
--- a/openstackclient/compute/v2/server_volume.py
+++ b/openstackclient/compute/v2/server_volume.py
@@ -14,7 +14,7 @@
"""Compute v2 Server action implementations"""
-from novaclient import api_versions
+from openstack import utils as sdk_utils
from osc_lib.command import command
from osc_lib import exceptions
from osc_lib import utils
@@ -34,27 +34,25 @@ class ListServerVolume(command.Lister):
return parser
def take_action(self, parsed_args):
+ compute_client = self.app.client_manager.sdk_connection.compute
- compute_client = self.app.client_manager.compute
-
- server = utils.find_resource(
- compute_client.servers,
+ server = compute_client.find_server(
parsed_args.server,
+ ignore_missing=False,
)
-
- volumes = compute_client.volumes.get_server_volumes(server.id)
+ volumes = compute_client.volume_attachments(server)
columns = ()
column_headers = ()
- if compute_client.api_version < api_versions.APIVersion('2.89'):
+ if not sdk_utils.supports_microversion(compute_client, '2.89'):
columns += ('id',)
column_headers += ('ID',)
columns += (
'device',
- 'serverId',
- 'volumeId',
+ 'server_id',
+ 'volume_id',
)
column_headers += (
'Device',
@@ -62,40 +60,36 @@ class ListServerVolume(command.Lister):
'Volume ID',
)
- if compute_client.api_version >= api_versions.APIVersion('2.70'):
+ if sdk_utils.supports_microversion(compute_client, '2.70'):
columns += ('tag',)
column_headers += ('Tag',)
- if compute_client.api_version >= api_versions.APIVersion('2.79'):
+ if sdk_utils.supports_microversion(compute_client, '2.79'):
columns += ('delete_on_termination',)
column_headers += ('Delete On Termination?',)
- if compute_client.api_version >= api_versions.APIVersion('2.89'):
- columns += ('attachment_id', 'bdm_uuid')
+ if sdk_utils.supports_microversion(compute_client, '2.89'):
+ columns += ('attachment_id', 'bdm_id')
column_headers += ('Attachment ID', 'BlockDeviceMapping UUID')
return (
column_headers,
- (
- utils.get_item_properties(
- s, columns, mixed_case_fields=('serverId', 'volumeId')
- ) for s in volumes
- ),
+ (utils.get_item_properties(s, columns) for s in volumes),
)
-class UpdateServerVolume(command.Command):
+class SetServerVolume(command.Command):
"""Update a volume attachment on the server."""
def get_parser(self, prog_name):
- parser = super(UpdateServerVolume, self).get_parser(prog_name)
+ parser = super().get_parser(prog_name)
parser.add_argument(
'server',
help=_('Server to update volume for (name or ID)'),
)
parser.add_argument(
'volume',
- help=_('Volume (ID)'),
+ help=_('Volume to update attachment for (name or ID)'),
)
termination_group = parser.add_mutually_exclusive_group()
termination_group.add_argument(
@@ -120,31 +114,34 @@ class UpdateServerVolume(command.Command):
return parser
def take_action(self, parsed_args):
-
- compute_client = self.app.client_manager.compute
+ compute_client = self.app.client_manager.sdk_connection.compute
+ volume_client = self.app.client_manager.sdk_connection.volume
if parsed_args.delete_on_termination is not None:
- if compute_client.api_version < api_versions.APIVersion('2.85'):
+ if not sdk_utils.supports_microversion(compute_client, '2.85'):
msg = _(
'--os-compute-api-version 2.85 or greater is required to '
- 'support the --(no-)delete-on-termination option'
+ 'support the -delete-on-termination or '
+ '--preserve-on-termination option'
)
raise exceptions.CommandError(msg)
- server = utils.find_resource(
- compute_client.servers,
+ server = compute_client.find_server(
parsed_args.server,
+ ignore_missing=False,
)
-
- # NOTE(stephenfin): This may look silly, and that's because it is.
- # This API was originally used only for the swapping volumes, which
- # is an internal operation that should only be done by
- # orchestration software rather than a human. We're not going to
- # expose that, but we are going to expose the ability to change the
- # delete on termination behavior.
- compute_client.volumes.update_server_volume(
- server.id,
- parsed_args.volume,
+ volume = volume_client.find_volume(
parsed_args.volume,
+ ignore_missing=False,
+ )
+
+ compute_client.update_volume_attachment(
+ server,
+ volume,
delete_on_termination=parsed_args.delete_on_termination,
)
+
+
+# Legacy alias
+class UpdateServerVolume(SetServerVolume):
+ """DEPRECATED: Use 'server volume set' instead."""
diff --git a/openstackclient/compute/v2/service.py b/openstackclient/compute/v2/service.py
index 8605156c..fad717c9 100644
--- a/openstackclient/compute/v2/service.py
+++ b/openstackclient/compute/v2/service.py
@@ -71,11 +71,11 @@ class DeleteService(command.Command):
class ListService(command.Lister):
- _description = _("List compute services. Using "
- "``--os-compute-api-version`` 2.53 or greater will "
- "return the ID as a UUID value which can be used to "
- "uniquely identify the service in a multi-cell "
- "deployment.")
+ _description = _("""List compute services.
+
+Using ``--os-compute-api-version`` 2.53 or greater will return the ID as a UUID
+value which can be used to uniquely identify the service in a multi-cell
+deployment.""")
def get_parser(self, prog_name):
parser = super(ListService, self).get_parser(prog_name)
diff --git a/openstackclient/image/v2/image.py b/openstackclient/image/v2/image.py
index 21b962f1..71dcc731 100644
--- a/openstackclient/image/v2/image.py
+++ b/openstackclient/image/v2/image.py
@@ -33,7 +33,7 @@ from osc_lib import utils
from openstackclient.common import progressbar
from openstackclient.i18n import _
-from openstackclient.identity import common
+from openstackclient.identity import common as identity_common
if os.name == "nt":
import msvcrt
@@ -177,21 +177,22 @@ class AddProjectToImage(command.ShowOne):
metavar="<project>",
help=_("Project to associate with image (ID)"),
)
- common.add_project_domain_option_to_parser(parser)
+ identity_common.add_project_domain_option_to_parser(parser)
return parser
def take_action(self, parsed_args):
image_client = self.app.client_manager.image
identity_client = self.app.client_manager.identity
- project_id = common.find_project(
+ project_id = identity_common.find_project(
identity_client,
parsed_args.project,
parsed_args.project_domain,
).id
image = image_client.find_image(
- parsed_args.image, ignore_missing=False
+ parsed_args.image,
+ ignore_missing=False,
)
obj = image_client.add_member(
@@ -397,7 +398,7 @@ class CreateImage(command.ShowOne):
"Force the use of glance image import instead of direct upload"
),
)
- common.add_project_domain_option_to_parser(parser)
+ identity_common.add_project_domain_option_to_parser(parser)
for deadopt in self.deadopts:
parser.add_argument(
"--%s" % deadopt,
@@ -450,7 +451,7 @@ class CreateImage(command.ShowOne):
kwargs['visibility'] = parsed_args.visibility
if parsed_args.project:
- kwargs['owner_id'] = common.find_project(
+ kwargs['owner_id'] = identity_common.find_project(
identity_client,
parsed_args.project,
parsed_args.project_domain,
@@ -664,7 +665,8 @@ class DeleteImage(command.Command):
for image in parsed_args.images:
try:
image_obj = image_client.find_image(
- image, ignore_missing=False
+ image,
+ ignore_missing=False,
)
image_client.delete_image(image_obj.id)
except Exception as e:
@@ -765,7 +767,7 @@ class ListImage(command.Lister):
metavar='<project>',
help=_("Search by project (admin only) (name or ID)"),
)
- common.add_project_domain_option_to_parser(parser)
+ identity_common.add_project_domain_option_to_parser(parser)
parser.add_argument(
'--tag',
metavar='<tag>',
@@ -835,7 +837,10 @@ class ListImage(command.Lister):
if parsed_args.limit:
kwargs['limit'] = parsed_args.limit
if parsed_args.marker:
- kwargs['marker'] = image_client.find_image(parsed_args.marker).id
+ kwargs['marker'] = image_client.find_image(
+ parsed_args.marker,
+ ignore_missing=False,
+ ).id
if parsed_args.name:
kwargs['name'] = parsed_args.name
if parsed_args.status:
@@ -846,7 +851,7 @@ class ListImage(command.Lister):
kwargs['tag'] = parsed_args.tag
project_id = None
if parsed_args.project:
- project_id = common.find_project(
+ project_id = identity_common.find_project(
identity_client,
parsed_args.project,
parsed_args.project_domain,
@@ -925,14 +930,17 @@ class ListImageProjects(command.Lister):
metavar="<image>",
help=_("Image (name or ID)"),
)
- common.add_project_domain_option_to_parser(parser)
+ identity_common.add_project_domain_option_to_parser(parser)
return parser
def take_action(self, parsed_args):
image_client = self.app.client_manager.image
columns = ("Image ID", "Member ID", "Status")
- image_id = image_client.find_image(parsed_args.image).id
+ image_id = image_client.find_image(
+ parsed_args.image,
+ ignore_missing=False,
+ ).id
data = image_client.members(image=image_id)
@@ -963,19 +971,22 @@ class RemoveProjectImage(command.Command):
metavar="<project>",
help=_("Project to disassociate with image (name or ID)"),
)
- common.add_project_domain_option_to_parser(parser)
+ identity_common.add_project_domain_option_to_parser(parser)
return parser
def take_action(self, parsed_args):
image_client = self.app.client_manager.image
identity_client = self.app.client_manager.identity
- project_id = common.find_project(
- identity_client, parsed_args.project, parsed_args.project_domain
+ project_id = identity_common.find_project(
+ identity_client,
+ parsed_args.project,
+ parsed_args.project_domain,
).id
image = image_client.find_image(
- parsed_args.image, ignore_missing=False
+ parsed_args.image,
+ ignore_missing=False,
)
image_client.remove_member(member=project_id, image=image.id)
@@ -1001,7 +1012,10 @@ class SaveImage(command.Command):
def take_action(self, parsed_args):
image_client = self.app.client_manager.image
- image = image_client.find_image(parsed_args.image)
+ image = image_client.find_image(
+ parsed_args.image,
+ ignore_missing=False,
+ )
output_file = parsed_args.filename
if output_file is None:
@@ -1176,7 +1190,7 @@ class SetImage(command.Command):
metavar="<project>",
help=_("Set an alternate project on this image (name or ID)"),
)
- common.add_project_domain_option_to_parser(parser)
+ identity_common.add_project_domain_option_to_parser(parser)
for deadopt in self.deadopts:
parser.add_argument(
"--%s" % deadopt,
@@ -1192,7 +1206,10 @@ class SetImage(command.Command):
const="accepted",
dest="membership",
default=None,
- help=_("Accept the image membership"),
+ help=_(
+ "Accept the image membership for either the project indicated "
+ "by '--project', if provided, or the current user's project"
+ ),
)
membership_group.add_argument(
"--reject",
@@ -1200,7 +1217,10 @@ class SetImage(command.Command):
const="rejected",
dest="membership",
default=None,
- help=_("Reject the image membership"),
+ help=_(
+ "Reject the image membership for either the project indicated "
+ "by '--project', if provided, or the current user's project"
+ ),
)
membership_group.add_argument(
"--pending",
@@ -1248,7 +1268,7 @@ class SetImage(command.Command):
)
project_id = None
if parsed_args.project:
- project_id = common.find_project(
+ project_id = identity_common.find_project(
identity_client,
parsed_args.project,
parsed_args.project_domain,
@@ -1369,7 +1389,8 @@ class ShowImage(command.ShowOne):
image_client = self.app.client_manager.image
image = image_client.find_image(
- parsed_args.image, ignore_missing=False
+ parsed_args.image,
+ ignore_missing=False,
)
info = _format_image(image, parsed_args.human_readable)
@@ -1413,7 +1434,8 @@ class UnsetImage(command.Command):
def take_action(self, parsed_args):
image_client = self.app.client_manager.image
image = image_client.find_image(
- parsed_args.image, ignore_missing=False
+ parsed_args.image,
+ ignore_missing=False,
)
kwargs = {}
@@ -1789,10 +1811,10 @@ class ImportImage(command.ShowOne):
image_client.import_image(
image,
method=parsed_args.import_method,
- # uri=parsed_args.uri,
- # remote_region=parsed_args.remote_region,
- # remote_image=parsed_args.remote_image,
- # remote_service_interface=parsed_args.remote_service_interface,
+ uri=parsed_args.uri,
+ remote_region=parsed_args.remote_region,
+ remote_image=parsed_args.remote_image,
+ remote_service_interface=parsed_args.remote_service_interface,
stores=parsed_args.stores,
all_stores=parsed_args.all_stores,
all_stores_must_succeed=not parsed_args.allow_failure,
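For illustration (not part of the patch): a sketch of the interoperable import call that now receives these options, assuming an image proxy from openstack.connect(); the image name, method, and URI are placeholders.

    import openstack

    conn = openstack.connect(cloud='devstack-admin')  # assumed clouds.yaml entry
    image = conn.image.find_image('cirros-web', ignore_missing=False)  # placeholder

    # 'web-download' asks Glance to fetch the image data from the given URI.
    conn.image.import_image(
        image,
        method='web-download',
        uri='https://example.com/cirros.qcow2',  # placeholder URI
    )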
diff --git a/openstackclient/network/v2/floating_ip_port_forwarding.py b/openstackclient/network/v2/floating_ip_port_forwarding.py
index bcd5add4..0156af8e 100644
--- a/openstackclient/network/v2/floating_ip_port_forwarding.py
+++ b/openstackclient/network/v2/floating_ip_port_forwarding.py
@@ -25,6 +25,61 @@ from openstackclient.network import common
LOG = logging.getLogger(__name__)
+def validate_ports_diff(ports):
+ if len(ports) == 0:
+ return 0
+
+ ports_diff = ports[-1] - ports[0]
+ if ports_diff < 0:
+ msg = _("The last number in port range must be"
+ " greater or equal to the first")
+ raise exceptions.CommandError(msg)
+ return ports_diff
+
+
+def validate_ports_match(internal_ports, external_ports):
+ internal_ports_diff = validate_ports_diff(internal_ports)
+ external_ports_diff = validate_ports_diff(external_ports)
+
+ if internal_ports_diff != 0 and internal_ports_diff != external_ports_diff:
+ msg = _("The relation between internal and external ports does not "
+ "match the pattern 1:N and N:N")
+ raise exceptions.CommandError(msg)
+
+
+def validate_and_assign_port_ranges(parsed_args, attrs):
+ internal_port_range = parsed_args.internal_protocol_port
+ external_port_range = parsed_args.external_protocol_port
+ external_ports = internal_ports = []
+ if external_port_range:
+ external_ports = list(map(int, str(external_port_range).split(':')))
+ if internal_port_range:
+ internal_ports = list(map(int, str(internal_port_range).split(':')))
+
+ validate_ports_match(internal_ports, external_ports)
+
+ for port in external_ports + internal_ports:
+ validate_port(port)
+
+ if internal_port_range:
+ if ':' in internal_port_range:
+ attrs['internal_port_range'] = internal_port_range
+ else:
+ attrs['internal_port'] = int(internal_port_range)
+
+ if external_port_range:
+ if ':' in external_port_range:
+ attrs['external_port_range'] = external_port_range
+ else:
+ attrs['external_port'] = int(external_port_range)
+
+
+def validate_port(port):
+ if port <= 0 or port > 65535:
+ msg = _("The port number range is <1-65535>")
+ raise exceptions.CommandError(msg)
+
+
def _get_columns(item):
column_map = {}
hidden_columns = ['location', 'tenant_id']
@@ -58,7 +113,6 @@ class CreateFloatingIPPortForwarding(command.ShowOne,
)
parser.add_argument(
'--internal-protocol-port',
- type=int,
metavar='<port-number>',
required=True,
help=_("The protocol port number "
@@ -67,7 +121,6 @@ class CreateFloatingIPPortForwarding(command.ShowOne,
)
parser.add_argument(
'--external-protocol-port',
- type=int,
metavar='<port-number>',
required=True,
help=_("The protocol port number of "
@@ -92,6 +145,7 @@ class CreateFloatingIPPortForwarding(command.ShowOne,
help=_("Floating IP that the port forwarding belongs to "
"(IP address or ID)")
)
+
return parser
def take_action(self, parsed_args):
@@ -102,19 +156,7 @@ class CreateFloatingIPPortForwarding(command.ShowOne,
ignore_missing=False,
)
- if parsed_args.internal_protocol_port is not None:
- if (parsed_args.internal_protocol_port <= 0 or
- parsed_args.internal_protocol_port > 65535):
- msg = _("The port number range is <1-65535>")
- raise exceptions.CommandError(msg)
- attrs['internal_port'] = parsed_args.internal_protocol_port
-
- if parsed_args.external_protocol_port is not None:
- if (parsed_args.external_protocol_port <= 0 or
- parsed_args.external_protocol_port > 65535):
- msg = _("The port number range is <1-65535>")
- raise exceptions.CommandError(msg)
- attrs['external_port'] = parsed_args.external_protocol_port
+ validate_and_assign_port_ranges(parsed_args, attrs)
if parsed_args.port:
port = client.find_port(parsed_args.port,
@@ -226,7 +268,9 @@ class ListFloatingIPPortForwarding(command.Lister):
'internal_port_id',
'internal_ip_address',
'internal_port',
+ 'internal_port_range',
'external_port',
+ 'external_port_range',
'protocol',
'description',
)
@@ -235,7 +279,9 @@ class ListFloatingIPPortForwarding(command.Lister):
'Internal Port ID',
'Internal IP Address',
'Internal Port',
+ 'Internal Port Range',
'External Port',
+ 'External Port Range',
'Protocol',
'Description',
)
@@ -246,8 +292,13 @@ class ListFloatingIPPortForwarding(command.Lister):
port = client.find_port(parsed_args.port,
ignore_missing=False)
query['internal_port_id'] = port.id
- if parsed_args.external_protocol_port is not None:
- query['external_port'] = parsed_args.external_protocol_port
+ external_port = parsed_args.external_protocol_port
+ if external_port:
+ if ':' in external_port:
+ query['external_port_range'] = external_port
+ else:
+ query['external_port'] = int(
+ parsed_args.external_protocol_port)
if parsed_args.protocol is not None:
query['protocol'] = parsed_args.protocol
@@ -297,14 +348,12 @@ class SetFloatingIPPortForwarding(common.NeutronCommandWithExtraArgs):
parser.add_argument(
'--internal-protocol-port',
metavar='<port-number>',
- type=int,
help=_("The TCP/UDP/other protocol port number of the "
"network port fixed IPv4 address associated to "
"the floating IP port forwarding")
)
parser.add_argument(
'--external-protocol-port',
- type=int,
metavar='<port-number>',
help=_("The TCP/UDP/other protocol port number of the "
"port forwarding's floating IP address")
@@ -339,19 +388,8 @@ class SetFloatingIPPortForwarding(common.NeutronCommandWithExtraArgs):
if parsed_args.internal_ip_address:
attrs['internal_ip_address'] = parsed_args.internal_ip_address
- if parsed_args.internal_protocol_port is not None:
- if (parsed_args.internal_protocol_port <= 0 or
- parsed_args.internal_protocol_port > 65535):
- msg = _("The port number range is <1-65535>")
- raise exceptions.CommandError(msg)
- attrs['internal_port'] = parsed_args.internal_protocol_port
-
- if parsed_args.external_protocol_port is not None:
- if (parsed_args.external_protocol_port <= 0 or
- parsed_args.external_protocol_port > 65535):
- msg = _("The port number range is <1-65535>")
- raise exceptions.CommandError(msg)
- attrs['external_port'] = parsed_args.external_protocol_port
+
+ validate_and_assign_port_ranges(parsed_args, attrs)
if parsed_args.protocol:
attrs['protocol'] = parsed_args.protocol
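To make the 1:N / N:N rule concrete, a small self-contained re-implementation of the range check introduced above (the names mirror the patch, but the snippet stands alone):

    def ports_diff(ports):
        """Width of a 'first:last' range given as [first, last]; 0 for a single port."""
        if not ports:
            return 0
        if ports[-1] - ports[0] < 0:
            raise ValueError('The last number in port range must be '
                             'greater or equal to the first')
        return ports[-1] - ports[0]

    def ranges_match(internal, external):
        """Allowed combinations are 1:N (single internal port) and N:N."""
        internal_diff = ports_diff(internal)
        external_diff = ports_diff(external)
        return internal_diff == 0 or internal_diff == external_diff

    print(ranges_match([80], [8080, 8090]))       # True  (1:N)
    print(ranges_match([80, 90], [8080, 8090]))   # True  (N:N)
    print(ranges_match([80, 85], [8080, 8090]))   # False (widths differ)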
diff --git a/openstackclient/network/v2/network_qos_policy.py b/openstackclient/network/v2/network_qos_policy.py
index d77e5db9..29967d7d 100644
--- a/openstackclient/network/v2/network_qos_policy.py
+++ b/openstackclient/network/v2/network_qos_policy.py
@@ -15,6 +15,7 @@
import logging
+from cliff import columns as cliff_columns
from osc_lib.command import command
from osc_lib import exceptions
from osc_lib import utils
@@ -26,6 +27,16 @@ from openstackclient.network import common
LOG = logging.getLogger(__name__)
+class RulesColumn(cliff_columns.FormattableColumn):
+ def human_readable(self):
+ return '\n'.join(str(v) for v in self._value)
+
+
+_formatters = {
+ 'rules': RulesColumn,
+}
+
+
def _get_columns(item):
column_map = {
'is_shared': 'shared',
@@ -287,5 +298,5 @@ class ShowNetworkQosPolicy(command.ShowOne):
obj = client.find_qos_policy(parsed_args.policy,
ignore_missing=False)
display_columns, columns = _get_columns(obj)
- data = utils.get_item_properties(obj, columns)
+ data = utils.get_item_properties(obj, columns, formatters=_formatters)
return (display_columns, data)
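
A quick, self-contained illustration of what the new RulesColumn does when a human-readable formatter renders the 'rules' field (the sample rule payloads are made up):

    from cliff import columns as cliff_columns


    class RulesColumn(cliff_columns.FormattableColumn):
        def human_readable(self):
            return '\n'.join(str(v) for v in self._value)


    sample_rules = [
        {'type': 'bandwidth_limit', 'max_kbps': 10000, 'direction': 'egress'},
        {'type': 'dscp_marking', 'dscp_mark': 26},
    ]
    # Prints one str()-rendered rule per line instead of a single long list.
    print(RulesColumn(sample_rules).human_readable())
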
diff --git a/openstackclient/network/v2/network_qos_rule_type.py b/openstackclient/network/v2/network_qos_rule_type.py
index 9af22876..3f4f6a19 100644
--- a/openstackclient/network/v2/network_qos_rule_type.py
+++ b/openstackclient/network/v2/network_qos_rule_type.py
@@ -32,6 +32,23 @@ def _get_columns(item):
class ListNetworkQosRuleType(command.Lister):
_description = _("List QoS rule types")
+ def get_parser(self, prog_name):
+ parser = super().get_parser(prog_name)
+ supported = parser.add_mutually_exclusive_group()
+ supported.add_argument(
+ '--all-supported',
+ action='store_true',
+ help=_("List all the QoS rule types supported by any loaded "
+ "mechanism drivers (the union of all sets of supported "
+ "rules)")
+ )
+ supported.add_argument(
+ '--all-rules',
+ action='store_true',
+ help=_("List all QoS rule types implemented in Neutron QoS driver")
+ )
+ return parser
+
def take_action(self, parsed_args):
client = self.app.client_manager.network
columns = (
@@ -40,7 +57,13 @@ class ListNetworkQosRuleType(command.Lister):
column_headers = (
'Type',
)
- data = client.qos_rule_types()
+
+ args = {}
+ if parsed_args.all_supported:
+ args['all_supported'] = True
+ elif parsed_args.all_rules:
+ args['all_rules'] = True
+ data = client.qos_rule_types(**args)
return (column_headers,
(utils.get_item_properties(
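
For context, the two new flags are passed straight through as query parameters to the SDK's qos_rule_types() listing, as the hunk above shows. A minimal usage sketch against openstacksdk (the cloud name 'devstack' is an assumption, and both filters require the 'qos-rule-type-filter' extension):

    import openstack

    conn = openstack.connect(cloud='devstack')  # assumed cloud entry

    # Equivalent of "network qos rule type list --all-supported": the union
    # of rule types supported by the loaded mechanism drivers.
    supported = [t.type for t in conn.network.qos_rule_types(all_supported=True)]

    # Equivalent of "network qos rule type list --all-rules": every rule
    # type implemented by the Neutron QoS driver.
    implemented = [t.type for t in conn.network.qos_rule_types(all_rules=True)]
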
diff --git a/openstackclient/network/v2/network_trunk.py b/openstackclient/network/v2/network_trunk.py
new file mode 100644
index 00000000..c5f62901
--- /dev/null
+++ b/openstackclient/network/v2/network_trunk.py
@@ -0,0 +1,402 @@
+# Copyright 2016 ZTE Corporation.
+# All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+"""Network trunk and subports action implementations"""
+import logging
+
+from cliff import columns as cliff_columns
+from osc_lib.cli import format_columns
+from osc_lib.cli import identity as identity_utils
+from osc_lib.cli import parseractions
+from osc_lib.command import command
+from osc_lib import exceptions
+from osc_lib import utils as osc_utils
+
+from openstackclient.i18n import _
+
+LOG = logging.getLogger(__name__)
+
+TRUNK = 'trunk'
+TRUNKS = 'trunks'
+SUB_PORTS = 'sub_ports'
+
+
+class AdminStateColumn(cliff_columns.FormattableColumn):
+ def human_readable(self):
+ return 'UP' if self._value else 'DOWN'
+
+
+class CreateNetworkTrunk(command.ShowOne):
+ """Create a network trunk for a given project"""
+
+ def get_parser(self, prog_name):
+ parser = super(CreateNetworkTrunk, self).get_parser(prog_name)
+ parser.add_argument(
+ 'name',
+ metavar='<name>',
+ help=_("Name of the trunk to create")
+ )
+ parser.add_argument(
+ '--description',
+ metavar='<description>',
+ help=_("A description of the trunk")
+ )
+ parser.add_argument(
+ '--parent-port',
+ metavar='<parent-port>',
+ required=True,
+ help=_("Parent port belonging to this trunk (name or ID)")
+ )
+ parser.add_argument(
+ '--subport',
+ metavar='<port=,segmentation-type=,segmentation-id=>',
+ action=parseractions.MultiKeyValueAction, dest='add_subports',
+ optional_keys=['segmentation-id', 'segmentation-type'],
+ required_keys=['port'],
+            help=_("Subport to add. Subport is of form "
+                   "'port=<name or ID>,segmentation-type=<segmentation-type>,"
+                   "segmentation-id=<segmentation-ID>' "
+                   "(the --subport option can be repeated)")
+ )
+ admin_group = parser.add_mutually_exclusive_group()
+ admin_group.add_argument(
+ '--enable',
+ action='store_true',
+ default=True,
+ help=_("Enable trunk (default)")
+ )
+ admin_group.add_argument(
+ '--disable',
+ action='store_true',
+ help=_("Disable trunk")
+ )
+ identity_utils.add_project_owner_option_to_parser(parser)
+ return parser
+
+ def take_action(self, parsed_args):
+ client = self.app.client_manager.network
+ attrs = _get_attrs_for_trunk(self.app.client_manager,
+ parsed_args)
+ obj = client.create_trunk(**attrs)
+ display_columns, columns = _get_columns(obj)
+ data = osc_utils.get_dict_properties(obj, columns,
+ formatters=_formatters)
+ return display_columns, data
+
+
+class DeleteNetworkTrunk(command.Command):
+ """Delete a given network trunk"""
+
+ def get_parser(self, prog_name):
+ parser = super(DeleteNetworkTrunk, self).get_parser(prog_name)
+ parser.add_argument(
+ 'trunk',
+ metavar="<trunk>",
+ nargs="+",
+ help=_("Trunk(s) to delete (name or ID)")
+ )
+ return parser
+
+ def take_action(self, parsed_args):
+ client = self.app.client_manager.network
+ result = 0
+ for trunk in parsed_args.trunk:
+ try:
+ trunk_id = client.find_trunk(trunk).id
+ client.delete_trunk(trunk_id)
+ except Exception as e:
+ result += 1
+ LOG.error(_("Failed to delete trunk with name "
+ "or ID '%(trunk)s': %(e)s"),
+ {'trunk': trunk, 'e': e})
+ if result > 0:
+ total = len(parsed_args.trunk)
+ msg = (_("%(result)s of %(total)s trunks failed "
+ "to delete.") % {'result': result, 'total': total})
+ raise exceptions.CommandError(msg)
+
+
+class ListNetworkTrunk(command.Lister):
+ """List all network trunks"""
+
+ def get_parser(self, prog_name):
+ parser = super(ListNetworkTrunk, self).get_parser(prog_name)
+ parser.add_argument(
+ '--long',
+ action='store_true',
+ default=False,
+ help=_("List additional fields in output")
+ )
+ return parser
+
+ def take_action(self, parsed_args):
+ client = self.app.client_manager.network
+ data = client.trunks()
+ headers = (
+ 'ID',
+ 'Name',
+ 'Parent Port',
+ 'Description'
+ )
+ columns = (
+ 'id',
+ 'name',
+ 'port_id',
+ 'description'
+ )
+ if parsed_args.long:
+ headers += (
+ 'Status',
+ 'State',
+ 'Created At',
+ 'Updated At',
+ )
+ columns += (
+ 'status',
+ 'admin_state_up',
+ 'created_at',
+ 'updated_at'
+ )
+ return (headers,
+ (osc_utils.get_item_properties(
+ s, columns,
+ formatters=_formatters,
+ ) for s in data))
+
+
+class SetNetworkTrunk(command.Command):
+ """Set network trunk properties"""
+
+ def get_parser(self, prog_name):
+ parser = super(SetNetworkTrunk, self).get_parser(prog_name)
+ parser.add_argument(
+ 'trunk',
+ metavar="<trunk>",
+ help=_("Trunk to modify (name or ID)")
+ )
+ parser.add_argument(
+ '--name',
+ metavar="<name>",
+ help=_("Set trunk name")
+ )
+ parser.add_argument(
+ '--description',
+ metavar='<description>',
+ help=_("A description of the trunk")
+ )
+ parser.add_argument(
+ '--subport',
+ metavar='<port=,segmentation-type=,segmentation-id=>',
+ action=parseractions.MultiKeyValueAction, dest='set_subports',
+ optional_keys=['segmentation-id', 'segmentation-type'],
+ required_keys=['port'],
+            help=_("Subport to add. Subport is of form "
+                   "'port=<name or ID>,segmentation-type=<segmentation-type>,"
+                   "segmentation-id=<segmentation-ID>' "
+                   "(the --subport option can be repeated)")
+ )
+ admin_group = parser.add_mutually_exclusive_group()
+ admin_group.add_argument(
+ '--enable',
+ action='store_true',
+ help=_("Enable trunk")
+ )
+ admin_group.add_argument(
+ '--disable',
+ action='store_true',
+ help=_("Disable trunk")
+ )
+ return parser
+
+ def take_action(self, parsed_args):
+ client = self.app.client_manager.network
+ trunk_id = client.find_trunk(parsed_args.trunk)
+ attrs = _get_attrs_for_trunk(self.app.client_manager, parsed_args)
+ try:
+ client.update_trunk(trunk_id, **attrs)
+ except Exception as e:
+ msg = (_("Failed to set trunk '%(t)s': %(e)s")
+ % {'t': parsed_args.trunk, 'e': e})
+ raise exceptions.CommandError(msg)
+ if parsed_args.set_subports:
+ subport_attrs = _get_attrs_for_subports(self.app.client_manager,
+ parsed_args)
+ try:
+ client.add_trunk_subports(trunk_id, subport_attrs)
+ except Exception as e:
+ msg = (_("Failed to add subports to trunk '%(t)s': %(e)s")
+ % {'t': parsed_args.trunk, 'e': e})
+ raise exceptions.CommandError(msg)
+
+
+class ShowNetworkTrunk(command.ShowOne):
+ """Show information of a given network trunk"""
+ def get_parser(self, prog_name):
+ parser = super(ShowNetworkTrunk, self).get_parser(prog_name)
+ parser.add_argument(
+ 'trunk',
+ metavar="<trunk>",
+ help=_("Trunk to display (name or ID)")
+ )
+ return parser
+
+ def take_action(self, parsed_args):
+ client = self.app.client_manager.network
+ trunk_id = client.find_trunk(parsed_args.trunk).id
+ obj = client.get_trunk(trunk_id)
+ display_columns, columns = _get_columns(obj)
+ data = osc_utils.get_dict_properties(obj, columns,
+ formatters=_formatters)
+ return display_columns, data
+
+
+class ListNetworkSubport(command.Lister):
+ """List all subports for a given network trunk"""
+
+ def get_parser(self, prog_name):
+ parser = super(ListNetworkSubport, self).get_parser(prog_name)
+ parser.add_argument(
+ '--trunk',
+ required=True,
+ metavar="<trunk>",
+ help=_("List subports belonging to this trunk (name or ID)")
+ )
+ return parser
+
+ def take_action(self, parsed_args):
+ client = self.app.client_manager.network
+ trunk_id = client.find_trunk(parsed_args.trunk)
+ data = client.get_trunk_subports(trunk_id)
+ headers = ('Port', 'Segmentation Type', 'Segmentation ID')
+ columns = ('port_id', 'segmentation_type', 'segmentation_id')
+ return (headers,
+ (osc_utils.get_dict_properties(
+ s, columns,
+ ) for s in data[SUB_PORTS]))
+
+
+class UnsetNetworkTrunk(command.Command):
+ """Unset subports from a given network trunk"""
+
+ def get_parser(self, prog_name):
+ parser = super(UnsetNetworkTrunk, self).get_parser(prog_name)
+ parser.add_argument(
+ 'trunk',
+ metavar="<trunk>",
+ help=_("Unset subports from this trunk (name or ID)")
+ )
+ parser.add_argument(
+ '--subport',
+ metavar="<subport>",
+ required=True,
+ action='append', dest='unset_subports',
+            help=_("Subport to delete (name or ID of the port) "
+                   "(the --subport option can be repeated)")
+ )
+ return parser
+
+ def take_action(self, parsed_args):
+ client = self.app.client_manager.network
+ attrs = _get_attrs_for_subports(self.app.client_manager, parsed_args)
+ trunk_id = client.find_trunk(parsed_args.trunk)
+ client.delete_trunk_subports(trunk_id, attrs)
+
+
+_formatters = {
+ 'admin_state_up': AdminStateColumn,
+ 'sub_ports': format_columns.ListDictColumn,
+}
+
+
+def _get_columns(item):
+ column_map = {}
+ hidden_columns = ['location', 'tenant_id']
+ return osc_utils.get_osc_show_columns_for_sdk_resource(
+ item,
+ column_map,
+ hidden_columns
+ )
+
+
+def _get_attrs_for_trunk(client_manager, parsed_args):
+ attrs = {}
+ if parsed_args.name is not None:
+ attrs['name'] = str(parsed_args.name)
+ if parsed_args.description is not None:
+ attrs['description'] = str(parsed_args.description)
+ if parsed_args.enable:
+ attrs['admin_state_up'] = True
+ if parsed_args.disable:
+ attrs['admin_state_up'] = False
+ if 'parent_port' in parsed_args and parsed_args.parent_port is not None:
+ port_id = client_manager.network.find_port(
+ parsed_args.parent_port)['id']
+ attrs['port_id'] = port_id
+ if 'add_subports' in parsed_args and parsed_args.add_subports is not None:
+ attrs[SUB_PORTS] = _format_subports(client_manager,
+ parsed_args.add_subports)
+
+ # "trunk set" command doesn't support setting project.
+ if 'project' in parsed_args and parsed_args.project is not None:
+ identity_client = client_manager.identity
+ project_id = identity_utils.find_project(
+ identity_client,
+ parsed_args.project,
+ parsed_args.project_domain,
+ ).id
+ attrs['tenant_id'] = project_id
+
+ return attrs
+
+
+def _format_subports(client_manager, subports):
+ attrs = []
+ for subport in subports:
+ subport_attrs = {}
+ if subport.get('port'):
+ port_id = client_manager.network.find_port(subport['port'])['id']
+ subport_attrs['port_id'] = port_id
+ if subport.get('segmentation-id'):
+ try:
+ subport_attrs['segmentation_id'] = int(
+ subport['segmentation-id'])
+ except ValueError:
+ msg = (_("Segmentation-id '%s' is not an integer") %
+ subport['segmentation-id'])
+ raise exceptions.CommandError(msg)
+ if subport.get('segmentation-type'):
+ subport_attrs['segmentation_type'] = subport['segmentation-type']
+ attrs.append(subport_attrs)
+ return attrs
+
+
+def _get_attrs_for_subports(client_manager, parsed_args):
+ attrs = {}
+ if 'set_subports' in parsed_args and parsed_args.set_subports is not None:
+ attrs = _format_subports(client_manager,
+ parsed_args.set_subports)
+ if ('unset_subports' in parsed_args and
+ parsed_args.unset_subports is not None):
+ subports_list = []
+ for subport in parsed_args.unset_subports:
+ port_id = client_manager.network.find_port(subport)['id']
+ subports_list.append({'port_id': port_id})
+ attrs = subports_list
+ return attrs
+
+
+def _get_id(client, id_or_name, resource):
+ return client.find_resource(resource, str(id_or_name))['id']
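
To make the new trunk commands easier to follow, here is a rough picture, with made-up IDs, of the attribute dict that _get_attrs_for_trunk() and _format_subports() assemble for a create call like the one exercised in the functional tests below; the real code resolves port names to IDs via client_manager.network.find_port():

    # openstack network trunk create mytrunk \
    #     --parent-port parent0 \
    #     --subport port=sub0,segmentation-type=vlan,segmentation-id=42
    attrs = {
        'name': 'mytrunk',
        'admin_state_up': True,            # --enable is the default
        'port_id': 'parent-port-uuid',     # resolved parent port (made up)
        'sub_ports': [{
            'port_id': 'sub-port-uuid',    # resolved subport (made up)
            'segmentation_type': 'vlan',
            'segmentation_id': 42,
        }],
    }
    # CreateNetworkTrunk.take_action() then calls client.create_trunk(**attrs).
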
diff --git a/openstackclient/tests/functional/compute/v2/test_server.py b/openstackclient/tests/functional/compute/v2/test_server.py
index 37183a79..05945a02 100644
--- a/openstackclient/tests/functional/compute/v2/test_server.py
+++ b/openstackclient/tests/functional/compute/v2/test_server.py
@@ -1375,10 +1375,8 @@ class ServerTests(common.ComputeTestCase):
parse_output=True,
)
- self.assertIsNotNone(cmd_output['ID'])
self.assertEqual(server_id, cmd_output['Server ID'])
self.assertEqual(volume_id, cmd_output['Volume ID'])
- volume_attachment_id = cmd_output['ID']
cmd_output = self.openstack(
'server volume list ' +
@@ -1386,7 +1384,6 @@ class ServerTests(common.ComputeTestCase):
parse_output=True,
)
- self.assertEqual(volume_attachment_id, cmd_output[0]['ID'])
self.assertEqual(server_id, cmd_output[0]['Server ID'])
self.assertEqual(volume_id, cmd_output[0]['Volume ID'])
diff --git a/openstackclient/tests/functional/network/v2/test_network_qos_rule_type.py b/openstackclient/tests/functional/network/v2/test_network_qos_rule_type.py
index 6b719cbe..4ead65cc 100644
--- a/openstackclient/tests/functional/network/v2/test_network_qos_rule_type.py
+++ b/openstackclient/tests/functional/network/v2/test_network_qos_rule_type.py
@@ -21,6 +21,13 @@ class NetworkQosRuleTypeTests(common.NetworkTests):
AVAILABLE_RULE_TYPES = ['dscp_marking',
'bandwidth_limit']
+ # NOTE(ralonsoh): this list was updated in Yoga (February 2022)
+ ALL_AVAILABLE_RULE_TYPES = ['dscp_marking',
+ 'bandwidth_limit',
+ 'minimum_bandwidth',
+ 'packet_rate_limit',
+ 'minimum_packet_rate',
+ ]
def setUp(self):
super(NetworkQosRuleTypeTests, self).setUp()
@@ -36,6 +43,28 @@ class NetworkQosRuleTypeTests(common.NetworkTests):
for rule_type in self.AVAILABLE_RULE_TYPES:
self.assertIn(rule_type, [x['Type'] for x in cmd_output])
+ def test_qos_rule_type_list_all_supported(self):
+ if not self.is_extension_enabled('qos-rule-type-filter'):
+ self.skipTest('No "qos-rule-type-filter" extension present')
+
+ cmd_output = self.openstack(
+ 'network qos rule type list --all-supported -f json',
+ parse_output=True
+ )
+ for rule_type in self.AVAILABLE_RULE_TYPES:
+ self.assertIn(rule_type, [x['Type'] for x in cmd_output])
+
+ def test_qos_rule_type_list_all_rules(self):
+ if not self.is_extension_enabled('qos-rule-type-filter'):
+ self.skipTest('No "qos-rule-type-filter" extension present')
+
+ cmd_output = self.openstack(
+ 'network qos rule type list --all-rules -f json',
+ parse_output=True
+ )
+ for rule_type in self.ALL_AVAILABLE_RULE_TYPES:
+ self.assertIn(rule_type, [x['Type'] for x in cmd_output])
+
def test_qos_rule_type_details(self):
for rule_type in self.AVAILABLE_RULE_TYPES:
cmd_output = self.openstack(
diff --git a/openstackclient/tests/functional/network/v2/test_network_trunk.py b/openstackclient/tests/functional/network/v2/test_network_trunk.py
new file mode 100644
index 00000000..bbb77a0d
--- /dev/null
+++ b/openstackclient/tests/functional/network/v2/test_network_trunk.py
@@ -0,0 +1,149 @@
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import json
+import uuid
+
+from openstackclient.tests.functional.network.v2 import common
+
+
+class NetworkTrunkTests(common.NetworkTests):
+ """Functional tests for Network Trunks"""
+
+ def setUp(self):
+ super().setUp()
+ # Nothing in this class works with Nova Network
+ if not self.haz_network:
+ self.skipTest("No Network service present")
+
+ network_name = uuid.uuid4().hex
+ subnet_name = uuid.uuid4().hex
+ self.parent_port_name = uuid.uuid4().hex
+ self.sub_port_name = uuid.uuid4().hex
+
+ self.openstack('network create %s' % network_name)
+ self.addCleanup(self.openstack, 'network delete %s' % network_name)
+
+ self.openstack(
+ 'subnet create %s '
+ '--network %s --subnet-range 10.0.0.0/24' % (
+ subnet_name, network_name))
+ self.openstack('port create %s --network %s' %
+ (self.parent_port_name, network_name))
+ self.addCleanup(self.openstack, 'port delete %s' %
+ self.parent_port_name)
+ json_out = self.openstack('port create %s --network %s -f json' %
+ (self.sub_port_name, network_name))
+ self.sub_port_id = json.loads(json_out)['id']
+ self.addCleanup(self.openstack, 'port delete %s' % self.sub_port_name)
+
+ def test_network_trunk_create_delete(self):
+ trunk_name = uuid.uuid4().hex
+ self.openstack('network trunk create %s --parent-port %s -f json ' %
+ (trunk_name, self.parent_port_name))
+ raw_output = self.openstack(
+ 'network trunk delete ' +
+ trunk_name
+ )
+ self.assertEqual('', raw_output)
+
+ def test_network_trunk_list(self):
+ trunk_name = uuid.uuid4().hex
+ json_output = json.loads(self.openstack(
+ 'network trunk create %s --parent-port %s -f json ' %
+ (trunk_name, self.parent_port_name)))
+ self.addCleanup(self.openstack,
+ 'network trunk delete ' + trunk_name)
+ self.assertEqual(trunk_name, json_output['name'])
+
+ json_output = json.loads(self.openstack(
+ 'network trunk list -f json'
+ ))
+ self.assertIn(trunk_name, [tr['Name'] for tr in json_output])
+
+ def test_network_trunk_set_unset(self):
+ trunk_name = uuid.uuid4().hex
+ json_output = json.loads(self.openstack(
+ 'network trunk create %s --parent-port %s -f json ' %
+ (trunk_name, self.parent_port_name)))
+ self.addCleanup(self.openstack,
+ 'network trunk delete ' + trunk_name)
+ self.assertEqual(trunk_name, json_output['name'])
+
+ self.openstack(
+ 'network trunk set '
+ '--enable ' +
+ trunk_name
+ )
+
+ json_output = json.loads(self.openstack(
+ 'network trunk show -f json ' +
+ trunk_name
+ ))
+ self.assertTrue(json_output['is_admin_state_up'])
+
+ # Add subport to trunk
+ self.openstack(
+ 'network trunk set ' +
+ '--subport port=%s,segmentation-type=vlan,segmentation-id=42 ' %
+ (self.sub_port_name) +
+ trunk_name
+ )
+ json_output = json.loads(self.openstack(
+ 'network trunk show -f json ' +
+ trunk_name
+ ))
+ self.assertEqual(
+ [{
+ 'port_id': self.sub_port_id,
+ 'segmentation_id': 42,
+ 'segmentation_type': 'vlan'
+ }],
+ json_output['sub_ports'])
+
+ # Remove subport from trunk
+ self.openstack(
+ 'network trunk unset ' +
+ trunk_name +
+ ' --subport ' +
+ self.sub_port_name
+ )
+ json_output = json.loads(self.openstack(
+ 'network trunk show -f json ' +
+ trunk_name
+ ))
+ self.assertEqual(
+ [],
+ json_output['sub_ports'])
+
+ def test_network_trunk_list_subports(self):
+ trunk_name = uuid.uuid4().hex
+ json_output = json.loads(self.openstack(
+ 'network trunk create %s --parent-port %s '
+ '--subport port=%s,segmentation-type=vlan,segmentation-id=42 '
+ '-f json ' %
+ (trunk_name, self.parent_port_name, self.sub_port_name)))
+ self.addCleanup(self.openstack,
+ 'network trunk delete ' + trunk_name)
+ self.assertEqual(trunk_name, json_output['name'])
+
+ json_output = json.loads(self.openstack(
+ 'network subport list --trunk %s -f json' % trunk_name))
+ self.assertEqual(
+ [{
+ 'Port': self.sub_port_id,
+ 'Segmentation ID': 42,
+ 'Segmentation Type': 'vlan'
+ }],
+ json_output)
diff --git a/openstackclient/tests/unit/common/test_project_cleanup.py b/openstackclient/tests/unit/common/test_project_cleanup.py
index d235aeb0..50c434b9 100644
--- a/openstackclient/tests/unit/common/test_project_cleanup.py
+++ b/openstackclient/tests/unit/common/test_project_cleanup.py
@@ -85,6 +85,32 @@ class TestProjectCleanup(TestProjectCleanupBase):
self.assertIsNone(result)
+ def test_project_cleanup_with_auto_approve(self):
+ arglist = [
+ '--project', self.project.id,
+ '--auto-approve',
+ ]
+ verifylist = [
+ ('dry_run', False),
+ ('auth_project', False),
+ ('project', self.project.id),
+ ('auto_approve', True),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+ result = None
+
+ result = self.cmd.take_action(parsed_args)
+
+ self.sdk_connect_as_project_mock.assert_called_with(
+ self.project)
+ calls = [
+ mock.call(dry_run=True, status_queue=mock.ANY, filters={}),
+ mock.call(dry_run=False, status_queue=mock.ANY, filters={})
+ ]
+ self.project_cleanup_mock.assert_has_calls(calls)
+
+ self.assertIsNone(result)
+
def test_project_cleanup_with_project(self):
arglist = [
'--project', self.project.id,
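
Side note on the assertion pattern in the new --auto-approve test: mock's assert_has_calls() verifies that the listed calls occur in that order among mock_calls, with other calls allowed in between. A standalone refresher:

    from unittest import mock

    cleanup = mock.Mock()
    cleanup(dry_run=True, status_queue='q', filters={})
    cleanup(dry_run=False, status_queue='q', filters={})

    # Passes: both calls happened, in this order.
    cleanup.assert_has_calls([
        mock.call(dry_run=True, status_queue='q', filters={}),
        mock.call(dry_run=False, status_queue='q', filters={}),
    ])
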
diff --git a/openstackclient/tests/unit/compute/v2/fakes.py b/openstackclient/tests/unit/compute/v2/fakes.py
index b2702128..f7f07509 100644
--- a/openstackclient/tests/unit/compute/v2/fakes.py
+++ b/openstackclient/tests/unit/compute/v2/fakes.py
@@ -21,9 +21,11 @@ import uuid
from novaclient import api_versions
from openstack.compute.v2 import flavor as _flavor
from openstack.compute.v2 import hypervisor as _hypervisor
+from openstack.compute.v2 import migration as _migration
from openstack.compute.v2 import server as _server
from openstack.compute.v2 import server_group as _server_group
from openstack.compute.v2 import server_interface as _server_interface
+from openstack.compute.v2 import server_migration as _server_migration
from openstack.compute.v2 import service
from openstack.compute.v2 import volume_attachment
@@ -1433,242 +1435,155 @@ class FakeRateLimit(object):
self.next_available = next_available
-class FakeMigration(object):
- """Fake one or more migrations."""
+def create_one_migration(attrs=None):
+ """Create a fake migration.
- @staticmethod
- def create_one_migration(attrs=None, methods=None):
- """Create a fake migration.
-
- :param dict attrs:
- A dictionary with all attributes
- :param dict methods:
- A dictionary with all methods
- :return:
- A FakeResource object, with id, type, and so on
- """
- attrs = attrs or {}
- methods = methods or {}
+ :param dict attrs: A dictionary with all attributes
+ :return: A fake openstack.compute.v2.migration.Migration object
+ """
+ attrs = attrs or {}
- # Set default attributes.
- migration_info = {
- "dest_host": "10.0.2.15",
- "status": "migrating",
- "migration_type": "migration",
- "updated_at": "2017-01-31T08:03:25.000000",
- "created_at": "2017-01-31T08:03:21.000000",
- "dest_compute": "compute-" + uuid.uuid4().hex,
- "id": random.randint(1, 999),
- "source_node": "node-" + uuid.uuid4().hex,
- "instance_uuid": uuid.uuid4().hex,
- "dest_node": "node-" + uuid.uuid4().hex,
- "source_compute": "compute-" + uuid.uuid4().hex,
- "uuid": uuid.uuid4().hex,
- "old_instance_type_id": uuid.uuid4().hex,
- "new_instance_type_id": uuid.uuid4().hex,
- "project_id": uuid.uuid4().hex,
- "user_id": uuid.uuid4().hex
- }
+ # Set default attributes.
+ migration_info = {
+ "created_at": "2017-01-31T08:03:21.000000",
+ "dest_compute": "compute-" + uuid.uuid4().hex,
+ "dest_host": "10.0.2.15",
+ "dest_node": "node-" + uuid.uuid4().hex,
+ "id": random.randint(1, 999),
+ "migration_type": "migration",
+ "new_flavor_id": uuid.uuid4().hex,
+ "old_flavor_id": uuid.uuid4().hex,
+ "project_id": uuid.uuid4().hex,
+ "server_id": uuid.uuid4().hex,
+ "source_compute": "compute-" + uuid.uuid4().hex,
+ "source_node": "node-" + uuid.uuid4().hex,
+ "status": "migrating",
+ "updated_at": "2017-01-31T08:03:25.000000",
+ "user_id": uuid.uuid4().hex,
+ "uuid": uuid.uuid4().hex,
+ }
- # Overwrite default attributes.
- migration_info.update(attrs)
+ # Overwrite default attributes.
+ migration_info.update(attrs)
- migration = fakes.FakeResource(info=copy.deepcopy(migration_info),
- methods=methods,
- loaded=True)
- return migration
+ migration = _migration.Migration(**migration_info)
+ return migration
- @staticmethod
- def create_migrations(attrs=None, methods=None, count=2):
- """Create multiple fake migrations.
- :param dict attrs:
- A dictionary with all attributes
- :param dict methods:
- A dictionary with all methods
- :param int count:
- The number of migrations to fake
- :return:
- A list of FakeResource objects faking the migrations
- """
- migrations = []
- for i in range(0, count):
- migrations.append(
- FakeMigration.create_one_migration(
- attrs, methods))
+def create_migrations(attrs=None, count=2):
+ """Create multiple fake migrations.
- return migrations
+ :param dict attrs: A dictionary with all attributes
+ :param int count: The number of migrations to fake
+ :return: A list of fake openstack.compute.v2.migration.Migration objects
+ """
+ migrations = []
+ for i in range(0, count):
+ migrations.append(create_one_migration(attrs))
+ return migrations
-class FakeServerMigration(object):
- """Fake one or more server migrations."""
- @staticmethod
- def create_one_server_migration(attrs=None, methods=None):
- """Create a fake server migration.
-
- :param dict attrs:
- A dictionary with all attributes
- :param dict methods:
- A dictionary with all methods
- :return:
- A FakeResource object, with id, type, and so on
- """
- attrs = attrs or {}
- methods = methods or {}
+def create_one_server_migration(attrs=None):
+ """Create a fake server migration.
- # Set default attributes.
+ :param dict attrs: A dictionary with all attributes
+    :return: A fake
+        openstack.compute.v2.server_migration.ServerMigration object
+ """
+ attrs = attrs or {}
- migration_info = {
- "created_at": "2016-01-29T13:42:02.000000",
- "dest_compute": "compute2",
- "dest_host": "1.2.3.4",
- "dest_node": "node2",
- "id": random.randint(1, 999),
- "server_uuid": uuid.uuid4().hex,
- "source_compute": "compute1",
- "source_node": "node1",
- "status": "running",
- "memory_total_bytes": random.randint(1, 99999),
- "memory_processed_bytes": random.randint(1, 99999),
- "memory_remaining_bytes": random.randint(1, 99999),
- "disk_total_bytes": random.randint(1, 99999),
- "disk_processed_bytes": random.randint(1, 99999),
- "disk_remaining_bytes": random.randint(1, 99999),
- "updated_at": "2016-01-29T13:42:02.000000",
- # added in 2.59
- "uuid": uuid.uuid4().hex,
- # added in 2.80
- "user_id": uuid.uuid4().hex,
- "project_id": uuid.uuid4().hex,
- }
+ # Set default attributes.
- # Overwrite default attributes.
- migration_info.update(attrs)
+ migration_info = {
+ "created_at": "2016-01-29T13:42:02.000000",
+ "dest_compute": "compute2",
+ "dest_host": "1.2.3.4",
+ "dest_node": "node2",
+ "id": random.randint(1, 999),
+ "server_uuid": uuid.uuid4().hex,
+ "source_compute": "compute1",
+ "source_node": "node1",
+ "status": "running",
+ "memory_total_bytes": random.randint(1, 99999),
+ "memory_processed_bytes": random.randint(1, 99999),
+ "memory_remaining_bytes": random.randint(1, 99999),
+ "disk_total_bytes": random.randint(1, 99999),
+ "disk_processed_bytes": random.randint(1, 99999),
+ "disk_remaining_bytes": random.randint(1, 99999),
+ "updated_at": "2016-01-29T13:42:02.000000",
+ # added in 2.59
+ "uuid": uuid.uuid4().hex,
+ # added in 2.80
+ "user_id": uuid.uuid4().hex,
+ "project_id": uuid.uuid4().hex,
+ }
- migration = fakes.FakeResource(
- info=copy.deepcopy(migration_info),
- methods=methods,
- loaded=True)
- return migration
+ # Overwrite default attributes.
+ migration_info.update(attrs)
+ migration = _server_migration.ServerMigration(**migration_info)
+ return migration
-class FakeVolumeAttachment(object):
- """Fake one or more volume attachments (BDMs)."""
- @staticmethod
- def create_one_volume_attachment(attrs=None, methods=None):
- """Create a fake volume attachment.
+def create_server_migrations(attrs=None, count=2):
+    """Create multiple fake server migrations.
- :param dict attrs:
- A dictionary with all attributes
- :param dict methods:
- A dictionary with all methods
- :return:
- A FakeResource object, with id, device, and so on
- """
- attrs = attrs or {}
- methods = methods or {}
-
- # Set default attributes.
- volume_attachment_info = {
- "id": uuid.uuid4().hex,
- "device": "/dev/sdb",
- "serverId": uuid.uuid4().hex,
- "volumeId": uuid.uuid4().hex,
- # introduced in API microversion 2.70
- "tag": "foo",
- # introduced in API microversion 2.79
- "delete_on_termination": True,
- # introduced in API microversion 2.89
- "attachment_id": uuid.uuid4().hex,
- "bdm_uuid": uuid.uuid4().hex
- }
-
- # Overwrite default attributes.
- volume_attachment_info.update(attrs)
-
- volume_attachment = fakes.FakeResource(
- info=copy.deepcopy(volume_attachment_info),
- methods=methods,
- loaded=True)
- return volume_attachment
+ :param dict attrs: A dictionary with all attributes
+ :param int count: The number of server migrations to fake
+    :return: A list of fake
+ openstack.compute.v2.server_migration.ServerMigration objects
+ """
+ migrations = []
+ for i in range(0, count):
+        migrations.append(create_one_server_migration(attrs))
- @staticmethod
- def create_volume_attachments(attrs=None, methods=None, count=2):
- """Create multiple fake volume attachments (BDMs).
+ return migrations
- :param dict attrs:
- A dictionary with all attributes
- :param dict methods:
- A dictionary with all methods
- :param int count:
- The number of volume attachments to fake
- :return:
- A list of FakeResource objects faking the volume attachments.
- """
- volume_attachments = []
- for i in range(0, count):
- volume_attachments.append(
- FakeVolumeAttachment.create_one_volume_attachment(
- attrs, methods))
- return volume_attachments
+def create_one_volume_attachment(attrs=None):
+ """Create a fake volume attachment.
- @staticmethod
- def create_one_sdk_volume_attachment(attrs=None, methods=None):
- """Create a fake sdk VolumeAttachment.
+ :param dict attrs: A dictionary with all attributes
+ :return: A fake openstack.compute.v2.volume_attachment.VolumeAttachment
+ object
+ """
+ attrs = attrs or {}
- :param dict attrs:
- A dictionary with all attributes
- :param dict methods:
- A dictionary with all methods
- :return:
- A fake VolumeAttachment object, with id, device, and so on
- """
- attrs = attrs or {}
- methods = methods or {}
+ # Set default attributes.
+ volume_attachment_info = {
+ "id": uuid.uuid4().hex,
+ "device": "/dev/sdb",
+ "server_id": uuid.uuid4().hex,
+ "volume_id": uuid.uuid4().hex,
+ # introduced in API microversion 2.70
+ "tag": "foo",
+ # introduced in API microversion 2.79
+ "delete_on_termination": True,
+ # introduced in API microversion 2.89
+ "attachment_id": uuid.uuid4().hex,
+ "bdm_id": uuid.uuid4().hex,
+ }
- # Set default attributes.
- volume_attachment_info = {
- "id": uuid.uuid4().hex,
- "device": "/dev/sdb",
- "server_id": uuid.uuid4().hex,
- "volume_id": uuid.uuid4().hex,
- # introduced in API microversion 2.70
- "tag": "foo",
- # introduced in API microversion 2.79
- "delete_on_termination": True,
- # introduced in API microversion 2.89
- "attachment_id": uuid.uuid4().hex,
- "bdm_uuid": uuid.uuid4().hex
- }
+ # Overwrite default attributes.
+ volume_attachment_info.update(attrs)
- # Overwrite default attributes.
- volume_attachment_info.update(attrs)
+ return volume_attachment.VolumeAttachment(**volume_attachment_info)
- return volume_attachment.VolumeAttachment(**volume_attachment_info)
- @staticmethod
- def create_sdk_volume_attachments(attrs=None, methods=None, count=2):
- """Create multiple fake VolumeAttachment objects (BDMs).
+def create_volume_attachments(attrs=None, count=2):
+ """Create multiple fake volume attachments.
- :param dict attrs:
- A dictionary with all attributes
- :param dict methods:
- A dictionary with all methods
- :param int count:
- The number of volume attachments to fake
- :return:
- A list of VolumeAttachment objects faking the volume attachments.
- """
- volume_attachments = []
- for i in range(0, count):
- volume_attachments.append(
- FakeVolumeAttachment.create_one_sdk_volume_attachment(
- attrs, methods))
+ :param dict attrs: A dictionary with all attributes
+ :param int count: The number of volume attachments to fake
+ :return: A list of fake
+ openstack.compute.v2.volume_attachment.VolumeAttachment objects
+ """
+ volume_attachments = []
+ for i in range(0, count):
+ volume_attachments.append(create_one_volume_attachment(attrs))
- return volume_attachments
+ return volume_attachments
def create_one_hypervisor(attrs=None):
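
Using the converted module-level helpers from a test is straightforward; a small sketch (the overridden attribute values are illustrative):

    from openstackclient.tests.unit.compute.v2 import fakes as compute_fakes

    # Defaults can be overridden per test through the attrs dict.
    migration = compute_fakes.create_one_migration({'status': 'completed'})
    assert migration.status == 'completed'

    attachments = compute_fakes.create_volume_attachments(count=3)
    assert len(attachments) == 3
    assert attachments[0].device == '/dev/sdb'
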
diff --git a/openstackclient/tests/unit/compute/v2/test_host.py b/openstackclient/tests/unit/compute/v2/test_host.py
index 4e1b5ad1..ec91b37a 100644
--- a/openstackclient/tests/unit/compute/v2/test_host.py
+++ b/openstackclient/tests/unit/compute/v2/test_host.py
@@ -17,6 +17,7 @@ from unittest import mock
from openstackclient.compute.v2 import host
from openstackclient.tests.unit.compute.v2 import fakes as compute_fakes
+from openstackclient.tests.unit import fakes
from openstackclient.tests.unit import utils as tests_utils
@@ -26,7 +27,10 @@ class TestHost(compute_fakes.TestComputev2):
super(TestHost, self).setUp()
# Get a shortcut to the compute client
- self.compute = self.app.client_manager.compute
+ self.app.client_manager.sdk_connection = mock.Mock()
+ self.app.client_manager.sdk_connection.compute = mock.Mock()
+ self.sdk_client = self.app.client_manager.sdk_connection.compute
+ self.sdk_client.get = mock.Mock()
@mock.patch(
@@ -34,27 +38,29 @@ class TestHost(compute_fakes.TestComputev2):
)
class TestHostList(TestHost):
- host = compute_fakes.FakeHost.create_one_host()
-
- columns = (
- 'Host Name',
- 'Service',
- 'Zone',
- )
-
- data = [(
- host['host_name'],
- host['service'],
- host['zone'],
- )]
+ _host = compute_fakes.FakeHost.create_one_host()
def setUp(self):
super(TestHostList, self).setUp()
+ self.sdk_client.get.return_value = fakes.FakeResponse(
+ data={'hosts': [self._host]}
+ )
+
+ self.columns = (
+ 'Host Name', 'Service', 'Zone'
+ )
+
+ self.data = [(
+ self._host['host_name'],
+ self._host['service'],
+ self._host['zone'],
+ )]
+
self.cmd = host.ListHost(self.app, None)
def test_host_list_no_option(self, h_mock):
- h_mock.return_value = [self.host]
+ h_mock.return_value = [self._host]
arglist = []
verifylist = []
@@ -62,24 +68,24 @@ class TestHostList(TestHost):
columns, data = self.cmd.take_action(parsed_args)
- h_mock.assert_called_with(None)
+ self.sdk_client.get.assert_called_with('/os-hosts', microversion='2.1')
self.assertEqual(self.columns, columns)
self.assertEqual(self.data, list(data))
def test_host_list_with_option(self, h_mock):
- h_mock.return_value = [self.host]
+ h_mock.return_value = [self._host]
arglist = [
- '--zone', self.host['zone'],
+ '--zone', self._host['zone'],
]
verifylist = [
- ('zone', self.host['zone']),
+ ('zone', self._host['zone']),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
- h_mock.assert_called_with(self.host['zone'])
+ self.sdk_client.get.assert_called_with('/os-hosts', microversion='2.1')
self.assertEqual(self.columns, columns)
self.assertEqual(self.data, list(data))
@@ -141,31 +147,43 @@ class TestHostSet(TestHost):
)
class TestHostShow(TestHost):
- host = compute_fakes.FakeHost.create_one_host()
-
- columns = (
- 'Host',
- 'Project',
- 'CPU',
- 'Memory MB',
- 'Disk GB',
- )
-
- data = [(
- host['host'],
- host['project'],
- host['cpu'],
- host['memory_mb'],
- host['disk_gb'],
- )]
+ _host = compute_fakes.FakeHost.create_one_host()
def setUp(self):
super(TestHostShow, self).setUp()
+ output_data = {"resource": {
+ "host": self._host['host'],
+ "project": self._host['project'],
+ "cpu": self._host['cpu'],
+ "memory_mb": self._host['memory_mb'],
+ "disk_gb": self._host['disk_gb']
+ }}
+
+ self.sdk_client.get.return_value = fakes.FakeResponse(
+ data={'host': [output_data]}
+ )
+
+ self.columns = (
+ 'Host',
+ 'Project',
+ 'CPU',
+ 'Memory MB',
+ 'Disk GB',
+ )
+
+ self.data = [(
+ self._host['host'],
+ self._host['project'],
+ self._host['cpu'],
+ self._host['memory_mb'],
+ self._host['disk_gb'],
+ )]
+
self.cmd = host.ShowHost(self.app, None)
def test_host_show_no_option(self, h_mock):
- h_mock.host_show.return_value = [self.host]
+ h_mock.host_show.return_value = [self._host]
arglist = []
verifylist = []
@@ -174,18 +192,21 @@ class TestHostShow(TestHost):
self.cmd, arglist, verifylist)
def test_host_show_with_option(self, h_mock):
- h_mock.return_value = [self.host]
+ h_mock.return_value = [self._host]
arglist = [
- self.host['host_name'],
+ self._host['host_name'],
]
verifylist = [
- ('host', self.host['host_name']),
+ ('host', self._host['host_name']),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
- h_mock.assert_called_with(self.host['host_name'])
+ self.sdk_client.get.assert_called_with(
+ '/os-hosts/' + self._host['host_name'],
+ microversion='2.1'
+ )
self.assertEqual(self.columns, columns)
self.assertEqual(self.data, list(data))
diff --git a/openstackclient/tests/unit/compute/v2/test_server.py b/openstackclient/tests/unit/compute/v2/test_server.py
index 1d1b7d77..247f1587 100644
--- a/openstackclient/tests/unit/compute/v2/test_server.py
+++ b/openstackclient/tests/unit/compute/v2/test_server.py
@@ -912,8 +912,7 @@ class TestServerVolume(TestServer):
'volume_id': self.volumes[0].id,
}
self.volume_attachment = \
- compute_fakes.FakeVolumeAttachment.\
- create_one_sdk_volume_attachment(attrs=attrs)
+ compute_fakes.create_one_volume_attachment(attrs=attrs)
self.sdk_client.create_volume_attachment.return_value = \
self.volume_attachment
diff --git a/openstackclient/tests/unit/compute/v2/test_server_migration.py b/openstackclient/tests/unit/compute/v2/test_server_migration.py
index 93c1865a..afe868d9 100644
--- a/openstackclient/tests/unit/compute/v2/test_server_migration.py
+++ b/openstackclient/tests/unit/compute/v2/test_server_migration.py
@@ -40,6 +40,18 @@ class TestServerMigration(compute_fakes.TestComputev2):
self.app.client_manager.sdk_connection.compute = mock.Mock()
self.sdk_client = self.app.client_manager.sdk_connection.compute
+ patcher = mock.patch.object(
+ sdk_utils, 'supports_microversion', return_value=True)
+ self.addCleanup(patcher.stop)
+ self.supports_microversion_mock = patcher.start()
+
+ def _set_mock_microversion(self, mock_v):
+ """Set a specific microversion for the mock supports_microversion()."""
+ self.supports_microversion_mock.reset_mock(return_value=True)
+ self.supports_microversion_mock.side_effect = (
+ lambda _, v:
+ api_versions.APIVersion(v) <= api_versions.APIVersion(mock_v))
+
class TestListMigration(TestServerMigration):
"""Test fetch all migrations."""
@@ -51,19 +63,20 @@ class TestListMigration(TestServerMigration):
]
MIGRATION_FIELDS = [
- 'source_node', 'dest_node', 'source_compute', 'dest_compute',
- 'dest_host', 'status', 'server_id', 'old_flavor_id',
+ 'source_node', 'dest_node', 'source_compute',
+ 'dest_compute', 'dest_host', 'status', 'server_id', 'old_flavor_id',
'new_flavor_id', 'created_at', 'updated_at'
]
def setUp(self):
super().setUp()
- self.server = compute_fakes.FakeServer.create_one_server()
+ self._set_mock_microversion('2.1')
+
+ self.server = compute_fakes.FakeServer.create_one_sdk_server()
self.sdk_client.find_server.return_value = self.server
- self.migrations = compute_fakes.FakeMigration.create_migrations(
- count=3)
+ self.migrations = compute_fakes.create_migrations(count=3)
self.sdk_client.migrations.return_value = self.migrations
self.data = (common_utils.get_item_properties(
@@ -72,20 +85,6 @@ class TestListMigration(TestServerMigration):
# Get the command object to test
self.cmd = server_migration.ListMigration(self.app, None)
- patcher = mock.patch.object(
- sdk_utils, 'supports_microversion', return_value=True)
- self.addCleanup(patcher.stop)
- self.supports_microversion_mock = patcher.start()
- self._set_mock_microversion(
- self.app.client_manager.compute.api_version.get_string())
-
- def _set_mock_microversion(self, mock_v):
- """Set a specific microversion for the mock supports_microversion()."""
- self.supports_microversion_mock.reset_mock(return_value=True)
- self.supports_microversion_mock.side_effect = (
- lambda _, v:
- api_versions.APIVersion(v) <= api_versions.APIVersion(mock_v))
-
def test_server_migration_list_no_options(self):
arglist = []
verifylist = []
@@ -601,12 +600,15 @@ class TestServerMigrationShow(TestServerMigration):
def setUp(self):
super().setUp()
- self.server = compute_fakes.FakeServer.create_one_server()
- self.servers_mock.get.return_value = self.server
+ self.server = compute_fakes.FakeServer.create_one_sdk_server()
+ self.sdk_client.find_server.return_value = self.server
- self.server_migration = compute_fakes.FakeServerMigration\
- .create_one_server_migration()
- self.server_migrations_mock.get.return_value = self.server_migration
+ self.server_migration = compute_fakes.create_one_server_migration()
+ self.sdk_client.get_server_migration.return_value =\
+ self.server_migration
+ self.sdk_client.server_migrations.return_value = iter(
+ [self.server_migration]
+ )
self.columns = (
'ID',
@@ -629,7 +631,7 @@ class TestServerMigrationShow(TestServerMigration):
self.data = (
self.server_migration.id,
- self.server_migration.server_uuid,
+ self.server_migration.server_id,
self.server_migration.status,
self.server_migration.source_compute,
self.server_migration.source_node,
@@ -662,19 +664,18 @@ class TestServerMigrationShow(TestServerMigration):
self.assertEqual(self.columns, columns)
self.assertEqual(self.data, data)
- self.servers_mock.get.assert_called_with(self.server.id)
- self.server_migrations_mock.get.assert_called_with(
- self.server.id, '2',)
+ self.sdk_client.find_server.assert_called_with(
+ self.server.id, ignore_missing=False)
+ self.sdk_client.get_server_migration.assert_called_with(
+ self.server.id, '2', ignore_missing=False)
def test_server_migration_show(self):
- self.app.client_manager.compute.api_version = api_versions.APIVersion(
- '2.24')
+ self._set_mock_microversion('2.24')
self._test_server_migration_show()
def test_server_migration_show_v259(self):
- self.app.client_manager.compute.api_version = api_versions.APIVersion(
- '2.59')
+ self._set_mock_microversion('2.59')
self.columns += ('UUID',)
self.data += (self.server_migration.uuid,)
@@ -682,8 +683,7 @@ class TestServerMigrationShow(TestServerMigration):
self._test_server_migration_show()
def test_server_migration_show_v280(self):
- self.app.client_manager.compute.api_version = api_versions.APIVersion(
- '2.80')
+ self._set_mock_microversion('2.80')
self.columns += ('UUID', 'User ID', 'Project ID')
self.data += (
@@ -695,8 +695,7 @@ class TestServerMigrationShow(TestServerMigration):
self._test_server_migration_show()
def test_server_migration_show_pre_v224(self):
- self.app.client_manager.compute.api_version = api_versions.APIVersion(
- '2.23')
+ self._set_mock_microversion('2.23')
arglist = [
self.server.id,
@@ -714,9 +713,11 @@ class TestServerMigrationShow(TestServerMigration):
str(ex))
def test_server_migration_show_by_uuid(self):
- self.app.client_manager.compute.api_version = api_versions.APIVersion(
- '2.59')
- self.server_migrations_mock.list.return_value = [self.server_migration]
+ self._set_mock_microversion('2.59')
+
+ self.sdk_client.server_migrations.return_value = iter(
+ [self.server_migration]
+ )
self.columns += ('UUID',)
self.data += (self.server_migration.uuid,)
@@ -733,14 +734,14 @@ class TestServerMigrationShow(TestServerMigration):
self.assertEqual(self.columns, columns)
self.assertEqual(self.data, data)
- self.servers_mock.get.assert_called_with(self.server.id)
- self.server_migrations_mock.list.assert_called_with(self.server.id)
- self.server_migrations_mock.get.assert_not_called()
+ self.sdk_client.find_server.assert_called_with(
+ self.server.id, ignore_missing=False)
+ self.sdk_client.server_migrations.assert_called_with(self.server.id)
+ self.sdk_client.get_server_migration.assert_not_called()
def test_server_migration_show_by_uuid_no_matches(self):
- self.app.client_manager.compute.api_version = api_versions.APIVersion(
- '2.59')
- self.server_migrations_mock.list.return_value = []
+ self._set_mock_microversion('2.59')
+ self.sdk_client.server_migrations.return_value = iter([])
arglist = [
self.server.id,
@@ -758,8 +759,7 @@ class TestServerMigrationShow(TestServerMigration):
str(ex))
def test_server_migration_show_by_uuid_pre_v259(self):
- self.app.client_manager.compute.api_version = api_versions.APIVersion(
- '2.58')
+ self._set_mock_microversion('2.58')
arglist = [
self.server.id,
@@ -777,8 +777,7 @@ class TestServerMigrationShow(TestServerMigration):
str(ex))
def test_server_migration_show_invalid_id(self):
- self.app.client_manager.compute.api_version = api_versions.APIVersion(
- '2.24')
+ self._set_mock_microversion('2.24')
arglist = [
self.server.id,
@@ -801,17 +800,16 @@ class TestServerMigrationAbort(TestServerMigration):
def setUp(self):
super().setUp()
- self.server = compute_fakes.FakeServer.create_one_server()
+ self.server = compute_fakes.FakeServer.create_one_sdk_server()
# Return value for utils.find_resource for server.
- self.servers_mock.get.return_value = self.server
+ self.sdk_client.find_server.return_value = self.server
# Get the command object to test
self.cmd = server_migration.AbortMigration(self.app, None)
def test_migration_abort(self):
- self.app.client_manager.compute.api_version = api_versions.APIVersion(
- '2.24')
+ self._set_mock_microversion('2.24')
arglist = [
self.server.id,
@@ -822,14 +820,14 @@ class TestServerMigrationAbort(TestServerMigration):
result = self.cmd.take_action(parsed_args)
- self.servers_mock.get.assert_called_with(self.server.id)
- self.server_migrations_mock.live_migration_abort.assert_called_with(
- self.server.id, '2',)
+ self.sdk_client.find_server.assert_called_with(
+ self.server.id, ignore_missing=False)
+ self.sdk_client.abort_server_migration.assert_called_with(
+ '2', self.server.id, ignore_missing=False)
self.assertIsNone(result)
def test_migration_abort_pre_v224(self):
- self.app.client_manager.compute.api_version = api_versions.APIVersion(
- '2.23')
+ self._set_mock_microversion('2.23')
arglist = [
self.server.id,
@@ -847,12 +845,12 @@ class TestServerMigrationAbort(TestServerMigration):
str(ex))
def test_server_migration_abort_by_uuid(self):
- self.app.client_manager.compute.api_version = api_versions.APIVersion(
- '2.59')
+ self._set_mock_microversion('2.59')
- self.server_migration = compute_fakes.FakeServerMigration\
- .create_one_server_migration()
- self.server_migrations_mock.list.return_value = [self.server_migration]
+ self.server_migration = compute_fakes.create_one_server_migration()
+ self.sdk_client.server_migrations.return_value = iter(
+ [self.server_migration]
+ )
arglist = [
self.server.id,
@@ -863,17 +861,19 @@ class TestServerMigrationAbort(TestServerMigration):
result = self.cmd.take_action(parsed_args)
- self.servers_mock.get.assert_called_with(self.server.id)
- self.server_migrations_mock.list.assert_called_with(self.server.id)
- self.server_migrations_mock.live_migration_abort.assert_called_with(
- self.server.id, self.server_migration.id)
+ self.sdk_client.find_server.assert_called_with(
+ self.server.id, ignore_missing=False)
+ self.sdk_client.server_migrations.assert_called_with(self.server.id)
+ self.sdk_client.abort_server_migration.assert_called_with(
+ self.server_migration.id, self.server.id, ignore_missing=False)
self.assertIsNone(result)
def test_server_migration_abort_by_uuid_no_matches(self):
- self.app.client_manager.compute.api_version = api_versions.APIVersion(
- '2.59')
+ self._set_mock_microversion('2.59')
- self.server_migrations_mock.list.return_value = []
+ self.sdk_client.server_migrations.return_value = iter(
+ []
+ )
arglist = [
self.server.id,
@@ -891,8 +891,7 @@ class TestServerMigrationAbort(TestServerMigration):
str(ex))
def test_server_migration_abort_by_uuid_pre_v259(self):
- self.app.client_manager.compute.api_version = api_versions.APIVersion(
- '2.58')
+ self._set_mock_microversion('2.58')
arglist = [
self.server.id,
@@ -915,17 +914,16 @@ class TestServerMigrationForceComplete(TestServerMigration):
def setUp(self):
super().setUp()
- self.server = compute_fakes.FakeServer.create_one_server()
+ self.server = compute_fakes.FakeServer.create_one_sdk_server()
# Return value for utils.find_resource for server.
- self.servers_mock.get.return_value = self.server
+ self.sdk_client.find_server.return_value = self.server
# Get the command object to test
self.cmd = server_migration.ForceCompleteMigration(self.app, None)
def test_migration_force_complete(self):
- self.app.client_manager.compute.api_version = api_versions.APIVersion(
- '2.22')
+ self._set_mock_microversion('2.22')
arglist = [
self.server.id,
@@ -936,14 +934,14 @@ class TestServerMigrationForceComplete(TestServerMigration):
result = self.cmd.take_action(parsed_args)
- self.servers_mock.get.assert_called_with(self.server.id)
- self.server_migrations_mock.live_migrate_force_complete\
- .assert_called_with(self.server.id, '2',)
+ self.sdk_client.find_server.assert_called_with(
+ self.server.id, ignore_missing=False)
+ self.sdk_client.force_complete_server_migration\
+ .assert_called_with('2', self.server.id)
self.assertIsNone(result)
def test_migration_force_complete_pre_v222(self):
- self.app.client_manager.compute.api_version = api_versions.APIVersion(
- '2.21')
+ self._set_mock_microversion('2.21')
arglist = [
self.server.id,
@@ -961,12 +959,12 @@ class TestServerMigrationForceComplete(TestServerMigration):
str(ex))
def test_server_migration_force_complete_by_uuid(self):
- self.app.client_manager.compute.api_version = api_versions.APIVersion(
- '2.59')
+ self._set_mock_microversion('2.59')
- self.server_migration = compute_fakes.FakeServerMigration\
- .create_one_server_migration()
- self.server_migrations_mock.list.return_value = [self.server_migration]
+ self.server_migration = compute_fakes.create_one_server_migration()
+ self.sdk_client.server_migrations.return_value = iter(
+ [self.server_migration]
+ )
arglist = [
self.server.id,
@@ -977,17 +975,17 @@ class TestServerMigrationForceComplete(TestServerMigration):
result = self.cmd.take_action(parsed_args)
- self.servers_mock.get.assert_called_with(self.server.id)
- self.server_migrations_mock.list.assert_called_with(self.server.id)
- self.server_migrations_mock.live_migrate_force_complete\
- .assert_called_with(self.server.id, self.server_migration.id)
+ self.sdk_client.find_server.assert_called_with(
+ self.server.id, ignore_missing=False)
+ self.sdk_client.server_migrations.assert_called_with(self.server.id)
+ self.sdk_client.force_complete_server_migration.\
+ assert_called_with(self.server_migration.id, self.server.id)
self.assertIsNone(result)
def test_server_migration_force_complete_by_uuid_no_matches(self):
- self.app.client_manager.compute.api_version = api_versions.APIVersion(
- '2.59')
+ self._set_mock_microversion('2.59')
- self.server_migrations_mock.list.return_value = []
+ self.sdk_client.server_migrations.return_value = iter([])
arglist = [
self.server.id,
@@ -1005,8 +1003,7 @@ class TestServerMigrationForceComplete(TestServerMigration):
str(ex))
def test_server_migration_force_complete_by_uuid_pre_v259(self):
- self.app.client_manager.compute.api_version = api_versions.APIVersion(
- '2.58')
+ self._set_mock_microversion('2.58')
arglist = [
self.server.id,
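
The _set_mock_microversion() helper moved into the base class above works by replacing supports_microversion() with a comparison against a pinned version string. A self-contained illustration of the same trick:

    from unittest import mock

    from novaclient import api_versions


    def fake_supports_microversion(pinned):
        # True exactly when the requested microversion is <= the pinned one.
        return lambda _client, version: (
            api_versions.APIVersion(version) <= api_versions.APIVersion(pinned))


    supports = mock.Mock(side_effect=fake_supports_microversion('2.59'))
    assert supports(None, '2.24') is True    # old enough for the pinned 2.59
    assert supports(None, '2.80') is False   # newer than the pinned 2.59
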
diff --git a/openstackclient/tests/unit/compute/v2/test_server_volume.py b/openstackclient/tests/unit/compute/v2/test_server_volume.py
index 02d378f8..f86bc7dd 100644
--- a/openstackclient/tests/unit/compute/v2/test_server_volume.py
+++ b/openstackclient/tests/unit/compute/v2/test_server_volume.py
@@ -11,11 +11,15 @@
# under the License.
#
+from unittest import mock
+
from novaclient import api_versions
+from openstack import utils as sdk_utils
from osc_lib import exceptions
from openstackclient.compute.v2 import server_volume
from openstackclient.tests.unit.compute.v2 import fakes as compute_fakes
+from openstackclient.tests.unit.volume.v2 import fakes as volume_fakes
class TestServerVolume(compute_fakes.TestComputev2):
@@ -23,13 +27,11 @@ class TestServerVolume(compute_fakes.TestComputev2):
def setUp(self):
super().setUp()
- # Get a shortcut to the compute client ServerManager Mock
- self.servers_mock = self.app.client_manager.compute.servers
- self.servers_mock.reset_mock()
-
- # Get a shortcut to the compute client VolumeManager mock
- self.servers_volumes_mock = self.app.client_manager.compute.volumes
- self.servers_volumes_mock.reset_mock()
+ self.app.client_manager.sdk_connection = mock.Mock()
+ self.app.client_manager.sdk_connection.compute = mock.Mock()
+ self.app.client_manager.sdk_connection.volume = mock.Mock()
+ self.compute_client = self.app.client_manager.sdk_connection.compute
+ self.volume_client = self.app.client_manager.sdk_connection.volume
class TestServerVolumeList(TestServerVolume):
@@ -37,20 +39,21 @@ class TestServerVolumeList(TestServerVolume):
def setUp(self):
super().setUp()
- self.server = compute_fakes.FakeServer.create_one_server()
- self.volume_attachments = (
- compute_fakes.FakeVolumeAttachment.create_volume_attachments())
+ self.server = compute_fakes.FakeServer.create_one_sdk_server()
+ self.volume_attachments = compute_fakes.create_volume_attachments()
- self.servers_mock.get.return_value = self.server
- self.servers_volumes_mock.get_server_volumes.return_value = (
+ self.compute_client.find_server.return_value = self.server
+ self.compute_client.volume_attachments.return_value = (
self.volume_attachments)
# Get the command object to test
self.cmd = server_volume.ListServerVolume(self.app, None)
- def test_server_volume_list(self):
+ @mock.patch.object(sdk_utils, 'supports_microversion')
+ def test_server_volume_list(self, sm_mock):
self.app.client_manager.compute.api_version = \
api_versions.APIVersion('2.1')
+ sm_mock.side_effect = [False, False, False, False]
arglist = [
self.server.id,
@@ -68,24 +71,25 @@ class TestServerVolumeList(TestServerVolume):
(
self.volume_attachments[0].id,
self.volume_attachments[0].device,
- self.volume_attachments[0].serverId,
- self.volume_attachments[0].volumeId,
+ self.volume_attachments[0].server_id,
+ self.volume_attachments[0].volume_id,
),
(
self.volume_attachments[1].id,
self.volume_attachments[1].device,
- self.volume_attachments[1].serverId,
- self.volume_attachments[1].volumeId,
+ self.volume_attachments[1].server_id,
+ self.volume_attachments[1].volume_id,
),
),
tuple(data),
)
- self.servers_volumes_mock.get_server_volumes.assert_called_once_with(
- self.server.id)
+ self.compute_client.volume_attachments.assert_called_once_with(
+ self.server,
+ )
- def test_server_volume_list_with_tags(self):
- self.app.client_manager.compute.api_version = \
- api_versions.APIVersion('2.70')
+ @mock.patch.object(sdk_utils, 'supports_microversion')
+ def test_server_volume_list_with_tags(self, sm_mock):
+ sm_mock.side_effect = [False, True, False, False]
arglist = [
self.server.id,
@@ -105,27 +109,27 @@ class TestServerVolumeList(TestServerVolume):
(
self.volume_attachments[0].id,
self.volume_attachments[0].device,
- self.volume_attachments[0].serverId,
- self.volume_attachments[0].volumeId,
+ self.volume_attachments[0].server_id,
+ self.volume_attachments[0].volume_id,
self.volume_attachments[0].tag,
),
(
self.volume_attachments[1].id,
self.volume_attachments[1].device,
- self.volume_attachments[1].serverId,
- self.volume_attachments[1].volumeId,
+ self.volume_attachments[1].server_id,
+ self.volume_attachments[1].volume_id,
self.volume_attachments[1].tag,
),
),
tuple(data),
)
- self.servers_volumes_mock.get_server_volumes.assert_called_once_with(
- self.server.id)
-
- def test_server_volume_list_with_delete_on_attachment(self):
- self.app.client_manager.compute.api_version = \
- api_versions.APIVersion('2.79')
+ self.compute_client.volume_attachments.assert_called_once_with(
+ self.server,
+ )
+ @mock.patch.object(sdk_utils, 'supports_microversion')
+ def test_server_volume_list_with_delete_on_attachment(self, sm_mock):
+ sm_mock.side_effect = [False, True, True, False]
arglist = [
self.server.id,
]
@@ -148,29 +152,30 @@ class TestServerVolumeList(TestServerVolume):
(
self.volume_attachments[0].id,
self.volume_attachments[0].device,
- self.volume_attachments[0].serverId,
- self.volume_attachments[0].volumeId,
+ self.volume_attachments[0].server_id,
+ self.volume_attachments[0].volume_id,
self.volume_attachments[0].tag,
self.volume_attachments[0].delete_on_termination,
),
(
self.volume_attachments[1].id,
self.volume_attachments[1].device,
- self.volume_attachments[1].serverId,
- self.volume_attachments[1].volumeId,
+ self.volume_attachments[1].server_id,
+ self.volume_attachments[1].volume_id,
self.volume_attachments[1].tag,
self.volume_attachments[1].delete_on_termination,
),
),
tuple(data),
)
- self.servers_volumes_mock.get_server_volumes.assert_called_once_with(
- self.server.id)
+ self.compute_client.volume_attachments.assert_called_once_with(
+ self.server,
+ )
- def test_server_volume_list_with_attachment_ids(self):
- self.app.client_manager.compute.api_version = \
- api_versions.APIVersion('2.89')
+ @mock.patch.object(sdk_utils, 'supports_microversion')
+ def test_server_volume_list_with_attachment_ids(self, sm_mock):
+ sm_mock.side_effect = [True, True, True, True]
arglist = [
self.server.id,
]
@@ -193,28 +198,29 @@ class TestServerVolumeList(TestServerVolume):
(
(
self.volume_attachments[0].device,
- self.volume_attachments[0].serverId,
- self.volume_attachments[0].volumeId,
+ self.volume_attachments[0].server_id,
+ self.volume_attachments[0].volume_id,
self.volume_attachments[0].tag,
self.volume_attachments[0].delete_on_termination,
self.volume_attachments[0].attachment_id,
- self.volume_attachments[0].bdm_uuid
+ self.volume_attachments[0].bdm_id
),
(
self.volume_attachments[1].device,
- self.volume_attachments[1].serverId,
- self.volume_attachments[1].volumeId,
+ self.volume_attachments[1].server_id,
+ self.volume_attachments[1].volume_id,
self.volume_attachments[1].tag,
self.volume_attachments[1].delete_on_termination,
self.volume_attachments[1].attachment_id,
- self.volume_attachments[1].bdm_uuid
+ self.volume_attachments[1].bdm_id
),
),
tuple(data),
)
- self.servers_volumes_mock.get_server_volumes.assert_called_once_with(
- self.server.id)
+ self.compute_client.volume_attachments.assert_called_once_with(
+ self.server,
+ )
class TestServerVolumeUpdate(TestServerVolume):
@@ -222,21 +228,23 @@ class TestServerVolumeUpdate(TestServerVolume):
def setUp(self):
super().setUp()
- self.server = compute_fakes.FakeServer.create_one_server()
- self.servers_mock.get.return_value = self.server
+ self.server = compute_fakes.FakeServer.create_one_sdk_server()
+ self.compute_client.find_server.return_value = self.server
+
+ self.volume = volume_fakes.create_one_sdk_volume()
+ self.volume_client.find_volume.return_value = self.volume
# Get the command object to test
self.cmd = server_volume.UpdateServerVolume(self.app, None)
def test_server_volume_update(self):
-
arglist = [
self.server.id,
- 'foo',
+ self.volume.id,
]
verifylist = [
('server', self.server.id),
- ('volume', 'foo'),
+ ('volume', self.volume.id),
('delete_on_termination', None),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
@@ -244,67 +252,73 @@ class TestServerVolumeUpdate(TestServerVolume):
result = self.cmd.take_action(parsed_args)
# This is a no-op
- self.servers_volumes_mock.update_server_volume.assert_not_called()
+ self.compute_client.update_volume_attachment.assert_not_called()
self.assertIsNone(result)
- def test_server_volume_update_with_delete_on_termination(self):
- self.app.client_manager.compute.api_version = \
- api_versions.APIVersion('2.85')
+ @mock.patch.object(sdk_utils, 'supports_microversion')
+ def test_server_volume_update_with_delete_on_termination(self, sm_mock):
+ sm_mock.return_value = True
arglist = [
self.server.id,
- 'foo',
+ self.volume.id,
'--delete-on-termination',
]
verifylist = [
('server', self.server.id),
- ('volume', 'foo'),
+ ('volume', self.volume.id),
('delete_on_termination', True),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
result = self.cmd.take_action(parsed_args)
- self.servers_volumes_mock.update_server_volume.assert_called_once_with(
- self.server.id, 'foo', 'foo',
- delete_on_termination=True)
+ self.compute_client.update_volume_attachment.assert_called_once_with(
+ self.server,
+ self.volume,
+ delete_on_termination=True,
+ )
self.assertIsNone(result)
- def test_server_volume_update_with_preserve_on_termination(self):
- self.app.client_manager.compute.api_version = \
- api_versions.APIVersion('2.85')
+ @mock.patch.object(sdk_utils, 'supports_microversion')
+ def test_server_volume_update_with_preserve_on_termination(self, sm_mock):
+ sm_mock.return_value = True
arglist = [
self.server.id,
- 'foo',
+ self.volume.id,
'--preserve-on-termination',
]
verifylist = [
('server', self.server.id),
- ('volume', 'foo'),
+ ('volume', self.volume.id),
('delete_on_termination', False),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
result = self.cmd.take_action(parsed_args)
- self.servers_volumes_mock.update_server_volume.assert_called_once_with(
- self.server.id, 'foo', 'foo',
- delete_on_termination=False)
+ self.compute_client.update_volume_attachment.assert_called_once_with(
+ self.server,
+ self.volume,
+            delete_on_termination=False,
+ )
self.assertIsNone(result)
- def test_server_volume_update_with_delete_on_termination_pre_v285(self):
- self.app.client_manager.compute.api_version = \
- api_versions.APIVersion('2.84')
+ @mock.patch.object(sdk_utils, 'supports_microversion')
+ def test_server_volume_update_with_delete_on_termination_pre_v285(
+ self, sm_mock,
+ ):
+ sm_mock.return_value = False
arglist = [
self.server.id,
- 'foo',
+ self.volume.id,
'--delete-on-termination',
]
verifylist = [
('server', self.server.id),
- ('volume', 'foo'),
+ ('volume', self.volume.id),
('delete_on_termination', True),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
@@ -312,20 +326,24 @@ class TestServerVolumeUpdate(TestServerVolume):
self.assertRaises(
exceptions.CommandError,
self.cmd.take_action,
- parsed_args)
+ parsed_args,
+ )
+ self.compute_client.update_volume_attachment.assert_not_called()
- def test_server_volume_update_with_preserve_on_termination_pre_v285(self):
- self.app.client_manager.compute.api_version = \
- api_versions.APIVersion('2.84')
+ @mock.patch.object(sdk_utils, 'supports_microversion')
+ def test_server_volume_update_with_preserve_on_termination_pre_v285(
+ self, sm_mock,
+ ):
+ sm_mock.return_value = False
arglist = [
self.server.id,
- 'foo',
+ self.volume.id,
'--preserve-on-termination',
]
verifylist = [
('server', self.server.id),
- ('volume', 'foo'),
+ ('volume', self.volume.id),
('delete_on_termination', False),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
@@ -333,4 +351,6 @@ class TestServerVolumeUpdate(TestServerVolume):
self.assertRaises(
exceptions.CommandError,
self.cmd.take_action,
- parsed_args)
+ parsed_args,
+ )
+ self.compute_client.update_volume_attachment.assert_not_called()
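The server-volume tests above no longer pin novaclient APIVersion objects; they patch openstack.utils.supports_microversion and feed it a side_effect list, so each boolean is consumed by one capability check inside the command, in the order the command performs them. A minimal sketch of that pattern follows; list_columns and the specific microversions are illustrative, not the real ListServerVolume logic.

    from unittest import mock

    from openstack import utils as sdk_utils


    def list_columns(compute_client):
        # Hypothetical helper mirroring the mocked pattern: each call to
        # supports_microversion() consumes the next boolean in side_effect.
        columns = ['ID', 'Device', 'Server ID', 'Volume ID']
        if sdk_utils.supports_microversion(compute_client, '2.70'):
            columns.append('Tag')
        if sdk_utils.supports_microversion(compute_client, '2.79'):
            columns.append('Delete On Termination')
        return columns


    with mock.patch.object(sdk_utils, 'supports_microversion') as sm_mock:
        sm_mock.side_effect = [True, False]  # first check passes, second does not
        print(list_columns(compute_client=object()))
        # ['ID', 'Device', 'Server ID', 'Volume ID', 'Tag']
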
diff --git a/openstackclient/tests/unit/image/v2/test_image.py b/openstackclient/tests/unit/image/v2/test_image.py
index 010c4a9d..019b4d9d 100644
--- a/openstackclient/tests/unit/image/v2/test_image.py
+++ b/openstackclient/tests/unit/image/v2/test_image.py
@@ -905,7 +905,10 @@ class TestImageList(TestImage):
marker=self._image.id,
)
- self.client.find_image.assert_called_with('graven')
+ self.client.find_image.assert_called_with(
+ 'graven',
+ ignore_missing=False,
+ )
def test_image_list_name_option(self):
arglist = [
@@ -1856,6 +1859,10 @@ class TestImageImport(TestImage):
self.client.import_image.assert_called_once_with(
self.image,
method='glance-direct',
+ uri=None,
+ remote_region=None,
+ remote_image=None,
+ remote_service_interface=None,
stores=None,
all_stores=None,
all_stores_must_succeed=False,
@@ -1880,7 +1887,10 @@ class TestImageImport(TestImage):
self.client.import_image.assert_called_once_with(
self.image,
method='web-download',
- # uri='https://example.com/',
+ uri='https://example.com/',
+ remote_region=None,
+ remote_image=None,
+ remote_service_interface=None,
stores=None,
all_stores=None,
all_stores_must_succeed=False,
@@ -1978,6 +1988,10 @@ class TestImageImport(TestImage):
self.client.import_image.assert_called_once_with(
self.image,
method='copy-image',
+ uri=None,
+ remote_region=None,
+ remote_image=None,
+ remote_service_interface=None,
stores=['fast'],
all_stores=None,
all_stores_must_succeed=False,
@@ -2005,9 +2019,10 @@ class TestImageImport(TestImage):
self.client.import_image.assert_called_once_with(
self.image,
method='glance-download',
- # remote_region='eu/dublin',
- # remote_image='remote-image-id',
- # remote_service_interface='private',
+ uri=None,
+ remote_region='eu/dublin',
+ remote_image='remote-image-id',
+ remote_service_interface='private',
stores=None,
all_stores=None,
all_stores_must_succeed=False,
diff --git a/openstackclient/tests/unit/network/v2/fakes.py b/openstackclient/tests/unit/network/v2/fakes.py
index 4d029a0e..6d922008 100644
--- a/openstackclient/tests/unit/network/v2/fakes.py
+++ b/openstackclient/tests/unit/network/v2/fakes.py
@@ -34,6 +34,7 @@ from openstack.network.v2 import port as _port
from openstack.network.v2 import rbac_policy as network_rbac
from openstack.network.v2 import segment as _segment
from openstack.network.v2 import service_profile as _flavor_profile
+from openstack.network.v2 import trunk as _trunk
from openstackclient.tests.unit import fakes
from openstackclient.tests.unit.identity.v3 import fakes as identity_fakes_v3
@@ -1065,11 +1066,13 @@ class FakeFloatingIPPortForwarding(object):
""""Fake one or more Port forwarding"""
@staticmethod
- def create_one_port_forwarding(attrs=None):
+ def create_one_port_forwarding(attrs=None, use_range=False):
"""Create a fake Port Forwarding.
:param Dictionary attrs:
A dictionary with all attributes
+ :param Boolean use_range:
+        Whether to use port ranges instead of single ports
:return:
A FakeResource object with name, id, etc.
"""
@@ -1083,13 +1086,29 @@ class FakeFloatingIPPortForwarding(object):
'floatingip_id': floatingip_id,
'internal_port_id': 'internal-port-id-' + uuid.uuid4().hex,
'internal_ip_address': '192.168.1.2',
- 'internal_port': randint(1, 65535),
- 'external_port': randint(1, 65535),
'protocol': 'tcp',
'description': 'some description',
'location': 'MUNCHMUNCHMUNCH',
}
+ if use_range:
+ port_range = randint(0, 100)
+ internal_start = randint(1, 65535 - port_range)
+ internal_end = internal_start + port_range
+ internal_range = ':'.join(map(str, [internal_start, internal_end]))
+ external_start = randint(1, 65535 - port_range)
+ external_end = external_start + port_range
+ external_range = ':'.join(map(str, [external_start, external_end]))
+ port_forwarding_attrs['internal_port_range'] = internal_range
+ port_forwarding_attrs['external_port_range'] = external_range
+ port_forwarding_attrs['internal_port'] = None
+ port_forwarding_attrs['external_port'] = None
+ else:
+ port_forwarding_attrs['internal_port'] = randint(1, 65535)
+ port_forwarding_attrs['external_port'] = randint(1, 65535)
+ port_forwarding_attrs['internal_port_range'] = ''
+ port_forwarding_attrs['external_port_range'] = ''
+
# Overwrite default attributes.
port_forwarding_attrs.update(attrs)
@@ -1100,25 +1119,28 @@ class FakeFloatingIPPortForwarding(object):
return port_forwarding
@staticmethod
- def create_port_forwardings(attrs=None, count=2):
+ def create_port_forwardings(attrs=None, count=2, use_range=False):
"""Create multiple fake Port Forwarding.
:param Dictionary attrs:
A dictionary with all attributes
:param int count:
The number of Port Forwarding rule to fake
+ :param Boolean use_range:
+        Whether to use port ranges instead of single ports
:return:
A list of FakeResource objects faking the Port Forwardings
"""
port_forwardings = []
for i in range(0, count):
port_forwardings.append(
- FakeFloatingIPPortForwarding.create_one_port_forwarding(attrs)
+ FakeFloatingIPPortForwarding.create_one_port_forwarding(
+ attrs, use_range=use_range)
)
return port_forwardings
@staticmethod
- def get_port_forwardings(port_forwardings=None, count=2):
+ def get_port_forwardings(port_forwardings=None, count=2, use_range=False):
"""Get a list of faked Port Forwardings.
If port forwardings list is provided, then initialize the Mock object
@@ -1128,13 +1150,16 @@ class FakeFloatingIPPortForwarding(object):
A list of FakeResource objects faking port forwardings
:param int count:
The number of Port Forwardings to fake
+ :param Boolean use_range:
+        Whether to use port ranges instead of single ports
:return:
An iterable Mock object with side_effect set to a list of faked
Port Forwardings
"""
if port_forwardings is None:
port_forwardings = (
- FakeFloatingIPPortForwarding.create_port_forwardings(count)
+ FakeFloatingIPPortForwarding.create_port_forwardings(
+ count, use_range=use_range)
)
return mock.Mock(side_effect=port_forwardings)
@@ -2152,3 +2177,71 @@ def get_ndp_proxies(ndp_proxies=None, count=2):
create_ndp_proxies(count)
)
return mock.Mock(side_effect=ndp_proxies)
+
+
+def create_one_trunk(attrs=None):
+ """Create a fake trunk.
+
+ :param Dictionary attrs:
+ A dictionary with all attributes
+ :return:
+ A FakeResource object with name, id, etc.
+ """
+ attrs = attrs or {}
+
+ # Set default attributes.
+ trunk_attrs = {
+ 'id': 'trunk-id-' + uuid.uuid4().hex,
+ 'name': 'trunk-name-' + uuid.uuid4().hex,
+ 'description': '',
+ 'port_id': 'port-' + uuid.uuid4().hex,
+ 'admin_state_up': True,
+ 'project_id': 'project-id-' + uuid.uuid4().hex,
+ 'status': 'ACTIVE',
+ 'sub_ports': [{'port_id': 'subport-' +
+ uuid.uuid4().hex,
+ 'segmentation_type': 'vlan',
+ 'segmentation_id': 100}],
+ }
+ # Overwrite default attributes.
+ trunk_attrs.update(attrs)
+
+ trunk = _trunk.Trunk(**trunk_attrs)
+
+ return trunk
+
+
+def create_trunks(attrs=None, count=2):
+ """Create multiple fake trunks.
+
+ :param Dictionary attrs:
+ A dictionary with all attributes
+ :param int count:
+ The number of trunks to fake
+ :return:
+ A list of FakeResource objects faking the trunks
+ """
+ trunks = []
+ for i in range(0, count):
+ trunks.append(create_one_trunk(attrs))
+
+ return trunks
+
+
+def get_trunks(trunks=None, count=2):
+ """Get an iterable Mock object with a list of faked trunks.
+
+ If trunk list is provided, then initialize the Mock object
+ with the list. Otherwise create one.
+
+ :param List trunks:
+ A list of FakeResource objects faking trunks
+ :param int count:
+ The number of trunks to fake
+ :return:
+ An iterable Mock object with side_effect set to a list of faked
+ trunks
+ """
+ if trunks is None:
+ trunks = create_trunks(count)
+ return mock.Mock(side_effect=trunks)
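Unlike the older class-based fakes, these helpers return real openstack.network.v2.trunk.Trunk objects, so tests can mix attribute and item access, and get_trunks() hands back a Mock whose side_effect walks the list. A short usage sketch with illustrative values:

    from openstackclient.tests.unit.network.v2 import fakes as network_fakes

    trunk = network_fakes.create_one_trunk({'name': 'trunk0', 'status': 'DOWN'})
    assert trunk.name == trunk['name'] == 'trunk0'   # SDK resources allow both styles
    assert trunk.sub_ports[0]['segmentation_type'] == 'vlan'

    trunks = network_fakes.create_trunks(count=3)    # three independent fakes
    trunk_getter = network_fakes.get_trunks(trunks)  # Mock(side_effect=trunks)
    assert trunk_getter() is trunks[0]               # successive calls yield successive trunks
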
diff --git a/openstackclient/tests/unit/network/v2/test_floating_ip_port_forwarding.py b/openstackclient/tests/unit/network/v2/test_floating_ip_port_forwarding.py
index 97399f43..d0f5af8c 100644
--- a/openstackclient/tests/unit/network/v2/test_floating_ip_port_forwarding.py
+++ b/openstackclient/tests/unit/network/v2/test_floating_ip_port_forwarding.py
@@ -49,6 +49,18 @@ class TestCreateFloatingIPPortForwarding(TestFloatingIPPortForwarding):
}
)
)
+
+ self.new_port_forwarding_with_ranges = (
+ network_fakes.FakeFloatingIPPortForwarding.
+ create_one_port_forwarding(
+ use_range=True,
+ attrs={
+ 'internal_port_id': self.port.id,
+ 'floatingip_id': self.floating_ip.id,
+ }
+ )
+ )
+
self.network.create_floating_ip_port_forwarding = mock.Mock(
return_value=self.new_port_forwarding)
@@ -63,22 +75,26 @@ class TestCreateFloatingIPPortForwarding(TestFloatingIPPortForwarding):
self.columns = (
'description',
'external_port',
+ 'external_port_range',
'floatingip_id',
'id',
'internal_ip_address',
'internal_port',
'internal_port_id',
+ 'internal_port_range',
'protocol'
)
self.data = (
self.new_port_forwarding.description,
self.new_port_forwarding.external_port,
+ self.new_port_forwarding.external_port_range,
self.new_port_forwarding.floatingip_id,
self.new_port_forwarding.id,
self.new_port_forwarding.internal_ip_address,
self.new_port_forwarding.internal_port,
self.new_port_forwarding.internal_port_id,
+ self.new_port_forwarding.internal_port_range,
self.new_port_forwarding.protocol,
)
@@ -90,6 +106,160 @@ class TestCreateFloatingIPPortForwarding(TestFloatingIPPortForwarding):
self.assertRaises(tests_utils.ParserException, self.check_parser,
self.cmd, arglist, verifylist)
+ def test_create_all_options_with_range(self):
+ arglist = [
+ '--port', self.new_port_forwarding_with_ranges.internal_port_id,
+ '--internal-protocol-port',
+ self.new_port_forwarding_with_ranges.internal_port_range,
+ '--external-protocol-port',
+ self.new_port_forwarding_with_ranges.external_port_range,
+ '--protocol', self.new_port_forwarding_with_ranges.protocol,
+ self.new_port_forwarding_with_ranges.floatingip_id,
+ '--internal-ip-address',
+ self.new_port_forwarding_with_ranges.internal_ip_address,
+ '--description',
+ self.new_port_forwarding_with_ranges.description,
+ ]
+ verifylist = [
+ ('port', self.new_port_forwarding_with_ranges.internal_port_id),
+ ('internal_protocol_port',
+ self.new_port_forwarding_with_ranges.internal_port_range),
+ ('external_protocol_port',
+ self.new_port_forwarding_with_ranges.external_port_range),
+ ('protocol', self.new_port_forwarding_with_ranges.protocol),
+ ('floating_ip',
+ self.new_port_forwarding_with_ranges.floatingip_id),
+ ('internal_ip_address', self.new_port_forwarding_with_ranges.
+ internal_ip_address),
+ ('description', self.new_port_forwarding_with_ranges.description),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+ columns, data = self.cmd.take_action(parsed_args)
+
+ self.network.create_floating_ip_port_forwarding.\
+ assert_called_once_with(
+ self.new_port_forwarding.floatingip_id,
+ **{
+ 'external_port_range':
+ self.new_port_forwarding_with_ranges.
+ external_port_range,
+ 'internal_ip_address':
+ self.new_port_forwarding_with_ranges.
+ internal_ip_address,
+ 'internal_port_range':
+ self.new_port_forwarding_with_ranges.
+ internal_port_range,
+ 'internal_port_id':
+ self.new_port_forwarding_with_ranges.internal_port_id,
+ 'protocol': self.new_port_forwarding_with_ranges.protocol,
+ 'description':
+ self.new_port_forwarding_with_ranges.description,
+ })
+ self.assertEqual(self.columns, columns)
+ self.assertEqual(self.data, data)
+
+ def test_create_all_options_with_range_invalid_port_exception(self):
+ invalid_port_range = '999999:999999'
+ arglist = [
+ '--port', self.new_port_forwarding_with_ranges.internal_port_id,
+ '--internal-protocol-port', invalid_port_range,
+ '--external-protocol-port', invalid_port_range,
+ '--protocol', self.new_port_forwarding_with_ranges.protocol,
+ self.new_port_forwarding_with_ranges.floatingip_id,
+ '--internal-ip-address',
+ self.new_port_forwarding_with_ranges.internal_ip_address,
+ '--description',
+ self.new_port_forwarding_with_ranges.description,
+ ]
+ verifylist = [
+ ('port', self.new_port_forwarding_with_ranges.internal_port_id),
+ ('internal_protocol_port', invalid_port_range),
+ ('external_protocol_port', invalid_port_range),
+ ('protocol', self.new_port_forwarding_with_ranges.protocol),
+ ('floating_ip',
+ self.new_port_forwarding_with_ranges.floatingip_id),
+ ('internal_ip_address', self.new_port_forwarding_with_ranges.
+ internal_ip_address),
+ ('description', self.new_port_forwarding_with_ranges.description),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+ msg = 'The port number range is <1-65535>'
+ try:
+ self.cmd.take_action(parsed_args)
+ self.fail('CommandError should be raised.')
+ except exceptions.CommandError as e:
+ self.assertEqual(msg, str(e))
+ self.network.create_floating_ip_port_forwarding.assert_not_called()
+
+ def test_create_all_options_with_invalid_range_exception(self):
+ invalid_port_range = '80:70'
+ arglist = [
+ '--port', self.new_port_forwarding_with_ranges.internal_port_id,
+ '--internal-protocol-port', invalid_port_range,
+ '--external-protocol-port', invalid_port_range,
+ '--protocol', self.new_port_forwarding_with_ranges.protocol,
+ self.new_port_forwarding_with_ranges.floatingip_id,
+ '--internal-ip-address',
+ self.new_port_forwarding_with_ranges.internal_ip_address,
+ '--description',
+ self.new_port_forwarding_with_ranges.description,
+ ]
+ verifylist = [
+ ('port', self.new_port_forwarding_with_ranges.internal_port_id),
+ ('internal_protocol_port', invalid_port_range),
+ ('external_protocol_port', invalid_port_range),
+ ('protocol', self.new_port_forwarding_with_ranges.protocol),
+ ('floating_ip',
+ self.new_port_forwarding_with_ranges.floatingip_id),
+ ('internal_ip_address', self.new_port_forwarding_with_ranges.
+ internal_ip_address),
+ ('description', self.new_port_forwarding_with_ranges.description),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+ msg = 'The last number in port range must be greater or equal to ' \
+ 'the first'
+ try:
+ self.cmd.take_action(parsed_args)
+ self.fail('CommandError should be raised.')
+ except exceptions.CommandError as e:
+ self.assertEqual(msg, str(e))
+ self.network.create_floating_ip_port_forwarding.assert_not_called()
+
+ def test_create_all_options_with_unmatch_ranges_exception(self):
+ internal_range = '80:90'
+ external_range = '8080:8100'
+ arglist = [
+ '--port', self.new_port_forwarding_with_ranges.internal_port_id,
+ '--internal-protocol-port', internal_range,
+ '--external-protocol-port', external_range,
+ '--protocol', self.new_port_forwarding_with_ranges.protocol,
+ self.new_port_forwarding_with_ranges.floatingip_id,
+ '--internal-ip-address',
+ self.new_port_forwarding_with_ranges.internal_ip_address,
+ '--description',
+ self.new_port_forwarding_with_ranges.description,
+ ]
+ verifylist = [
+ ('port', self.new_port_forwarding_with_ranges.internal_port_id),
+ ('internal_protocol_port', internal_range),
+ ('external_protocol_port', external_range),
+ ('protocol', self.new_port_forwarding_with_ranges.protocol),
+ ('floating_ip',
+ self.new_port_forwarding_with_ranges.floatingip_id),
+ ('internal_ip_address', self.new_port_forwarding_with_ranges.
+ internal_ip_address),
+ ('description', self.new_port_forwarding_with_ranges.description),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+ msg = "The relation between internal and external ports does not " \
+ "match the pattern 1:N and N:N"
+ try:
+ self.cmd.take_action(parsed_args)
+ self.fail('CommandError should be raised.')
+ except exceptions.CommandError as e:
+ self.assertEqual(msg, str(e))
+ self.network.create_floating_ip_port_forwarding.assert_not_called()
+
def test_create_all_options(self):
arglist = [
'--port', self.new_port_forwarding.internal_port_id,
@@ -106,8 +276,10 @@ class TestCreateFloatingIPPortForwarding(TestFloatingIPPortForwarding):
]
verifylist = [
('port', self.new_port_forwarding.internal_port_id),
- ('internal_protocol_port', self.new_port_forwarding.internal_port),
- ('external_protocol_port', self.new_port_forwarding.external_port),
+ ('internal_protocol_port',
+ str(self.new_port_forwarding.internal_port)),
+ ('external_protocol_port',
+ str(self.new_port_forwarding.external_port)),
('protocol', self.new_port_forwarding.protocol),
('floating_ip', self.new_port_forwarding.floatingip_id),
('internal_ip_address', self.new_port_forwarding.
@@ -253,7 +425,9 @@ class TestListFloatingIPPortForwarding(TestFloatingIPPortForwarding):
'Internal Port ID',
'Internal IP Address',
'Internal Port',
+ 'Internal Port Range',
'External Port',
+ 'External Port Range',
'Protocol',
'Description',
)
@@ -275,7 +449,9 @@ class TestListFloatingIPPortForwarding(TestFloatingIPPortForwarding):
port_forwarding.internal_port_id,
port_forwarding.internal_ip_address,
port_forwarding.internal_port,
+ port_forwarding.internal_port_range,
port_forwarding.external_port,
+ port_forwarding.external_port_range,
port_forwarding.protocol,
port_forwarding.description,
))
@@ -330,7 +506,7 @@ class TestListFloatingIPPortForwarding(TestFloatingIPPortForwarding):
query = {
'internal_port_id': self.port_forwardings[0].internal_port_id,
- 'external_port': str(self.port_forwardings[0].external_port),
+ 'external_port': self.port_forwardings[0].external_port,
'protocol': self.port_forwardings[0].protocol,
}
@@ -392,7 +568,7 @@ class TestSetFloatingIPPortForwarding(TestFloatingIPPortForwarding):
self.assertIsNone(result)
def test_set_all_thing(self):
- arglist = [
+ arglist_single = [
'--port', self.port.id,
'--internal-ip-address', 'new_internal_ip_address',
'--internal-protocol-port', '100',
@@ -402,21 +578,23 @@ class TestSetFloatingIPPortForwarding(TestFloatingIPPortForwarding):
self._port_forwarding.floatingip_id,
self._port_forwarding.id,
]
- verifylist = [
+ arglist_range = list(arglist_single)
+ arglist_range[5] = '100:110'
+ arglist_range[7] = '200:210'
+ verifylist_single = [
('port', self.port.id),
('internal_ip_address', 'new_internal_ip_address'),
- ('internal_protocol_port', 100),
- ('external_protocol_port', 200),
+ ('internal_protocol_port', '100'),
+ ('external_protocol_port', '200'),
('protocol', 'tcp'),
('description', 'some description'),
('floating_ip', self._port_forwarding.floatingip_id),
('port_forwarding_id', self._port_forwarding.id),
]
-
- parsed_args = self.check_parser(self.cmd, arglist, verifylist)
-
- result = self.cmd.take_action(parsed_args)
- attrs = {
+ verifylist_range = list(verifylist_single)
+ verifylist_range[2] = ('internal_protocol_port', '100:110')
+ verifylist_range[3] = ('external_protocol_port', '200:210')
+ attrs_single = {
'internal_port_id': self.port.id,
'internal_ip_address': 'new_internal_ip_address',
'internal_port': 100,
@@ -424,12 +602,25 @@ class TestSetFloatingIPPortForwarding(TestFloatingIPPortForwarding):
'protocol': 'tcp',
'description': 'some description',
}
- self.network.update_floating_ip_port_forwarding.assert_called_with(
- self._port_forwarding.floatingip_id,
- self._port_forwarding.id,
- **attrs
- )
- self.assertIsNone(result)
+ attrs_range = dict(attrs_single, internal_port_range='100:110',
+ external_port_range='200:210')
+ attrs_range.pop('internal_port')
+ attrs_range.pop('external_port')
+
+ def run_and_validate(arglist, verifylist, attrs):
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ result = self.cmd.take_action(parsed_args)
+
+ self.network.update_floating_ip_port_forwarding.assert_called_with(
+ self._port_forwarding.floatingip_id,
+ self._port_forwarding.id,
+ **attrs
+ )
+ self.assertIsNone(result)
+
+ run_and_validate(arglist_single, verifylist_single, attrs_single)
+ run_and_validate(arglist_range, verifylist_range, attrs_range)
class TestShowFloatingIPPortForwarding(TestFloatingIPPortForwarding):
@@ -438,11 +629,13 @@ class TestShowFloatingIPPortForwarding(TestFloatingIPPortForwarding):
columns = (
'description',
'external_port',
+ 'external_port_range',
'floatingip_id',
'id',
'internal_ip_address',
'internal_port',
'internal_port_id',
+ 'internal_port_range',
'protocol',
)
@@ -459,11 +652,13 @@ class TestShowFloatingIPPortForwarding(TestFloatingIPPortForwarding):
self.data = (
self._port_forwarding.description,
self._port_forwarding.external_port,
+ self._port_forwarding.external_port_range,
self._port_forwarding.floatingip_id,
self._port_forwarding.id,
self._port_forwarding.internal_ip_address,
self._port_forwarding.internal_port,
self._port_forwarding.internal_port_id,
+ self._port_forwarding.internal_port_range,
self._port_forwarding.protocol,
)
self.network.find_floating_ip_port_forwarding = mock.Mock(
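The three new negative tests above pin the exact CommandError messages for an out-of-range port, an inverted range, and internal/external ranges of different lengths. Below is a hedged sketch of a validator that would produce those messages; validate_port_ranges is a hypothetical name, and the real checks live in openstackclient/network/v2/floating_ip_port_forwarding.py and may differ in detail.

    from osc_lib import exceptions


    def validate_port_ranges(ranges):
        # ranges is e.g. [(80, 90), (8080, 8090)] for the internal and
        # external '<start>:<end>' values parsed from the command line.
        diffs = set()
        for start, end in ranges:
            if not (1 <= start <= 65535 and 1 <= end <= 65535):
                raise exceptions.CommandError(
                    'The port number range is <1-65535>')
            if end < start:
                raise exceptions.CommandError(
                    'The last number in port range must be greater or equal '
                    'to the first')
            diffs.add(end - start)
        # 1:N and N:N are allowed: either both ranges have the same length or
        # one of them collapses to a single port (difference of 0).
        if len(diffs) > 1 and 0 not in diffs:
            raise exceptions.CommandError(
                'The relation between internal and external ports does not '
                'match the pattern 1:N and N:N')
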
diff --git a/openstackclient/tests/unit/network/v2/test_network_qos_policy.py b/openstackclient/tests/unit/network/v2/test_network_qos_policy.py
index af4cb3fb..ca21ccf7 100644
--- a/openstackclient/tests/unit/network/v2/test_network_qos_policy.py
+++ b/openstackclient/tests/unit/network/v2/test_network_qos_policy.py
@@ -432,7 +432,7 @@ class TestShowNetworkQosPolicy(TestQosPolicy):
_qos_policy.is_default,
_qos_policy.name,
_qos_policy.project_id,
- _qos_policy.rules,
+ network_qos_policy.RulesColumn(_qos_policy.rules),
_qos_policy.shared,
)
diff --git a/openstackclient/tests/unit/network/v2/test_network_qos_rule_type.py b/openstackclient/tests/unit/network/v2/test_network_qos_rule_type.py
index 08a83fab..3aae822e 100644
--- a/openstackclient/tests/unit/network/v2/test_network_qos_rule_type.py
+++ b/openstackclient/tests/unit/network/v2/test_network_qos_rule_type.py
@@ -115,3 +115,37 @@ class TestListNetworkQosRuleType(TestNetworkQosRuleType):
self.network.qos_rule_types.assert_called_once_with(**{})
self.assertEqual(self.columns, columns)
self.assertEqual(self.data, list(data))
+
+ def test_qos_rule_type_list_all_supported(self):
+ arglist = [
+ '--all-supported'
+ ]
+ verifylist = [
+ ('all_supported', True),
+ ]
+
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+ columns, data = self.cmd.take_action(parsed_args)
+
+ self.network.qos_rule_types.assert_called_once_with(
+ **{'all_supported': True}
+ )
+ self.assertEqual(self.columns, columns)
+ self.assertEqual(self.data, list(data))
+
+ def test_qos_rule_type_list_all_rules(self):
+ arglist = [
+ '--all-rules'
+ ]
+ verifylist = [
+ ('all_rules', True),
+ ]
+
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+ columns, data = self.cmd.take_action(parsed_args)
+
+ self.network.qos_rule_types.assert_called_once_with(
+ **{'all_rules': True}
+ )
+ self.assertEqual(self.columns, columns)
+ self.assertEqual(self.data, list(data))
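These two tests only verify that the new flags are forwarded verbatim as query parameters to network.qos_rule_types(). A minimal sketch of that forwarding pattern (build_rule_type_filters is illustrative, not the command's actual helper):

    def build_rule_type_filters(parsed_args):
        # Only flags the user actually set become query filters, which is why
        # the no-option test expects qos_rule_types(**{}).
        filters = {}
        if parsed_args.all_supported:
            filters['all_supported'] = True
        if parsed_args.all_rules:
            filters['all_rules'] = True
        return filters
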
diff --git a/openstackclient/tests/unit/network/v2/test_network_trunk.py b/openstackclient/tests/unit/network/v2/test_network_trunk.py
new file mode 100644
index 00000000..fae70fb0
--- /dev/null
+++ b/openstackclient/tests/unit/network/v2/test_network_trunk.py
@@ -0,0 +1,851 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+import argparse
+import copy
+from unittest import mock
+from unittest.mock import call
+
+from osc_lib.cli import format_columns
+from osc_lib import exceptions
+import testtools
+
+from openstackclient.network.v2 import network_trunk
+from openstackclient.tests.unit.identity.v3 import fakes as identity_fakes_v3
+from openstackclient.tests.unit.network.v2 import fakes as network_fakes
+from openstackclient.tests.unit import utils as tests_utils
+
+
+# Tests for Neutron trunks
+#
+class TestNetworkTrunk(network_fakes.TestNetworkV2):
+
+ def setUp(self):
+ super().setUp()
+
+ # Get a shortcut to the network client
+ self.network = self.app.client_manager.network
+ # Get a shortcut to the ProjectManager Mock
+ self.projects_mock = self.app.client_manager.identity.projects
+ # Get a shortcut to the DomainManager Mock
+ self.domains_mock = self.app.client_manager.identity.domains
+
+
+class TestCreateNetworkTrunk(TestNetworkTrunk):
+ project = identity_fakes_v3.FakeProject.create_one_project()
+ domain = identity_fakes_v3.FakeDomain.create_one_domain()
+ trunk_networks = network_fakes.create_networks(count=2)
+ parent_port = network_fakes.create_one_port(
+ attrs={'project_id': project.id,
+ 'network_id': trunk_networks[0]['id']})
+ sub_port = network_fakes.create_one_port(
+ attrs={'project_id': project.id,
+ 'network_id': trunk_networks[1]['id']})
+
+ new_trunk = network_fakes.create_one_trunk(
+ attrs={'project_id': project.id,
+ 'port_id': parent_port['id'],
+ 'sub_ports': {
+ 'port_id': sub_port['id'],
+ 'segmentation_id': 42,
+ 'segmentation_type': 'vlan'}
+ })
+
+ columns = (
+ 'description',
+ 'id',
+ 'is_admin_state_up',
+ 'name',
+ 'port_id',
+ 'project_id',
+ 'status',
+ 'sub_ports',
+ 'tags'
+ )
+ data = (
+ new_trunk.description,
+ new_trunk.id,
+ new_trunk.is_admin_state_up,
+ new_trunk.name,
+ new_trunk.port_id,
+ new_trunk.project_id,
+ new_trunk.status,
+ format_columns.ListDictColumn(new_trunk.sub_ports),
+ [],
+ )
+
+ def setUp(self):
+ super().setUp()
+ self.network.create_trunk = mock.Mock(return_value=self.new_trunk)
+ self.network.find_port = mock.Mock(
+ side_effect=[self.parent_port, self.sub_port])
+
+ # Get the command object to test
+ self.cmd = network_trunk.CreateNetworkTrunk(self.app, self.namespace)
+
+ self.projects_mock.get.return_value = self.project
+ self.domains_mock.get.return_value = self.domain
+
+ def test_create_no_options(self):
+ arglist = []
+ verifylist = []
+
+ self.assertRaises(tests_utils.ParserException, self.check_parser,
+ self.cmd, arglist, verifylist)
+
+ def test_create_default_options(self):
+ arglist = [
+ "--parent-port", self.new_trunk['port_id'],
+ self.new_trunk['name'],
+ ]
+ verifylist = [
+ ('parent_port', self.new_trunk['port_id']),
+ ('name', self.new_trunk['name']),
+ ]
+
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+ columns, data = (self.cmd.take_action(parsed_args))
+
+ self.network.create_trunk.assert_called_once_with(**{
+ 'name': self.new_trunk['name'],
+ 'admin_state_up': self.new_trunk['admin_state_up'],
+ 'port_id': self.new_trunk['port_id'],
+ })
+ self.assertEqual(self.columns, columns)
+ self.assertEqual(self.data, data)
+
+ def test_create_full_options(self):
+ self.new_trunk['description'] = 'foo description'
+ subport = self.new_trunk.sub_ports[0]
+ arglist = [
+ "--disable",
+ "--description", self.new_trunk.description,
+ "--parent-port", self.new_trunk.port_id,
+ "--subport", 'port=%(port)s,segmentation-type=%(seg_type)s,'
+ 'segmentation-id=%(seg_id)s' % {
+ 'seg_id': subport['segmentation_id'],
+ 'seg_type': subport['segmentation_type'],
+ 'port': subport['port_id']},
+ self.new_trunk.name,
+ ]
+ verifylist = [
+ ('name', self.new_trunk.name),
+ ('description', self.new_trunk.description),
+ ('parent_port', self.new_trunk.port_id),
+ ('add_subports', [{
+ 'port': subport['port_id'],
+ 'segmentation-id': str(subport['segmentation_id']),
+ 'segmentation-type': subport['segmentation_type']}]),
+ ('disable', True),
+ ]
+
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ columns, data = (self.cmd.take_action(parsed_args))
+
+ self.network.create_trunk.assert_called_once_with(**{
+ 'name': self.new_trunk.name,
+ 'description': self.new_trunk.description,
+ 'admin_state_up': False,
+ 'port_id': self.new_trunk.port_id,
+ 'sub_ports': [subport],
+ })
+ self.assertEqual(self.columns, columns)
+ data_with_desc = list(self.data)
+ data_with_desc[0] = self.new_trunk['description']
+ data_with_desc = tuple(data_with_desc)
+ self.assertEqual(data_with_desc, data)
+
+ def test_create_trunk_with_subport_invalid_segmentation_id_fail(self):
+ subport = self.new_trunk.sub_ports[0]
+ arglist = [
+ "--parent-port", self.new_trunk.port_id,
+ "--subport", "port=%(port)s,segmentation-type=%(seg_type)s,"
+ "segmentation-id=boom" % {
+ 'seg_type': subport['segmentation_type'],
+ 'port': subport['port_id']},
+ self.new_trunk.name,
+ ]
+ verifylist = [
+ ('name', self.new_trunk.name),
+ ('parent_port', self.new_trunk.port_id),
+ ('add_subports', [{
+ 'port': subport['port_id'],
+ 'segmentation-id': 'boom',
+ 'segmentation-type': subport['segmentation_type']}]),
+ ]
+
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+ with testtools.ExpectedException(exceptions.CommandError) as e:
+ self.cmd.take_action(parsed_args)
+ self.assertEqual("Segmentation-id 'boom' is not an integer",
+ str(e))
+
+ def test_create_network_trunk_subports_without_optional_keys(self):
+ subport = copy.copy(self.new_trunk.sub_ports[0])
+ # Pop out the segmentation-id and segmentation-type
+ subport.pop('segmentation_type')
+ subport.pop('segmentation_id')
+ arglist = [
+ '--parent-port', self.new_trunk.port_id,
+ '--subport', 'port=%(port)s' % {'port': subport['port_id']},
+ self.new_trunk.name,
+ ]
+ verifylist = [
+ ('name', self.new_trunk.name),
+ ('parent_port', self.new_trunk.port_id),
+ ('add_subports', [{
+ 'port': subport['port_id']}]),
+ ]
+
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+ columns, data = (self.cmd.take_action(parsed_args))
+
+ self.network.create_trunk.assert_called_once_with(**{
+ 'name': self.new_trunk.name,
+ 'admin_state_up': True,
+ 'port_id': self.new_trunk.port_id,
+ 'sub_ports': [subport],
+ })
+ self.assertEqual(self.columns, columns)
+ data_with_desc = list(self.data)
+ data_with_desc[0] = self.new_trunk['description']
+ data_with_desc = tuple(data_with_desc)
+ self.assertEqual(data_with_desc, data)
+
+ def test_create_network_trunk_subports_without_required_key_fail(self):
+ subport = self.new_trunk.sub_ports[0]
+ arglist = [
+ '--parent-port', self.new_trunk.port_id,
+ '--subport', 'segmentation-type=%(seg_type)s,'
+ 'segmentation-id=%(seg_id)s' % {
+ 'seg_id': subport['segmentation_id'],
+ 'seg_type': subport['segmentation_type']},
+ self.new_trunk.name,
+ ]
+ verifylist = [
+ ('name', self.new_trunk.name),
+ ('parent_port', self.new_trunk.port_id),
+ ('add_subports', [{
+ 'segmentation_id': str(subport['segmentation_id']),
+ 'segmentation_type': subport['segmentation_type']}]),
+ ]
+
+ with testtools.ExpectedException(argparse.ArgumentTypeError):
+ self.check_parser(self.cmd, arglist, verifylist)
+
+
+class TestDeleteNetworkTrunk(TestNetworkTrunk):
+    # The trunks to be deleted.
+ project = identity_fakes_v3.FakeProject.create_one_project()
+ domain = identity_fakes_v3.FakeDomain.create_one_domain()
+ trunk_networks = network_fakes.create_networks(count=2)
+ parent_port = network_fakes.create_one_port(
+ attrs={'project_id': project.id,
+ 'network_id': trunk_networks[0]['id']})
+ sub_port = network_fakes.create_one_port(
+ attrs={'project_id': project.id,
+ 'network_id': trunk_networks[1]['id']})
+
+ new_trunks = network_fakes.create_trunks(
+ attrs={'project_id': project.id,
+ 'port_id': parent_port['id'],
+ 'sub_ports': {
+ 'port_id': sub_port['id'],
+ 'segmentation_id': 42,
+ 'segmentation_type': 'vlan'}
+ })
+
+ def setUp(self):
+ super().setUp()
+ self.network.find_trunk = mock.Mock(
+ side_effect=[self.new_trunks[0], self.new_trunks[1]])
+ self.network.delete_trunk = mock.Mock(return_value=None)
+ self.network.find_port = mock.Mock(
+ side_effect=[self.parent_port, self.sub_port])
+
+ self.projects_mock.get.return_value = self.project
+ self.domains_mock.get.return_value = self.domain
+
+ # Get the command object to test
+ self.cmd = network_trunk.DeleteNetworkTrunk(self.app, self.namespace)
+
+    def test_delete_trunk(self):
+ arglist = [
+ self.new_trunks[0].name,
+ ]
+ verifylist = [
+ ('trunk', [self.new_trunks[0].name]),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ result = self.cmd.take_action(parsed_args)
+ self.network.delete_trunk.assert_called_once_with(
+ self.new_trunks[0].id)
+ self.assertIsNone(result)
+
+ def test_delete_trunk_multiple(self):
+ arglist = []
+ verifylist = []
+
+ for t in self.new_trunks:
+ arglist.append(t['name'])
+ verifylist = [
+ ('trunk', arglist),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ result = self.cmd.take_action(parsed_args)
+
+ calls = []
+ for t in self.new_trunks:
+ calls.append(call(t.id))
+ self.network.delete_trunk.assert_has_calls(calls)
+ self.assertIsNone(result)
+
+ def test_delete_trunk_multiple_with_exception(self):
+ arglist = [
+ self.new_trunks[0].name,
+ 'unexist_trunk',
+ ]
+ verifylist = [
+ ('trunk',
+ [self.new_trunks[0].name, 'unexist_trunk']),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ self.network.find_trunk = mock.Mock(
+ side_effect=[self.new_trunks[0], exceptions.CommandError])
+ with testtools.ExpectedException(exceptions.CommandError) as e:
+ self.cmd.take_action(parsed_args)
+ self.assertEqual('1 of 2 trunks failed to delete.', str(e))
+ self.network.delete_trunk.assert_called_once_with(
+ self.new_trunks[0].id
+ )
+
+
+class TestShowNetworkTrunk(TestNetworkTrunk):
+
+ project = identity_fakes_v3.FakeProject.create_one_project()
+ domain = identity_fakes_v3.FakeDomain.create_one_domain()
+    # The trunk to show.
+ new_trunk = network_fakes.create_one_trunk()
+ columns = (
+ 'description',
+ 'id',
+ 'is_admin_state_up',
+ 'name',
+ 'port_id',
+ 'project_id',
+ 'status',
+ 'sub_ports',
+ 'tags'
+ )
+ data = (
+ new_trunk.description,
+ new_trunk.id,
+ new_trunk.is_admin_state_up,
+ new_trunk.name,
+ new_trunk.port_id,
+ new_trunk.project_id,
+ new_trunk.status,
+ format_columns.ListDictColumn(new_trunk.sub_ports),
+ [],
+ )
+
+ def setUp(self):
+ super().setUp()
+ self.network.find_trunk = mock.Mock(return_value=self.new_trunk)
+ self.network.get_trunk = mock.Mock(return_value=self.new_trunk)
+
+ self.projects_mock.get.return_value = self.project
+ self.domains_mock.get.return_value = self.domain
+
+ # Get the command object to test
+ self.cmd = network_trunk.ShowNetworkTrunk(self.app, self.namespace)
+
+ def test_show_no_options(self):
+ arglist = []
+ verifylist = []
+
+ self.assertRaises(tests_utils.ParserException, self.check_parser,
+ self.cmd, arglist, verifylist)
+
+ def test_show_all_options(self):
+ arglist = [
+ self.new_trunk.id,
+ ]
+ verifylist = [
+ ('trunk', self.new_trunk.id),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ columns, data = self.cmd.take_action(parsed_args)
+
+ self.network.get_trunk.assert_called_once_with(self.new_trunk.id)
+ self.assertEqual(self.columns, columns)
+ self.assertEqual(self.data, data)
+
+
+class TestListNetworkTrunk(TestNetworkTrunk):
+ project = identity_fakes_v3.FakeProject.create_one_project()
+ domain = identity_fakes_v3.FakeDomain.create_one_domain()
+ # Create trunks to be listed.
+ new_trunks = network_fakes.create_trunks(
+ {'created_at': '2001-01-01 00:00:00',
+ 'updated_at': '2001-01-01 00:00:00'}, count=3)
+
+ columns = (
+ 'ID',
+ 'Name',
+ 'Parent Port',
+ 'Description'
+ )
+ columns_long = columns + (
+ 'Status',
+ 'State',
+ 'Created At',
+ 'Updated At'
+ )
+ data = []
+ for t in new_trunks:
+ data.append((
+ t['id'],
+ t['name'],
+ t['port_id'],
+ t['description']
+ ))
+ data_long = []
+ for t in new_trunks:
+ data_long.append((
+ t['id'],
+ t['name'],
+ t['port_id'],
+ t['description'],
+ t['status'],
+ network_trunk.AdminStateColumn(''),
+ '2001-01-01 00:00:00',
+ '2001-01-01 00:00:00',
+ ))
+
+ def setUp(self):
+ super().setUp()
+ self.network.trunks = mock.Mock(return_value=self.new_trunks)
+
+ self.projects_mock.get.return_value = self.project
+ self.domains_mock.get.return_value = self.domain
+
+ # Get the command object to test
+ self.cmd = network_trunk.ListNetworkTrunk(self.app, self.namespace)
+
+ def test_trunk_list_no_option(self):
+ arglist = []
+ verifylist = []
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ columns, data = self.cmd.take_action(parsed_args)
+
+ self.network.trunks.assert_called_once_with()
+ self.assertEqual(self.columns, columns)
+ self.assertEqual(self.data, list(data))
+
+ def test_trunk_list_long(self):
+ arglist = [
+ '--long',
+ ]
+ verifylist = [
+ ('long', True),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ columns, data = self.cmd.take_action(parsed_args)
+
+ self.network.trunks.assert_called_once_with()
+ self.assertEqual(self.columns_long, columns)
+ self.assertEqual(self.data_long, list(data))
+
+
+class TestSetNetworkTrunk(TestNetworkTrunk):
+
+ project = identity_fakes_v3.FakeProject.create_one_project()
+ domain = identity_fakes_v3.FakeDomain.create_one_domain()
+ trunk_networks = network_fakes.create_networks(count=2)
+ parent_port = network_fakes.create_one_port(
+ attrs={'project_id': project.id,
+ 'network_id': trunk_networks[0]['id']})
+ sub_port = network_fakes.create_one_port(
+ attrs={'project_id': project.id,
+ 'network_id': trunk_networks[1]['id']})
+    # The trunk to set.
+ _trunk = network_fakes.create_one_trunk(
+ attrs={'project_id': project.id,
+ 'port_id': parent_port['id'],
+ 'sub_ports': {
+ 'port_id': sub_port['id'],
+ 'segmentation_id': 42,
+ 'segmentation_type': 'vlan'}
+ })
+ columns = (
+ 'admin_state_up',
+ 'id',
+ 'name',
+ 'description',
+ 'port_id',
+ 'project_id',
+ 'status',
+ 'sub_ports',
+ )
+    data = (
+        network_trunk.AdminStateColumn(_trunk.is_admin_state_up),
+        _trunk.id,
+ _trunk.name,
+ _trunk.description,
+ _trunk.port_id,
+ _trunk.project_id,
+ _trunk.status,
+ format_columns.ListDictColumn(_trunk.sub_ports),
+ )
+
+ def setUp(self):
+ super().setUp()
+ self.network.update_trunk = mock.Mock(return_value=self._trunk)
+ self.network.add_trunk_subports = mock.Mock(return_value=self._trunk)
+ self.network.find_trunk = mock.Mock(return_value=self._trunk)
+ self.network.find_port = mock.Mock(
+ side_effect=[self.sub_port, self.sub_port])
+
+ self.projects_mock.get.return_value = self.project
+ self.domains_mock.get.return_value = self.domain
+
+ # Get the command object to test
+ self.cmd = network_trunk.SetNetworkTrunk(self.app, self.namespace)
+
+ def _test_set_network_trunk_attr(self, attr, value):
+ arglist = [
+ '--%s' % attr, value,
+ self._trunk[attr],
+ ]
+ verifylist = [
+ (attr, value),
+ ('trunk', self._trunk[attr]),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ result = self.cmd.take_action(parsed_args)
+
+ attrs = {
+ attr: value,
+ }
+ self.network.update_trunk.assert_called_once_with(
+ self._trunk, **attrs)
+ self.assertIsNone(result)
+
+ def test_set_network_trunk_name(self):
+ self._test_set_network_trunk_attr('name', 'trunky')
+
+ def test_set_network_trunk_description(self):
+ self._test_set_network_trunk_attr('description', 'description')
+
+ def test_set_network_trunk_admin_state_up_disable(self):
+ arglist = [
+ '--disable',
+ self._trunk['name'],
+ ]
+ verifylist = [
+ ('disable', True),
+ ('trunk', self._trunk['name']),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ result = self.cmd.take_action(parsed_args)
+
+ attrs = {
+ 'admin_state_up': False,
+ }
+ self.network.update_trunk.assert_called_once_with(
+ self._trunk, **attrs)
+ self.assertIsNone(result)
+
+ def test_set_network_trunk_admin_state_up_enable(self):
+ arglist = [
+ '--enable',
+ self._trunk['name'],
+ ]
+ verifylist = [
+ ('enable', True),
+ ('trunk', self._trunk['name']),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ result = self.cmd.take_action(parsed_args)
+
+ attrs = {
+ 'admin_state_up': True,
+ }
+ self.network.update_trunk.assert_called_once_with(
+ self._trunk, **attrs)
+ self.assertIsNone(result)
+
+ def test_set_network_trunk_nothing(self):
+ arglist = [self._trunk['name'], ]
+ verifylist = [('trunk', self._trunk['name']), ]
+
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+ result = self.cmd.take_action(parsed_args)
+
+ attrs = {}
+ self.network.update_trunk.assert_called_once_with(
+ self._trunk, **attrs)
+ self.assertIsNone(result)
+
+ def test_set_network_trunk_subports(self):
+ subport = self._trunk['sub_ports'][0]
+ arglist = [
+ '--subport', 'port=%(port)s,segmentation-type=%(seg_type)s,'
+ 'segmentation-id=%(seg_id)s' % {
+ 'seg_id': subport['segmentation_id'],
+ 'seg_type': subport['segmentation_type'],
+ 'port': subport['port_id']},
+ self._trunk['name'],
+ ]
+ verifylist = [
+ ('trunk', self._trunk['name']),
+ ('set_subports', [{
+ 'port': subport['port_id'],
+ 'segmentation-id': str(subport['segmentation_id']),
+ 'segmentation-type': subport['segmentation_type']}]),
+ ]
+
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+ result = self.cmd.take_action(parsed_args)
+
+ self.network.add_trunk_subports.assert_called_once_with(
+ self._trunk, [subport])
+ self.assertIsNone(result)
+
+ def test_set_network_trunk_subports_without_optional_keys(self):
+ subport = copy.copy(self._trunk['sub_ports'][0])
+ # Pop out the segmentation-id and segmentation-type
+ subport.pop('segmentation_type')
+ subport.pop('segmentation_id')
+ arglist = [
+ '--subport', 'port=%(port)s' % {'port': subport['port_id']},
+ self._trunk['name'],
+ ]
+ verifylist = [
+ ('trunk', self._trunk['name']),
+ ('set_subports', [{
+ 'port': subport['port_id']}]),
+ ]
+
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+ result = self.cmd.take_action(parsed_args)
+
+ self.network.add_trunk_subports.assert_called_once_with(
+ self._trunk, [subport])
+ self.assertIsNone(result)
+
+ def test_set_network_trunk_subports_without_required_key_fail(self):
+ subport = self._trunk['sub_ports'][0]
+ arglist = [
+ '--subport', 'segmentation-type=%(seg_type)s,'
+ 'segmentation-id=%(seg_id)s' % {
+ 'seg_id': subport['segmentation_id'],
+ 'seg_type': subport['segmentation_type']},
+ self._trunk['name'],
+ ]
+ verifylist = [
+ ('trunk', self._trunk['name']),
+ ('set_subports', [{
+ 'segmentation-id': str(subport['segmentation_id']),
+ 'segmentation-type': subport['segmentation_type']}]),
+ ]
+
+ with testtools.ExpectedException(argparse.ArgumentTypeError):
+ self.check_parser(self.cmd, arglist, verifylist)
+
+ self.network.add_trunk_subports.assert_not_called()
+
+ def test_set_trunk_attrs_with_exception(self):
+ arglist = [
+ '--name', 'reallylongname',
+ self._trunk['name'],
+ ]
+ verifylist = [
+ ('trunk', self._trunk['name']),
+ ('name', 'reallylongname'),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ self.network.update_trunk = (
+ mock.Mock(side_effect=exceptions.CommandError)
+ )
+ with testtools.ExpectedException(exceptions.CommandError) as e:
+ self.cmd.take_action(parsed_args)
+ self.assertEqual(
+ "Failed to set trunk '%s': " % self._trunk['name'],
+ str(e))
+ attrs = {'name': 'reallylongname'}
+ self.network.update_trunk.assert_called_once_with(
+ self._trunk, **attrs)
+ self.network.add_trunk_subports.assert_not_called()
+
+ def test_set_trunk_add_subport_with_exception(self):
+ arglist = [
+ '--subport', 'port=invalid_subport',
+ self._trunk['name'],
+ ]
+ verifylist = [
+ ('trunk', self._trunk['name']),
+ ('set_subports', [{'port': 'invalid_subport'}]),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ self.network.add_trunk_subports = (
+ mock.Mock(side_effect=exceptions.CommandError)
+ )
+ self.network.find_port = (mock.Mock(
+ return_value={'id': 'invalid_subport'}))
+ with testtools.ExpectedException(exceptions.CommandError) as e:
+ self.cmd.take_action(parsed_args)
+ self.assertEqual(
+ "Failed to add subports to trunk '%s': " % self._trunk['name'],
+ str(e))
+ self.network.update_trunk.assert_called_once_with(
+ self._trunk)
+ self.network.add_trunk_subports.assert_called_once_with(
+ self._trunk, [{'port_id': 'invalid_subport'}])
+
+
+class TestListNetworkSubport(TestNetworkTrunk):
+
+ _trunk = network_fakes.create_one_trunk()
+ _subports = _trunk['sub_ports']
+
+ columns = (
+ 'Port',
+ 'Segmentation Type',
+ 'Segmentation ID',
+ )
+ data = []
+ for s in _subports:
+ data.append((
+ s['port_id'],
+ s['segmentation_type'],
+ s['segmentation_id'],
+ ))
+
+ def setUp(self):
+ super().setUp()
+
+ self.network.find_trunk = mock.Mock(return_value=self._trunk)
+ self.network.get_trunk_subports = mock.Mock(
+ return_value={network_trunk.SUB_PORTS: self._subports})
+
+ # Get the command object to test
+ self.cmd = network_trunk.ListNetworkSubport(self.app, self.namespace)
+
+ def test_subport_list(self):
+ arglist = [
+ '--trunk', self._trunk['name'],
+ ]
+ verifylist = [
+ ('trunk', self._trunk['name']),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+ columns, data = self.cmd.take_action(parsed_args)
+
+ self.network.get_trunk_subports.assert_called_once_with(self._trunk)
+ self.assertEqual(self.columns, columns)
+ self.assertEqual(self.data, list(data))
+
+
+class TestUnsetNetworkTrunk(TestNetworkTrunk):
+ project = identity_fakes_v3.FakeProject.create_one_project()
+ domain = identity_fakes_v3.FakeDomain.create_one_domain()
+ trunk_networks = network_fakes.create_networks(count=2)
+ parent_port = network_fakes.create_one_port(
+ attrs={'project_id': project.id,
+ 'network_id': trunk_networks[0]['id']})
+ sub_port = network_fakes.create_one_port(
+ attrs={'project_id': project.id,
+ 'network_id': trunk_networks[1]['id']})
+ _trunk = network_fakes.create_one_trunk(
+ attrs={'project_id': project.id,
+ 'port_id': parent_port['id'],
+ 'sub_ports': {
+ 'port_id': sub_port['id'],
+ 'segmentation_id': 42,
+ 'segmentation_type': 'vlan'}
+ })
+
+ columns = (
+ 'admin_state_up',
+ 'id',
+ 'name',
+ 'port_id',
+ 'project_id',
+ 'status',
+ 'sub_ports',
+ )
+ data = (
+ network_trunk.AdminStateColumn(_trunk['admin_state_up']),
+ _trunk['id'],
+ _trunk['name'],
+ _trunk['port_id'],
+ _trunk['project_id'],
+ _trunk['status'],
+ format_columns.ListDictColumn(_trunk['sub_ports']),
+ )
+
+ def setUp(self):
+ super().setUp()
+
+ self.network.find_trunk = mock.Mock(return_value=self._trunk)
+ self.network.find_port = mock.Mock(
+ side_effect=[self.sub_port, self.sub_port])
+ self.network.delete_trunk_subports = mock.Mock(return_value=None)
+
+ # Get the command object to test
+ self.cmd = network_trunk.UnsetNetworkTrunk(self.app, self.namespace)
+
+ def test_unset_network_trunk_subport(self):
+ subport = self._trunk['sub_ports'][0]
+ arglist = [
+ "--subport", subport['port_id'],
+ self._trunk['name'],
+ ]
+
+ verifylist = [
+ ('trunk', self._trunk['name']),
+ ('unset_subports', [subport['port_id']]),
+ ]
+
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ result = self.cmd.take_action(parsed_args)
+
+ self.network.delete_trunk_subports.assert_called_once_with(
+ self._trunk,
+ [{'port_id': subport['port_id']}]
+ )
+ self.assertIsNone(result)
+
+ def test_unset_subport_no_arguments_fail(self):
+ arglist = [
+ self._trunk['name'],
+ ]
+ verifylist = [
+ ('trunk', self._trunk['name']),
+ ]
+ self.assertRaises(tests_utils.ParserException,
+ self.check_parser, self.cmd, arglist, verifylist)
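Several of the trunk tests build --subport values of the form port=<id>,segmentation-type=vlan,segmentation-id=42 and then assert that create_trunk/add_trunk_subports receive dictionaries keyed with underscores and an integer segmentation_id. The following is a hedged sketch of that normalisation, including the "is not an integer" error the invalid-segmentation-id test expects; normalize_subport is a hypothetical name, and the real conversion (which also resolves the port via find_port) lives in openstackclient/network/v2/network_trunk.py.

    from osc_lib import exceptions


    def normalize_subport(spec):
        # spec is the parsed --subport dict, e.g.
        # {'port': '<uuid>', 'segmentation-type': 'vlan', 'segmentation-id': '42'}
        subport = {'port_id': spec['port']}
        if 'segmentation-type' in spec:
            subport['segmentation_type'] = spec['segmentation-type']
        if 'segmentation-id' in spec:
            try:
                subport['segmentation_id'] = int(spec['segmentation-id'])
            except ValueError:
                raise exceptions.CommandError(
                    "Segmentation-id '%s' is not an integer"
                    % spec['segmentation-id'])
        return subport
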
diff --git a/openstackclient/tests/unit/volume/v1/test_volume.py b/openstackclient/tests/unit/volume/v1/test_volume.py
index 9f16b398..b46a608d 100644
--- a/openstackclient/tests/unit/volume/v1/test_volume.py
+++ b/openstackclient/tests/unit/volume/v1/test_volume.py
@@ -430,7 +430,8 @@ class TestVolumeCreate(TestVolume):
self.assertEqual(self.columns, columns)
self.assertCountEqual(self.datalist, data)
- def test_volume_create_with_bootable_and_readonly(self):
+ @mock.patch.object(utils, 'wait_for_status', return_value=True)
+ def test_volume_create_with_bootable_and_readonly(self, mock_wait):
arglist = [
'--bootable',
'--read-only',
@@ -472,7 +473,8 @@ class TestVolumeCreate(TestVolume):
self.volumes_mock.update_readonly_flag.assert_called_with(
self.new_volume.id, True)
- def test_volume_create_with_nonbootable_and_readwrite(self):
+ @mock.patch.object(utils, 'wait_for_status', return_value=True)
+ def test_volume_create_with_nonbootable_and_readwrite(self, mock_wait):
arglist = [
'--non-bootable',
'--read-write',
@@ -515,8 +517,9 @@ class TestVolumeCreate(TestVolume):
self.new_volume.id, False)
@mock.patch.object(volume.LOG, 'error')
+ @mock.patch.object(utils, 'wait_for_status', return_value=True)
def test_volume_create_with_bootable_and_readonly_fail(
- self, mock_error):
+ self, mock_wait, mock_error):
self.volumes_mock.set_bootable.side_effect = (
exceptions.CommandError())
@@ -566,6 +569,48 @@ class TestVolumeCreate(TestVolume):
self.volumes_mock.update_readonly_flag.assert_called_with(
self.new_volume.id, True)
+ @mock.patch.object(volume.LOG, 'error')
+ @mock.patch.object(utils, 'wait_for_status', return_value=False)
+ def test_volume_create_non_available_with_readonly(
+ self, mock_wait, mock_error):
+ arglist = [
+ '--non-bootable',
+ '--read-only',
+ '--size', str(self.new_volume.size),
+ self.new_volume.display_name,
+ ]
+ verifylist = [
+ ('bootable', False),
+ ('non_bootable', True),
+ ('read_only', True),
+ ('read_write', False),
+ ('size', self.new_volume.size),
+ ('name', self.new_volume.display_name),
+ ]
+
+ parsed_args = self.check_parser(
+ self.cmd, arglist, verifylist)
+
+ columns, data = self.cmd.take_action(parsed_args)
+
+ self.volumes_mock.create.assert_called_with(
+ self.new_volume.size,
+ None,
+ None,
+ self.new_volume.display_name,
+ None,
+ None,
+ None,
+ None,
+ None,
+ None,
+ None,
+ )
+
+ self.assertEqual(2, mock_error.call_count)
+ self.assertEqual(self.columns, columns)
+ self.assertCountEqual(self.datalist, data)
+
def test_volume_create_without_size(self):
arglist = [
self.new_volume.display_name,
diff --git a/openstackclient/tests/unit/volume/v2/test_consistency_group.py b/openstackclient/tests/unit/volume/v2/test_consistency_group.py
index 7ef4a08e..c5537ed8 100644
--- a/openstackclient/tests/unit/volume/v2/test_consistency_group.py
+++ b/openstackclient/tests/unit/volume/v2/test_consistency_group.py
@@ -257,7 +257,7 @@ class TestConsistencyGroupCreate(TestConsistencyGroup):
self.new_consistency_group.name,
]
verifylist = [
- ('consistency_group_source', self.new_consistency_group.id),
+ ('source', self.new_consistency_group.id),
('description', self.new_consistency_group.description),
('name', self.new_consistency_group.name),
]
@@ -285,7 +285,7 @@ class TestConsistencyGroupCreate(TestConsistencyGroup):
self.new_consistency_group.name,
]
verifylist = [
- ('consistency_group_snapshot', self.consistency_group_snapshot.id),
+ ('snapshot', self.consistency_group_snapshot.id),
('description', self.new_consistency_group.description),
('name', self.new_consistency_group.name),
]
diff --git a/openstackclient/tests/unit/volume/v2/test_volume.py b/openstackclient/tests/unit/volume/v2/test_volume.py
index c930002f..0419acef 100644
--- a/openstackclient/tests/unit/volume/v2/test_volume.py
+++ b/openstackclient/tests/unit/volume/v2/test_volume.py
@@ -435,7 +435,8 @@ class TestVolumeCreate(TestVolume):
self.assertEqual(self.columns, columns)
self.assertCountEqual(self.datalist, data)
- def test_volume_create_with_bootable_and_readonly(self):
+ @mock.patch.object(utils, 'wait_for_status', return_value=True)
+ def test_volume_create_with_bootable_and_readonly(self, mock_wait):
arglist = [
'--bootable',
'--read-only',
@@ -478,7 +479,8 @@ class TestVolumeCreate(TestVolume):
self.volumes_mock.update_readonly_flag.assert_called_with(
self.new_volume.id, True)
- def test_volume_create_with_nonbootable_and_readwrite(self):
+ @mock.patch.object(utils, 'wait_for_status', return_value=True)
+ def test_volume_create_with_nonbootable_and_readwrite(self, mock_wait):
arglist = [
'--non-bootable',
'--read-write',
@@ -522,8 +524,9 @@ class TestVolumeCreate(TestVolume):
self.new_volume.id, False)
@mock.patch.object(volume.LOG, 'error')
+ @mock.patch.object(utils, 'wait_for_status', return_value=True)
def test_volume_create_with_bootable_and_readonly_fail(
- self, mock_error):
+ self, mock_wait, mock_error):
self.volumes_mock.set_bootable.side_effect = (
exceptions.CommandError())
@@ -574,6 +577,50 @@ class TestVolumeCreate(TestVolume):
self.volumes_mock.update_readonly_flag.assert_called_with(
self.new_volume.id, True)
+ @mock.patch.object(volume.LOG, 'error')
+ @mock.patch.object(utils, 'wait_for_status', return_value=False)
+ def test_volume_create_non_available_with_readonly(
+ self, mock_wait, mock_error,
+ ):
+ arglist = [
+ '--non-bootable',
+ '--read-only',
+ '--size', str(self.new_volume.size),
+ self.new_volume.name,
+ ]
+ verifylist = [
+ ('bootable', False),
+ ('non_bootable', True),
+ ('read_only', True),
+ ('read_write', False),
+ ('size', self.new_volume.size),
+ ('name', self.new_volume.name),
+ ]
+
+ parsed_args = self.check_parser(
+ self.cmd, arglist, verifylist)
+
+ columns, data = self.cmd.take_action(parsed_args)
+
+ self.volumes_mock.create.assert_called_with(
+ size=self.new_volume.size,
+ snapshot_id=None,
+ name=self.new_volume.name,
+ description=None,
+ volume_type=None,
+ availability_zone=None,
+ metadata=None,
+ imageRef=None,
+ source_volid=None,
+ consistencygroup_id=None,
+ scheduler_hints=None,
+ backup_id=None,
+ )
+
+ self.assertEqual(2, mock_error.call_count)
+ self.assertEqual(self.columns, columns)
+ self.assertCountEqual(self.datalist, data)
+
def test_volume_create_without_size(self):
arglist = [
self.new_volume.name,
diff --git a/openstackclient/tests/unit/volume/v3/fakes.py b/openstackclient/tests/unit/volume/v3/fakes.py
index 3e3a05fa..62383580 100644
--- a/openstackclient/tests/unit/volume/v3/fakes.py
+++ b/openstackclient/tests/unit/volume/v3/fakes.py
@@ -47,6 +47,10 @@ class FakeVolumeClient:
self.volumes.resource_class = fakes.FakeResource(None, {})
self.volume_types = mock.Mock()
self.volume_types.resource_class = fakes.FakeResource(None, {})
+ self.services = mock.Mock()
+ self.services.resource_class = fakes.FakeResource(None, {})
+ self.workers = mock.Mock()
+ self.workers.resource_class = fakes.FakeResource(None, {})
class TestVolume(utils.TestCommand):
@@ -436,3 +440,88 @@ def get_volume_attachments(attachments=None, count=2):
attachments = create_volume_attachments(count)
return mock.Mock(side_effect=attachments)
+
+
+def create_service_log_level_entry(attrs=None):
+ service_log_level_info = {
+ 'host': 'host_test',
+ 'binary': 'cinder-api',
+ 'prefix': 'cinder.api.common',
+ 'level': 'DEBUG',
+ }
+ # Overwrite default attributes if there are some attributes set
+ attrs = attrs or {}
+
+ service_log_level_info.update(attrs)
+
+ service_log_level = fakes.FakeResource(
+ None, service_log_level_info, loaded=True)
+ return service_log_level
+
+
+def create_cleanup_records():
+ """Create fake service cleanup records.
+
+ :return: A list of FakeResource objects
+ """
+ cleaning_records = []
+ unavailable_records = []
+ cleaning_work_info = {
+ 'id': 1,
+ 'host': 'devstack@fakedriver-1',
+ 'binary': 'cinder-volume',
+ 'cluster_name': 'fake_cluster',
+ }
+ unavailable_work_info = {
+ 'id': 2,
+ 'host': 'devstack@fakedriver-2',
+ 'binary': 'cinder-scheduler',
+ 'cluster_name': 'new_cluster',
+ }
+ cleaning_records.append(cleaning_work_info)
+ unavailable_records.append(unavailable_work_info)
+
+ cleaning = [fakes.FakeResource(
+ None, obj, loaded=True) for obj in cleaning_records]
+ unavailable = [fakes.FakeResource(
+ None, obj, loaded=True) for obj in unavailable_records]
+
+ return cleaning, unavailable
+
+
+def create_one_manage_record(attrs=None, snapshot=False):
+ manage_dict = {
+ 'reference': {'source-name': 'fake-volume'},
+ 'size': '1',
+ 'safe_to_manage': False,
+ 'reason_not_safe': 'already managed',
+ 'cinder_id': 'fake-volume',
+ 'extra_info': None,
+ }
+ if snapshot:
+ manage_dict['source_reference'] = {'source-name': 'fake-source'}
+
+ # Overwrite default attributes if there are some attributes set
+ attrs = attrs or {}
+
+ manage_dict.update(attrs)
+ manage_record = fakes.FakeResource(None, manage_dict, loaded=True)
+ return manage_record
+
+
+def create_volume_manage_list_records(count=2):
+ volume_manage_list = []
+ for i in range(count):
+ volume_manage_list.append(
+ create_one_manage_record({'size': str(i + 1)}))
+
+ return volume_manage_list
+
+
+def create_snapshot_manage_list_records(count=2):
+ snapshot_manage_list = []
+ for i in range(count):
+ snapshot_manage_list.append(
+ create_one_manage_record({'size': str(i + 1)}, snapshot=True))
+
+ return snapshot_manage_list
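Editor's note: these helpers follow the fake-factory pattern used across the unit tests in this change: a dict of sensible defaults, optionally overridden by caller-supplied attrs, wrapped in a FakeResource so tests can use attribute access. A simplified standalone sketch of the same pattern, using types.SimpleNamespace instead of the project's FakeResource purely for illustration:

    import types

    def create_one_manage_record(attrs=None, snapshot=False):
        # Defaults mirroring a manageable-volume record
        manage_dict = {
            'reference': {'source-name': 'fake-volume'},
            'size': '1',
            'safe_to_manage': False,
        }
        if snapshot:
            manage_dict['source_reference'] = {'source-name': 'fake-source'}
        # Caller-supplied attributes override the defaults
        manage_dict.update(attrs or {})
        # Attribute-style access, standing in for fakes.FakeResource
        return types.SimpleNamespace(**manage_dict)

    record = create_one_manage_record({'size': '3'}, snapshot=True)
    print(record.size, record.source_reference)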
diff --git a/openstackclient/tests/unit/volume/v3/test_block_storage_cleanup.py b/openstackclient/tests/unit/volume/v3/test_block_storage_cleanup.py
new file mode 100644
index 00000000..b48ce2f9
--- /dev/null
+++ b/openstackclient/tests/unit/volume/v3/test_block_storage_cleanup.py
@@ -0,0 +1,178 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import uuid
+
+from cinderclient import api_versions
+from osc_lib import exceptions
+
+from openstackclient.tests.unit.volume.v3 import fakes as volume_fakes
+from openstackclient.volume.v3 import block_storage_cleanup
+
+
+class TestBlockStorage(volume_fakes.TestVolume):
+
+ def setUp(self):
+ super().setUp()
+
+ # Get a shortcut to the BlockStorageWorkerManager Mock
+ self.worker_mock = self.app.client_manager.volume.workers
+ self.worker_mock.reset_mock()
+
+
+class TestBlockStorageCleanup(TestBlockStorage):
+
+ cleaning, unavailable = volume_fakes.create_cleanup_records()
+
+ def setUp(self):
+ super().setUp()
+
+ self.worker_mock.clean.return_value = (self.cleaning, self.unavailable)
+
+ # Get the command object to test
+ self.cmd = \
+ block_storage_cleanup.BlockStorageCleanup(self.app, None)
+
+ def test_cleanup(self):
+ self.app.client_manager.volume.api_version = \
+ api_versions.APIVersion('3.24')
+
+ arglist = [
+ ]
+ verifylist = [
+ ('cluster', None),
+ ('host', None),
+ ('binary', None),
+ ('is_up', None),
+ ('disabled', None),
+ ('resource_id', None),
+ ('resource_type', None),
+ ('service_id', None),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ expected_columns = ('ID', 'Cluster Name', 'Host', 'Binary', 'Status')
+ cleaning_data = tuple(
+ (
+ obj.id,
+ obj.cluster_name,
+ obj.host,
+ obj.binary,
+ 'Cleaning'
+ ) for obj in self.cleaning
+ )
+ unavailable_data = tuple(
+ (
+ obj.id,
+ obj.cluster_name,
+ obj.host,
+ obj.binary,
+ 'Unavailable'
+ ) for obj in self.unavailable
+ )
+ expected_data = cleaning_data + unavailable_data
+ columns, data = self.cmd.take_action(parsed_args)
+
+ self.assertEqual(expected_columns, columns)
+ self.assertEqual(expected_data, tuple(data))
+
+ # checking if proper call was made to cleanup resources
+        # Since all parameters with a None value are ignored, no
+        # arguments are passed to the API
+ self.worker_mock.clean.assert_called_once_with()
+
+ def test_block_storage_cleanup_pre_324(self):
+ arglist = [
+ ]
+ verifylist = [
+ ('cluster', None),
+ ('host', None),
+ ('binary', None),
+ ('is_up', None),
+ ('disabled', None),
+ ('resource_id', None),
+ ('resource_type', None),
+ ('service_id', None),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+ exc = self.assertRaises(exceptions.CommandError, self.cmd.take_action,
+ parsed_args)
+ self.assertIn(
+ '--os-volume-api-version 3.24 or greater is required', str(exc))
+
+ def test_cleanup_with_args(self):
+ self.app.client_manager.volume.api_version = \
+ api_versions.APIVersion('3.24')
+
+ fake_cluster = 'fake-cluster'
+ fake_host = 'fake-host'
+ fake_binary = 'fake-service'
+ fake_resource_id = str(uuid.uuid4())
+ fake_resource_type = 'Volume'
+ fake_service_id = 1
+ arglist = [
+ '--cluster', fake_cluster,
+ '--host', fake_host,
+ '--binary', fake_binary,
+ '--down',
+ '--enabled',
+ '--resource-id', fake_resource_id,
+ '--resource-type', fake_resource_type,
+ '--service-id', str(fake_service_id),
+ ]
+ verifylist = [
+ ('cluster', fake_cluster),
+ ('host', fake_host),
+ ('binary', fake_binary),
+ ('is_up', False),
+ ('disabled', False),
+ ('resource_id', fake_resource_id),
+ ('resource_type', fake_resource_type),
+ ('service_id', fake_service_id),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ expected_columns = ('ID', 'Cluster Name', 'Host', 'Binary', 'Status')
+ cleaning_data = tuple(
+ (
+ obj.id,
+ obj.cluster_name,
+ obj.host,
+ obj.binary,
+ 'Cleaning'
+ ) for obj in self.cleaning
+ )
+ unavailable_data = tuple(
+ (
+ obj.id,
+ obj.cluster_name,
+ obj.host,
+ obj.binary,
+ 'Unavailable'
+ ) for obj in self.unavailable
+ )
+ expected_data = cleaning_data + unavailable_data
+ columns, data = self.cmd.take_action(parsed_args)
+
+ self.assertEqual(expected_columns, columns)
+ self.assertEqual(expected_data, tuple(data))
+
+ # checking if proper call was made to cleanup resources
+ self.worker_mock.clean.assert_called_once_with(
+ cluster_name=fake_cluster,
+ host=fake_host,
+ binary=fake_binary,
+ is_up=False,
+ disabled=False,
+ resource_id=fake_resource_id,
+ resource_type=fake_resource_type,
+ service_id=fake_service_id)
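Editor's note: the pre-3.24 test relies on a pattern repeated throughout these new tests: leave the fake client at its default (old) API version, call take_action, and assert that the resulting CommandError message names the required microversion. A self-contained sketch of the same idea using only the standard library (RuntimeError stands in for osc_lib's CommandError and a tuple comparison stands in for cinderclient's APIVersion):

    import unittest


    class FakeCleanupCommand:
        """Stand-in for a command whose take_action gates on a microversion."""

        def __init__(self, api_version):
            self.api_version = api_version  # e.g. (3, 23)

        def take_action(self):
            if self.api_version < (3, 24):
                raise RuntimeError(
                    '--os-volume-api-version 3.24 or greater is required')
            return 'cleanup requested'


    class TestVersionGate(unittest.TestCase):
        def test_pre_324_rejected(self):
            cmd = FakeCleanupCommand(api_version=(3, 23))
            # Stdlib unittest uses the context-manager form; the testtools
            # base class used by these tests lets assertRaises return the
            # exception object directly.
            with self.assertRaises(RuntimeError) as ctx:
                cmd.take_action()
            self.assertIn('3.24 or greater is required', str(ctx.exception))


    if __name__ == '__main__':
        unittest.main()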
diff --git a/openstackclient/tests/unit/volume/v3/test_block_storage_log_level.py b/openstackclient/tests/unit/volume/v3/test_block_storage_log_level.py
new file mode 100644
index 00000000..35ea6274
--- /dev/null
+++ b/openstackclient/tests/unit/volume/v3/test_block_storage_log_level.py
@@ -0,0 +1,233 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+from cinderclient import api_versions
+import ddt
+from osc_lib import exceptions
+
+from openstackclient.tests.unit import utils as tests_utils
+from openstackclient.tests.unit.volume.v3 import fakes as volume_fakes
+from openstackclient.volume.v3 import block_storage_log_level as service
+
+
+class TestService(volume_fakes.TestVolume):
+
+ def setUp(self):
+ super().setUp()
+
+ # Get a shortcut to the ServiceManager Mock
+ self.service_mock = self.app.client_manager.volume.services
+ self.service_mock.reset_mock()
+
+
+class TestBlockStorageLogLevelList(TestService):
+
+ service_log = volume_fakes.create_service_log_level_entry()
+
+ def setUp(self):
+ super().setUp()
+
+ self.service_mock.get_log_levels.return_value = [self.service_log]
+
+ # Get the command object to test
+ self.cmd = service.BlockStorageLogLevelList(self.app, None)
+
+ def test_block_storage_log_level_list(self):
+ self.app.client_manager.volume.api_version = \
+ api_versions.APIVersion('3.32')
+ arglist = [
+ '--host', self.service_log.host,
+ '--service', self.service_log.binary,
+ '--log-prefix', self.service_log.prefix,
+ ]
+ verifylist = [
+ ('host', self.service_log.host),
+ ('service', self.service_log.binary),
+ ('log_prefix', self.service_log.prefix),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ columns, data = self.cmd.take_action(parsed_args)
+
+ expected_columns = [
+ 'Binary',
+ 'Host',
+ 'Prefix',
+ 'Level',
+ ]
+
+ # confirming if all expected columns are present in the result.
+ self.assertEqual(expected_columns, columns)
+
+ datalist = ((
+ self.service_log.binary,
+ self.service_log.host,
+ self.service_log.prefix,
+ self.service_log.level,
+ ), )
+
+ # confirming if all expected values are present in the result.
+ self.assertEqual(datalist, tuple(data))
+
+ # checking if proper call was made to get log level of services
+ self.service_mock.get_log_levels.assert_called_with(
+ server=self.service_log.host,
+ binary=self.service_log.binary,
+ prefix=self.service_log.prefix,
+ )
+
+ def test_block_storage_log_level_list_pre_332(self):
+ arglist = [
+ '--host', self.service_log.host,
+ '--service', 'cinder-api',
+ '--log-prefix', 'cinder_test.api.common',
+ ]
+ verifylist = [
+ ('host', self.service_log.host),
+ ('service', 'cinder-api'),
+ ('log_prefix', 'cinder_test.api.common'),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ exc = self.assertRaises(exceptions.CommandError, self.cmd.take_action,
+ parsed_args)
+ self.assertIn(
+ '--os-volume-api-version 3.32 or greater is required', str(exc))
+
+ def test_block_storage_log_level_list_invalid_service_name(self):
+ self.app.client_manager.volume.api_version = \
+ api_versions.APIVersion('3.32')
+ arglist = [
+ '--host', self.service_log.host,
+ '--service', 'nova-api',
+ '--log-prefix', 'cinder_test.api.common',
+ ]
+ verifylist = [
+ ('host', self.service_log.host),
+ ('service', 'nova-api'),
+ ('log_prefix', 'cinder_test.api.common'),
+ ]
+
+ self.assertRaises(tests_utils.ParserException, self.check_parser,
+ self.cmd, arglist, verifylist)
+
+
+@ddt.ddt
+class TestBlockStorageLogLevelSet(TestService):
+
+ service_log = volume_fakes.create_service_log_level_entry()
+
+ def setUp(self):
+ super().setUp()
+
+ # Get the command object to test
+ self.cmd = service.BlockStorageLogLevelSet(self.app, None)
+
+ def test_block_storage_log_level_set(self):
+ self.app.client_manager.volume.api_version = \
+ api_versions.APIVersion('3.32')
+ arglist = [
+ 'ERROR',
+ '--host', self.service_log.host,
+ '--service', self.service_log.binary,
+ '--log-prefix', self.service_log.prefix,
+ ]
+ verifylist = [
+ ('level', 'ERROR'),
+ ('host', self.service_log.host),
+ ('service', self.service_log.binary),
+ ('log_prefix', self.service_log.prefix),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ self.cmd.take_action(parsed_args)
+
+ # checking if proper call was made to set log level of services
+ self.service_mock.set_log_levels.assert_called_with(
+ level='ERROR',
+ server=self.service_log.host,
+ binary=self.service_log.binary,
+ prefix=self.service_log.prefix,
+ )
+
+ def test_block_storage_log_level_set_pre_332(self):
+ arglist = [
+ 'ERROR',
+ '--host', self.service_log.host,
+ '--service', 'cinder-api',
+ '--log-prefix', 'cinder_test.api.common',
+ ]
+ verifylist = [
+ ('level', 'ERROR'),
+ ('host', self.service_log.host),
+ ('service', 'cinder-api'),
+ ('log_prefix', 'cinder_test.api.common'),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ exc = self.assertRaises(exceptions.CommandError, self.cmd.take_action,
+ parsed_args)
+ self.assertIn(
+ '--os-volume-api-version 3.32 or greater is required', str(exc))
+
+ def test_block_storage_log_level_set_invalid_service_name(self):
+ self.app.client_manager.volume.api_version = \
+ api_versions.APIVersion('3.32')
+ arglist = [
+ 'ERROR',
+ '--host', self.service_log.host,
+ '--service', 'nova-api',
+ '--log-prefix', 'cinder.api.common',
+ ]
+ verifylist = [
+ ('level', 'ERROR'),
+ ('host', self.service_log.host),
+ ('service', 'nova-api'),
+ ('log_prefix', 'cinder.api.common'),
+ ]
+
+ self.assertRaises(tests_utils.ParserException, self.check_parser,
+ self.cmd, arglist, verifylist)
+
+ @ddt.data('WARNING', 'info', 'Error', 'debuG', 'fake-log-level')
+ def test_block_storage_log_level_set_log_level(self, log_level):
+ self.app.client_manager.volume.api_version = \
+ api_versions.APIVersion('3.32')
+ arglist = [
+ log_level,
+ '--host', self.service_log.host,
+ '--service', 'cinder-api',
+ '--log-prefix', 'cinder.api.common',
+ ]
+ verifylist = [
+ ('level', log_level.upper()),
+ ('host', self.service_log.host),
+ ('service', 'cinder-api'),
+ ('log_prefix', 'cinder.api.common'),
+ ]
+
+ if log_level == 'fake-log-level':
+ self.assertRaises(tests_utils.ParserException, self.check_parser,
+ self.cmd, arglist, verifylist)
+ else:
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ self.cmd.take_action(parsed_args)
+
+ # checking if proper call was made to set log level of services
+ self.service_mock.set_log_levels.assert_called_with(
+ level=log_level.upper(),
+ server=self.service_log.host,
+ binary=self.service_log.binary,
+ prefix=self.service_log.prefix)
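Editor's note: the ddt cases above ('WARNING', 'info', 'Error', 'debuG', 'fake-log-level') exercise the level argument that this change defines later with type=str.upper and a fixed choices list. Because argparse applies the type conversion before the choices check, any casing of a valid level is accepted and normalized, while an unknown value is still rejected at parse time. A quick standalone demonstration:

    import argparse

    parser = argparse.ArgumentParser()
    # type=str.upper runs before the choices check, so mixed-case input is
    # normalized and accepted while an unknown level is still rejected.
    parser.add_argument(
        'level',
        type=str.upper,
        choices=('INFO', 'WARNING', 'ERROR', 'DEBUG'),
    )

    print(parser.parse_args(['debuG']).level)  # DEBUG
    print(parser.parse_args(['info']).level)   # INFO
    # parser.parse_args(['fake-log-level'])    # exits: invalid choice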
diff --git a/openstackclient/tests/unit/volume/v3/test_block_storage_manage.py b/openstackclient/tests/unit/volume/v3/test_block_storage_manage.py
new file mode 100644
index 00000000..afd0fd35
--- /dev/null
+++ b/openstackclient/tests/unit/volume/v3/test_block_storage_manage.py
@@ -0,0 +1,411 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+from cinderclient import api_versions
+from osc_lib import exceptions
+
+from openstackclient.tests.unit import utils as tests_utils
+from openstackclient.tests.unit.volume.v2 import fakes as v2_volume_fakes
+from openstackclient.tests.unit.volume.v3 import fakes as volume_fakes
+from openstackclient.volume.v3 import block_storage_manage
+
+
+class TestBlockStorageManage(v2_volume_fakes.TestVolume):
+
+ def setUp(self):
+ super().setUp()
+
+ self.volumes_mock = self.app.client_manager.volume.volumes
+ self.volumes_mock.reset_mock()
+ self.snapshots_mock = self.app.client_manager.volume.volume_snapshots
+ self.snapshots_mock.reset_mock()
+
+
+class TestBlockStorageVolumeManage(TestBlockStorageManage):
+
+ volume_manage_list = volume_fakes.create_volume_manage_list_records()
+
+ def setUp(self):
+ super().setUp()
+
+ self.volumes_mock.list_manageable.return_value = (
+ self.volume_manage_list)
+
+ # Get the command object to test
+ self.cmd = block_storage_manage.BlockStorageManageVolumes(
+ self.app, None)
+
+ def test_block_storage_volume_manage_list(self):
+ self.app.client_manager.volume.api_version = \
+ api_versions.APIVersion('3.8')
+ host = 'fake_host'
+ arglist = [
+ host,
+ ]
+ verifylist = [
+ ('host', host),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ columns, data = self.cmd.take_action(parsed_args)
+
+ expected_columns = [
+ 'reference',
+ 'size',
+ 'safe_to_manage',
+ 'reason_not_safe',
+ 'cinder_id',
+ 'extra_info',
+ ]
+
+ # confirming if all expected columns are present in the result.
+ self.assertEqual(expected_columns, columns)
+
+ datalist = []
+ for volume_record in self.volume_manage_list:
+ manage_details = (
+ volume_record.reference,
+ volume_record.size,
+ volume_record.safe_to_manage,
+ volume_record.reason_not_safe,
+ volume_record.cinder_id,
+ volume_record.extra_info,
+ )
+ datalist.append(manage_details)
+ datalist = tuple(datalist)
+
+ # confirming if all expected values are present in the result.
+ self.assertEqual(datalist, tuple(data))
+
+ # checking if proper call was made to get volume manageable list
+ self.volumes_mock.list_manageable.assert_called_with(
+ host=parsed_args.host,
+ detailed=parsed_args.detailed,
+ marker=parsed_args.marker,
+ limit=parsed_args.limit,
+ offset=parsed_args.offset,
+ sort=parsed_args.sort,
+ cluster=parsed_args.cluster,
+ )
+
+ def test_block_storage_volume_manage_pre_38(self):
+ host = 'fake_host'
+ arglist = [
+ host,
+ ]
+ verifylist = [
+ ('host', host),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ exc = self.assertRaises(exceptions.CommandError, self.cmd.take_action,
+ parsed_args)
+ self.assertIn(
+ '--os-volume-api-version 3.8 or greater is required', str(exc))
+
+ def test_block_storage_volume_manage_pre_317(self):
+ self.app.client_manager.volume.api_version = \
+ api_versions.APIVersion('3.16')
+ cluster = 'fake_cluster'
+ arglist = [
+ '--cluster', cluster,
+ ]
+ verifylist = [
+ ('cluster', cluster),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ exc = self.assertRaises(exceptions.CommandError, self.cmd.take_action,
+ parsed_args)
+ self.assertIn(
+ '--os-volume-api-version 3.17 or greater is required', str(exc))
+ self.assertIn('--cluster', str(exc))
+
+ def test_block_storage_volume_manage_host_and_cluster(self):
+ self.app.client_manager.volume.api_version = \
+ api_versions.APIVersion('3.17')
+ host = 'fake_host'
+ cluster = 'fake_cluster'
+ arglist = [
+ host,
+ '--cluster', cluster,
+ ]
+ verifylist = [
+ ('host', host),
+ ('cluster', cluster),
+ ]
+ exc = self.assertRaises(tests_utils.ParserException,
+ self.check_parser, self.cmd,
+ arglist, verifylist)
+ self.assertIn(
+ 'argument --cluster: not allowed with argument <host>', str(exc))
+
+ def test_block_storage_volume_manage_list_all_args(self):
+ self.app.client_manager.volume.api_version = \
+ api_versions.APIVersion('3.8')
+ host = 'fake_host'
+ detailed = True
+ marker = 'fake_marker'
+ limit = '5'
+ offset = '3'
+ sort = 'size:asc'
+ arglist = [
+ host,
+ '--detailed', str(detailed),
+ '--marker', marker,
+ '--limit', limit,
+ '--offset', offset,
+ '--sort', sort,
+ ]
+ verifylist = [
+ ('host', host),
+ ('detailed', str(detailed)),
+ ('marker', marker),
+ ('limit', limit),
+ ('offset', offset),
+ ('sort', sort),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ columns, data = self.cmd.take_action(parsed_args)
+
+ expected_columns = [
+ 'reference',
+ 'size',
+ 'safe_to_manage',
+ 'reason_not_safe',
+ 'cinder_id',
+ 'extra_info',
+ ]
+
+ # confirming if all expected columns are present in the result.
+ self.assertEqual(expected_columns, columns)
+
+ datalist = []
+ for volume_record in self.volume_manage_list:
+ manage_details = (
+ volume_record.reference,
+ volume_record.size,
+ volume_record.safe_to_manage,
+ volume_record.reason_not_safe,
+ volume_record.cinder_id,
+ volume_record.extra_info,
+ )
+ datalist.append(manage_details)
+ datalist = tuple(datalist)
+
+ # confirming if all expected values are present in the result.
+ self.assertEqual(datalist, tuple(data))
+
+ # checking if proper call was made to get volume manageable list
+ self.volumes_mock.list_manageable.assert_called_with(
+ host=host,
+ detailed=detailed,
+ marker=marker,
+ limit=limit,
+ offset=offset,
+ sort=sort,
+ cluster=parsed_args.cluster,
+ )
+
+
+class TestBlockStorageSnapshotManage(TestBlockStorageManage):
+
+ snapshot_manage_list = volume_fakes.create_snapshot_manage_list_records()
+
+ def setUp(self):
+ super().setUp()
+
+ self.snapshots_mock.list_manageable.return_value = (
+ self.snapshot_manage_list)
+
+ # Get the command object to test
+ self.cmd = block_storage_manage.BlockStorageManageSnapshots(
+ self.app, None)
+
+ def test_block_storage_snapshot_manage_list(self):
+ self.app.client_manager.volume.api_version = \
+ api_versions.APIVersion('3.8')
+ host = 'fake_host'
+ arglist = [
+ host,
+ ]
+ verifylist = [
+ ('host', host),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ columns, data = self.cmd.take_action(parsed_args)
+
+ expected_columns = [
+ 'reference',
+ 'size',
+ 'safe_to_manage',
+ 'source_reference',
+ 'reason_not_safe',
+ 'cinder_id',
+ 'extra_info',
+ ]
+
+ # confirming if all expected columns are present in the result.
+ self.assertEqual(expected_columns, columns)
+
+ datalist = []
+ for snapshot_record in self.snapshot_manage_list:
+ manage_details = (
+ snapshot_record.reference,
+ snapshot_record.size,
+ snapshot_record.safe_to_manage,
+ snapshot_record.source_reference,
+ snapshot_record.reason_not_safe,
+ snapshot_record.cinder_id,
+ snapshot_record.extra_info,
+ )
+ datalist.append(manage_details)
+ datalist = tuple(datalist)
+
+ # confirming if all expected values are present in the result.
+ self.assertEqual(datalist, tuple(data))
+
+ # checking if proper call was made to get snapshot manageable list
+ self.snapshots_mock.list_manageable.assert_called_with(
+ host=parsed_args.host,
+ detailed=parsed_args.detailed,
+ marker=parsed_args.marker,
+ limit=parsed_args.limit,
+ offset=parsed_args.offset,
+ sort=parsed_args.sort,
+ cluster=parsed_args.cluster,
+ )
+
+ def test_block_storage_volume_manage_pre_38(self):
+ host = 'fake_host'
+ arglist = [
+ host,
+ ]
+ verifylist = [
+ ('host', host),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ exc = self.assertRaises(exceptions.CommandError, self.cmd.take_action,
+ parsed_args)
+ self.assertIn(
+ '--os-volume-api-version 3.8 or greater is required', str(exc))
+
+ def test_block_storage_volume_manage_pre_317(self):
+ self.app.client_manager.volume.api_version = \
+ api_versions.APIVersion('3.16')
+ cluster = 'fake_cluster'
+ arglist = [
+ '--cluster', cluster,
+ ]
+ verifylist = [
+ ('cluster', cluster),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ exc = self.assertRaises(exceptions.CommandError, self.cmd.take_action,
+ parsed_args)
+ self.assertIn(
+ '--os-volume-api-version 3.17 or greater is required', str(exc))
+ self.assertIn('--cluster', str(exc))
+
+ def test_block_storage_volume_manage_host_and_cluster(self):
+ self.app.client_manager.volume.api_version = \
+ api_versions.APIVersion('3.17')
+ host = 'fake_host'
+ cluster = 'fake_cluster'
+ arglist = [
+ host,
+ '--cluster', cluster,
+ ]
+ verifylist = [
+ ('host', host),
+ ('cluster', cluster),
+ ]
+ exc = self.assertRaises(tests_utils.ParserException,
+ self.check_parser, self.cmd,
+ arglist, verifylist)
+ self.assertIn(
+ 'argument --cluster: not allowed with argument <host>', str(exc))
+
+ def test_block_storage_volume_manage_list_all_args(self):
+ self.app.client_manager.volume.api_version = \
+ api_versions.APIVersion('3.8')
+ host = 'fake_host'
+ detailed = True
+ marker = 'fake_marker'
+ limit = '5'
+ offset = '3'
+ sort = 'size:asc'
+ arglist = [
+ host,
+ '--detailed', str(detailed),
+ '--marker', marker,
+ '--limit', limit,
+ '--offset', offset,
+ '--sort', sort,
+ ]
+ verifylist = [
+ ('host', host),
+ ('detailed', str(detailed)),
+ ('marker', marker),
+ ('limit', limit),
+ ('offset', offset),
+ ('sort', sort),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ columns, data = self.cmd.take_action(parsed_args)
+
+ expected_columns = [
+ 'reference',
+ 'size',
+ 'safe_to_manage',
+ 'source_reference',
+ 'reason_not_safe',
+ 'cinder_id',
+ 'extra_info',
+ ]
+
+ # confirming if all expected columns are present in the result.
+ self.assertEqual(expected_columns, columns)
+
+ datalist = []
+ for snapshot_record in self.snapshot_manage_list:
+ manage_details = (
+ snapshot_record.reference,
+ snapshot_record.size,
+ snapshot_record.safe_to_manage,
+ snapshot_record.source_reference,
+ snapshot_record.reason_not_safe,
+ snapshot_record.cinder_id,
+ snapshot_record.extra_info,
+ )
+ datalist.append(manage_details)
+ datalist = tuple(datalist)
+
+ # confirming if all expected values are present in the result.
+ self.assertEqual(datalist, tuple(data))
+
+ # checking if proper call was made to get snapshot manageable list
+ self.snapshots_mock.list_manageable.assert_called_with(
+ host=host,
+ detailed=detailed,
+ marker=marker,
+ limit=limit,
+ offset=offset,
+ sort=sort,
+ cluster=parsed_args.cluster,
+ )
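Editor's note: one subtlety the all-args tests rely on is that the new --detailed option is declared with default=True but no type= conversion, so a value given on the command line arrives as the string 'True' rather than a boolean, which is why the verifylist compares against str(detailed). The command presumably normalizes it again before calling list_manageable (the strutils import later in this change suggests bool_from_string), which is why the final assert can use the boolean. A quick standalone illustration of the argparse behaviour:

    import argparse

    parser = argparse.ArgumentParser()
    # Boolean default, but no type= conversion for command-line values.
    parser.add_argument('--detailed', default=True)

    print(repr(parser.parse_args([]).detailed))                      # True (the bool default)
    print(repr(parser.parse_args(['--detailed', 'True']).detailed))  # 'True' (a string)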
diff --git a/openstackclient/tests/unit/volume/v3/test_volume.py b/openstackclient/tests/unit/volume/v3/test_volume.py
new file mode 100644
index 00000000..ed72bfa1
--- /dev/null
+++ b/openstackclient/tests/unit/volume/v3/test_volume.py
@@ -0,0 +1,179 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+import copy
+from unittest import mock
+
+from cinderclient import api_versions
+from osc_lib.cli import format_columns
+from osc_lib import exceptions
+from osc_lib import utils
+
+from openstackclient.tests.unit.volume.v2 import fakes as volume_fakes
+from openstackclient.volume.v3 import volume
+
+
+class TestVolumeSummary(volume_fakes.TestVolume):
+
+ columns = [
+ 'Total Count',
+ 'Total Size',
+ ]
+
+ def setUp(self):
+ super().setUp()
+
+ self.volumes_mock = self.app.client_manager.volume.volumes
+ self.volumes_mock.reset_mock()
+ self.mock_vol_1 = volume_fakes.create_one_volume()
+ self.mock_vol_2 = volume_fakes.create_one_volume()
+ self.return_dict = {
+ 'volume-summary': {
+ 'total_count': 2,
+ 'total_size': self.mock_vol_1.size + self.mock_vol_2.size}}
+ self.volumes_mock.summary.return_value = self.return_dict
+
+ # Get the command object to test
+ self.cmd = volume.VolumeSummary(self.app, None)
+
+ def test_volume_summary(self):
+ self.app.client_manager.volume.api_version = \
+ api_versions.APIVersion('3.12')
+ arglist = [
+ '--all-projects',
+ ]
+ verifylist = [
+ ('all_projects', True),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ columns, data = self.cmd.take_action(parsed_args)
+
+ self.volumes_mock.summary.assert_called_once_with(
+ all_tenants=True,
+ )
+
+ self.assertEqual(self.columns, columns)
+
+ datalist = (
+ 2,
+ self.mock_vol_1.size + self.mock_vol_2.size)
+ self.assertCountEqual(datalist, tuple(data))
+
+ def test_volume_summary_pre_312(self):
+ arglist = [
+ '--all-projects',
+ ]
+ verifylist = [
+ ('all_projects', True),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ exc = self.assertRaises(
+ exceptions.CommandError,
+ self.cmd.take_action,
+ parsed_args)
+ self.assertIn(
+ '--os-volume-api-version 3.12 or greater is required',
+ str(exc))
+
+ def test_volume_summary_with_metadata(self):
+ self.app.client_manager.volume.api_version = \
+ api_versions.APIVersion('3.36')
+
+ combine_meta = {**self.mock_vol_1.metadata, **self.mock_vol_2.metadata}
+ meta_dict = copy.deepcopy(self.return_dict)
+ meta_dict['volume-summary']['metadata'] = combine_meta
+ self.volumes_mock.summary.return_value = meta_dict
+
+ new_cols = copy.deepcopy(self.columns)
+ new_cols.extend(['Metadata'])
+
+ arglist = [
+ '--all-projects',
+ ]
+ verifylist = [
+ ('all_projects', True),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ columns, data = self.cmd.take_action(parsed_args)
+
+ self.volumes_mock.summary.assert_called_once_with(
+ all_tenants=True,
+ )
+
+ self.assertEqual(new_cols, columns)
+
+ datalist = (
+ 2,
+ self.mock_vol_1.size + self.mock_vol_2.size,
+ format_columns.DictColumn(combine_meta))
+ self.assertCountEqual(datalist, tuple(data))
+
+
+class TestVolumeRevertToSnapshot(volume_fakes.TestVolume):
+
+ def setUp(self):
+ super().setUp()
+
+ self.volumes_mock = self.app.client_manager.volume.volumes
+ self.volumes_mock.reset_mock()
+ self.snapshots_mock = self.app.client_manager.volume.volume_snapshots
+ self.snapshots_mock.reset_mock()
+ self.mock_volume = volume_fakes.create_one_volume()
+ self.mock_snapshot = volume_fakes.create_one_snapshot(
+ attrs={'volume_id': self.volumes_mock.id})
+
+ # Get the command object to test
+ self.cmd = volume.VolumeRevertToSnapshot(self.app, None)
+
+ def test_volume_revert_to_snapshot_pre_340(self):
+ arglist = [
+ self.mock_snapshot.id,
+ ]
+ verifylist = [
+ ('snapshot', self.mock_snapshot.id),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ exc = self.assertRaises(
+ exceptions.CommandError,
+ self.cmd.take_action,
+ parsed_args)
+ self.assertIn(
+ '--os-volume-api-version 3.40 or greater is required',
+ str(exc))
+
+ def test_volume_revert_to_snapshot(self):
+ self.app.client_manager.volume.api_version = \
+ api_versions.APIVersion('3.40')
+ arglist = [
+ self.mock_snapshot.id,
+ ]
+ verifylist = [
+ ('snapshot', self.mock_snapshot.id),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ find_mock_result = [self.mock_snapshot, self.mock_volume]
+ with mock.patch.object(utils, 'find_resource',
+ side_effect=find_mock_result) as find_mock:
+ self.cmd.take_action(parsed_args)
+
+ self.volumes_mock.revert_to_snapshot.assert_called_once_with(
+ volume=self.mock_volume,
+ snapshot=self.mock_snapshot,
+ )
+ self.assertEqual(2, find_mock.call_count)
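Editor's note: test_volume_revert_to_snapshot above patches utils.find_resource with a side_effect list so the first lookup returns the snapshot and the second returns the volume. A minimal standalone example of that mock technique:

    from unittest import mock


    class Finder:
        def find_resource(self, name):
            raise NotImplementedError('would normally hit the API')


    finder = Finder()

    # A list side_effect makes consecutive calls return consecutive items:
    # the snapshot on the first lookup, the volume on the second.
    with mock.patch.object(
            finder, 'find_resource',
            side_effect=['snapshot-obj', 'volume-obj']) as find_mock:
        assert finder.find_resource('snap') == 'snapshot-obj'
        assert finder.find_resource('vol') == 'volume-obj'
        assert find_mock.call_count == 2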
diff --git a/openstackclient/tests/unit/volume/v3/test_volume_group.py b/openstackclient/tests/unit/volume/v3/test_volume_group.py
index 96079a08..78717de8 100644
--- a/openstackclient/tests/unit/volume/v3/test_volume_group.py
+++ b/openstackclient/tests/unit/volume/v3/test_volume_group.py
@@ -10,9 +10,12 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
from cinderclient import api_versions
from osc_lib import exceptions
+from openstackclient.tests.unit import utils as tests_utils
from openstackclient.tests.unit.volume.v3 import fakes as volume_fakes
from openstackclient.volume.v3 import volume_group
@@ -32,6 +35,10 @@ class TestVolumeGroup(volume_fakes.TestVolume):
self.volume_types_mock = self.app.client_manager.volume.volume_types
self.volume_types_mock.reset_mock()
+ self.volume_group_snapshots_mock = \
+ self.app.client_manager.volume.group_snapshots
+ self.volume_group_snapshots_mock.reset_mock()
+
class TestVolumeGroupCreate(TestVolumeGroup):
@@ -43,6 +50,8 @@ class TestVolumeGroupCreate(TestVolumeGroup):
'volume_types': [fake_volume_type.id],
},
)
+ fake_volume_group_snapshot = \
+ volume_fakes.create_one_volume_group_snapshot()
columns = (
'ID',
@@ -79,6 +88,10 @@ class TestVolumeGroupCreate(TestVolumeGroup):
self.fake_volume_group_type
self.volume_groups_mock.create.return_value = self.fake_volume_group
self.volume_groups_mock.get.return_value = self.fake_volume_group
+ self.volume_groups_mock.create_from_src.return_value = \
+ self.fake_volume_group
+ self.volume_group_snapshots_mock.get.return_value = \
+ self.fake_volume_group_snapshot
self.cmd = volume_group.CreateVolumeGroup(self.app, None)
@@ -87,8 +100,8 @@ class TestVolumeGroupCreate(TestVolumeGroup):
api_versions.APIVersion('3.13')
arglist = [
- self.fake_volume_group_type.id,
- self.fake_volume_type.id,
+ '--volume-group-type', self.fake_volume_group_type.id,
+ '--volume-type', self.fake_volume_type.id,
]
verifylist = [
('volume_group_type', self.fake_volume_group_type.id),
@@ -115,13 +128,75 @@ class TestVolumeGroupCreate(TestVolumeGroup):
self.assertEqual(self.columns, columns)
self.assertCountEqual(self.data, data)
- def test_volume_group_create_with_options(self):
+ def test_volume_group_create__legacy(self):
self.app.client_manager.volume.api_version = \
api_versions.APIVersion('3.13')
arglist = [
self.fake_volume_group_type.id,
self.fake_volume_type.id,
+ ]
+ verifylist = [
+ ('volume_group_type_legacy', self.fake_volume_group_type.id),
+ ('volume_types_legacy', [self.fake_volume_type.id]),
+ ('name', None),
+ ('description', None),
+ ('availability_zone', None),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ with mock.patch.object(self.cmd.log, 'warning') as mock_warning:
+ columns, data = self.cmd.take_action(parsed_args)
+
+ self.volume_group_types_mock.get.assert_called_once_with(
+ self.fake_volume_group_type.id)
+ self.volume_types_mock.get.assert_called_once_with(
+ self.fake_volume_type.id)
+ self.volume_groups_mock.create.assert_called_once_with(
+ self.fake_volume_group_type.id,
+ self.fake_volume_type.id,
+ None,
+ None,
+ availability_zone=None,
+ )
+ self.assertEqual(self.columns, columns)
+ self.assertCountEqual(self.data, data)
+ mock_warning.assert_called_once()
+ self.assertIn(
+ 'Passing volume group type and volume types as positional ',
+ str(mock_warning.call_args[0][0]),
+ )
+
+ def test_volume_group_create_no_volume_type(self):
+ self.app.client_manager.volume.api_version = \
+ api_versions.APIVersion('3.13')
+
+ arglist = [
+ '--volume-group-type', self.fake_volume_group_type.id,
+ ]
+ verifylist = [
+ ('volume_group_type', self.fake_volume_group_type.id),
+ ('name', None),
+ ('description', None),
+ ('availability_zone', None),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ exc = self.assertRaises(
+ exceptions.CommandError,
+ self.cmd.take_action,
+ parsed_args)
+ self.assertIn(
+ '--volume-types is a required argument when creating ',
+ str(exc))
+
+ def test_volume_group_create_with_options(self):
+ self.app.client_manager.volume.api_version = \
+ api_versions.APIVersion('3.13')
+
+ arglist = [
+ '--volume-group-type', self.fake_volume_group_type.id,
+ '--volume-type', self.fake_volume_type.id,
'--name', 'foo',
'--description', 'hello, world',
'--availability-zone', 'bar',
@@ -156,8 +231,8 @@ class TestVolumeGroupCreate(TestVolumeGroup):
api_versions.APIVersion('3.12')
arglist = [
- self.fake_volume_group_type.id,
- self.fake_volume_type.id,
+ '--volume-group-type', self.fake_volume_group_type.id,
+ '--volume-type', self.fake_volume_type.id,
]
verifylist = [
('volume_group_type', self.fake_volume_group_type.id),
@@ -176,6 +251,101 @@ class TestVolumeGroupCreate(TestVolumeGroup):
'--os-volume-api-version 3.13 or greater is required',
str(exc))
+ def test_volume_group_create_from_source_group(self):
+ self.app.client_manager.volume.api_version = \
+ api_versions.APIVersion('3.14')
+
+ arglist = [
+ '--source-group', self.fake_volume_group.id,
+ ]
+ verifylist = [
+ ('source_group', self.fake_volume_group.id),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ columns, data = self.cmd.take_action(parsed_args)
+
+ self.volume_groups_mock.get.assert_has_calls(
+ [mock.call(self.fake_volume_group.id),
+ mock.call(self.fake_volume_group.id)])
+ self.volume_groups_mock.create_from_src.assert_called_once_with(
+ None,
+ self.fake_volume_group.id,
+ None,
+ None,
+ )
+ self.assertEqual(self.columns, columns)
+ self.assertCountEqual(self.data, data)
+
+ def test_volume_group_create_from_group_snapshot(self):
+ self.app.client_manager.volume.api_version = \
+ api_versions.APIVersion('3.14')
+
+ arglist = [
+ '--group-snapshot', self.fake_volume_group_snapshot.id,
+ ]
+ verifylist = [
+ ('group_snapshot', self.fake_volume_group_snapshot.id),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ columns, data = self.cmd.take_action(parsed_args)
+
+ self.volume_group_snapshots_mock.get.assert_called_once_with(
+ self.fake_volume_group_snapshot.id)
+ self.volume_groups_mock.get.assert_called_once_with(
+ self.fake_volume_group.id)
+ self.volume_groups_mock.create_from_src.assert_called_once_with(
+ self.fake_volume_group_snapshot.id,
+ None,
+ None,
+ None,
+ )
+ self.assertEqual(self.columns, columns)
+ self.assertCountEqual(self.data, data)
+
+ def test_volume_group_create_from_src_pre_v314(self):
+ self.app.client_manager.volume.api_version = \
+ api_versions.APIVersion('3.13')
+
+ arglist = [
+ '--source-group', self.fake_volume_group.id,
+ ]
+ verifylist = [
+ ('source_group', self.fake_volume_group.id),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ exc = self.assertRaises(
+ exceptions.CommandError,
+ self.cmd.take_action,
+ parsed_args)
+ self.assertIn(
+ '--os-volume-api-version 3.14 or greater is required',
+ str(exc))
+
+ def test_volume_group_create_from_src_source_group_group_snapshot(self):
+ self.app.client_manager.volume.api_version = \
+ api_versions.APIVersion('3.14')
+
+ arglist = [
+ '--source-group', self.fake_volume_group.id,
+ '--group-snapshot', self.fake_volume_group_snapshot.id,
+ ]
+ verifylist = [
+ ('source_group', self.fake_volume_group.id),
+ ('group_snapshot', self.fake_volume_group_snapshot.id),
+ ]
+
+ exc = self.assertRaises(tests_utils.ParserException,
+ self.check_parser,
+ self.cmd,
+ arglist,
+ verifylist)
+ self.assertIn(
+ '--group-snapshot: not allowed with argument --source-group',
+ str(exc))
+
class TestVolumeGroupDelete(TestVolumeGroup):
diff --git a/openstackclient/volume/v1/volume.py b/openstackclient/volume/v1/volume.py
index dfbb0c54..198b890f 100644
--- a/openstackclient/volume/v1/volume.py
+++ b/openstackclient/volume/v1/volume.py
@@ -224,15 +224,44 @@ class CreateVolume(command.ShowOne):
if parsed_args.bootable or parsed_args.non_bootable:
try:
- volume_client.volumes.set_bootable(
- volume.id, parsed_args.bootable)
+ if utils.wait_for_status(
+ volume_client.volumes.get,
+ volume.id,
+ success_status=['available'],
+ error_status=['error'],
+ sleep_time=1
+ ):
+ volume_client.volumes.set_bootable(
+ volume.id,
+ parsed_args.bootable
+ )
+ else:
+ msg = _(
+ "Volume status is not available for setting boot "
+ "state"
+ )
+ raise exceptions.CommandError(msg)
except Exception as e:
LOG.error(_("Failed to set volume bootable property: %s"), e)
if parsed_args.read_only or parsed_args.read_write:
try:
- volume_client.volumes.update_readonly_flag(
+ if utils.wait_for_status(
+ volume_client.volumes.get,
volume.id,
- parsed_args.read_only)
+ success_status=['available'],
+ error_status=['error'],
+ sleep_time=1
+ ):
+ volume_client.volumes.update_readonly_flag(
+ volume.id,
+ parsed_args.read_only
+ )
+ else:
+ msg = _(
+ "Volume status is not available for setting it"
+ "read only."
+ )
+ raise exceptions.CommandError(msg)
except Exception as e:
LOG.error(_("Failed to set volume read-only access "
"mode flag: %s"), e)
diff --git a/openstackclient/volume/v2/backup_record.py b/openstackclient/volume/v2/backup_record.py
index 64ff4f67..0d3af641 100644
--- a/openstackclient/volume/v2/backup_record.py
+++ b/openstackclient/volume/v2/backup_record.py
@@ -26,9 +26,10 @@ LOG = logging.getLogger(__name__)
class ExportBackupRecord(command.ShowOne):
- _description = _('Export volume backup details. Backup information can be '
- 'imported into a new service instance to be able to '
- 'restore.')
+ _description = _("""Export volume backup details.
+
+Backup information can be imported into a new service instance to be able to
+restore.""")
def get_parser(self, prog_name):
parser = super(ExportBackupRecord, self).get_parser(prog_name)
@@ -54,9 +55,10 @@ class ExportBackupRecord(command.ShowOne):
class ImportBackupRecord(command.ShowOne):
- _description = _('Import volume backup details. Exported backup details '
- 'contain the metadata necessary to restore to a new or '
- 'rebuilt service instance')
+ _description = _("""Import volume backup details.
+
+Exported backup details contain the metadata necessary to restore to a new or
+rebuilt service instance""")
def get_parser(self, prog_name):
parser = super(ImportBackupRecord, self).get_parser(prog_name)
diff --git a/openstackclient/volume/v2/consistency_group.py b/openstackclient/volume/v2/consistency_group.py
index c50a1b5b..77da6f64 100644
--- a/openstackclient/volume/v2/consistency_group.py
+++ b/openstackclient/volume/v2/consistency_group.py
@@ -14,6 +14,7 @@
"""Volume v2 consistency group action implementations"""
+import argparse
import logging
from osc_lib.cli import format_columns
@@ -90,35 +91,51 @@ class CreateConsistencyGroup(command.ShowOne):
"name",
metavar="<name>",
nargs="?",
- help=_("Name of new consistency group (default to None)")
+ help=_("Name of new consistency group (default to None)"),
)
exclusive_group = parser.add_mutually_exclusive_group(required=True)
exclusive_group.add_argument(
"--volume-type",
metavar="<volume-type>",
- help=_("Volume type of this consistency group (name or ID)")
+ help=_("Volume type of this consistency group (name or ID)"),
)
exclusive_group.add_argument(
+ "--source",
+ metavar="<consistency-group>",
+ help=_("Existing consistency group (name or ID)"),
+ )
+ # NOTE(stephenfin): Legacy alias
+ exclusive_group.add_argument(
"--consistency-group-source",
metavar="<consistency-group>",
- help=_("Existing consistency group (name or ID)")
+ dest='source',
+ help=argparse.SUPPRESS,
+ )
+ exclusive_group.add_argument(
+ "--snapshot",
+ metavar="<consistency-group-snapshot>",
+ help=_("Existing consistency group snapshot (name or ID)"),
)
+ # NOTE(stephenfin): Legacy alias
exclusive_group.add_argument(
"--consistency-group-snapshot",
metavar="<consistency-group-snapshot>",
- help=_("Existing consistency group snapshot (name or ID)")
+ dest='snapshot',
+ help=argparse.SUPPRESS,
)
parser.add_argument(
"--description",
metavar="<description>",
- help=_("Description of this consistency group")
+ help=_("Description of this consistency group"),
)
parser.add_argument(
"--availability-zone",
metavar="<availability-zone>",
- help=_("Availability zone for this consistency group "
- "(not available if creating consistency group "
- "from source)"),
+ help=_(
+ "Availability zone for this consistency group "
+ "(not available if creating consistency group "
+ "from source)"
+ ),
)
return parser
@@ -142,21 +159,23 @@ class CreateConsistencyGroup(command.ShowOne):
consistency_group_id = None
consistency_group_snapshot = None
- if parsed_args.consistency_group_source:
+ if parsed_args.source:
consistency_group_id = utils.find_resource(
volume_client.consistencygroups,
- parsed_args.consistency_group_source).id
- elif parsed_args.consistency_group_snapshot:
+ parsed_args.source,
+ ).id
+ elif parsed_args.snapshot:
consistency_group_snapshot = utils.find_resource(
volume_client.cgsnapshots,
- parsed_args.consistency_group_snapshot).id
+ parsed_args.snapshot,
+ ).id
consistency_group = (
volume_client.consistencygroups.create_from_src(
consistency_group_snapshot,
consistency_group_id,
name=parsed_args.name,
- description=parsed_args.description
+ description=parsed_args.description,
)
)
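Editor's note: the option renames above stay backwards compatible by registering each old flag as a hidden alias in the same mutually exclusive group: it writes to the same dest, but its help is suppressed so it no longer appears in --help output. A minimal standalone illustration of that argparse pattern:

    import argparse

    parser = argparse.ArgumentParser()
    group = parser.add_mutually_exclusive_group()
    group.add_argument('--source', metavar='<consistency-group>')
    # Legacy spelling: same dest, hidden from --help output.
    group.add_argument('--consistency-group-source', dest='source',
                       help=argparse.SUPPRESS)

    print(parser.parse_args(['--source', 'cg1']).source)                    # cg1
    print(parser.parse_args(['--consistency-group-source', 'cg1']).source)  # cg1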
diff --git a/openstackclient/volume/v2/volume.py b/openstackclient/volume/v2/volume.py
index 7905e097..a5e5a670 100644
--- a/openstackclient/volume/v2/volume.py
+++ b/openstackclient/volume/v2/volume.py
@@ -257,15 +257,44 @@ class CreateVolume(command.ShowOne):
if parsed_args.bootable or parsed_args.non_bootable:
try:
- volume_client.volumes.set_bootable(
- volume.id, parsed_args.bootable)
+ if utils.wait_for_status(
+ volume_client.volumes.get,
+ volume.id,
+ success_status=['available'],
+ error_status=['error'],
+ sleep_time=1
+ ):
+ volume_client.volumes.set_bootable(
+ volume.id,
+ parsed_args.bootable
+ )
+ else:
+ msg = _(
+ "Volume status is not available for setting boot "
+ "state"
+ )
+ raise exceptions.CommandError(msg)
except Exception as e:
LOG.error(_("Failed to set volume bootable property: %s"), e)
if parsed_args.read_only or parsed_args.read_write:
try:
- volume_client.volumes.update_readonly_flag(
+ if utils.wait_for_status(
+ volume_client.volumes.get,
volume.id,
- parsed_args.read_only)
+ success_status=['available'],
+ error_status=['error'],
+ sleep_time=1
+ ):
+ volume_client.volumes.update_readonly_flag(
+ volume.id,
+ parsed_args.read_only
+ )
+ else:
+ msg = _(
+ "Volume status is not available for setting it"
+ "read only."
+ )
+ raise exceptions.CommandError(msg)
except Exception as e:
LOG.error(_("Failed to set volume read-only access "
"mode flag: %s"), e)
diff --git a/openstackclient/volume/v3/block_storage_cleanup.py b/openstackclient/volume/v3/block_storage_cleanup.py
new file mode 100644
index 00000000..f99b8217
--- /dev/null
+++ b/openstackclient/volume/v3/block_storage_cleanup.py
@@ -0,0 +1,146 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from cinderclient import api_versions
+from osc_lib.command import command
+from osc_lib import exceptions
+
+from openstackclient.i18n import _
+
+
+def _format_cleanup_response(cleaning, unavailable):
+ column_headers = (
+ 'ID',
+ 'Cluster Name',
+ 'Host',
+ 'Binary',
+ 'Status',
+ )
+ combined_data = []
+ for obj in cleaning:
+ details = (obj.id, obj.cluster_name, obj.host, obj.binary, 'Cleaning')
+ combined_data.append(details)
+
+ for obj in unavailable:
+ details = (obj.id, obj.cluster_name, obj.host, obj.binary,
+ 'Unavailable')
+ combined_data.append(details)
+
+ return (column_headers, combined_data)
+
+
+class BlockStorageCleanup(command.Lister):
+ """Do block storage cleanup.
+
+ This command requires ``--os-volume-api-version`` 3.24 or greater.
+ """
+
+ def get_parser(self, prog_name):
+ parser = super().get_parser(prog_name)
+ parser.add_argument(
+ '--cluster',
+ metavar='<cluster>',
+ help=_('Name of block storage cluster in which cleanup needs '
+ 'to be performed (name only)')
+ )
+ parser.add_argument(
+ "--host",
+ metavar="<host>",
+ default=None,
+ help=_("Host where the service resides. (name only)")
+ )
+ parser.add_argument(
+ '--binary',
+ metavar='<binary>',
+ default=None,
+ help=_("Name of the service binary.")
+ )
+ service_up_parser = parser.add_mutually_exclusive_group()
+ service_up_parser.add_argument(
+ '--up',
+ dest='is_up',
+ action='store_true',
+ default=None,
+ help=_(
+ 'Filter by up status. If this is set, services need to be up.'
+ )
+ )
+ service_up_parser.add_argument(
+ '--down',
+ dest='is_up',
+ action='store_false',
+ help=_(
+ 'Filter by down status. If this is set, services need to be '
+ 'down.'
+ )
+ )
+ service_disabled_parser = parser.add_mutually_exclusive_group()
+ service_disabled_parser.add_argument(
+ '--disabled',
+ dest='disabled',
+ action='store_true',
+ default=None,
+ help=_('Filter by disabled status.')
+ )
+ service_disabled_parser.add_argument(
+ '--enabled',
+ dest='disabled',
+ action='store_false',
+ help=_('Filter by enabled status.')
+ )
+ parser.add_argument(
+ '--resource-id',
+ metavar='<resource-id>',
+ default=None,
+ help=_('UUID of a resource to cleanup.')
+ )
+ parser.add_argument(
+ '--resource-type',
+ metavar='<Volume|Snapshot>',
+ choices=('Volume', 'Snapshot'),
+ help=_('Type of resource to cleanup.')
+ )
+ parser.add_argument(
+ '--service-id',
+ type=int,
+ default=None,
+ help=_(
+ 'The service ID field from the DB, not the UUID of the '
+ 'service.'
+ )
+ )
+ return parser
+
+ def take_action(self, parsed_args):
+ volume_client = self.app.client_manager.volume
+
+ if volume_client.api_version < api_versions.APIVersion('3.24'):
+ msg = _(
+ "--os-volume-api-version 3.24 or greater is required to "
+ "support the 'block storage cleanup' command"
+ )
+ raise exceptions.CommandError(msg)
+
+ filters = {
+ 'cluster_name': parsed_args.cluster,
+ 'host': parsed_args.host,
+ 'binary': parsed_args.binary,
+ 'is_up': parsed_args.is_up,
+ 'disabled': parsed_args.disabled,
+ 'resource_id': parsed_args.resource_id,
+ 'resource_type': parsed_args.resource_type,
+ 'service_id': parsed_args.service_id
+ }
+
+ filters = {k: v for k, v in filters.items() if v is not None}
+ cleaning, unavailable = volume_client.workers.clean(**filters)
+ return _format_cleanup_response(cleaning, unavailable)
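Editor's note: take_action builds a dict of every possible filter and then strips the entries that are still None before calling workers.clean(). That is why the no-argument unit test expects clean() to be called with no keyword arguments at all, while explicit False values (for example from --down or --enabled) still get through. The filtering step in isolation:

    filters = {
        'cluster_name': None,
        'host': 'devstack@fakedriver-1',
        'binary': None,
        'is_up': False,   # an explicit False must survive the filtering
    }

    filters = {k: v for k, v in filters.items() if v is not None}
    print(filters)  # {'host': 'devstack@fakedriver-1', 'is_up': False}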
diff --git a/openstackclient/volume/v3/block_storage_log_level.py b/openstackclient/volume/v3/block_storage_log_level.py
new file mode 100644
index 00000000..d5286cdd
--- /dev/null
+++ b/openstackclient/volume/v3/block_storage_log_level.py
@@ -0,0 +1,147 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+"""Block Storage Service action implementations"""
+
+from cinderclient import api_versions
+from osc_lib.command import command
+from osc_lib import exceptions
+from osc_lib import utils
+
+from openstackclient.i18n import _
+
+
+class BlockStorageLogLevelList(command.Lister):
+ """List log levels of block storage service.
+
+ Supported by --os-volume-api-version 3.32 or greater.
+ """
+
+ def get_parser(self, prog_name):
+ parser = super().get_parser(prog_name)
+ parser.add_argument(
+ "--host",
+ metavar="<host>",
+ default="",
+ help=_("List block storage service log level of specified host "
+ "(name only)")
+ )
+ parser.add_argument(
+ "--service",
+ metavar="<service>",
+ default="",
+ choices=(
+ '',
+ '*',
+ 'cinder-api',
+ 'cinder-volume',
+ 'cinder-scheduler',
+ 'cinder-backup'),
+ help=_("List block storage service log level of the specified "
+ "service (name only)")
+ )
+ parser.add_argument(
+ "--log-prefix",
+ metavar="<log-prefix>",
+ default="",
+ help="Prefix for the log, e.g. 'sqlalchemy'"
+ )
+ return parser
+
+ def take_action(self, parsed_args):
+ service_client = self.app.client_manager.volume
+ columns = [
+ "Binary",
+ "Host",
+ "Prefix",
+ "Level",
+ ]
+
+ if service_client.api_version < api_versions.APIVersion('3.32'):
+ msg = _(
+ "--os-volume-api-version 3.32 or greater is required to "
+ "support the 'block storage log level list' command"
+ )
+ raise exceptions.CommandError(msg)
+
+ data = service_client.services.get_log_levels(
+ binary=parsed_args.service,
+ server=parsed_args.host,
+ prefix=parsed_args.log_prefix)
+
+ return (columns,
+ (utils.get_item_properties(
+ s, columns,
+ ) for s in data))
+
+
+class BlockStorageLogLevelSet(command.Command):
+ """Set log level of block storage service
+
+ Supported by --os-volume-api-version 3.32 or greater.
+ """
+
+ def get_parser(self, prog_name):
+ parser = super().get_parser(prog_name)
+ parser.add_argument(
+ "level",
+ metavar="<log-level>",
+ choices=('INFO', 'WARNING', 'ERROR', 'DEBUG'),
+ type=str.upper,
+ help=_("Desired log level.")
+ )
+ parser.add_argument(
+ "--host",
+ metavar="<host>",
+ default="",
+ help=_("Set block storage service log level of specified host "
+ "(name only)")
+ )
+ parser.add_argument(
+ "--service",
+ metavar="<service>",
+ default="",
+ choices=(
+ '',
+ '*',
+ 'cinder-api',
+ 'cinder-volume',
+ 'cinder-scheduler',
+ 'cinder-backup'),
+ help=_("Set block storage service log level of specified service "
+ "(name only)")
+ )
+ parser.add_argument(
+ "--log-prefix",
+ metavar="<log-prefix>",
+ default="",
+ help="Prefix for the log, e.g. 'sqlalchemy'"
+ )
+ return parser
+
+ def take_action(self, parsed_args):
+ service_client = self.app.client_manager.volume
+
+ if service_client.api_version < api_versions.APIVersion('3.32'):
+ msg = _(
+ "--os-volume-api-version 3.32 or greater is required to "
+ "support the 'block storage log level set' command"
+ )
+ raise exceptions.CommandError(msg)
+
+ service_client.services.set_log_levels(
+ level=parsed_args.level,
+ binary=parsed_args.service,
+ server=parsed_args.host,
+ prefix=parsed_args.log_prefix)
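
A note on the ``<log-level>`` argument above: argparse applies ``type`` before
validating ``choices``, so lowercase input is upper-cased and then accepted.
A small standalone sketch (illustrative only, not part of the patch)::

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('level', type=str.upper,
                        choices=('INFO', 'WARNING', 'ERROR', 'DEBUG'))

    print(parser.parse_args(['debug']).level)  # DEBUG
    # parser.parse_args(['trace']) would exit with "invalid choice"
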
diff --git a/openstackclient/volume/v3/block_storage_manage.py b/openstackclient/volume/v3/block_storage_manage.py
new file mode 100644
index 00000000..9015f44d
--- /dev/null
+++ b/openstackclient/volume/v3/block_storage_manage.py
@@ -0,0 +1,258 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+"""Block Storage Volume/Snapshot Management implementations"""
+
+from cinderclient import api_versions
+from osc_lib.command import command
+from osc_lib import exceptions
+from osc_lib import utils
+from oslo_utils import strutils
+
+from openstackclient.i18n import _
+
+
+SORT_MANAGEABLE_KEY_VALUES = ('size', 'reference')
+
+
+class BlockStorageManageVolumes(command.Lister):
+ """List manageable volumes.
+
+ Supported by --os-volume-api-version 3.8 or greater.
+ """
+
+ def get_parser(self, prog_name):
+ parser = super().get_parser(prog_name)
+ host_group = parser.add_mutually_exclusive_group()
+ host_group.add_argument(
+ "host",
+ metavar="<host>",
+ nargs='?',
+ help=_('Cinder host on which to list manageable volumes. '
+ 'Takes the form: host@backend-name#pool')
+ )
+ host_group.add_argument(
+ "--cluster",
+ metavar="<cluster>",
+ help=_('Cinder cluster on which to list manageable volumes. '
+ 'Takes the form: cluster@backend-name#pool. '
+ '(supported by --os-volume-api-version 3.17 or later)')
+ )
+ parser.add_argument(
+ '--detailed',
+ metavar='<detailed>',
+ default=True,
+ help=_('Returns detailed information (Default=True).')
+ )
+ parser.add_argument(
+ '--marker',
+ metavar='<marker>',
+ default=None,
+ help=_('Begin returning volumes that appear later in the volume '
+ 'list than that represented by this reference. This '
+ 'reference should be JSON-like. Default=None.')
+ )
+ parser.add_argument(
+ '--limit',
+ metavar='<limit>',
+ default=None,
+ help=_('Maximum number of volumes to return. Default=None.')
+ )
+ parser.add_argument(
+ '--offset',
+ metavar='<offset>',
+ default=None,
+ help=_('Number of volumes to skip after marker. Default=None.')
+ )
+ parser.add_argument(
+ '--sort',
+ metavar='<key>[:<direction>]',
+ default=None,
+ help=(_('Comma-separated list of sort keys and directions in the '
+ 'form of <key>[:<asc|desc>]. '
+ 'Valid keys: %s. '
+ 'Default=None.') % ', '.join(SORT_MANAGEABLE_KEY_VALUES))
+ )
+ return parser
+
+ def take_action(self, parsed_args):
+ volume_client = self.app.client_manager.volume
+
+ if parsed_args.host is None and parsed_args.cluster is None:
+ msg = _(
+ "Either <host> or '--cluster <cluster>' needs to be provided "
+ "to run the 'block storage volume manageable list' command"
+ )
+ raise exceptions.CommandError(msg)
+
+ if volume_client.api_version < api_versions.APIVersion('3.8'):
+ msg = _(
+ "--os-volume-api-version 3.8 or greater is required to "
+ "support the 'block storage volume manageable list' command"
+ )
+ raise exceptions.CommandError(msg)
+
+ if parsed_args.cluster:
+ if volume_client.api_version < api_versions.APIVersion('3.17'):
+ msg = _(
+ "--os-volume-api-version 3.17 or greater is required to "
+ "support the '--cluster' option"
+ )
+ raise exceptions.CommandError(msg)
+
+ detailed = strutils.bool_from_string(parsed_args.detailed)
+ cluster = getattr(parsed_args, 'cluster', None)
+
+ columns = [
+ 'reference',
+ 'size',
+ 'safe_to_manage',
+ ]
+ if detailed:
+ columns.extend([
+ 'reason_not_safe',
+ 'cinder_id',
+ 'extra_info',
+ ])
+
+ data = volume_client.volumes.list_manageable(
+ host=parsed_args.host,
+ detailed=detailed,
+ marker=parsed_args.marker,
+ limit=parsed_args.limit,
+ offset=parsed_args.offset,
+ sort=parsed_args.sort,
+ cluster=cluster)
+
+ return (columns,
+ (utils.get_item_properties(
+ s, columns,
+ ) for s in data))
+
+
+class BlockStorageManageSnapshots(command.Lister):
+ """List manageable snapshots.
+
+ Supported by --os-volume-api-version 3.8 or greater.
+ """
+
+ def get_parser(self, prog_name):
+ parser = super().get_parser(prog_name)
+ host_group = parser.add_mutually_exclusive_group()
+ host_group.add_argument(
+ "host",
+ metavar="<host>",
+ nargs='?',
+ help=_('Cinder host on which to list manageable snapshots. '
+ 'Takes the form: host@backend-name#pool')
+ )
+ host_group.add_argument(
+ "--cluster",
+ metavar="<cluster>",
+ help=_('Cinder cluster on which to list manageable snapshots. '
+ 'Takes the form: cluster@backend-name#pool. '
+ '(supported by --os-volume-api-version 3.17 or later)')
+ )
+ parser.add_argument(
+ '--detailed',
+ metavar='<detailed>',
+ default=True,
+ help=_('Returns detailed information (Default=True).')
+ )
+ parser.add_argument(
+ '--marker',
+ metavar='<marker>',
+ default=None,
+ help=_('Begin returning snapshots that appear later in the '
+ 'snapshot list than that represented by this reference. '
+ 'This reference should be JSON-like. Default=None.')
+ )
+ parser.add_argument(
+ '--limit',
+ metavar='<limit>',
+ default=None,
+ help=_('Maximum number of snapshots to return. Default=None.')
+ )
+ parser.add_argument(
+ '--offset',
+ metavar='<offset>',
+ default=None,
+ help=_('Number of snapshots to skip after marker. Default=None.')
+ )
+ parser.add_argument(
+ '--sort',
+ metavar='<key>[:<direction>]',
+ default=None,
+ help=(_('Comma-separated list of sort keys and directions in the '
+ 'form of <key>[:<asc|desc>]. '
+ 'Valid keys: %s. '
+ 'Default=None.') % ', '.join(SORT_MANAGEABLE_KEY_VALUES))
+ )
+ return parser
+
+ def take_action(self, parsed_args):
+ volume_client = self.app.client_manager.volume
+
+ if parsed_args.host is None and parsed_args.cluster is None:
+ msg = _(
+ "Either <host> or '--cluster <cluster>' needs to be provided "
+ "to run the 'block storage volume snapshot manageable list' "
+ "command"
+ )
+ raise exceptions.CommandError(msg)
+
+ if volume_client.api_version < api_versions.APIVersion('3.8'):
+ msg = _(
+ "--os-volume-api-version 3.8 or greater is required to "
+ "support the 'block storage volume snapshot manageable list' "
+ "command"
+ )
+ raise exceptions.CommandError(msg)
+
+ if parsed_args.cluster:
+ if volume_client.api_version < api_versions.APIVersion('3.17'):
+ msg = _(
+ "--os-volume-api-version 3.17 or greater is required to "
+ "support the '--cluster' option"
+ )
+ raise exceptions.CommandError(msg)
+
+ detailed = strutils.bool_from_string(parsed_args.detailed)
+ cluster = getattr(parsed_args, 'cluster', None)
+
+ columns = [
+ 'reference',
+ 'size',
+ 'safe_to_manage',
+ 'source_reference',
+ ]
+ if detailed:
+ columns.extend([
+ 'reason_not_safe',
+ 'cinder_id',
+ 'extra_info',
+ ])
+
+ data = volume_client.volume_snapshots.list_manageable(
+ host=parsed_args.host,
+ detailed=detailed,
+ marker=parsed_args.marker,
+ limit=parsed_args.limit,
+ offset=parsed_args.offset,
+ sort=parsed_args.sort,
+ cluster=cluster)
+
+ return (columns,
+ (utils.get_item_properties(
+ s, columns,
+ ) for s in data))
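
Both manageable-list commands accept ``--detailed`` as a free-form string and
interpret it with oslo.utils, as shown in ``take_action()`` above. A small
sketch of that behaviour (illustrative only, not part of the patch)::

    from oslo_utils import strutils

    print(strutils.bool_from_string('true'))  # True
    print(strutils.bool_from_string('no'))    # False
    print(strutils.bool_from_string(True))    # True (the default value above)
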
diff --git a/openstackclient/volume/v3/volume.py b/openstackclient/volume/v3/volume.py
new file mode 100644
index 00000000..4b159688
--- /dev/null
+++ b/openstackclient/volume/v3/volume.py
@@ -0,0 +1,114 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+"""Volume V3 Volume action implementations"""
+
+import logging
+
+from cinderclient import api_versions
+from osc_lib.cli import format_columns
+from osc_lib.command import command
+from osc_lib import exceptions
+from osc_lib import utils
+
+from openstackclient.i18n import _
+
+
+LOG = logging.getLogger(__name__)
+
+
+class VolumeSummary(command.ShowOne):
+ _description = _("Show a summary of all volumes in this deployment.")
+
+ def get_parser(self, prog_name):
+ parser = super().get_parser(prog_name)
+ parser.add_argument(
+ '--all-projects',
+ action='store_true',
+ default=False,
+ help=_('Include all projects (admin only)'),
+ )
+ return parser
+
+ def take_action(self, parsed_args):
+
+ volume_client = self.app.client_manager.volume
+
+ if volume_client.api_version < api_versions.APIVersion('3.12'):
+ msg = _(
+ "--os-volume-api-version 3.12 or greater is required to "
+ "support the 'volume summary' command"
+ )
+ raise exceptions.CommandError(msg)
+
+ columns = [
+ 'total_count',
+ 'total_size',
+ ]
+ column_headers = [
+ 'Total Count',
+ 'Total Size',
+ ]
+ if volume_client.api_version.matches('3.36'):
+ columns.append('metadata')
+ column_headers.append('Metadata')
+
+ # set value of 'all_tenants' when using project option
+ all_projects = parsed_args.all_projects
+
+ vol_summary = volume_client.volumes.summary(
+ all_tenants=all_projects,
+ )
+
+ return (
+ column_headers,
+ utils.get_dict_properties(
+ vol_summary['volume-summary'],
+ columns,
+ formatters={'metadata': format_columns.DictColumn},
+ ),
+ )
+
+
+class VolumeRevertToSnapshot(command.Command):
+ _description = _("Revert a volume to a snapshot.")
+
+ def get_parser(self, prog_name):
+ parser = super().get_parser(prog_name)
+ parser.add_argument(
+ 'snapshot',
+ metavar="<snapshot>",
+ help=_('Name or ID of the snapshot to restore. The snapshot must '
+ 'be the most recent one known to cinder.'),
+ )
+ return parser
+
+ def take_action(self, parsed_args):
+
+ volume_client = self.app.client_manager.volume
+
+ if volume_client.api_version < api_versions.APIVersion('3.40'):
+ msg = _(
+ "--os-volume-api-version 3.40 or greater is required to "
+ "support the 'volume revert snapshot' command"
+ )
+ raise exceptions.CommandError(msg)
+
+ snapshot = utils.find_resource(
+ volume_client.volume_snapshots, parsed_args.snapshot)
+ volume = utils.find_resource(
+ volume_client.volumes, snapshot.volume_id)
+
+ volume_client.volumes.revert_to_snapshot(
+ volume=volume, snapshot=snapshot)
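
The ``volume summary`` command above flattens the ``volume-summary`` dict
returned by cinder into a single row via osc_lib. A standalone sketch with
made-up data (illustrative only, not part of the patch)::

    from osc_lib.cli import format_columns
    from osc_lib import utils

    summary = {'total_count': 2, 'total_size': 20, 'metadata': {'k': 'v'}}
    row = utils.get_dict_properties(
        summary,
        ['total_count', 'total_size', 'metadata'],
        formatters={'metadata': format_columns.DictColumn},
    )
    # a 3-tuple; the metadata value is wrapped in a DictColumn for display
    print(row)
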
diff --git a/openstackclient/volume/v3/volume_group.py b/openstackclient/volume/v3/volume_group.py
index db4e9a94..242ffcd4 100644
--- a/openstackclient/volume/v3/volume_group.py
+++ b/openstackclient/volume/v3/volume_group.py
@@ -10,7 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import logging
+import argparse
from cinderclient import api_versions
from osc_lib.command import command
@@ -19,8 +19,6 @@ from osc_lib import utils
from openstackclient.i18n import _
-LOG = logging.getLogger(__name__)
-
def _format_group(group):
columns = (
@@ -82,17 +80,72 @@ class CreateVolumeGroup(command.ShowOne):
def get_parser(self, prog_name):
parser = super().get_parser(prog_name)
- parser.add_argument(
- 'volume_group_type',
+ # This is a bit complicated. We accept two patterns: a legacy pattern
+ #
+ # volume group create \
+ # <volume-group-type> <volume-type> [<volume-type>...]
+ #
+ # and the modern approach
+ #
+ # volume group create \
+ # --volume-group-type <volume-group-type>
+ # --volume-type <volume-type>
+ # [--volume-type <volume-type> ...]
+ #
+ # Because argparse doesn't properly support nested exclusive groups, we
+ # use two groups: one to ensure users don't pass <volume-group-type> as
+ # both a positional and an option argument and another to ensure users
+ # don't pass <volume-type> this way. It's a bit weird but it catches
+ # everything we care about.
+ source_parser = parser.add_mutually_exclusive_group()
+ # we use a different name purely so we can issue a deprecation warning
+ source_parser.add_argument(
+ 'volume_group_type_legacy',
metavar='<volume_group_type>',
- help=_('Name or ID of volume group type to use.'),
+ nargs='?',
+ help=argparse.SUPPRESS,
)
- parser.add_argument(
- 'volume_types',
+ volume_types_parser = parser.add_mutually_exclusive_group()
+ # We need to use a separate dest
+ # https://github.com/python/cpython/issues/101990
+ volume_types_parser.add_argument(
+ 'volume_types_legacy',
+ metavar='<volume_type>',
+ nargs='*',
+ default=[],
+ help=argparse.SUPPRESS,
+ )
+ source_parser.add_argument(
+ '--volume-group-type',
+ metavar='<volume_group_type>',
+ help=_('Volume group type to use (name or ID)'),
+ )
+ volume_types_parser.add_argument(
+ '--volume-type',
metavar='<volume_type>',
- nargs='+',
+ dest='volume_types',
+ action='append',
default=[],
- help=_('Name or ID of volume type(s) to use.'),
+ help=_(
+ 'Volume type(s) to use (name or ID) '
+ '(required with --volume-group-type)'
+ ),
+ )
+ source_parser.add_argument(
+ '--source-group',
+ metavar='<source-group>',
+ help=_(
+ 'Existing volume group to use (name or ID) '
+ '(supported by --os-volume-api-version 3.14 or later)'
+ ),
+ )
+ source_parser.add_argument(
+ '--group-snapshot',
+ metavar='<group-snapshot>',
+ help=_(
+ 'Existing group snapshot to use (name or ID) '
+ '(supported by --os-volume-api-version 3.14 or later)'
+ ),
)
parser.add_argument(
'--name',
@@ -107,44 +160,105 @@ class CreateVolumeGroup(command.ShowOne):
parser.add_argument(
'--availability-zone',
metavar='<availability-zone>',
- help=_('Availability zone for volume group.'),
+ help=_(
+ 'Availability zone for volume group. '
+ '(not available if creating group from source)'
+ ),
)
return parser
def take_action(self, parsed_args):
volume_client = self.app.client_manager.volume
- if volume_client.api_version < api_versions.APIVersion('3.13'):
+ if parsed_args.volume_group_type_legacy:
msg = _(
- "--os-volume-api-version 3.13 or greater is required to "
- "support the 'volume group create' command"
+ "Passing volume group type and volume types as positional "
+ "arguments is deprecated. Use the --volume-group-type and "
+ "--volume-type option arguments instead."
)
- raise exceptions.CommandError(msg)
+ self.log.warning(msg)
- volume_group_type = utils.find_resource(
- volume_client.group_types,
- parsed_args.volume_group_type,
- )
+ volume_group_type = parsed_args.volume_group_type or \
+ parsed_args.volume_group_type_legacy
+ volume_types = parsed_args.volume_types[:]
+ volume_types.extend(parsed_args.volume_types_legacy)
- volume_types = []
- for volume_type in parsed_args.volume_types:
- volume_types.append(
- utils.find_resource(
- volume_client.volume_types,
- volume_type,
+ if volume_group_type:
+ if volume_client.api_version < api_versions.APIVersion('3.13'):
+ msg = _(
+ "--os-volume-api-version 3.13 or greater is required to "
+ "support the 'volume group create' command"
)
- )
+ raise exceptions.CommandError(msg)
+ if not volume_types:
+ msg = _(
+ "--volume-types is a required argument when creating a "
+ "group from group type."
+ )
+ raise exceptions.CommandError(msg)
- group = volume_client.groups.create(
- volume_group_type.id,
- ','.join(x.id for x in volume_types),
- parsed_args.name,
- parsed_args.description,
- availability_zone=parsed_args.availability_zone)
+ volume_group_type_id = utils.find_resource(
+ volume_client.group_types,
+ volume_group_type,
+ ).id
+ volume_types_ids = []
+ for volume_type in volume_types:
+ volume_types_ids.append(
+ utils.find_resource(
+ volume_client.volume_types,
+ volume_type,
+ ).id
+ )
- group = volume_client.groups.get(group.id)
+ group = volume_client.groups.create(
+ volume_group_type_id,
+ ','.join(volume_types_ids),
+ parsed_args.name,
+ parsed_args.description,
+ availability_zone=parsed_args.availability_zone,
+ )
- return _format_group(group)
+ group = volume_client.groups.get(group.id)
+ return _format_group(group)
+
+ else:
+ if volume_client.api_version < api_versions.APIVersion('3.14'):
+ msg = _(
+ "--os-volume-api-version 3.14 or greater is required to "
+ "support the 'volume group create "
+ "[--source-group|--group-snapshot]' command"
+ )
+ raise exceptions.CommandError(msg)
+ if (parsed_args.source_group is None and
+ parsed_args.group_snapshot is None):
+ msg = _(
+ "Either --source-group <source_group> or "
+ "'--group-snapshot <group_snapshot>' needs to be "
+ "provided to run the 'volume group create "
+ "[--source-group|--group-snapshot]' command"
+ )
+ raise exceptions.CommandError(msg)
+ if parsed_args.availability_zone:
+ msg = _("'--availability-zone' option will not work "
+ "if creating group from source.")
+ self.log.warning(msg)
+
+ source_group = None
+ if parsed_args.source_group:
+ source_group = utils.find_resource(volume_client.groups,
+ parsed_args.source_group)
+ group_snapshot = None
+ if parsed_args.group_snapshot:
+ group_snapshot = utils.find_resource(
+ volume_client.group_snapshots,
+ parsed_args.group_snapshot)
+ group = volume_client.groups.create_from_src(
+ group_snapshot.id if group_snapshot else None,
+ source_group.id if source_group else None,
+ parsed_args.name,
+ parsed_args.description)
+ group = volume_client.groups.get(group.id)
+ return _format_group(group)
class DeleteVolumeGroup(command.Command):
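
The parser trick described in the comment inside ``CreateVolumeGroup`` (an
optional positional and an option flag sharing one mutually exclusive group)
can be reproduced in isolation. A minimal sketch (illustrative only, not part
of the patch)::

    import argparse

    parser = argparse.ArgumentParser()
    group = parser.add_mutually_exclusive_group()
    # nargs='?' makes the positional optional, which argparse requires for
    # membership in a mutually exclusive group
    group.add_argument('group_type_legacy', nargs='?', help=argparse.SUPPRESS)
    group.add_argument('--volume-group-type')

    print(parser.parse_args(['--volume-group-type', 'gt1']))
    # parser.parse_args(['gt1', '--volume-group-type', 'gt2']) exits with a
    # "not allowed with argument" error because the two forms are exclusive
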
diff --git a/releasenotes/notes/add-auto-approve-cleanup-a2d225faa42dfdcb.yaml b/releasenotes/notes/add-auto-approve-cleanup-a2d225faa42dfdcb.yaml
new file mode 100644
index 00000000..945320b3
--- /dev/null
+++ b/releasenotes/notes/add-auto-approve-cleanup-a2d225faa42dfdcb.yaml
@@ -0,0 +1,6 @@
+---
+features:
+ - |
+ An ``--auto-approve`` option has been added to the
+ ``project cleanup`` command. This allows the interactive
+ confirmation of resource deletion to be skipped.
diff --git a/releasenotes/notes/add-block-storage-manage-commands-6ebf029bd7a67bb3.yaml b/releasenotes/notes/add-block-storage-manage-commands-6ebf029bd7a67bb3.yaml
new file mode 100644
index 00000000..7b40a341
--- /dev/null
+++ b/releasenotes/notes/add-block-storage-manage-commands-6ebf029bd7a67bb3.yaml
@@ -0,0 +1,7 @@
+---
+features:
+ - |
+ Added ``block storage volume manageable list`` and
+ ``block storage snapshot manageable list`` commands that
+ allow operators to list the volumes and snapshots on a
+ particular host or cluster that are available to be managed by Cinder.
diff --git a/releasenotes/notes/add-create-group-from-src-options-6fcb0c87f617ca91.yaml b/releasenotes/notes/add-create-group-from-src-options-6fcb0c87f617ca91.yaml
new file mode 100644
index 00000000..9a4f1cb3
--- /dev/null
+++ b/releasenotes/notes/add-create-group-from-src-options-6fcb0c87f617ca91.yaml
@@ -0,0 +1,6 @@
+---
+features:
+ - |
+ Added ``--source-group`` and ``--group-snapshot`` options to the
+ ``volume group create`` command to allow creating a group from
+ a source group or a group snapshot.
diff --git a/releasenotes/notes/add-port-ranges-in-port-forwarding-command-8c6ee05cf625578a.yaml b/releasenotes/notes/add-port-ranges-in-port-forwarding-command-8c6ee05cf625578a.yaml
new file mode 100644
index 00000000..80e4445e
--- /dev/null
+++ b/releasenotes/notes/add-port-ranges-in-port-forwarding-command-8c6ee05cf625578a.yaml
@@ -0,0 +1,4 @@
+---
+features:
+ - |
+ Add support for port ranges to the ``floating ip port forwarding`` commands.
diff --git a/releasenotes/notes/add-vol-service-get-set-log-commands-f9420e5061d994b5.yaml b/releasenotes/notes/add-vol-service-get-set-log-commands-f9420e5061d994b5.yaml
new file mode 100644
index 00000000..ccaf69c1
--- /dev/null
+++ b/releasenotes/notes/add-vol-service-get-set-log-commands-f9420e5061d994b5.yaml
@@ -0,0 +1,6 @@
+---
+features:
+ - |
+ Added ``block storage log level list`` and ``block storage log level set``
+ commands that allow operators to list and set log levels for cinder
+ services.
diff --git a/releasenotes/notes/add-volume-revert-command-1c8f695420acbe7e.yaml b/releasenotes/notes/add-volume-revert-command-1c8f695420acbe7e.yaml
new file mode 100644
index 00000000..2832b888
--- /dev/null
+++ b/releasenotes/notes/add-volume-revert-command-1c8f695420acbe7e.yaml
@@ -0,0 +1,5 @@
+---
+features:
+ - |
+ Added ``volume revert`` command that reverts
+ a volume to the given snapshot, which must be its most recent one.
diff --git a/releasenotes/notes/add-volume-summary-command-b2175b48af3ccab1.yaml b/releasenotes/notes/add-volume-summary-command-b2175b48af3ccab1.yaml
new file mode 100644
index 00000000..1c5cdf18
--- /dev/null
+++ b/releasenotes/notes/add-volume-summary-command-b2175b48af3ccab1.yaml
@@ -0,0 +1,5 @@
+---
+features:
+ - |
+ Added ``volume summary`` command to show the total size,
+ total count and metadata of volumes.
diff --git a/releasenotes/notes/add-workers-cleanup-command-720573c0f642efe9.yaml b/releasenotes/notes/add-workers-cleanup-command-720573c0f642efe9.yaml
new file mode 100644
index 00000000..7406cd62
--- /dev/null
+++ b/releasenotes/notes/add-workers-cleanup-command-720573c0f642efe9.yaml
@@ -0,0 +1,6 @@
+---
+features:
+ - |
+ Added ``block storage cleanup`` command that allows cleanup
+ of resources (volumes and snapshots) by services in other nodes
+ in a cluster in Active-Active deployments.
diff --git a/releasenotes/notes/consistency-group-create-opts-aliases-e1c2f1498e9b1d3d.yaml b/releasenotes/notes/consistency-group-create-opts-aliases-e1c2f1498e9b1d3d.yaml
new file mode 100644
index 00000000..191f020f
--- /dev/null
+++ b/releasenotes/notes/consistency-group-create-opts-aliases-e1c2f1498e9b1d3d.yaml
@@ -0,0 +1,7 @@
+---
+upgrade:
+ - |
+ The ``--consistency-group-source`` and ``--consistency-group-snapshot``
+ options for the ``consistency group create`` command have been renamed to
+ ``--source`` and ``--snapshot``, respectively. Aliases are provided for the
+ older variants.
diff --git a/releasenotes/notes/deprecate-volume-group-create-positional-arguments-89f6b886c0f1f2b5.yaml b/releasenotes/notes/deprecate-volume-group-create-positional-arguments-89f6b886c0f1f2b5.yaml
new file mode 100644
index 00000000..ee3a6843
--- /dev/null
+++ b/releasenotes/notes/deprecate-volume-group-create-positional-arguments-89f6b886c0f1f2b5.yaml
@@ -0,0 +1,10 @@
+---
+deprecations:
+ - |
+ The ``<volume-group-type>`` and ``<volume-type> [<volume-type>...]``
+ positional arguments for the ``volume group create`` command have been
+ deprecated in favour of option arguments. For example::
+
+ openstack volume group create \
+ --volume-group-type <volume-group-type> \
+ --volume-type <volume-type> [--volume-type <volume-type> ...]
diff --git a/releasenotes/notes/migrate-host-list-show-to-sdk-9b80cd9b4196ab01.yaml b/releasenotes/notes/migrate-host-list-show-to-sdk-9b80cd9b4196ab01.yaml
new file mode 100644
index 00000000..085670c9
--- /dev/null
+++ b/releasenotes/notes/migrate-host-list-show-to-sdk-9b80cd9b4196ab01.yaml
@@ -0,0 +1,4 @@
+---
+features:
+ - |
+ The ``host list`` and ``host show`` commands have been migrated to SDK.
diff --git a/releasenotes/notes/migrate-server-volume-list-update-to-sdk-95b1d3063e46f813.yaml b/releasenotes/notes/migrate-server-volume-list-update-to-sdk-95b1d3063e46f813.yaml
new file mode 100644
index 00000000..6194fb1b
--- /dev/null
+++ b/releasenotes/notes/migrate-server-volume-list-update-to-sdk-95b1d3063e46f813.yaml
@@ -0,0 +1,3 @@
+features:
+ - |
+ Switch the ``server volume list`` and ``server volume update`` commands from novaclient to SDK.
diff --git a/releasenotes/notes/network-qos-rule-type-filters-47f4911a02011501.yaml b/releasenotes/notes/network-qos-rule-type-filters-47f4911a02011501.yaml
new file mode 100644
index 00000000..d0503f59
--- /dev/null
+++ b/releasenotes/notes/network-qos-rule-type-filters-47f4911a02011501.yaml
@@ -0,0 +1,8 @@
+---
+features:
+ - |
+ Added two new filter flags to ``openstack network qos rule type list``:
+ ``--all-supported``, to return any QoS rule type supported by at least
+ one loaded driver; ``--all-rules``, to return all QoS rule types
+ supported by the current version of Neutron server, regardless of the
+ loaded drivers.
diff --git a/releasenotes/notes/rename-server-volume-update-to-server-volume-set-833f1730a9bf6169.yaml b/releasenotes/notes/rename-server-volume-update-to-server-volume-set-833f1730a9bf6169.yaml
new file mode 100644
index 00000000..c1e9251b
--- /dev/null
+++ b/releasenotes/notes/rename-server-volume-update-to-server-volume-set-833f1730a9bf6169.yaml
@@ -0,0 +1,6 @@
+---
+upgrade:
+ - |
+ The ``server volume update`` command has been renamed to ``server volume
+ set`` to better match other commands in OSC. An alias is provided for
+ backwards compatibility.
diff --git a/releasenotes/notes/switch-server-migration-show-to-sdk-4adb88a0f1f03f3b.yaml b/releasenotes/notes/switch-server-migration-show-to-sdk-4adb88a0f1f03f3b.yaml
new file mode 100644
index 00000000..ec47cd13
--- /dev/null
+++ b/releasenotes/notes/switch-server-migration-show-to-sdk-4adb88a0f1f03f3b.yaml
@@ -0,0 +1,3 @@
+---
+features:
+ - Finish switching the ``server migration`` commands to the OpenStack SDK
diff --git a/releasenotes/source/2023.1.rst b/releasenotes/source/2023.1.rst
new file mode 100644
index 00000000..d1238479
--- /dev/null
+++ b/releasenotes/source/2023.1.rst
@@ -0,0 +1,6 @@
+===========================
+2023.1 Series Release Notes
+===========================
+
+.. release-notes::
+ :branch: stable/2023.1
diff --git a/releasenotes/source/index.rst b/releasenotes/source/index.rst
index 69562dbd..de09e52e 100644
--- a/releasenotes/source/index.rst
+++ b/releasenotes/source/index.rst
@@ -6,6 +6,7 @@ OpenStackClient Release Notes
:maxdepth: 1
unreleased
+ 2023.1
zed
yoga
xena
diff --git a/setup.cfg b/setup.cfg
index 42ce970b..aee5e99b 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -159,6 +159,7 @@ openstack.compute.v2 =
server_migration_show = openstackclient.compute.v2.server_migration:ShowMigration
server_volume_list = openstackclient.compute.v2.server_volume:ListServerVolume
+ server_volume_set = openstackclient.compute.v2.server_volume:SetServerVolume
server_volume_update = openstackclient.compute.v2.server_volume:UpdateServerVolume
usage_list = openstackclient.compute.v2.usage:ListUsage
@@ -519,6 +520,14 @@ openstack.network.v2 =
network_service_provider_list = openstackclient.network.v2.network_service_provider:ListNetworkServiceProvider
+ network_subport_list = openstackclient.network.v2.network_trunk:ListNetworkSubport
+ network_trunk_create = openstackclient.network.v2.network_trunk:CreateNetworkTrunk
+ network_trunk_delete = openstackclient.network.v2.network_trunk:DeleteNetworkTrunk
+ network_trunk_list = openstackclient.network.v2.network_trunk:ListNetworkTrunk
+ network_trunk_set = openstackclient.network.v2.network_trunk:SetNetworkTrunk
+ network_trunk_show = openstackclient.network.v2.network_trunk:ShowNetworkTrunk
+ network_trunk_unset = openstackclient.network.v2.network_trunk:UnsetNetworkTrunk
+
port_create = openstackclient.network.v2.port:CreatePort
port_delete = openstackclient.network.v2.port:DeletePort
port_list = openstackclient.network.v2.port:ListPort
@@ -811,3 +820,11 @@ openstack.volume.v3 =
volume_transfer_request_delete = openstackclient.volume.v2.volume_transfer_request:DeleteTransferRequest
volume_transfer_request_list = openstackclient.volume.v2.volume_transfer_request:ListTransferRequest
volume_transfer_request_show = openstackclient.volume.v2.volume_transfer_request:ShowTransferRequest
+
+ volume_summary = openstackclient.volume.v3.volume:VolumeSummary
+ volume_revert = openstackclient.volume.v3.volume:VolumeRevertToSnapshot
+ block_storage_log_level_list = openstackclient.volume.v3.block_storage_log_level:BlockStorageLogLevelList
+ block_storage_log_level_set = openstackclient.volume.v3.block_storage_log_level:BlockStorageLogLevelSet
+ block_storage_cleanup = openstackclient.volume.v3.block_storage_cleanup:BlockStorageCleanup
+ block_storage_volume_manageable_list = openstackclient.volume.v3.block_storage_manage:BlockStorageManageVolumes
+ block_storage_snapshot_manageable_list = openstackclient.volume.v3.block_storage_manage:BlockStorageManageSnapshots
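
The new setup.cfg entries register each command under the
``openstack.volume.v3`` (and related) entry-point groups, which is how
cliff/OSC discovers them at runtime. A lookup sketch (illustrative only;
assumes Python 3.10+ and an installed python-openstackclient)::

    from importlib.metadata import entry_points

    eps = entry_points(group='openstack.volume.v3')
    ep = next((e for e in eps if e.name == 'volume_summary'), None)
    # prints "openstackclient.volume.v3.volume:VolumeSummary" if installed
    print(ep.value if ep else 'not installed')
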
diff --git a/tox.ini b/tox.ini
index 5f02e7c2..3de7dd38 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,7 +1,7 @@
[tox]
minversion = 3.18.0
-envlist = py38,pep8
-skipsdist = True
+envlist = py3,pep8
+#skipsdist = True
# Automatic envs (pyXX) will only use the python version appropriate to that
# env and ignore basepython inherited from [testenv] if we set
# ignore_basepython_conflict.
@@ -10,19 +10,21 @@ ignore_basepython_conflict = True
[testenv]
usedevelop = True
basepython = python3
-setenv = OS_STDOUT_CAPTURE=1
- OS_STDERR_CAPTURE=1
- OS_TEST_TIMEOUT=60
+setenv =
+ OS_STDOUT_CAPTURE=1
+ OS_STDERR_CAPTURE=1
+ OS_TEST_TIMEOUT=60
deps =
- -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master}
- -r{toxinidir}/test-requirements.txt
- -r{toxinidir}/requirements.txt
+ -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master}
+ -r{toxinidir}/test-requirements.txt
+ -r{toxinidir}/requirements.txt
commands = stestr run {posargs}
allowlist_externals = stestr
[testenv:fast8]
# Use same environment directory as pep8 env to save space and install time
-setenv = VIRTUAL_ENV={envdir}
+setenv =
+ VIRTUAL_ENV={envdir}
envdir = {toxworkdir}/pep8
commands =
{toxinidir}/tools/fast8.sh
@@ -74,14 +76,18 @@ commands =
allowlist_externals = stestr
[testenv:functional]
-setenv = OS_TEST_PATH=./openstackclient/tests/functional
-passenv = OS_*
+setenv =
+ OS_TEST_PATH=./openstackclient/tests/functional
+passenv =
+ OS_*
commands =
stestr run {posargs}
[testenv:functional-tips]
-setenv = OS_TEST_PATH=./openstackclient/tests/functional
-passenv = OS_*
+setenv =
+ OS_TEST_PATH=./openstackclient/tests/functional
+passenv =
+ OS_*
commands =
python -m pip install -q -U -e "git+file://{toxinidir}/../cliff#egg=cliff"
python -m pip install -q -U -e "git+file://{toxinidir}/../keystoneauth#egg=keystoneauth1"
@@ -108,7 +114,8 @@ commands =
coverage xml -o cover/coverage.xml
[testenv:debug]
-passenv = OS_*
+passenv =
+ OS_*
commands =
oslo_debug_helper -t openstackclient/tests {posargs}