-rw-r--r--  .coveragerc  1
-rw-r--r--  .zuul.yaml  1
-rw-r--r--  doc/requirements.txt  1
-rw-r--r--  doc/source/cli/command-objects/block-storage-cleanup.rst  8
-rw-r--r--  doc/source/cli/command-objects/block-storage-log-level.rst  8
-rw-r--r--  doc/source/cli/command-objects/block-storage-manage.rst  11
-rw-r--r--  doc/source/cli/command-objects/network-trunk.rst  16
-rw-r--r--  doc/source/cli/command-objects/volume.rst  8
-rw-r--r--  doc/source/cli/data/cinder.csv  16
-rw-r--r--  doc/source/cli/data/glance.csv  9
-rw-r--r--  doc/source/cli/plugin-commands/cyborg.rst  4
-rw-r--r--  doc/source/cli/plugin-commands/index.rst  1
-rw-r--r--  doc/source/contributor/humaninterfaceguide.rst  4
-rw-r--r--  doc/source/contributor/plugins.rst  1
-rw-r--r--  openstackclient/compute/v2/hypervisor_stats.py  40
-rw-r--r--  openstackclient/compute/v2/server.py  111
-rw-r--r--  openstackclient/compute/v2/server_migration.py  48
-rw-r--r--  openstackclient/image/v2/image.py  66
-rw-r--r--  openstackclient/image/v2/metadef_namespaces.py  247
-rw-r--r--  openstackclient/network/v2/network_agent.py  7
-rw-r--r--  openstackclient/network/v2/network_qos_rule_type.py  25
-rw-r--r--  openstackclient/network/v2/network_trunk.py  402
-rw-r--r--  openstackclient/tests/functional/compute/v2/test_hypervisor.py  52
-rw-r--r--  openstackclient/tests/functional/compute/v2/test_server.py  90
-rw-r--r--  openstackclient/tests/functional/network/v2/test_network_qos_rule_type.py  29
-rw-r--r--  openstackclient/tests/functional/network/v2/test_network_trunk.py  149
-rw-r--r--  openstackclient/tests/unit/compute/v2/fakes.py  70
-rw-r--r--  openstackclient/tests/unit/compute/v2/test_hypervisor_stats.py  51
-rw-r--r--  openstackclient/tests/unit/compute/v2/test_server.py  60
-rw-r--r--  openstackclient/tests/unit/compute/v2/test_server_migration.py  120
-rw-r--r--  openstackclient/tests/unit/image/v2/fakes.py  9
-rw-r--r--  openstackclient/tests/unit/image/v2/test_image.py  25
-rw-r--r--  openstackclient/tests/unit/image/v2/test_metadef_namespaces.py  150
-rw-r--r--  openstackclient/tests/unit/network/v2/fakes.py  69
-rw-r--r--  openstackclient/tests/unit/network/v2/test_network_qos_rule_type.py  34
-rw-r--r--  openstackclient/tests/unit/network/v2/test_network_trunk.py  851
-rw-r--r--  openstackclient/tests/unit/volume/v3/fakes.py  89
-rw-r--r--  openstackclient/tests/unit/volume/v3/test_block_storage_cleanup.py  178
-rw-r--r--  openstackclient/tests/unit/volume/v3/test_block_storage_log_level.py  233
-rw-r--r--  openstackclient/tests/unit/volume/v3/test_block_storage_manage.py  411
-rw-r--r--  openstackclient/tests/unit/volume/v3/test_volume.py  179
-rw-r--r--  openstackclient/tests/unit/volume/v3/test_volume_group.py  131
-rw-r--r--  openstackclient/volume/v3/block_storage_cleanup.py  146
-rw-r--r--  openstackclient/volume/v3/block_storage_log_level.py  147
-rw-r--r--  openstackclient/volume/v3/block_storage_manage.py  258
-rw-r--r--  openstackclient/volume/v3/volume.py  114
-rw-r--r--  openstackclient/volume/v3/volume_group.py  117
-rw-r--r--  releasenotes/notes/add-baremetal-agent-type-7c46365e8d457ac8.yaml  5
-rw-r--r--  releasenotes/notes/add-block-storage-manage-commands-6ebf029bd7a67bb3.yaml  7
-rw-r--r--  releasenotes/notes/add-create-group-from-src-options-6fcb0c87f617ca91.yaml  6
-rw-r--r--  releasenotes/notes/add-vol-service-get-set-log-commands-f9420e5061d994b5.yaml  6
-rw-r--r--  releasenotes/notes/add-volume-revert-command-1c8f695420acbe7e.yaml  5
-rw-r--r--  releasenotes/notes/add-volume-summary-command-b2175b48af3ccab1.yaml  5
-rw-r--r--  releasenotes/notes/add-workers-cleanup-command-720573c0f642efe9.yaml  6
-rw-r--r--  releasenotes/notes/image-metadef-namespace-b940206bece64f97.yaml  10
-rw-r--r--  releasenotes/notes/network-qos-rule-type-filters-47f4911a02011501.yaml  8
-rw-r--r--  releasenotes/notes/switch-hypervisor-to-sdk-f6495f070b034718.yaml  3
-rw-r--r--  releasenotes/notes/switch-server-migration-to-sdk-4e4530f787f90fd2.yaml  5
-rw-r--r--  releasenotes/notes/switch-server-show-to-sdk-44a614aebf2c6da6.yaml  7
-rw-r--r--  requirements.txt  2
-rw-r--r--  setup.cfg  21
-rw-r--r--  tox.ini  35
62 files changed, 4622 insertions, 306 deletions
diff --git a/.coveragerc b/.coveragerc
index 3685187b..8dc03265 100644
--- a/.coveragerc
+++ b/.coveragerc
@@ -1,6 +1,7 @@
[run]
branch = True
source = openstackclient
+omit = openstackclient/tests/*
[report]
ignore_errors = True
diff --git a/.zuul.yaml b/.zuul.yaml
index 95ab34ab..65dd597f 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -102,6 +102,7 @@
neutron-tag-ports-during-bulk-creation: true
neutron-conntrack-helper: true
neutron-ndp-proxy: true
+ q-trunk: true
devstack_localrc:
Q_AGENT: openvswitch
Q_ML2_TENANT_NETWORK_TYPE: vxlan
diff --git a/doc/requirements.txt b/doc/requirements.txt
index 8b4202be..93e4f046 100644
--- a/doc/requirements.txt
+++ b/doc/requirements.txt
@@ -11,6 +11,7 @@ aodhclient>=0.9.0 # Apache-2.0
gnocchiclient>=3.3.1 # Apache-2.0
osc-placement>=1.7.0 # Apache-2.0
python-barbicanclient>=4.5.2 # Apache-2.0
+python-cyborgclient>=1.2.1 # Apache-2.0
python-designateclient>=2.7.0 # Apache-2.0
python-heatclient>=1.10.0 # Apache-2.0
python-ironicclient>=2.3.0 # Apache-2.0
diff --git a/doc/source/cli/command-objects/block-storage-cleanup.rst b/doc/source/cli/command-objects/block-storage-cleanup.rst
new file mode 100644
index 00000000..6a593c11
--- /dev/null
+++ b/doc/source/cli/command-objects/block-storage-cleanup.rst
@@ -0,0 +1,8 @@
+=============
+block storage
+=============
+
+Block Storage v3
+
+.. autoprogram-cliff:: openstack.volume.v3
+ :command: block storage cleanup
diff --git a/doc/source/cli/command-objects/block-storage-log-level.rst b/doc/source/cli/command-objects/block-storage-log-level.rst
new file mode 100644
index 00000000..17241a0e
--- /dev/null
+++ b/doc/source/cli/command-objects/block-storage-log-level.rst
@@ -0,0 +1,8 @@
+=======================
+Block Storage Log Level
+=======================
+
+Block Storage v3
+
+.. autoprogram-cliff:: openstack.volume.v3
+ :command: block storage log level *
diff --git a/doc/source/cli/command-objects/block-storage-manage.rst b/doc/source/cli/command-objects/block-storage-manage.rst
new file mode 100644
index 00000000..a1cff1ad
--- /dev/null
+++ b/doc/source/cli/command-objects/block-storage-manage.rst
@@ -0,0 +1,11 @@
+====================
+Block Storage Manage
+====================
+
+Block Storage v3
+
+.. autoprogram-cliff:: openstack.volume.v3
+ :command: block storage volume manageable list
+
+.. autoprogram-cliff:: openstack.volume.v3
+ :command: block storage snapshot manageable list
diff --git a/doc/source/cli/command-objects/network-trunk.rst b/doc/source/cli/command-objects/network-trunk.rst
new file mode 100644
index 00000000..98fd4b0c
--- /dev/null
+++ b/doc/source/cli/command-objects/network-trunk.rst
@@ -0,0 +1,16 @@
+=============
+network trunk
+=============
+
+A **network trunk** is a container to group logical ports from different
+networks and provide a single trunked vNIC for servers. It consists of
+one parent port, which is a regular VIF, and multiple subports, which
+allow the server to connect to more networks.
+
+Network v2
+
+.. autoprogram-cliff:: openstack.network.v2
+ :command: network subport list
+
+.. autoprogram-cliff:: openstack.network.v2
+ :command: network trunk *
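For readers unfamiliar with the trunk workflow described above, the following is a minimal sketch of what the new CreateNetworkTrunk command does through openstacksdk. The cloud name, port names, and segmentation values are illustrative assumptions, not part of the change.

    # Minimal sketch: create a trunk with one VLAN subport using the same
    # SDK calls the new command issues (create_trunk with port_id/sub_ports).
    import openstack

    conn = openstack.connect(cloud='devstack')   # cloud name is an assumption

    parent = conn.network.find_port('parent-port', ignore_missing=False)  # hypothetical port
    child = conn.network.find_port('child-port', ignore_missing=False)    # hypothetical port

    trunk = conn.network.create_trunk(
        name='trunk0',
        port_id=parent.id,
        sub_ports=[{
            'port_id': child.id,
            'segmentation_type': 'vlan',
            'segmentation_id': 100,
        }],
    )
    print(trunk.id, trunk.status)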
diff --git a/doc/source/cli/command-objects/volume.rst b/doc/source/cli/command-objects/volume.rst
index ac414110..9b491772 100644
--- a/doc/source/cli/command-objects/volume.rst
+++ b/doc/source/cli/command-objects/volume.rst
@@ -388,3 +388,11 @@ Unset volume properties
.. describe:: <volume>
Volume to modify (name or ID)
+
+Block Storage v3
+
+ .. autoprogram-cliff:: openstack.volume.v3
+ :command: volume summary
+
+ .. autoprogram-cliff:: openstack.volume.v3
+ :command: volume revert
diff --git a/doc/source/cli/data/cinder.csv b/doc/source/cli/data/cinder.csv
index 8b25d3fd..84ea409e 100644
--- a/doc/source/cli/data/cinder.csv
+++ b/doc/source/cli/data/cinder.csv
@@ -45,7 +45,7 @@ freeze-host,volume host set --disable,Freeze and disable the specified cinder-vo
get-capabilities,volume backend capability show,Show capabilities of a volume backend. Admin only.
get-pools,volume backend pool list,Show pool information for backends. Admin only.
group-create,volume group create,Creates a group. (Supported by API versions 3.13 - 3.latest)
-group-create-from-src,,Creates a group from a group snapshot or a source group. (Supported by API versions 3.14 - 3.latest)
+group-create-from-src,volume group create [--source-group|--group-snapshot],Creates a group from a group snapshot or a source group. (Supported by API versions 3.14 - 3.latest)
group-delete,volume group delete,Removes one or more groups. (Supported by API versions 3.13 - 3.latest)
group-disable-replication,volume group set --disable-replication,Disables replication for group. (Supported by API versions 3.38 - 3.latest)
group-enable-replication,volume group set --enable-replication,Enables replication for group. (Supported by API versions 3.38 - 3.latest)
@@ -71,7 +71,7 @@ image-metadata-show,volume show,Shows volume image metadata.
list,volume list,Lists all volumes.
list-filters,block storage resource filter list,List enabled filters. (Supported by API versions 3.33 - 3.latest)
manage,volume create --remote-source k=v,Manage an existing volume.
-manageable-list,,Lists all manageable volumes. (Supported by API versions 3.8 - 3.latest)
+manageable-list,block storage volume manageable list,Lists all manageable volumes. (Supported by API versions 3.8 - 3.latest)
message-delete,volume message delete,Removes one or more messages. (Supported by API versions 3.3 - 3.latest)
message-list,volume message list,Lists all messages. (Supported by API versions 3.3 - 3.latest)
message-show,volume message show,Shows message details. (Supported by API versions 3.3 - 3.latest)
@@ -100,19 +100,19 @@ readonly-mode-update,volume set --read-only-mode | --read-write-mode,Updates vol
rename,volume set --name,Renames a volume.
reset-state,volume set --state,Explicitly updates the volume state.
retype,volume type set --type,Changes the volume type for a volume.
-revert-to-snapshot,,Revert a volume to the specified snapshot. (Supported by API versions 3.40 - 3.latest)
+revert-to-snapshot,volume revert,Revert a volume to the specified snapshot. (Supported by API versions 3.40 - 3.latest)
service-disable,volume service set --disable,Disables the service.
service-enable,volume service set --enable,Enables the service.
-service-get-log,,(Supported by API versions 3.32 - 3.latest)
+service-get-log,block storage log level list,(Supported by API versions 3.32 - 3.latest)
service-list,volume service list,Lists all services. Filter by host and service binary.
-service-set-log,,(Supported by API versions 3.32 - 3.latest)
+service-set-log,block storage log level set,(Supported by API versions 3.32 - 3.latest)
set-bootable,volume set --bootable / --not-bootable,Update bootable status of a volume.
show,volume show,Shows volume details.
snapshot-create,snapshot create,Creates a snapshot.
snapshot-delete,snapshot delete,Remove one or more snapshots.
snapshot-list,snapshot list,Lists all snapshots.
snapshot-manage,volume snapshot create --remote-source <key=value>,Manage an existing snapshot.
-snapshot-manageable-list,,Lists all manageable snapshots. (Supported by API versions 3.8 - 3.latest)
+snapshot-manageable-list,block storage snapshot manageable list,Lists all manageable snapshots. (Supported by API versions 3.8 - 3.latest)
snapshot-metadata,snapshot set --property k=v / snapshot unset --property k,Sets or deletes snapshot metadata.
snapshot-metadata-show,snapshot show,Shows snapshot metadata.
snapshot-metadata-update-all,snapshot set --property k=v,Updates snapshot metadata.
@@ -120,7 +120,7 @@ snapshot-rename,snapshot set --name,Renames a snapshot.
snapshot-reset-state,snapshot set --state,Explicitly updates the snapshot state.
snapshot-show,snapshot show,Shows snapshot details.
snapshot-unmanage,volume snapshot delete --remote,Stop managing a snapshot.
-summary,,Get volumes summary. (Supported by API versions 3.12 - 3.latest)
+summary,volume summary,Get volumes summary. (Supported by API versions 3.12 - 3.latest)
thaw-host,volume host set --enable,Thaw and enable the specified cinder-volume host.
transfer-accept,volume transfer accept,Accepts a volume transfer.
transfer-create,volume transfer create,Creates a volume transfer.
@@ -140,7 +140,7 @@ type-update,volume type set,"Updates volume type name description and/or is_publ
unmanage,volume delete --remote,Stop managing a volume.
upload-to-image,image create --volume,Uploads volume to Image Service as an image.
version-list,versions show --service block-storage,List all API versions. (Supported by API versions 3.0 - 3.latest)
-work-cleanup,,Request cleanup of services with optional filtering. (Supported by API versions 3.24 - 3.latest)
+work-cleanup,block storage cleanup,Request cleanup of services with optional filtering. (Supported by API versions 3.24 - 3.latest)
bash-completion,complete,Prints arguments for bash_completion.
help,help,Shows help about this program or one of its subcommands.
list-extensions,extension list --volume,Lists all available os-api extensions.
diff --git a/doc/source/cli/data/glance.csv b/doc/source/cli/data/glance.csv
index d5c65f2d..adca8c0e 100644
--- a/doc/source/cli/data/glance.csv
+++ b/doc/source/cli/data/glance.csv
@@ -1,6 +1,10 @@
+cache-clear,,"Clear all images from cache, queue or both."
+cache-delete,,Delete image from cache/caching queue.
+cache-list,,Get cache state.
+cache-queue,,Queue image(s) for caching.
explain,WONTFIX,Describe a specific model.
image-create,image create,Create a new image.
-image-create-via-import,,EXPERIMENTAL: Create a new image via image import.
+image-create-via-import,image create --import,"EXPERIMENTAL: Create a new image via image import using the glance-direct import method. Missing support for the web-download, copy-image and glance-download import methods. The OSC command is also missing support for importing an image to a specified store or to all stores (--store, --stores, --all-stores) and for skipping or stopping processing if import to one of the stores fails (--allow-failure)"
image-deactivate,image set --deactivate,Deactivate specified image.
image-delete,image delete,Delete specified image.
image-download,image save,Download a specific image.
@@ -11,6 +15,7 @@ image-show,image show,Describe a specific image.
image-stage,image stage,Upload data for a specific image to staging.
image-tag-delete,image unset --tag <tag>,Delete the tag associated with the given image.
image-tag-update,image set --tag <tag>,Update an image with the given tag.
+image-tasks,,Get tasks associated with image.
image-update,image set,Update an existing image.
image-upload,,Upload data for a specific image.
import-info,,Print import methods available from Glance.
@@ -49,6 +54,7 @@ md-tag-show,,Describe a specific metadata definitions tag inside a namespace.
md-tag-update,,Rename a metadata definitions tag inside a namespace.
member-create,image add project,Create member for a given image.
member-delete,image remove project,Delete image member.
+member-get,,Show details of an image member
member-list,image member list,Describe sharing permissions by image.
member-update,image set --accept --reject --status,Update the status of a member for a given image.
stores-delete,,Delete image from specific store.
@@ -56,5 +62,6 @@ stores-info,,Print available backends from Glance.
task-create,WONTFIX,Create a new task.
task-list,image task list,List tasks you can access.
task-show,image task show,Describe a specific task.
+usage,,Get quota usage information.
bash-completion,complete,Prints arguments for bash_completion.
help,help,Display help about this program or one of its subcommands.
diff --git a/doc/source/cli/plugin-commands/cyborg.rst b/doc/source/cli/plugin-commands/cyborg.rst
new file mode 100644
index 00000000..aedaa6b5
--- /dev/null
+++ b/doc/source/cli/plugin-commands/cyborg.rst
@@ -0,0 +1,4 @@
+cyborg
+------
+
+.. autoprogram-cliff:: openstack.accelerator.v2
diff --git a/doc/source/cli/plugin-commands/index.rst b/doc/source/cli/plugin-commands/index.rst
index 638dcbe5..e2e0dfa4 100644
--- a/doc/source/cli/plugin-commands/index.rst
+++ b/doc/source/cli/plugin-commands/index.rst
@@ -8,6 +8,7 @@ Plugin Commands
:maxdepth: 1
barbican
+ cyborg
designate
gnocchi
heat
diff --git a/doc/source/contributor/humaninterfaceguide.rst b/doc/source/contributor/humaninterfaceguide.rst
index 5987b86f..db6d7987 100644
--- a/doc/source/contributor/humaninterfaceguide.rst
+++ b/doc/source/contributor/humaninterfaceguide.rst
@@ -246,7 +246,7 @@ Objects and Actions
Commands consist of an object, described by one or more words, followed by an
action. ::
- <object> <action> [<name-or-id>]
+ <object> <action>
For example:
@@ -411,7 +411,7 @@ For example:
* ``server show <name-or-id>`` (compute servers have names or IDs and can be
referenced by both)
* ``consumer show <id>`` (identity consumers only have IDs, not names)
-* ``server show --toplogy <name-or-id>`` (additional information should be
+* ``server show --topology <name-or-id>`` (additional information should be
provided as options)
``list``
diff --git a/doc/source/contributor/plugins.rst b/doc/source/contributor/plugins.rst
index c2a08c5d..35d8d207 100644
--- a/doc/source/contributor/plugins.rst
+++ b/doc/source/contributor/plugins.rst
@@ -26,6 +26,7 @@ The following is a list of projects that are an OpenStackClient plugin.
- gnocchiclient
- osc-placement
- python-barbicanclient
+- python-cyborgclient
- python-designateclient
- python-heatclient
- python-ironicclient
diff --git a/openstackclient/compute/v2/hypervisor_stats.py b/openstackclient/compute/v2/hypervisor_stats.py
index 4493e080..cb63a800 100644
--- a/openstackclient/compute/v2/hypervisor_stats.py
+++ b/openstackclient/compute/v2/hypervisor_stats.py
@@ -11,19 +11,49 @@
# under the License.
#
-
"""Hypervisor Stats action implementations"""
from osc_lib.command import command
+from osc_lib import utils
from openstackclient.i18n import _
+def _get_hypervisor_stat_columns(item):
+ column_map = {
+ # NOTE(gtema): If we decide to use SDK names - empty this
+ 'disk_available': 'disk_available_least',
+ 'local_disk_free': 'free_disk_gb',
+ 'local_disk_size': 'local_gb',
+ 'local_disk_used': 'local_gb_used',
+ 'memory_free': 'free_ram_mb',
+ 'memory_size': 'memory_mb',
+ 'memory_used': 'memory_mb_used',
+
+ }
+ hidden_columns = ['id', 'links', 'location', 'name']
+ return utils.get_osc_show_columns_for_sdk_resource(
+ item, column_map, hidden_columns)
+
+
class ShowHypervisorStats(command.ShowOne):
_description = _("Display hypervisor stats details")
def take_action(self, parsed_args):
- compute_client = self.app.client_manager.compute
- hypervisor_stats = compute_client.hypervisors.statistics().to_dict()
-
- return zip(*sorted(hypervisor_stats.items()))
+ # The command is deprecated since it is being dropped in Nova.
+ self.log.warning(
+ _("This command is deprecated.")
+ )
+ compute_client = self.app.client_manager.sdk_connection.compute
+        # We make the API request directly because this deprecated call is
+        # not and will not be supported by OpenStackSDK.
+ response = compute_client.get(
+ '/os-hypervisors/statistics',
+ microversion='2.1')
+ hypervisor_stats = response.json().get('hypervisor_statistics')
+
+ display_columns, columns = _get_hypervisor_stat_columns(
+ hypervisor_stats)
+ data = utils.get_dict_properties(
+ hypervisor_stats, columns)
+ return (display_columns, data)
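Because the statistics API is deprecated and not wrapped by openstacksdk, the rewritten command issues the GET request itself. A minimal sketch of that request, assuming an authenticated connection; the cloud name is illustrative.

    import openstack

    # Issue the same raw request the command now makes against the compute proxy.
    conn = openstack.connect(cloud='devstack')   # cloud name is an assumption
    response = conn.compute.get('/os-hypervisors/statistics', microversion='2.1')
    stats = response.json().get('hypervisor_statistics')
    print(stats)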
diff --git a/openstackclient/compute/v2/server.py b/openstackclient/compute/v2/server.py
index 3fb9bf09..85693e17 100644
--- a/openstackclient/compute/v2/server.py
+++ b/openstackclient/compute/v2/server.py
@@ -77,6 +77,10 @@ class AddressesColumn(cliff_columns.FormattableColumn):
except Exception:
return 'N/A'
+ def machine_readable(self):
+ return {k: [i['addr'] for i in v if 'addr' in i]
+ for k, v in self._value.items()}
+
class HostColumn(cliff_columns.FormattableColumn):
"""Generate a formatted string of a hostname."""
@@ -133,14 +137,61 @@ def _prep_server_detail(compute_client, image_client, server, refresh=True):
the latest details of a server after creating it.
:rtype: a dict of server details
"""
+ # Note: Some callers of this routine pass a novaclient server, and others
+ # pass an SDK server. Column names may be different across those cases.
info = server.to_dict()
if refresh:
server = utils.find_resource(compute_client.servers, info['id'])
info.update(server.to_dict())
+ # Some commands using this routine were originally implemented with the
+ # nova python wrappers, and were later migrated to use the SDK. Map the
+ # SDK's property names to the original property names to maintain backward
+ # compatibility for existing users. Data is duplicated under both the old
+ # and new name so users can consume the data by either name.
+ column_map = {
+ 'access_ipv4': 'accessIPv4',
+ 'access_ipv6': 'accessIPv6',
+        'admin_password': 'adminPass',
+ 'volumes': 'os-extended-volumes:volumes_attached',
+ 'availability_zone': 'OS-EXT-AZ:availability_zone',
+ 'block_device_mapping': 'block_device_mapping_v2',
+ 'compute_host': 'OS-EXT-SRV-ATTR:host',
+ 'created_at': 'created',
+ 'disk_config': 'OS-DCF:diskConfig',
+ 'flavor_id': 'flavorRef',
+ 'has_config_drive': 'config_drive',
+ 'host_id': 'hostId',
+ 'fault': 'fault',
+ 'hostname': 'OS-EXT-SRV-ATTR:hostname',
+ 'hypervisor_hostname': 'OS-EXT-SRV-ATTR:hypervisor_hostname',
+ 'image_id': 'imageRef',
+ 'instance_name': 'OS-EXT-SRV-ATTR:instance_name',
+ 'is_locked': 'locked',
+ 'kernel_id': 'OS-EXT-SRV-ATTR:kernel_id',
+ 'launch_index': 'OS-EXT-SRV-ATTR:launch_index',
+ 'launched_at': 'OS-SRV-USG:launched_at',
+ 'power_state': 'OS-EXT-STS:power_state',
+ 'project_id': 'tenant_id',
+ 'ramdisk_id': 'OS-EXT-SRV-ATTR:ramdisk_id',
+ 'reservation_id': 'OS-EXT-SRV-ATTR:reservation_id',
+ 'root_device_name': 'OS-EXT-SRV-ATTR:root_device_name',
+ 'scheduler_hints': 'OS-SCH-HNT:scheduler_hints',
+ 'task_state': 'OS-EXT-STS:task_state',
+ 'terminated_at': 'OS-SRV-USG:terminated_at',
+ 'updated_at': 'updated',
+ 'user_data': 'OS-EXT-SRV-ATTR:user_data',
+ 'vm_state': 'OS-EXT-STS:vm_state',
+ }
+
+ info.update({
+ column_map[column]: data for column, data in info.items()
+ if column in column_map})
+
# Convert the image blob to a name
image_info = info.get('image', {})
- if image_info:
+ if image_info and any(image_info.values()):
image_id = image_info.get('id', '')
try:
image = image_client.get_image(image_id)
@@ -188,7 +239,9 @@ def _prep_server_detail(compute_client, image_client, server, refresh=True):
# NOTE(dtroyer): novaclient splits these into separate entries...
# Format addresses in a useful way
- info['addresses'] = format_columns.DictListColumn(server.networks)
+ info['addresses'] = (
+ AddressesColumn(info['addresses']) if 'addresses' in info
+ else format_columns.DictListColumn(info.get('networks')))
# Map 'metadata' field to 'properties'
info['properties'] = format_columns.DictColumn(info.pop('metadata'))
@@ -290,9 +343,11 @@ class AddFixedIP(command.ShowOne):
return ((), ())
kwargs = {
- 'net_id': net_id,
- 'fixed_ip': parsed_args.fixed_ip_address,
+ 'net_id': net_id
}
+ if parsed_args.fixed_ip_address:
+ kwargs['fixed_ips'] = [
+ {"ip_address": parsed_args.fixed_ip_address}]
if parsed_args.tag:
kwargs['tag'] = parsed_args.tag
@@ -451,8 +506,7 @@ class AddPort(command.Command):
port_id = parsed_args.port
kwargs = {
- 'port_id': port_id,
- 'fixed_ip': None,
+ 'port_id': port_id
}
if parsed_args.tag:
@@ -506,8 +560,7 @@ class AddNetwork(command.Command):
net_id = parsed_args.network
kwargs = {
- 'net_id': net_id,
- 'fixed_ip': None,
+ 'net_id': net_id
}
if parsed_args.tag:
@@ -1874,12 +1927,10 @@ class CreateServerDump(command.Command):
return parser
def take_action(self, parsed_args):
- compute_client = self.app.client_manager.compute
- for server in parsed_args.server:
- utils.find_resource(
- compute_client.servers,
- server,
- ).trigger_crash_dump()
+ compute_client = self.app.client_manager.sdk_connection.compute
+ for name_or_id in parsed_args.server:
+ server = compute_client.find_server(name_or_id)
+ server.trigger_crash_dump(compute_client)
class DeleteServer(command.Command):
@@ -2586,9 +2637,9 @@ class ListServer(command.Lister):
columns += ('Metadata',)
column_headers += ('Properties',)
- # convert back to tuple
- column_headers = tuple(column_headers)
- columns = tuple(columns)
+ # remove duplicates
+ column_headers = tuple(dict.fromkeys(column_headers))
+ columns = tuple(dict.fromkeys(columns))
if parsed_args.marker is not None:
# Check if both "--marker" and "--deleted" are used.
@@ -4327,32 +4378,34 @@ class ShowServer(command.ShowOne):
return parser
def take_action(self, parsed_args):
- compute_client = self.app.client_manager.compute
- server = utils.find_resource(
- compute_client.servers, parsed_args.server)
+ compute_client = self.app.client_manager.sdk_connection.compute
+
+ # Find by name or ID, then get the full details of the server
+ server = compute_client.find_server(
+ parsed_args.server, ignore_missing=False)
+ server = compute_client.get_server(server)
if parsed_args.diagnostics:
- (resp, data) = server.diagnostics()
- if not resp.status_code == 200:
- self.app.stderr.write(_(
- "Error retrieving diagnostics data\n"
- ))
- return ({}, {})
+ data = compute_client.get_server_diagnostics(server)
return zip(*sorted(data.items()))
topology = None
if parsed_args.topology:
- if compute_client.api_version < api_versions.APIVersion('2.78'):
+ if not sdk_utils.supports_microversion(compute_client, '2.78'):
msg = _(
'--os-compute-api-version 2.78 or greater is required to '
'support the --topology option'
)
raise exceptions.CommandError(msg)
- topology = server.topology()
+ topology = server.fetch_topology(compute_client)
data = _prep_server_detail(
- compute_client, self.app.client_manager.image, server,
+ # TODO(dannosliwcd): Replace these clients with SDK clients after
+ # all callers of _prep_server_detail() are using the SDK.
+ self.app.client_manager.compute,
+ self.app.client_manager.image,
+ server,
refresh=False)
if topology:
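To make the effect of the column_map above concrete, here is a tiny self-contained illustration of the aliasing step: the SDK field name and the legacy novaclient name both end up in the resulting dict. The values below are made up for the example.

    # Excerpt-style illustration of the aliasing done in _prep_server_detail().
    column_map = {'created_at': 'created', 'vm_state': 'OS-EXT-STS:vm_state'}
    info = {'created_at': '2022-01-01T00:00:00Z', 'vm_state': 'active'}  # illustrative values

    # Duplicate each mapped value under its legacy name as well.
    info.update({column_map[k]: v for k, v in info.items() if k in column_map})

    assert info['created'] == info['created_at']
    assert info['OS-EXT-STS:vm_state'] == info['vm_state']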
diff --git a/openstackclient/compute/v2/server_migration.py b/openstackclient/compute/v2/server_migration.py
index 919b67bd..016d15d7 100644
--- a/openstackclient/compute/v2/server_migration.py
+++ b/openstackclient/compute/v2/server_migration.py
@@ -15,6 +15,7 @@
import uuid
from novaclient import api_versions
+from openstack import utils as sdk_utils
from osc_lib.command import command
from osc_lib import exceptions
from osc_lib import utils
@@ -130,22 +131,22 @@ class ListMigration(command.Lister):
# the same as the column header names.
columns = [
'source_node', 'dest_node', 'source_compute', 'dest_compute',
- 'dest_host', 'status', 'instance_uuid', 'old_instance_type_id',
- 'new_instance_type_id', 'created_at', 'updated_at',
+ 'dest_host', 'status', 'server_id', 'old_flavor_id',
+ 'new_flavor_id', 'created_at', 'updated_at',
]
# Insert migrations UUID after ID
- if compute_client.api_version >= api_versions.APIVersion("2.59"):
+ if sdk_utils.supports_microversion(compute_client, "2.59"):
column_headers.insert(0, "UUID")
columns.insert(0, "uuid")
- if compute_client.api_version >= api_versions.APIVersion("2.23"):
+ if sdk_utils.supports_microversion(compute_client, "2.23"):
column_headers.insert(0, "Id")
columns.insert(0, "id")
column_headers.insert(len(column_headers) - 2, "Type")
columns.insert(len(columns) - 2, "migration_type")
- if compute_client.api_version >= api_versions.APIVersion("2.80"):
+ if sdk_utils.supports_microversion(compute_client, "2.80"):
if parsed_args.project:
column_headers.insert(len(column_headers) - 2, "Project")
columns.insert(len(columns) - 2, "project_id")
@@ -159,19 +160,23 @@ class ListMigration(command.Lister):
)
def take_action(self, parsed_args):
- compute_client = self.app.client_manager.compute
+ compute_client = self.app.client_manager.sdk_connection.compute
identity_client = self.app.client_manager.identity
- search_opts = {
- 'host': parsed_args.host,
- 'status': parsed_args.status,
- }
+ search_opts = {}
+
+ if parsed_args.host is not None:
+ search_opts['host'] = parsed_args.host
+
+ if parsed_args.status is not None:
+ search_opts['status'] = parsed_args.status
if parsed_args.server:
- search_opts['instance_uuid'] = utils.find_resource(
- compute_client.servers,
- parsed_args.server,
- ).id
+ server = compute_client.find_server(parsed_args.server)
+ if server is None:
+ msg = _('Unable to find server: %s') % parsed_args.server
+ raise exceptions.CommandError(msg)
+ search_opts['instance_uuid'] = server.id
if parsed_args.type:
migration_type = parsed_args.type
@@ -181,7 +186,7 @@ class ListMigration(command.Lister):
search_opts['migration_type'] = migration_type
if parsed_args.marker:
- if compute_client.api_version < api_versions.APIVersion('2.59'):
+ if not sdk_utils.supports_microversion(compute_client, "2.59"):
msg = _(
'--os-compute-api-version 2.59 or greater is required to '
'support the --marker option'
@@ -190,16 +195,17 @@ class ListMigration(command.Lister):
search_opts['marker'] = parsed_args.marker
if parsed_args.limit:
- if compute_client.api_version < api_versions.APIVersion('2.59'):
+ if not sdk_utils.supports_microversion(compute_client, "2.59"):
msg = _(
'--os-compute-api-version 2.59 or greater is required to '
'support the --limit option'
)
raise exceptions.CommandError(msg)
search_opts['limit'] = parsed_args.limit
+ search_opts['paginated'] = False
if parsed_args.changes_since:
- if compute_client.api_version < api_versions.APIVersion('2.59'):
+ if not sdk_utils.supports_microversion(compute_client, "2.59"):
msg = _(
'--os-compute-api-version 2.59 or greater is required to '
'support the --changes-since option'
@@ -208,7 +214,7 @@ class ListMigration(command.Lister):
search_opts['changes_since'] = parsed_args.changes_since
if parsed_args.changes_before:
- if compute_client.api_version < api_versions.APIVersion('2.66'):
+ if not sdk_utils.supports_microversion(compute_client, "2.66"):
msg = _(
'--os-compute-api-version 2.66 or greater is required to '
'support the --changes-before option'
@@ -217,7 +223,7 @@ class ListMigration(command.Lister):
search_opts['changes_before'] = parsed_args.changes_before
if parsed_args.project:
- if compute_client.api_version < api_versions.APIVersion('2.80'):
+ if not sdk_utils.supports_microversion(compute_client, "2.80"):
msg = _(
'--os-compute-api-version 2.80 or greater is required to '
'support the --project option'
@@ -231,7 +237,7 @@ class ListMigration(command.Lister):
).id
if parsed_args.user:
- if compute_client.api_version < api_versions.APIVersion('2.80'):
+ if not sdk_utils.supports_microversion(compute_client, "2.80"):
msg = _(
'--os-compute-api-version 2.80 or greater is required to '
'support the --user option'
@@ -244,7 +250,7 @@ class ListMigration(command.Lister):
parsed_args.user_domain,
).id
- migrations = compute_client.migrations.list(**search_opts)
+ migrations = list(compute_client.migrations(**search_opts))
return self.print_migrations(parsed_args, compute_client, migrations)
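The migration listing above gates each optional filter on the negotiated compute microversion. A minimal sketch of that pattern with openstacksdk, assuming an authenticated connection; the cloud name and filter values are illustrative.

    import openstack
    from openstack import utils as sdk_utils

    conn = openstack.connect(cloud='devstack')   # cloud name is an assumption
    compute = conn.compute

    search_opts = {'status': 'completed'}        # illustrative filter
    if sdk_utils.supports_microversion(compute, '2.59'):
        search_opts['limit'] = 10                # --limit needs microversion >= 2.59

    migrations = list(compute.migrations(**search_opts))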
diff --git a/openstackclient/image/v2/image.py b/openstackclient/image/v2/image.py
index 21b962f1..4adaadda 100644
--- a/openstackclient/image/v2/image.py
+++ b/openstackclient/image/v2/image.py
@@ -33,7 +33,7 @@ from osc_lib import utils
from openstackclient.common import progressbar
from openstackclient.i18n import _
-from openstackclient.identity import common
+from openstackclient.identity import common as identity_common
if os.name == "nt":
import msvcrt
@@ -177,21 +177,22 @@ class AddProjectToImage(command.ShowOne):
metavar="<project>",
help=_("Project to associate with image (ID)"),
)
- common.add_project_domain_option_to_parser(parser)
+ identity_common.add_project_domain_option_to_parser(parser)
return parser
def take_action(self, parsed_args):
image_client = self.app.client_manager.image
identity_client = self.app.client_manager.identity
- project_id = common.find_project(
+ project_id = identity_common.find_project(
identity_client,
parsed_args.project,
parsed_args.project_domain,
).id
image = image_client.find_image(
- parsed_args.image, ignore_missing=False
+ parsed_args.image,
+ ignore_missing=False,
)
obj = image_client.add_member(
@@ -397,7 +398,7 @@ class CreateImage(command.ShowOne):
"Force the use of glance image import instead of direct upload"
),
)
- common.add_project_domain_option_to_parser(parser)
+ identity_common.add_project_domain_option_to_parser(parser)
for deadopt in self.deadopts:
parser.add_argument(
"--%s" % deadopt,
@@ -450,7 +451,7 @@ class CreateImage(command.ShowOne):
kwargs['visibility'] = parsed_args.visibility
if parsed_args.project:
- kwargs['owner_id'] = common.find_project(
+ kwargs['owner_id'] = identity_common.find_project(
identity_client,
parsed_args.project,
parsed_args.project_domain,
@@ -664,7 +665,8 @@ class DeleteImage(command.Command):
for image in parsed_args.images:
try:
image_obj = image_client.find_image(
- image, ignore_missing=False
+ image,
+ ignore_missing=False,
)
image_client.delete_image(image_obj.id)
except Exception as e:
@@ -765,7 +767,7 @@ class ListImage(command.Lister):
metavar='<project>',
help=_("Search by project (admin only) (name or ID)"),
)
- common.add_project_domain_option_to_parser(parser)
+ identity_common.add_project_domain_option_to_parser(parser)
parser.add_argument(
'--tag',
metavar='<tag>',
@@ -835,7 +837,10 @@ class ListImage(command.Lister):
if parsed_args.limit:
kwargs['limit'] = parsed_args.limit
if parsed_args.marker:
- kwargs['marker'] = image_client.find_image(parsed_args.marker).id
+ kwargs['marker'] = image_client.find_image(
+ parsed_args.marker,
+ ignore_missing=False,
+ ).id
if parsed_args.name:
kwargs['name'] = parsed_args.name
if parsed_args.status:
@@ -846,7 +851,7 @@ class ListImage(command.Lister):
kwargs['tag'] = parsed_args.tag
project_id = None
if parsed_args.project:
- project_id = common.find_project(
+ project_id = identity_common.find_project(
identity_client,
parsed_args.project,
parsed_args.project_domain,
@@ -925,14 +930,17 @@ class ListImageProjects(command.Lister):
metavar="<image>",
help=_("Image (name or ID)"),
)
- common.add_project_domain_option_to_parser(parser)
+ identity_common.add_project_domain_option_to_parser(parser)
return parser
def take_action(self, parsed_args):
image_client = self.app.client_manager.image
columns = ("Image ID", "Member ID", "Status")
- image_id = image_client.find_image(parsed_args.image).id
+ image_id = image_client.find_image(
+ parsed_args.image,
+ ignore_missing=False,
+ ).id
data = image_client.members(image=image_id)
@@ -963,19 +971,22 @@ class RemoveProjectImage(command.Command):
metavar="<project>",
help=_("Project to disassociate with image (name or ID)"),
)
- common.add_project_domain_option_to_parser(parser)
+ identity_common.add_project_domain_option_to_parser(parser)
return parser
def take_action(self, parsed_args):
image_client = self.app.client_manager.image
identity_client = self.app.client_manager.identity
- project_id = common.find_project(
- identity_client, parsed_args.project, parsed_args.project_domain
+ project_id = identity_common.find_project(
+ identity_client,
+ parsed_args.project,
+ parsed_args.project_domain,
).id
image = image_client.find_image(
- parsed_args.image, ignore_missing=False
+ parsed_args.image,
+ ignore_missing=False,
)
image_client.remove_member(member=project_id, image=image.id)
@@ -1001,7 +1012,10 @@ class SaveImage(command.Command):
def take_action(self, parsed_args):
image_client = self.app.client_manager.image
- image = image_client.find_image(parsed_args.image)
+ image = image_client.find_image(
+ parsed_args.image,
+ ignore_missing=False,
+ )
output_file = parsed_args.filename
if output_file is None:
@@ -1176,7 +1190,7 @@ class SetImage(command.Command):
metavar="<project>",
help=_("Set an alternate project on this image (name or ID)"),
)
- common.add_project_domain_option_to_parser(parser)
+ identity_common.add_project_domain_option_to_parser(parser)
for deadopt in self.deadopts:
parser.add_argument(
"--%s" % deadopt,
@@ -1248,7 +1262,7 @@ class SetImage(command.Command):
)
project_id = None
if parsed_args.project:
- project_id = common.find_project(
+ project_id = identity_common.find_project(
identity_client,
parsed_args.project,
parsed_args.project_domain,
@@ -1369,7 +1383,8 @@ class ShowImage(command.ShowOne):
image_client = self.app.client_manager.image
image = image_client.find_image(
- parsed_args.image, ignore_missing=False
+ parsed_args.image,
+ ignore_missing=False,
)
info = _format_image(image, parsed_args.human_readable)
@@ -1413,7 +1428,8 @@ class UnsetImage(command.Command):
def take_action(self, parsed_args):
image_client = self.app.client_manager.image
image = image_client.find_image(
- parsed_args.image, ignore_missing=False
+ parsed_args.image,
+ ignore_missing=False,
)
kwargs = {}
@@ -1789,10 +1805,10 @@ class ImportImage(command.ShowOne):
image_client.import_image(
image,
method=parsed_args.import_method,
- # uri=parsed_args.uri,
- # remote_region=parsed_args.remote_region,
- # remote_image=parsed_args.remote_image,
- # remote_service_interface=parsed_args.remote_service_interface,
+ uri=parsed_args.uri,
+ remote_region=parsed_args.remote_region,
+ remote_image=parsed_args.remote_image,
+ remote_service_interface=parsed_args.remote_service_interface,
stores=parsed_args.stores,
all_stores=parsed_args.all_stores,
all_stores_must_succeed=not parsed_args.allow_failure,
diff --git a/openstackclient/image/v2/metadef_namespaces.py b/openstackclient/image/v2/metadef_namespaces.py
index 158fd94e..f09f2002 100644
--- a/openstackclient/image/v2/metadef_namespaces.py
+++ b/openstackclient/image/v2/metadef_namespaces.py
@@ -15,8 +15,11 @@
"""Image V2 Action Implementations"""
+import logging
+
from osc_lib.cli import format_columns
from osc_lib.command import command
+from osc_lib import exceptions
from osc_lib import utils
from openstackclient.i18n import _
@@ -25,6 +28,149 @@ _formatters = {
'tags': format_columns.ListColumn,
}
+LOG = logging.getLogger(__name__)
+
+
+def _format_namespace(namespace):
+ info = {}
+
+ fields_to_show = [
+ 'created_at',
+ 'description',
+ 'display_name',
+ 'namespace',
+ 'owner',
+ 'protected',
+ 'schema',
+ 'visibility',
+ ]
+
+ namespace = namespace.to_dict(ignore_none=True, original_names=True)
+
+    # split out the usual keys and the properties, which are top-level
+ for key in namespace:
+ if key in fields_to_show:
+ info[key] = namespace.get(key)
+ elif key == "resource_type_associations":
+ info[key] = [resource_type['name']
+ for resource_type in namespace.get(key)]
+ elif key == 'properties':
+ info['properties'] = list(namespace.get(key).keys())
+
+ return info
+
+
+class CreateMetadefNameSpace(command.ShowOne):
+ _description = _("Create a metadef namespace")
+
+ def get_parser(self, prog_name):
+ parser = super().get_parser(prog_name)
+ parser.add_argument(
+ "namespace",
+ metavar="<namespace>",
+ help=_("New metadef namespace name"),
+ )
+ parser.add_argument(
+ "--display-name",
+ metavar="<display_name>",
+ help=_("A user-friendly name for the namespace."),
+ )
+ parser.add_argument(
+ "--description",
+ metavar="<description>",
+ help=_("A description of the namespace"),
+ )
+ visibility_group = parser.add_mutually_exclusive_group()
+ visibility_group.add_argument(
+ "--public",
+ action="store_const",
+ const="public",
+ dest="visibility",
+ help=_("Set namespace visibility 'public'"),
+ )
+ visibility_group.add_argument(
+ "--private",
+ action="store_const",
+ const="private",
+ dest="visibility",
+ help=_("Set namespace visibility 'private'"),
+ )
+ protected_group = parser.add_mutually_exclusive_group()
+ protected_group.add_argument(
+ "--protected",
+ action="store_const",
+ const=True,
+ dest="is_protected",
+ help=_("Prevent metadef namespace from being deleted"),
+ )
+ protected_group.add_argument(
+ "--unprotected",
+ action="store_const",
+ const=False,
+ dest="is_protected",
+ help=_("Allow metadef namespace to be deleted (default)"),
+ )
+ return parser
+
+ def take_action(self, parsed_args):
+ image_client = self.app.client_manager.image
+ filter_keys = [
+ 'namespace',
+ 'display_name',
+ 'description'
+ ]
+ kwargs = {}
+
+ for key in filter_keys:
+ argument = getattr(parsed_args, key, None)
+ if argument is not None:
+ kwargs[key] = argument
+
+ if parsed_args.is_protected is not None:
+ kwargs['protected'] = parsed_args.is_protected
+
+ if parsed_args.visibility is not None:
+ kwargs['visibility'] = parsed_args.visibility
+
+ data = image_client.create_metadef_namespace(**kwargs)
+
+ return zip(*sorted(data.items()))
+
+
+class DeleteMetadefNameSpace(command.Command):
+ _description = _("Delete metadef namespace")
+
+ def get_parser(self, prog_name):
+ parser = super().get_parser(prog_name)
+ parser.add_argument(
+ "namespace_name",
+ metavar="<namespace_name>",
+ nargs="+",
+ help=_("An identifier (a name) for the namespace"),
+ )
+ return parser
+
+ def take_action(self, parsed_args):
+ image_client = self.app.client_manager.image
+
+ result = 0
+ for i in parsed_args.namespace_name:
+ try:
+ namespace = image_client.get_metadef_namespace(i)
+ image_client.delete_metadef_namespace(namespace.id)
+ except Exception as e:
+ result += 1
+ LOG.error(_("Failed to delete namespace with name or "
+ "ID '%(namespace)s': %(e)s"),
+ {'namespace': i, 'e': e}
+ )
+
+ if result > 0:
+ total = len(parsed_args.namespace_name)
+            msg = (_("%(result)s of %(total)s namespaces failed "
+ "to delete.") % {'result': result, 'total': total})
+ raise exceptions.CommandError(msg)
+
class ListMetadefNameSpaces(command.Lister):
_description = _("List metadef namespaces")
@@ -63,3 +209,104 @@ class ListMetadefNameSpaces(command.Lister):
formatters=_formatters,
) for s in data)
)
+
+
+class SetMetadefNameSpace(command.Command):
+ _description = _("Set metadef namespace properties")
+
+ def get_parser(self, prog_name):
+ parser = super().get_parser(prog_name)
+ parser.add_argument(
+ "namespace",
+ metavar="<namespace>",
+ help=_("Namespace (name) for the namespace"),
+ )
+ parser.add_argument(
+ "--display-name",
+ metavar="<display_name>",
+ help=_("Set a user-friendly name for the namespace."),
+ )
+ parser.add_argument(
+ "--description",
+ metavar="<description>",
+ help=_("Set the description of the namespace"),
+ )
+ visibility_group = parser.add_mutually_exclusive_group()
+ visibility_group.add_argument(
+ "--public",
+ action="store_const",
+ const="public",
+ dest="visibility",
+ help=_("Set namespace visibility 'public'"),
+ )
+ visibility_group.add_argument(
+ "--private",
+ action="store_const",
+ const="private",
+ dest="visibility",
+ help=_("Set namespace visibility 'private'"),
+ )
+ protected_group = parser.add_mutually_exclusive_group()
+ protected_group.add_argument(
+ "--protected",
+ action="store_const",
+ const=True,
+ dest="is_protected",
+ help=_("Prevent metadef namespace from being deleted"),
+ )
+ protected_group.add_argument(
+ "--unprotected",
+ action="store_const",
+ const=False,
+ dest="is_protected",
+ help=_("Allow metadef namespace to be deleted (default)"),
+ )
+ return parser
+
+ def take_action(self, parsed_args):
+ image_client = self.app.client_manager.image
+
+ namespace = parsed_args.namespace
+
+ filter_keys = [
+ 'namespace',
+ 'display_name',
+ 'description'
+ ]
+ kwargs = {}
+
+ for key in filter_keys:
+ argument = getattr(parsed_args, key, None)
+ if argument is not None:
+ kwargs[key] = argument
+
+ if parsed_args.is_protected is not None:
+ kwargs['protected'] = parsed_args.is_protected
+
+ if parsed_args.visibility is not None:
+ kwargs['visibility'] = parsed_args.visibility
+
+ image_client.update_metadef_namespace(namespace, **kwargs)
+
+
+class ShowMetadefNameSpace(command.ShowOne):
+ _description = _("Show a metadef namespace")
+
+ def get_parser(self, prog_name):
+ parser = super().get_parser(prog_name)
+ parser.add_argument(
+ "namespace_name",
+ metavar="<namespace_name>",
+ help=_("Namespace (name) for the namespace"),
+ )
+ return parser
+
+ def take_action(self, parsed_args):
+ image_client = self.app.client_manager.image
+
+ namespace_name = parsed_args.namespace_name
+
+ data = image_client.get_metadef_namespace(namespace_name)
+ info = _format_namespace(data)
+
+ return zip(*sorted(info.items()))
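The new metadef namespace commands are thin wrappers over the SDK calls shown above. A minimal lifecycle sketch, assuming an authenticated connection; the cloud name, namespace name, and attribute values are illustrative.

    import openstack

    conn = openstack.connect(cloud='devstack')   # cloud name is an assumption
    image = conn.image

    name = 'OS::Example::Namespace'              # hypothetical namespace name
    image.create_metadef_namespace(
        namespace=name,
        display_name='Example namespace',
        visibility='private',
        protected=False,
    )
    image.update_metadef_namespace(name, description='Updated description')

    ns = image.get_metadef_namespace(name)       # fetch, then delete by id as the command does
    print(ns.display_name, ns.visibility)
    image.delete_metadef_namespace(ns.id)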
diff --git a/openstackclient/network/v2/network_agent.py b/openstackclient/network/v2/network_agent.py
index d963b3bf..f67f67bd 100644
--- a/openstackclient/network/v2/network_agent.py
+++ b/openstackclient/network/v2/network_agent.py
@@ -168,11 +168,11 @@ class ListNetworkAgent(command.Lister):
metavar='<agent-type>',
choices=["bgp", "dhcp", "open-vswitch", "linux-bridge", "ofa",
"l3", "loadbalancer", "metering", "metadata", "macvtap",
- "nic"],
+ "nic", "baremetal"],
help=_("List only agents with the specified agent type. "
"The supported agent types are: bgp, dhcp, open-vswitch, "
"linux-bridge, ofa, l3, loadbalancer, metering, "
- "metadata, macvtap, nic.")
+ "metadata, macvtap, nic, baremetal.")
)
parser.add_argument(
'--host',
@@ -231,7 +231,8 @@ class ListNetworkAgent(command.Lister):
'metering': 'Metering agent',
'metadata': 'Metadata agent',
'macvtap': 'Macvtap agent',
- 'nic': 'NIC Switch agent'
+ 'nic': 'NIC Switch agent',
+ 'baremetal': 'Baremetal Node'
}
filters = {}
diff --git a/openstackclient/network/v2/network_qos_rule_type.py b/openstackclient/network/v2/network_qos_rule_type.py
index 9af22876..3f4f6a19 100644
--- a/openstackclient/network/v2/network_qos_rule_type.py
+++ b/openstackclient/network/v2/network_qos_rule_type.py
@@ -32,6 +32,23 @@ def _get_columns(item):
class ListNetworkQosRuleType(command.Lister):
_description = _("List QoS rule types")
+ def get_parser(self, prog_name):
+ parser = super().get_parser(prog_name)
+ supported = parser.add_mutually_exclusive_group()
+ supported.add_argument(
+ '--all-supported',
+ action='store_true',
+ help=_("List all the QoS rule types supported by any loaded "
+               "mechanism driver (the union of all sets of supported "
+ "rules)")
+ )
+ supported.add_argument(
+ '--all-rules',
+ action='store_true',
+ help=_("List all QoS rule types implemented in Neutron QoS driver")
+ )
+ return parser
+
def take_action(self, parsed_args):
client = self.app.client_manager.network
columns = (
@@ -40,7 +57,13 @@ class ListNetworkQosRuleType(command.Lister):
column_headers = (
'Type',
)
- data = client.qos_rule_types()
+
+ args = {}
+ if parsed_args.all_supported:
+ args['all_supported'] = True
+ elif parsed_args.all_rules:
+ args['all_rules'] = True
+ data = client.qos_rule_types(**args)
return (column_headers,
(utils.get_item_properties(
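The two new flags map straight onto query parameters of the SDK's qos_rule_types() call, as the take_action() above shows. A minimal sketch, assuming an authenticated connection and an openstacksdk release that accepts these query parameters; the cloud name is illustrative.

    import openstack

    conn = openstack.connect(cloud='devstack')   # cloud name is an assumption

    # Union of rule types supported by any loaded mechanism driver (--all-supported)
    for rule_type in conn.network.qos_rule_types(all_supported=True):
        print(rule_type.type)

    # Every rule type implemented by the Neutron QoS driver (--all-rules)
    for rule_type in conn.network.qos_rule_types(all_rules=True):
        print(rule_type.type)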
diff --git a/openstackclient/network/v2/network_trunk.py b/openstackclient/network/v2/network_trunk.py
new file mode 100644
index 00000000..c5f62901
--- /dev/null
+++ b/openstackclient/network/v2/network_trunk.py
@@ -0,0 +1,402 @@
+# Copyright 2016 ZTE Corporation.
+# All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+"""Network trunk and subports action implementations"""
+import logging
+
+from cliff import columns as cliff_columns
+from osc_lib.cli import format_columns
+from osc_lib.cli import identity as identity_utils
+from osc_lib.cli import parseractions
+from osc_lib.command import command
+from osc_lib import exceptions
+from osc_lib import utils as osc_utils
+
+from openstackclient.i18n import _
+
+LOG = logging.getLogger(__name__)
+
+TRUNK = 'trunk'
+TRUNKS = 'trunks'
+SUB_PORTS = 'sub_ports'
+
+
+class AdminStateColumn(cliff_columns.FormattableColumn):
+ def human_readable(self):
+ return 'UP' if self._value else 'DOWN'
+
+
+class CreateNetworkTrunk(command.ShowOne):
+ """Create a network trunk for a given project"""
+
+ def get_parser(self, prog_name):
+ parser = super(CreateNetworkTrunk, self).get_parser(prog_name)
+ parser.add_argument(
+ 'name',
+ metavar='<name>',
+ help=_("Name of the trunk to create")
+ )
+ parser.add_argument(
+ '--description',
+ metavar='<description>',
+ help=_("A description of the trunk")
+ )
+ parser.add_argument(
+ '--parent-port',
+ metavar='<parent-port>',
+ required=True,
+ help=_("Parent port belonging to this trunk (name or ID)")
+ )
+ parser.add_argument(
+ '--subport',
+ metavar='<port=,segmentation-type=,segmentation-id=>',
+ action=parseractions.MultiKeyValueAction, dest='add_subports',
+ optional_keys=['segmentation-id', 'segmentation-type'],
+ required_keys=['port'],
+ help=_("Subport to add. Subport is of form "
+ "\'port=<name or ID>,segmentation-type=<segmentation-type>,"
+ "segmentation-id=<segmentation-ID>\' (--subport) option "
+ "can be repeated")
+ )
+ admin_group = parser.add_mutually_exclusive_group()
+ admin_group.add_argument(
+ '--enable',
+ action='store_true',
+ default=True,
+ help=_("Enable trunk (default)")
+ )
+ admin_group.add_argument(
+ '--disable',
+ action='store_true',
+ help=_("Disable trunk")
+ )
+ identity_utils.add_project_owner_option_to_parser(parser)
+ return parser
+
+ def take_action(self, parsed_args):
+ client = self.app.client_manager.network
+ attrs = _get_attrs_for_trunk(self.app.client_manager,
+ parsed_args)
+ obj = client.create_trunk(**attrs)
+ display_columns, columns = _get_columns(obj)
+ data = osc_utils.get_dict_properties(obj, columns,
+ formatters=_formatters)
+ return display_columns, data
+
+
+class DeleteNetworkTrunk(command.Command):
+ """Delete a given network trunk"""
+
+ def get_parser(self, prog_name):
+ parser = super(DeleteNetworkTrunk, self).get_parser(prog_name)
+ parser.add_argument(
+ 'trunk',
+ metavar="<trunk>",
+ nargs="+",
+ help=_("Trunk(s) to delete (name or ID)")
+ )
+ return parser
+
+ def take_action(self, parsed_args):
+ client = self.app.client_manager.network
+ result = 0
+ for trunk in parsed_args.trunk:
+ try:
+ trunk_id = client.find_trunk(trunk).id
+ client.delete_trunk(trunk_id)
+ except Exception as e:
+ result += 1
+ LOG.error(_("Failed to delete trunk with name "
+ "or ID '%(trunk)s': %(e)s"),
+ {'trunk': trunk, 'e': e})
+ if result > 0:
+ total = len(parsed_args.trunk)
+ msg = (_("%(result)s of %(total)s trunks failed "
+ "to delete.") % {'result': result, 'total': total})
+ raise exceptions.CommandError(msg)
+
+
+class ListNetworkTrunk(command.Lister):
+ """List all network trunks"""
+
+ def get_parser(self, prog_name):
+ parser = super(ListNetworkTrunk, self).get_parser(prog_name)
+ parser.add_argument(
+ '--long',
+ action='store_true',
+ default=False,
+ help=_("List additional fields in output")
+ )
+ return parser
+
+ def take_action(self, parsed_args):
+ client = self.app.client_manager.network
+ data = client.trunks()
+ headers = (
+ 'ID',
+ 'Name',
+ 'Parent Port',
+ 'Description'
+ )
+ columns = (
+ 'id',
+ 'name',
+ 'port_id',
+ 'description'
+ )
+ if parsed_args.long:
+ headers += (
+ 'Status',
+ 'State',
+ 'Created At',
+ 'Updated At',
+ )
+ columns += (
+ 'status',
+ 'admin_state_up',
+ 'created_at',
+ 'updated_at'
+ )
+ return (headers,
+ (osc_utils.get_item_properties(
+ s, columns,
+ formatters=_formatters,
+ ) for s in data))
+
+
+class SetNetworkTrunk(command.Command):
+ """Set network trunk properties"""
+
+ def get_parser(self, prog_name):
+ parser = super(SetNetworkTrunk, self).get_parser(prog_name)
+ parser.add_argument(
+ 'trunk',
+ metavar="<trunk>",
+ help=_("Trunk to modify (name or ID)")
+ )
+ parser.add_argument(
+ '--name',
+ metavar="<name>",
+ help=_("Set trunk name")
+ )
+ parser.add_argument(
+ '--description',
+ metavar='<description>',
+ help=_("A description of the trunk")
+ )
+ parser.add_argument(
+ '--subport',
+ metavar='<port=,segmentation-type=,segmentation-id=>',
+ action=parseractions.MultiKeyValueAction, dest='set_subports',
+ optional_keys=['segmentation-id', 'segmentation-type'],
+ required_keys=['port'],
+ help=_("Subport to add. Subport is of form "
+ "\'port=<name or ID>,segmentation-type=<segmentation-type>"
+ ",segmentation-id=<segmentation-ID>\' (--subport) option "
+ "can be repeated")
+ )
+ admin_group = parser.add_mutually_exclusive_group()
+ admin_group.add_argument(
+ '--enable',
+ action='store_true',
+ help=_("Enable trunk")
+ )
+ admin_group.add_argument(
+ '--disable',
+ action='store_true',
+ help=_("Disable trunk")
+ )
+ return parser
+
+ def take_action(self, parsed_args):
+ client = self.app.client_manager.network
+ trunk_id = client.find_trunk(parsed_args.trunk)
+ attrs = _get_attrs_for_trunk(self.app.client_manager, parsed_args)
+ try:
+ client.update_trunk(trunk_id, **attrs)
+ except Exception as e:
+ msg = (_("Failed to set trunk '%(t)s': %(e)s")
+ % {'t': parsed_args.trunk, 'e': e})
+ raise exceptions.CommandError(msg)
+ if parsed_args.set_subports:
+ subport_attrs = _get_attrs_for_subports(self.app.client_manager,
+ parsed_args)
+ try:
+ client.add_trunk_subports(trunk_id, subport_attrs)
+ except Exception as e:
+ msg = (_("Failed to add subports to trunk '%(t)s': %(e)s")
+ % {'t': parsed_args.trunk, 'e': e})
+ raise exceptions.CommandError(msg)
+
+
+class ShowNetworkTrunk(command.ShowOne):
+ """Show information of a given network trunk"""
+ def get_parser(self, prog_name):
+ parser = super(ShowNetworkTrunk, self).get_parser(prog_name)
+ parser.add_argument(
+ 'trunk',
+ metavar="<trunk>",
+ help=_("Trunk to display (name or ID)")
+ )
+ return parser
+
+ def take_action(self, parsed_args):
+ client = self.app.client_manager.network
+ trunk_id = client.find_trunk(parsed_args.trunk).id
+ obj = client.get_trunk(trunk_id)
+ display_columns, columns = _get_columns(obj)
+ data = osc_utils.get_dict_properties(obj, columns,
+ formatters=_formatters)
+ return display_columns, data
+
+
+class ListNetworkSubport(command.Lister):
+ """List all subports for a given network trunk"""
+
+ def get_parser(self, prog_name):
+ parser = super(ListNetworkSubport, self).get_parser(prog_name)
+ parser.add_argument(
+ '--trunk',
+ required=True,
+ metavar="<trunk>",
+ help=_("List subports belonging to this trunk (name or ID)")
+ )
+ return parser
+
+ def take_action(self, parsed_args):
+ client = self.app.client_manager.network
+ trunk_id = client.find_trunk(parsed_args.trunk)
+ data = client.get_trunk_subports(trunk_id)
+ headers = ('Port', 'Segmentation Type', 'Segmentation ID')
+ columns = ('port_id', 'segmentation_type', 'segmentation_id')
+ return (headers,
+ (osc_utils.get_dict_properties(
+ s, columns,
+ ) for s in data[SUB_PORTS]))
+
+
+class UnsetNetworkTrunk(command.Command):
+ """Unset subports from a given network trunk"""
+
+ def get_parser(self, prog_name):
+ parser = super(UnsetNetworkTrunk, self).get_parser(prog_name)
+ parser.add_argument(
+ 'trunk',
+ metavar="<trunk>",
+ help=_("Unset subports from this trunk (name or ID)")
+ )
+ parser.add_argument(
+ '--subport',
+ metavar="<subport>",
+ required=True,
+ action='append', dest='unset_subports',
+ help=_("Subport to delete (name or ID of the port) "
+ "(--subport) option can be repeated")
+ )
+ return parser
+
+ def take_action(self, parsed_args):
+ client = self.app.client_manager.network
+ attrs = _get_attrs_for_subports(self.app.client_manager, parsed_args)
+ trunk_id = client.find_trunk(parsed_args.trunk)
+ client.delete_trunk_subports(trunk_id, attrs)
+
+
+_formatters = {
+ 'admin_state_up': AdminStateColumn,
+ 'sub_ports': format_columns.ListDictColumn,
+}
+
+
+def _get_columns(item):
+ column_map = {}
+ hidden_columns = ['location', 'tenant_id']
+ return osc_utils.get_osc_show_columns_for_sdk_resource(
+ item,
+ column_map,
+ hidden_columns
+ )
+
+
+def _get_attrs_for_trunk(client_manager, parsed_args):
+ attrs = {}
+ if parsed_args.name is not None:
+ attrs['name'] = str(parsed_args.name)
+ if parsed_args.description is not None:
+ attrs['description'] = str(parsed_args.description)
+ if parsed_args.enable:
+ attrs['admin_state_up'] = True
+ if parsed_args.disable:
+ attrs['admin_state_up'] = False
+ if 'parent_port' in parsed_args and parsed_args.parent_port is not None:
+ port_id = client_manager.network.find_port(
+ parsed_args.parent_port)['id']
+ attrs['port_id'] = port_id
+ if 'add_subports' in parsed_args and parsed_args.add_subports is not None:
+ attrs[SUB_PORTS] = _format_subports(client_manager,
+ parsed_args.add_subports)
+
+ # "trunk set" command doesn't support setting project.
+ if 'project' in parsed_args and parsed_args.project is not None:
+ identity_client = client_manager.identity
+ project_id = identity_utils.find_project(
+ identity_client,
+ parsed_args.project,
+ parsed_args.project_domain,
+ ).id
+ attrs['tenant_id'] = project_id
+
+ return attrs
+
+
+def _format_subports(client_manager, subports):
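+ # Translate each parsed --subport dict into the form the trunk API
+ # expects, e.g. (illustrative values only):
+ # {'port': 'p1', 'segmentation-id': '42', 'segmentation-type': 'vlan'}
+ # -> {'port_id': <ID of p1>, 'segmentation_id': 42,
+ # 'segmentation_type': 'vlan'}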
+ attrs = []
+ for subport in subports:
+ subport_attrs = {}
+ if subport.get('port'):
+ port_id = client_manager.network.find_port(subport['port'])['id']
+ subport_attrs['port_id'] = port_id
+ if subport.get('segmentation-id'):
+ try:
+ subport_attrs['segmentation_id'] = int(
+ subport['segmentation-id'])
+ except ValueError:
+ msg = (_("Segmentation-id '%s' is not an integer") %
+ subport['segmentation-id'])
+ raise exceptions.CommandError(msg)
+ if subport.get('segmentation-type'):
+ subport_attrs['segmentation_type'] = subport['segmentation-type']
+ attrs.append(subport_attrs)
+ return attrs
+
+
+def _get_attrs_for_subports(client_manager, parsed_args):
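+ # For "trunk set --subport" the full subport dicts are built via
+ # _format_subports(); for "trunk unset --subport" only the resolved
+ # 'port_id' of each subport is needed.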
+ attrs = {}
+ if 'set_subports' in parsed_args and parsed_args.set_subports is not None:
+ attrs = _format_subports(client_manager,
+ parsed_args.set_subports)
+ if ('unset_subports' in parsed_args and
+ parsed_args.unset_subports is not None):
+ subports_list = []
+ for subport in parsed_args.unset_subports:
+ port_id = client_manager.network.find_port(subport)['id']
+ subports_list.append({'port_id': port_id})
+ attrs = subports_list
+ return attrs
+
+
+def _get_id(client, id_or_name, resource):
+ return client.find_resource(resource, str(id_or_name))['id']
diff --git a/openstackclient/tests/functional/compute/v2/test_hypervisor.py b/openstackclient/tests/functional/compute/v2/test_hypervisor.py
new file mode 100644
index 00000000..9bc23280
--- /dev/null
+++ b/openstackclient/tests/functional/compute/v2/test_hypervisor.py
@@ -0,0 +1,52 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import json
+
+from openstackclient.tests.functional import base
+
+
+class HypervisorTests(base.TestCase):
+ """Functional tests for hypervisor."""
+
+ def test_hypervisor_list(self):
+ """Test create defaults, list filters, delete"""
+ # Test list
+ cmd_output = json.loads(self.openstack(
+ "hypervisor list -f json --os-compute-api-version 2.1"
+ ))
+ ids1 = [x["ID"] for x in cmd_output]
+ self.assertIsNotNone(cmd_output)
+
+ cmd_output = json.loads(self.openstack(
+ "hypervisor list -f json"
+ ))
+ ids2 = [x["ID"] for x in cmd_output]
+ self.assertIsNotNone(cmd_output)
+
+ # Show test - old microversion
+ for i in ids1:
+ cmd_output = json.loads(self.openstack(
+ "hypervisor show %s -f json "
+ " --os-compute-api-version 2.1"
+ % (i)
+ ))
+ self.assertIsNotNone(cmd_output)
+ # When hypervisors are listed with an older microversion the IDs are
+ # returned as integers, so verify that show can find the resources
+ # regardless of the ID format.
+ # Show test - latest microversion
+ for i in ids2:
+ cmd_output = json.loads(self.openstack(
+ "hypervisor show %s -f json"
+ % (i)
+ ))
+ self.assertIsNotNone(cmd_output)
diff --git a/openstackclient/tests/functional/compute/v2/test_server.py b/openstackclient/tests/functional/compute/v2/test_server.py
index 1c909b67..05945a02 100644
--- a/openstackclient/tests/functional/compute/v2/test_server.py
+++ b/openstackclient/tests/functional/compute/v2/test_server.py
@@ -11,6 +11,7 @@
# under the License.
import itertools
+import json
import time
import uuid
@@ -288,6 +289,33 @@ class ServerTests(common.ComputeTestCase):
)
self.assertOutput("", raw_output)
+ def test_server_show(self):
+ """Test server show"""
+ cmd_output = self.server_create()
+ name = cmd_output['name']
+
+ # Simple show
+ cmd_output = json.loads(self.openstack(
+ f'server show -f json {name}'
+ ))
+ self.assertEqual(
+ name,
+ cmd_output["name"],
+ )
+
+ # Show diagnostics
+ cmd_output = json.loads(self.openstack(
+ f'server show -f json {name} --diagnostics'
+ ))
+ self.assertIn('driver', cmd_output)
+
+ # Show topology
+ cmd_output = json.loads(self.openstack(
+ f'server show -f json {name} --topology '
+ f'--os-compute-api-version 2.78'
+ ))
+ self.assertIn('topology', cmd_output)
+
def test_server_actions(self):
"""Test server action pairs
@@ -1250,6 +1278,62 @@ class ServerTests(common.ComputeTestCase):
addresses = cmd_output['addresses']['private']
self.assertNotIn(ip_address, addresses)
+ def test_server_add_fixed_ip(self):
+ name = uuid.uuid4().hex
+ cmd_output = self.openstack(
+ 'server create ' +
+ '--network private ' +
+ '--flavor ' + self.flavor_name + ' ' +
+ '--image ' + self.image_name + ' ' +
+ '--wait ' +
+ name,
+ parse_output=True,
+ )
+
+ self.assertIsNotNone(cmd_output['id'])
+ self.assertEqual(name, cmd_output['name'])
+ self.addCleanup(self.openstack, 'server delete --wait ' + name)
+
+ # create port, record its ip address to use in later call,
+ # then delete - this is to figure out what should be a free ip
+ # in the subnet
+ port_name = uuid.uuid4().hex
+
+ cmd_output = self.openstack(
+ 'port list',
+ parse_output=True,
+ )
+ self.assertNotIn(port_name, cmd_output)
+
+ cmd_output = self.openstack(
+ 'port create ' +
+ '--network private ' + port_name,
+ parse_output=True,
+ )
+ self.assertIsNotNone(cmd_output['id'])
+ ip_address = cmd_output['fixed_ips'][0]['ip_address']
+ self.openstack('port delete ' + port_name)
+
+ # add fixed ip to server, assert the ip address appears
+ self.openstack('server add fixed ip --fixed-ip-address ' + ip_address +
+ ' ' + name + ' private')
+
+ wait_time = 0
+ while wait_time < 60:
+ cmd_output = self.openstack(
+ 'server show ' + name,
+ parse_output=True,
+ )
+ if ip_address not in cmd_output['addresses']['private']:
+ # Hang out for a bit and try again
+ print('retrying add fixed ip check')
+ wait_time += 10
+ time.sleep(10)
+ else:
+ break
+ addresses = cmd_output['addresses']['private']
+ self.assertIn(ip_address, addresses)
+
def test_server_add_remove_volume(self):
volume_wait_for = volume_common.BaseVolumeTests.wait_for_status
@@ -1328,3 +1412,9 @@ class ServerTests(common.ComputeTestCase):
raw_output = self.openstack('server volume list ' + server_name)
self.assertEqual('\n', raw_output)
+
+ def test_server_migration_list(self):
+ # Verify that the command does not raise an exception when we list
+ # migrations, including when we specify a query.
+ self.openstack('server migration list')
+ self.openstack('server migration list --limit 1')
diff --git a/openstackclient/tests/functional/network/v2/test_network_qos_rule_type.py b/openstackclient/tests/functional/network/v2/test_network_qos_rule_type.py
index 6b719cbe..4ead65cc 100644
--- a/openstackclient/tests/functional/network/v2/test_network_qos_rule_type.py
+++ b/openstackclient/tests/functional/network/v2/test_network_qos_rule_type.py
@@ -21,6 +21,13 @@ class NetworkQosRuleTypeTests(common.NetworkTests):
AVAILABLE_RULE_TYPES = ['dscp_marking',
'bandwidth_limit']
+ # NOTE(ralonsoh): this list was updated in Yoga (February 2022)
+ ALL_AVAILABLE_RULE_TYPES = ['dscp_marking',
+ 'bandwidth_limit',
+ 'minimum_bandwidth',
+ 'packet_rate_limit',
+ 'minimum_packet_rate',
+ ]
def setUp(self):
super(NetworkQosRuleTypeTests, self).setUp()
@@ -36,6 +43,28 @@ class NetworkQosRuleTypeTests(common.NetworkTests):
for rule_type in self.AVAILABLE_RULE_TYPES:
self.assertIn(rule_type, [x['Type'] for x in cmd_output])
+ def test_qos_rule_type_list_all_supported(self):
+ if not self.is_extension_enabled('qos-rule-type-filter'):
+ self.skipTest('No "qos-rule-type-filter" extension present')
+
+ cmd_output = self.openstack(
+ 'network qos rule type list --all-supported -f json',
+ parse_output=True
+ )
+ for rule_type in self.AVAILABLE_RULE_TYPES:
+ self.assertIn(rule_type, [x['Type'] for x in cmd_output])
+
+ def test_qos_rule_type_list_all_rules(self):
+ if not self.is_extension_enabled('qos-rule-type-filter'):
+ self.skipTest('No "qos-rule-type-filter" extension present')
+
+ cmd_output = self.openstack(
+ 'network qos rule type list --all-rules -f json',
+ parse_output=True
+ )
+ for rule_type in self.ALL_AVAILABLE_RULE_TYPES:
+ self.assertIn(rule_type, [x['Type'] for x in cmd_output])
+
def test_qos_rule_type_details(self):
for rule_type in self.AVAILABLE_RULE_TYPES:
cmd_output = self.openstack(
diff --git a/openstackclient/tests/functional/network/v2/test_network_trunk.py b/openstackclient/tests/functional/network/v2/test_network_trunk.py
new file mode 100644
index 00000000..bbb77a0d
--- /dev/null
+++ b/openstackclient/tests/functional/network/v2/test_network_trunk.py
@@ -0,0 +1,149 @@
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import json
+import uuid
+
+from openstackclient.tests.functional.network.v2 import common
+
+
+class NetworkTrunkTests(common.NetworkTests):
+ """Functional tests for Network Trunks"""
+
+ def setUp(self):
+ super().setUp()
+ # Nothing in this class works with Nova Network
+ if not self.haz_network:
+ self.skipTest("No Network service present")
+
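+ # Every test needs a parent port for the trunk and a second port to
+ # attach as a subport, both on a throw-away network/subnet created
+ # here and cleaned up afterwards.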
+ network_name = uuid.uuid4().hex
+ subnet_name = uuid.uuid4().hex
+ self.parent_port_name = uuid.uuid4().hex
+ self.sub_port_name = uuid.uuid4().hex
+
+ self.openstack('network create %s' % network_name)
+ self.addCleanup(self.openstack, 'network delete %s' % network_name)
+
+ self.openstack(
+ 'subnet create %s '
+ '--network %s --subnet-range 10.0.0.0/24' % (
+ subnet_name, network_name))
+ self.openstack('port create %s --network %s' %
+ (self.parent_port_name, network_name))
+ self.addCleanup(self.openstack, 'port delete %s' %
+ self.parent_port_name)
+ json_out = self.openstack('port create %s --network %s -f json' %
+ (self.sub_port_name, network_name))
+ self.sub_port_id = json.loads(json_out)['id']
+ self.addCleanup(self.openstack, 'port delete %s' % self.sub_port_name)
+
+ def test_network_trunk_create_delete(self):
+ trunk_name = uuid.uuid4().hex
+ self.openstack('network trunk create %s --parent-port %s -f json ' %
+ (trunk_name, self.parent_port_name))
+ raw_output = self.openstack(
+ 'network trunk delete ' +
+ trunk_name
+ )
+ self.assertEqual('', raw_output)
+
+ def test_network_trunk_list(self):
+ trunk_name = uuid.uuid4().hex
+ json_output = json.loads(self.openstack(
+ 'network trunk create %s --parent-port %s -f json ' %
+ (trunk_name, self.parent_port_name)))
+ self.addCleanup(self.openstack,
+ 'network trunk delete ' + trunk_name)
+ self.assertEqual(trunk_name, json_output['name'])
+
+ json_output = json.loads(self.openstack(
+ 'network trunk list -f json'
+ ))
+ self.assertIn(trunk_name, [tr['Name'] for tr in json_output])
+
+ def test_network_trunk_set_unset(self):
+ trunk_name = uuid.uuid4().hex
+ json_output = json.loads(self.openstack(
+ 'network trunk create %s --parent-port %s -f json ' %
+ (trunk_name, self.parent_port_name)))
+ self.addCleanup(self.openstack,
+ 'network trunk delete ' + trunk_name)
+ self.assertEqual(trunk_name, json_output['name'])
+
+ self.openstack(
+ 'network trunk set '
+ '--enable ' +
+ trunk_name
+ )
+
+ json_output = json.loads(self.openstack(
+ 'network trunk show -f json ' +
+ trunk_name
+ ))
+ self.assertTrue(json_output['is_admin_state_up'])
+
+ # Add subport to trunk
+ self.openstack(
+ 'network trunk set ' +
+ '--subport port=%s,segmentation-type=vlan,segmentation-id=42 ' %
+ (self.sub_port_name) +
+ trunk_name
+ )
+ json_output = json.loads(self.openstack(
+ 'network trunk show -f json ' +
+ trunk_name
+ ))
+ self.assertEqual(
+ [{
+ 'port_id': self.sub_port_id,
+ 'segmentation_id': 42,
+ 'segmentation_type': 'vlan'
+ }],
+ json_output['sub_ports'])
+
+ # Remove subport from trunk
+ self.openstack(
+ 'network trunk unset ' +
+ trunk_name +
+ ' --subport ' +
+ self.sub_port_name
+ )
+ json_output = json.loads(self.openstack(
+ 'network trunk show -f json ' +
+ trunk_name
+ ))
+ self.assertEqual(
+ [],
+ json_output['sub_ports'])
+
+ def test_network_trunk_list_subports(self):
+ trunk_name = uuid.uuid4().hex
+ json_output = json.loads(self.openstack(
+ 'network trunk create %s --parent-port %s '
+ '--subport port=%s,segmentation-type=vlan,segmentation-id=42 '
+ '-f json ' %
+ (trunk_name, self.parent_port_name, self.sub_port_name)))
+ self.addCleanup(self.openstack,
+ 'network trunk delete ' + trunk_name)
+ self.assertEqual(trunk_name, json_output['name'])
+
+ json_output = json.loads(self.openstack(
+ 'network subport list --trunk %s -f json' % trunk_name))
+ self.assertEqual(
+ [{
+ 'Port': self.sub_port_id,
+ 'Segmentation ID': 42,
+ 'Segmentation Type': 'vlan'
+ }],
+ json_output)
diff --git a/openstackclient/tests/unit/compute/v2/fakes.py b/openstackclient/tests/unit/compute/v2/fakes.py
index c5f6ebe2..4cd71913 100644
--- a/openstackclient/tests/unit/compute/v2/fakes.py
+++ b/openstackclient/tests/unit/compute/v2/fakes.py
@@ -21,7 +21,7 @@ import uuid
from novaclient import api_versions
from openstack.compute.v2 import flavor as _flavor
from openstack.compute.v2 import hypervisor as _hypervisor
-from openstack.compute.v2 import server
+from openstack.compute.v2 import server as _server
from openstack.compute.v2 import server_group as _server_group
from openstack.compute.v2 import server_interface as _server_interface
from openstack.compute.v2 import service
@@ -341,67 +341,6 @@ class FakeExtension(object):
return extension
-class FakeHypervisorStats(object):
- """Fake one or more hypervisor stats."""
-
- @staticmethod
- def create_one_hypervisor_stats(attrs=None):
- """Create a fake hypervisor stats.
-
- :param dict attrs:
- A dictionary with all attributes
- :return:
- A FakeResource object, with count, current_workload, and so on
- """
- attrs = attrs or {}
-
- # Set default attributes.
- stats_info = {
- 'count': 2,
- 'current_workload': 0,
- 'disk_available_least': 50,
- 'free_disk_gb': 100,
- 'free_ram_mb': 23000,
- 'local_gb': 100,
- 'local_gb_used': 0,
- 'memory_mb': 23800,
- 'memory_mb_used': 1400,
- 'running_vms': 3,
- 'vcpus': 8,
- 'vcpus_used': 3,
- }
-
- # Overwrite default attributes.
- stats_info.update(attrs)
-
- # Set default method.
- hypervisor_stats_method = {'to_dict': stats_info}
-
- hypervisor_stats = fakes.FakeResource(
- info=copy.deepcopy(stats_info),
- methods=copy.deepcopy(hypervisor_stats_method),
- loaded=True)
- return hypervisor_stats
-
- @staticmethod
- def create_hypervisors_stats(attrs=None, count=2):
- """Create multiple fake hypervisors stats.
-
- :param dict attrs:
- A dictionary with all attributes
- :param int count:
- The number of hypervisors to fake
- :return:
- A list of FakeResource objects faking the hypervisors
- """
- hypervisors = []
- for i in range(0, count):
- hypervisors.append(
- FakeHypervisorStats.create_one_hypervisor_stats(attrs))
-
- return hypervisors
-
-
class FakeSecurityGroup(object):
"""Fake one or more security groups."""
@@ -605,7 +544,12 @@ class FakeServer(object):
# Overwrite default attributes.
server_info.update(attrs)
- return server.Server(**server_info)
+ server = _server.Server(**server_info)
+
+ # Override methods
+ server.trigger_crash_dump = mock.MagicMock()
+
+ return server
@staticmethod
def create_sdk_servers(attrs=None, methods=None, count=2):
diff --git a/openstackclient/tests/unit/compute/v2/test_hypervisor_stats.py b/openstackclient/tests/unit/compute/v2/test_hypervisor_stats.py
index 40086f9b..7bc7468a 100644
--- a/openstackclient/tests/unit/compute/v2/test_hypervisor_stats.py
+++ b/openstackclient/tests/unit/compute/v2/test_hypervisor_stats.py
@@ -12,9 +12,11 @@
# License for the specific language governing permissions and limitations
# under the License.
#
+from unittest import mock
from openstackclient.compute.v2 import hypervisor_stats
from openstackclient.tests.unit.compute.v2 import fakes as compute_fakes
+from openstackclient.tests.unit import fakes
class TestHypervisorStats(compute_fakes.TestComputev2):
@@ -23,20 +25,55 @@ class TestHypervisorStats(compute_fakes.TestComputev2):
super(TestHypervisorStats, self).setUp()
# Get a shortcut to the compute client hypervisors mock
- self.hypervisors_mock = self.app.client_manager.compute.hypervisors
- self.hypervisors_mock.reset_mock()
+ self.app.client_manager.sdk_connection = mock.Mock()
+ self.app.client_manager.sdk_connection.compute = mock.Mock()
+ self.sdk_client = self.app.client_manager.sdk_connection.compute
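+ # The command under test is expected to fetch the statistics through
+ # the SDK connection's low-level get() call, so only that call is
+ # stubbed; tests wrap their payload in a fakes.FakeResponse.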
+ self.sdk_client.get = mock.Mock()
+
+
+# Not in fakes.py because hypervisor stats has been deprecated
+
+def create_one_hypervisor_stats(attrs=None):
+ """Create a fake hypervisor stats.
+
+ :param dict attrs:
+ A dictionary with all attributes
+ :return:
+ A dictionary that contains hypervisor stats information keys
+ """
+ attrs = attrs or {}
+
+ # Set default attributes.
+ stats_info = {
+ 'count': 2,
+ 'current_workload': 0,
+ 'disk_available_least': 50,
+ 'free_disk_gb': 100,
+ 'free_ram_mb': 23000,
+ 'local_gb': 100,
+ 'local_gb_used': 0,
+ 'memory_mb': 23800,
+ 'memory_mb_used': 1400,
+ 'running_vms': 3,
+ 'vcpus': 8,
+ 'vcpus_used': 3,
+ }
+
+ # Overwrite default attributes.
+ stats_info.update(attrs)
+
+ return stats_info
class TestHypervisorStatsShow(TestHypervisorStats):
+ _stats = create_one_hypervisor_stats()
+
def setUp(self):
super(TestHypervisorStatsShow, self).setUp()
- self.hypervisor_stats = \
- compute_fakes.FakeHypervisorStats.create_one_hypervisor_stats()
-
- self.hypervisors_mock.statistics.return_value =\
- self.hypervisor_stats
+ self.sdk_client.get.return_value = fakes.FakeResponse(
+ data={'hypervisor_statistics': self._stats})
self.cmd = hypervisor_stats.ShowHypervisorStats(self.app, None)
diff --git a/openstackclient/tests/unit/compute/v2/test_server.py b/openstackclient/tests/unit/compute/v2/test_server.py
index 56249862..a5d5a43f 100644
--- a/openstackclient/tests/unit/compute/v2/test_server.py
+++ b/openstackclient/tests/unit/compute/v2/test_server.py
@@ -423,8 +423,7 @@ class TestServerAddFixedIP(TestServer):
self.assertEqual(expected_data, tuple(data))
self.sdk_client.create_server_interface.assert_called_once_with(
servers[0].id,
- net_id=network['id'],
- fixed_ip=None
+ net_id=network['id']
)
@mock.patch.object(sdk_utils, 'supports_microversion')
@@ -479,7 +478,7 @@ class TestServerAddFixedIP(TestServer):
self.sdk_client.create_server_interface.assert_called_once_with(
servers[0].id,
net_id=network['id'],
- fixed_ip='5.6.7.8'
+ fixed_ips=[{'ip_address': '5.6.7.8'}]
)
@mock.patch.object(sdk_utils, 'supports_microversion')
@@ -536,7 +535,7 @@ class TestServerAddFixedIP(TestServer):
self.sdk_client.create_server_interface.assert_called_once_with(
servers[0].id,
net_id=network['id'],
- fixed_ip='5.6.7.8',
+ fixed_ips=[{'ip_address': '5.6.7.8'}],
tag='tag1',
)
@@ -847,7 +846,7 @@ class TestServerAddPort(TestServer):
result = self.cmd.take_action(parsed_args)
self.sdk_client.create_server_interface.assert_called_once_with(
- servers[0], port_id=port_id, fixed_ip=None)
+ servers[0], port_id=port_id)
self.assertIsNone(result)
def test_server_add_port(self):
@@ -885,7 +884,6 @@ class TestServerAddPort(TestServer):
self.sdk_client.create_server_interface.assert_called_once_with(
servers[0],
port_id='fake-port',
- fixed_ip=None,
tag='tag1')
@mock.patch.object(sdk_utils, 'supports_microversion', return_value=False)
@@ -1287,7 +1285,7 @@ class TestServerAddNetwork(TestServer):
result = self.cmd.take_action(parsed_args)
self.sdk_client.create_server_interface.assert_called_once_with(
- servers[0], net_id=net_id, fixed_ip=None)
+ servers[0], net_id=net_id)
self.assertIsNone(result)
def test_server_add_network(self):
@@ -1326,7 +1324,6 @@ class TestServerAddNetwork(TestServer):
self.sdk_client.create_server_interface.assert_called_once_with(
servers[0],
net_id='fake-network',
- fixed_ip=None,
tag='tag1'
)
@@ -4463,21 +4460,36 @@ class TestServerDelete(TestServer):
class TestServerDumpCreate(TestServer):
def setUp(self):
- super(TestServerDumpCreate, self).setUp()
+ super().setUp()
# Get the command object to test
self.cmd = server.CreateServerDump(self.app, None)
- # Set methods to be tested.
- self.methods = {
- 'trigger_crash_dump': None,
- }
+ def run_test_server_dump(self, server_count):
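+ # Trigger a crash dump on <server_count> fake servers and verify that
+ # each fake's mocked trigger_crash_dump() was called exactly once with
+ # the SDK client.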
+ servers = self.setup_sdk_servers_mock(server_count)
+
+ arglist = []
+ verifylist = []
+
+ for s in servers:
+ arglist.append(s.id)
+
+ verifylist = [
+ ('server', arglist),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ result = self.cmd.take_action(parsed_args)
+
+ self.assertIsNone(result)
+ for s in servers:
+ s.trigger_crash_dump.assert_called_once_with(self.sdk_client)
def test_server_dump_one_server(self):
- self.run_method_with_servers('trigger_crash_dump', 1)
+ self.run_test_server_dump(1)
def test_server_dump_multi_servers(self):
- self.run_method_with_servers('trigger_crash_dump', 3)
+ self.run_test_server_dump(3)
class _TestServerList(TestServer):
@@ -4705,6 +4717,7 @@ class TestServerList(_TestServerList):
self.assertIn('Availability Zone', columns)
self.assertIn('Host', columns)
self.assertIn('Properties', columns)
+ self.assertCountEqual(columns, set(columns))
def test_server_list_no_name_lookup_option(self):
self.data = tuple(
@@ -7932,20 +7945,15 @@ class TestServerShow(TestServer):
'tenant_id': 'tenant-id-xxx',
'networks': {'public': ['10.20.30.40', '2001:db8::f']},
}
- # Fake the server.diagnostics() method. The return value contains http
- # response and data. The data is a dict. Sincce this method itself is
- # faked, we don't need to fake everything of the return value exactly.
- resp = mock.Mock()
- resp.status_code = 200
+ self.sdk_client.get_server_diagnostics.return_value = {'test': 'test'}
server_method = {
- 'diagnostics': (resp, {'test': 'test'}),
- 'topology': self.topology,
+ 'fetch_topology': self.topology,
}
self.server = compute_fakes.FakeServer.create_one_server(
attrs=server_info, methods=server_method)
# This is the return value for utils.find_resource()
- self.servers_mock.get.return_value = self.server
+ self.sdk_client.get_server.return_value = self.server
self.get_image_mock.return_value = self.image
self.flavors_mock.get.return_value = self.flavor
@@ -8046,8 +8054,7 @@ class TestServerShow(TestServer):
self.assertEqual(('test',), data)
def test_show_topology(self):
- self.app.client_manager.compute.api_version = \
- api_versions.APIVersion('2.78')
+ self._set_mock_microversion('2.78')
arglist = [
'--topology',
@@ -8069,8 +8076,7 @@ class TestServerShow(TestServer):
self.assertCountEqual(self.data, data)
def test_show_topology_pre_v278(self):
- self.app.client_manager.compute.api_version = \
- api_versions.APIVersion('2.77')
+ self._set_mock_microversion('2.77')
arglist = [
'--topology',
diff --git a/openstackclient/tests/unit/compute/v2/test_server_migration.py b/openstackclient/tests/unit/compute/v2/test_server_migration.py
index c4cbac47..93c1865a 100644
--- a/openstackclient/tests/unit/compute/v2/test_server_migration.py
+++ b/openstackclient/tests/unit/compute/v2/test_server_migration.py
@@ -13,6 +13,7 @@
from unittest import mock
from novaclient import api_versions
+from openstack import utils as sdk_utils
from osc_lib import exceptions
from osc_lib import utils as common_utils
@@ -35,10 +36,6 @@ class TestServerMigration(compute_fakes.TestComputev2):
self.app.client_manager.compute.server_migrations
self.server_migrations_mock.reset_mock()
- # Get a shortcut to the compute client MigrationManager mock
- self.migrations_mock = self.app.client_manager.compute.migrations
- self.migrations_mock.reset_mock()
-
self.app.client_manager.sdk_connection = mock.Mock()
self.app.client_manager.sdk_connection.compute = mock.Mock()
self.sdk_client = self.app.client_manager.sdk_connection.compute
@@ -53,22 +50,21 @@ class TestListMigration(TestServerMigration):
'Old Flavor', 'New Flavor', 'Created At', 'Updated At'
]
- # These are the fields that come back in the response from the REST API.
MIGRATION_FIELDS = [
'source_node', 'dest_node', 'source_compute', 'dest_compute',
- 'dest_host', 'status', 'instance_uuid', 'old_instance_type_id',
- 'new_instance_type_id', 'created_at', 'updated_at'
+ 'dest_host', 'status', 'server_id', 'old_flavor_id',
+ 'new_flavor_id', 'created_at', 'updated_at'
]
def setUp(self):
super().setUp()
self.server = compute_fakes.FakeServer.create_one_server()
- self.servers_mock.get.return_value = self.server
+ self.sdk_client.find_server.return_value = self.server
self.migrations = compute_fakes.FakeMigration.create_migrations(
count=3)
- self.migrations_mock.list.return_value = self.migrations
+ self.sdk_client.migrations.return_value = self.migrations
self.data = (common_utils.get_item_properties(
s, self.MIGRATION_FIELDS) for s in self.migrations)
@@ -76,6 +72,20 @@ class TestListMigration(TestServerMigration):
# Get the command object to test
self.cmd = server_migration.ListMigration(self.app, None)
+ patcher = mock.patch.object(
+ sdk_utils, 'supports_microversion', return_value=True)
+ self.addCleanup(patcher.stop)
+ self.supports_microversion_mock = patcher.start()
+ self._set_mock_microversion(
+ self.app.client_manager.compute.api_version.get_string())
+
+ def _set_mock_microversion(self, mock_v):
+ """Set a specific microversion for the mock supports_microversion()."""
+ self.supports_microversion_mock.reset_mock(return_value=True)
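+ # supports_microversion(client, version) will now report True only for
+ # versions at or below the microversion requested by the test.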
+ self.supports_microversion_mock.side_effect = (
+ lambda _, v:
+ api_versions.APIVersion(v) <= api_versions.APIVersion(mock_v))
+
def test_server_migration_list_no_options(self):
arglist = []
verifylist = []
@@ -84,12 +94,9 @@ class TestListMigration(TestServerMigration):
columns, data = self.cmd.take_action(parsed_args)
# Set expected values
- kwargs = {
- 'status': None,
- 'host': None,
- }
+ kwargs = {}
- self.migrations_mock.list.assert_called_with(**kwargs)
+ self.sdk_client.migrations.assert_called_with(**kwargs)
self.assertEqual(self.MIGRATION_COLUMNS, columns)
self.assertEqual(tuple(self.data), tuple(data))
@@ -117,8 +124,8 @@ class TestListMigration(TestServerMigration):
'migration_type': 'migration',
}
- self.servers_mock.get.assert_called_with('server1')
- self.migrations_mock.list.assert_called_with(**kwargs)
+ self.sdk_client.find_server.assert_called_with('server1')
+ self.sdk_client.migrations.assert_called_with(**kwargs)
self.assertEqual(self.MIGRATION_COLUMNS, columns)
self.assertEqual(tuple(self.data), tuple(data))
@@ -133,18 +140,17 @@ class TestListMigrationV223(TestListMigration):
'Type', 'Created At', 'Updated At'
]
- # These are the fields that come back in the response from the REST API.
+ # These are the Migration object fields.
MIGRATION_FIELDS = [
'id', 'source_node', 'dest_node', 'source_compute', 'dest_compute',
- 'dest_host', 'status', 'instance_uuid', 'old_instance_type_id',
- 'new_instance_type_id', 'migration_type', 'created_at', 'updated_at'
+ 'dest_host', 'status', 'server_id', 'old_flavor_id',
+ 'new_flavor_id', 'migration_type', 'created_at', 'updated_at'
]
def setUp(self):
super().setUp()
- self.app.client_manager.compute.api_version = api_versions.APIVersion(
- '2.23')
+ self._set_mock_microversion('2.23')
def test_server_migration_list(self):
arglist = [
@@ -159,10 +165,9 @@ class TestListMigrationV223(TestListMigration):
# Set expected values
kwargs = {
'status': 'migrating',
- 'host': None,
}
- self.migrations_mock.list.assert_called_with(**kwargs)
+ self.sdk_client.migrations.assert_called_with(**kwargs)
self.assertEqual(self.MIGRATION_COLUMNS, columns)
self.assertEqual(tuple(self.data), tuple(data))
@@ -177,19 +182,18 @@ class TestListMigrationV259(TestListMigration):
'Old Flavor', 'New Flavor', 'Type', 'Created At', 'Updated At'
]
- # These are the fields that come back in the response from the REST API.
+ # These are the Migration object fields.
MIGRATION_FIELDS = [
'id', 'uuid', 'source_node', 'dest_node', 'source_compute',
- 'dest_compute', 'dest_host', 'status', 'instance_uuid',
- 'old_instance_type_id', 'new_instance_type_id', 'migration_type',
+ 'dest_compute', 'dest_host', 'status', 'server_id',
+ 'old_flavor_id', 'new_flavor_id', 'migration_type',
'created_at', 'updated_at'
]
def setUp(self):
super().setUp()
- self.app.client_manager.compute.api_version = api_versions.APIVersion(
- '2.59')
+ self._set_mock_microversion('2.59')
def test_server_migration_list(self):
arglist = [
@@ -211,19 +215,18 @@ class TestListMigrationV259(TestListMigration):
kwargs = {
'status': 'migrating',
'limit': 1,
+ 'paginated': False,
'marker': 'test_kp',
- 'host': None,
'changes_since': '2019-08-09T08:03:25Z',
}
- self.migrations_mock.list.assert_called_with(**kwargs)
+ self.sdk_client.migrations.assert_called_with(**kwargs)
self.assertEqual(self.MIGRATION_COLUMNS, columns)
self.assertEqual(tuple(self.data), tuple(data))
def test_server_migration_list_with_limit_pre_v259(self):
- self.app.client_manager.compute.api_version = api_versions.APIVersion(
- '2.58')
+ self._set_mock_microversion('2.58')
arglist = [
'--status', 'migrating',
'--limit', '1'
@@ -242,8 +245,7 @@ class TestListMigrationV259(TestListMigration):
str(ex))
def test_server_migration_list_with_marker_pre_v259(self):
- self.app.client_manager.compute.api_version = api_versions.APIVersion(
- '2.58')
+ self._set_mock_microversion('2.58')
arglist = [
'--status', 'migrating',
'--marker', 'test_kp'
@@ -262,8 +264,7 @@ class TestListMigrationV259(TestListMigration):
str(ex))
def test_server_migration_list_with_changes_since_pre_v259(self):
- self.app.client_manager.compute.api_version = api_versions.APIVersion(
- '2.58')
+ self._set_mock_microversion('2.58')
arglist = [
'--status', 'migrating',
'--changes-since', '2019-08-09T08:03:25Z'
@@ -291,19 +292,18 @@ class TestListMigrationV266(TestListMigration):
'Old Flavor', 'New Flavor', 'Type', 'Created At', 'Updated At'
]
- # These are the fields that come back in the response from the REST API.
+ # These are the Migration object fields.
MIGRATION_FIELDS = [
'id', 'uuid', 'source_node', 'dest_node', 'source_compute',
- 'dest_compute', 'dest_host', 'status', 'instance_uuid',
- 'old_instance_type_id', 'new_instance_type_id', 'migration_type',
+ 'dest_compute', 'dest_host', 'status', 'server_id',
+ 'old_flavor_id', 'new_flavor_id', 'migration_type',
'created_at', 'updated_at'
]
def setUp(self):
super().setUp()
- self.app.client_manager.compute.api_version = api_versions.APIVersion(
- '2.66')
+ self._set_mock_microversion('2.66')
def test_server_migration_list_with_changes_before(self):
arglist = [
@@ -327,20 +327,19 @@ class TestListMigrationV266(TestListMigration):
kwargs = {
'status': 'migrating',
'limit': 1,
+ 'paginated': False,
'marker': 'test_kp',
- 'host': None,
'changes_since': '2019-08-07T08:03:25Z',
'changes_before': '2019-08-09T08:03:25Z',
}
- self.migrations_mock.list.assert_called_with(**kwargs)
+ self.sdk_client.migrations.assert_called_with(**kwargs)
self.assertEqual(self.MIGRATION_COLUMNS, columns)
self.assertEqual(tuple(self.data), tuple(data))
def test_server_migration_list_with_changes_before_pre_v266(self):
- self.app.client_manager.compute.api_version = api_versions.APIVersion(
- '2.65')
+ self._set_mock_microversion('2.65')
arglist = [
'--status', 'migrating',
'--changes-before', '2019-08-09T08:03:25Z'
@@ -368,11 +367,11 @@ class TestListMigrationV280(TestListMigration):
'Old Flavor', 'New Flavor', 'Type', 'Created At', 'Updated At'
]
- # These are the fields that come back in the response from the REST API.
+ # These are the Migration object fields.
MIGRATION_FIELDS = [
'id', 'uuid', 'source_node', 'dest_node', 'source_compute',
- 'dest_compute', 'dest_host', 'status', 'instance_uuid',
- 'old_instance_type_id', 'new_instance_type_id', 'migration_type',
+ 'dest_compute', 'dest_host', 'status', 'server_id',
+ 'old_flavor_id', 'new_flavor_id', 'migration_type',
'created_at', 'updated_at'
]
@@ -391,8 +390,7 @@ class TestListMigrationV280(TestListMigration):
self.projects_mock.get.return_value = self.project
self.users_mock.get.return_value = self.user
- self.app.client_manager.compute.api_version = api_versions.APIVersion(
- '2.80')
+ self._set_mock_microversion('2.80')
def test_server_migration_list_with_project(self):
arglist = [
@@ -418,14 +416,14 @@ class TestListMigrationV280(TestListMigration):
kwargs = {
'status': 'migrating',
'limit': 1,
+ 'paginated': False,
'marker': 'test_kp',
- 'host': None,
'project_id': self.project.id,
'changes_since': '2019-08-07T08:03:25Z',
'changes_before': "2019-08-09T08:03:25Z",
}
- self.migrations_mock.list.assert_called_with(**kwargs)
+ self.sdk_client.migrations.assert_called_with(**kwargs)
self.MIGRATION_COLUMNS.insert(
len(self.MIGRATION_COLUMNS) - 2, "Project")
@@ -439,8 +437,7 @@ class TestListMigrationV280(TestListMigration):
self.MIGRATION_FIELDS.remove('project_id')
def test_get_migrations_with_project_pre_v280(self):
- self.app.client_manager.compute.api_version = api_versions.APIVersion(
- '2.79')
+ self._set_mock_microversion('2.79')
arglist = [
'--status', 'migrating',
'--changes-before', '2019-08-09T08:03:25Z',
@@ -478,20 +475,21 @@ class TestListMigrationV280(TestListMigration):
('user', self.user.id),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
columns, data = self.cmd.take_action(parsed_args)
# Set expected values
kwargs = {
'status': 'migrating',
'limit': 1,
+ 'paginated': False,
'marker': 'test_kp',
- 'host': None,
'user_id': self.user.id,
'changes_since': '2019-08-07T08:03:25Z',
'changes_before': "2019-08-09T08:03:25Z",
}
- self.migrations_mock.list.assert_called_with(**kwargs)
+ self.sdk_client.migrations.assert_called_with(**kwargs)
self.MIGRATION_COLUMNS.insert(
len(self.MIGRATION_COLUMNS) - 2, "User")
@@ -505,8 +503,7 @@ class TestListMigrationV280(TestListMigration):
self.MIGRATION_FIELDS.remove('user_id')
def test_get_migrations_with_user_pre_v280(self):
- self.app.client_manager.compute.api_version = api_versions.APIVersion(
- '2.79')
+ self._set_mock_microversion('2.79')
arglist = [
'--status', 'migrating',
'--changes-before', '2019-08-09T08:03:25Z',
@@ -550,14 +547,14 @@ class TestListMigrationV280(TestListMigration):
kwargs = {
'status': 'migrating',
'limit': 1,
- 'host': None,
+ 'paginated': False,
'project_id': self.project.id,
'user_id': self.user.id,
'changes_since': '2019-08-07T08:03:25Z',
'changes_before': "2019-08-09T08:03:25Z",
}
- self.migrations_mock.list.assert_called_with(**kwargs)
+ self.sdk_client.migrations.assert_called_with(**kwargs)
self.MIGRATION_COLUMNS.insert(
len(self.MIGRATION_COLUMNS) - 2, "Project")
@@ -576,8 +573,7 @@ class TestListMigrationV280(TestListMigration):
self.MIGRATION_FIELDS.remove('user_id')
def test_get_migrations_with_project_and_user_pre_v280(self):
- self.app.client_manager.compute.api_version = api_versions.APIVersion(
- '2.79')
+ self._set_mock_microversion('2.79')
arglist = [
'--status', 'migrating',
'--changes-before', '2019-08-09T08:03:25Z',
diff --git a/openstackclient/tests/unit/image/v2/fakes.py b/openstackclient/tests/unit/image/v2/fakes.py
index ded9ff31..8ddd9a09 100644
--- a/openstackclient/tests/unit/image/v2/fakes.py
+++ b/openstackclient/tests/unit/image/v2/fakes.py
@@ -239,7 +239,11 @@ def create_tasks(attrs=None, count=2):
class FakeMetadefNamespaceClient:
def __init__(self, **kwargs):
+ self.create_metadef_namespace = mock.Mock()
+ self.delete_metadef_namespace = mock.Mock()
self.metadef_namespaces = mock.Mock()
+ self.get_metadef_namespace = mock.Mock()
+ self.update_metadef_namespace = mock.Mock()
self.auth_token = kwargs['token']
self.management_url = kwargs['endpoint']
@@ -277,10 +281,11 @@ def create_one_metadef_namespace(attrs=None):
'display_name': 'Flavor Quota',
'namespace': 'OS::Compute::Quota',
'owner': 'admin',
- 'resource_type_associations': ['OS::Nova::Flavor'],
+ # 'resource_type_associations': ['OS::Nova::Flavor'],
+ # Commented out because handling of this list-type attribute is not
+ # implemented in this fake yet.
'visibility': 'public',
}
# Overwrite default attributes if there are some attributes set
metadef_namespace_list.update(attrs)
- return metadef_namespace.MetadefNamespace(metadef_namespace_list)
+ return metadef_namespace.MetadefNamespace(**metadef_namespace_list)
diff --git a/openstackclient/tests/unit/image/v2/test_image.py b/openstackclient/tests/unit/image/v2/test_image.py
index 010c4a9d..019b4d9d 100644
--- a/openstackclient/tests/unit/image/v2/test_image.py
+++ b/openstackclient/tests/unit/image/v2/test_image.py
@@ -905,7 +905,10 @@ class TestImageList(TestImage):
marker=self._image.id,
)
- self.client.find_image.assert_called_with('graven')
+ self.client.find_image.assert_called_with(
+ 'graven',
+ ignore_missing=False,
+ )
def test_image_list_name_option(self):
arglist = [
@@ -1856,6 +1859,10 @@ class TestImageImport(TestImage):
self.client.import_image.assert_called_once_with(
self.image,
method='glance-direct',
+ uri=None,
+ remote_region=None,
+ remote_image=None,
+ remote_service_interface=None,
stores=None,
all_stores=None,
all_stores_must_succeed=False,
@@ -1880,7 +1887,10 @@ class TestImageImport(TestImage):
self.client.import_image.assert_called_once_with(
self.image,
method='web-download',
- # uri='https://example.com/',
+ uri='https://example.com/',
+ remote_region=None,
+ remote_image=None,
+ remote_service_interface=None,
stores=None,
all_stores=None,
all_stores_must_succeed=False,
@@ -1978,6 +1988,10 @@ class TestImageImport(TestImage):
self.client.import_image.assert_called_once_with(
self.image,
method='copy-image',
+ uri=None,
+ remote_region=None,
+ remote_image=None,
+ remote_service_interface=None,
stores=['fast'],
all_stores=None,
all_stores_must_succeed=False,
@@ -2005,9 +2019,10 @@ class TestImageImport(TestImage):
self.client.import_image.assert_called_once_with(
self.image,
method='glance-download',
- # remote_region='eu/dublin',
- # remote_image='remote-image-id',
- # remote_service_interface='private',
+ uri=None,
+ remote_region='eu/dublin',
+ remote_image='remote-image-id',
+ remote_service_interface='private',
stores=None,
all_stores=None,
all_stores_must_succeed=False,
diff --git a/openstackclient/tests/unit/image/v2/test_metadef_namespaces.py b/openstackclient/tests/unit/image/v2/test_metadef_namespaces.py
index 5eae289c..7ed11838 100644
--- a/openstackclient/tests/unit/image/v2/test_metadef_namespaces.py
+++ b/openstackclient/tests/unit/image/v2/test_metadef_namespaces.py
@@ -30,8 +30,89 @@ class TestMetadefNamespaces(md_namespace_fakes.TestMetadefNamespaces):
self.domain_mock.reset_mock()
-class TestMetadefNamespaceList(TestMetadefNamespaces):
+class TestMetadefNamespaceCreate(TestMetadefNamespaces):
+ _metadef_namespace = md_namespace_fakes.create_one_metadef_namespace()
+
+ expected_columns = (
+ 'created_at',
+ 'description',
+ 'display_name',
+ 'id',
+ 'is_protected',
+ 'location',
+ 'name',
+ 'namespace',
+ 'owner',
+ 'resource_type_associations',
+ 'updated_at',
+ 'visibility'
+ )
+ expected_data = (
+ _metadef_namespace.created_at,
+ _metadef_namespace.description,
+ _metadef_namespace.display_name,
+ _metadef_namespace.id,
+ _metadef_namespace.is_protected,
+ _metadef_namespace.location,
+ _metadef_namespace.name,
+ _metadef_namespace.namespace,
+ _metadef_namespace.owner,
+ _metadef_namespace.resource_type_associations,
+ _metadef_namespace.updated_at,
+ _metadef_namespace.visibility
+ )
+
+ def setUp(self):
+ super().setUp()
+
+ self.client.create_metadef_namespace.return_value \
+ = self._metadef_namespace
+ self.cmd = metadef_namespaces.CreateMetadefNameSpace(self.app, None)
+ self.datalist = self._metadef_namespace
+
+ def test_namespace_create(self):
+ arglist = [
+ self._metadef_namespace.namespace
+ ]
+
+ verifylist = []
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ columns, data = self.cmd.take_action(parsed_args)
+
+ self.assertEqual(self.expected_columns, columns)
+ self.assertEqual(self.expected_data, data)
+
+
+class TestMetadefNamespaceDelete(TestMetadefNamespaces):
+ _metadef_namespace = md_namespace_fakes.create_one_metadef_namespace()
+
+ def setUp(self):
+ super().setUp()
+
+ self.client.delete_metadef_namespace.return_value \
+ = self._metadef_namespace
+ self.cmd = metadef_namespaces.DeleteMetadefNameSpace(self.app, None)
+ self.datalist = self._metadef_namespace
+
+ def test_namespace_delete(self):
+ arglist = [
+ self._metadef_namespace.namespace
+ ]
+
+ verifylist = []
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ result = self.cmd.take_action(parsed_args)
+
+ self.assertIsNone(result)
+
+
+class TestMetadefNamespaceList(TestMetadefNamespaces):
_metadef_namespace = [md_namespace_fakes.create_one_metadef_namespace()]
columns = [
@@ -65,3 +146,70 @@ class TestMetadefNamespaceList(TestMetadefNamespaces):
self.assertEqual(self.columns, columns)
self.assertEqual(getattr(self.datalist[0], 'namespace'),
next(data)[0])
+
+
+class TestMetadefNamespaceSet(TestMetadefNamespaces):
+ _metadef_namespace = md_namespace_fakes.create_one_metadef_namespace()
+
+ def setUp(self):
+ super().setUp()
+
+ self.client.update_metadef_namespace.return_value \
+ = self._metadef_namespace
+ self.cmd = metadef_namespaces.SetMetadefNameSpace(self.app, None)
+ self.datalist = self._metadef_namespace
+
+ def test_namespace_set_no_options(self):
+ arglist = [
+ self._metadef_namespace.namespace
+ ]
+ verifylist = [
+ ('namespace', self._metadef_namespace.namespace),
+ ]
+
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ result = self.cmd.take_action(parsed_args)
+
+ self.assertIsNone(result)
+
+
+class TestMetadefNamespaceShow(TestMetadefNamespaces):
+ _metadef_namespace = md_namespace_fakes.create_one_metadef_namespace()
+
+ expected_columns = (
+ 'created_at',
+ 'display_name',
+ 'namespace',
+ 'owner',
+ 'visibility'
+ )
+ expected_data = (
+ _metadef_namespace.created_at,
+ _metadef_namespace.display_name,
+ _metadef_namespace.namespace,
+ _metadef_namespace.owner,
+ _metadef_namespace.visibility
+ )
+
+ def setUp(self):
+ super().setUp()
+
+ self.client.get_metadef_namespace.return_value \
+ = self._metadef_namespace
+ self.cmd = metadef_namespaces.ShowMetadefNameSpace(self.app, None)
+
+ def test_namespace_show_no_options(self):
+ arglist = [
+ self._metadef_namespace.namespace
+ ]
+
+ verifylist = []
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ columns, data = self.cmd.take_action(parsed_args)
+
+ self.assertEqual(self.expected_columns, columns)
+ self.assertEqual(self.expected_data, data)
diff --git a/openstackclient/tests/unit/network/v2/fakes.py b/openstackclient/tests/unit/network/v2/fakes.py
index 4d029a0e..5d68d95d 100644
--- a/openstackclient/tests/unit/network/v2/fakes.py
+++ b/openstackclient/tests/unit/network/v2/fakes.py
@@ -34,6 +34,7 @@ from openstack.network.v2 import port as _port
from openstack.network.v2 import rbac_policy as network_rbac
from openstack.network.v2 import segment as _segment
from openstack.network.v2 import service_profile as _flavor_profile
+from openstack.network.v2 import trunk as _trunk
from openstackclient.tests.unit import fakes
from openstackclient.tests.unit.identity.v3 import fakes as identity_fakes_v3
@@ -2152,3 +2153,71 @@ def get_ndp_proxies(ndp_proxies=None, count=2):
create_ndp_proxies(count)
)
return mock.Mock(side_effect=ndp_proxies)
+
+
+def create_one_trunk(attrs=None):
+ """Create a fake trunk.
+
+ :param Dictionary attrs:
+ A dictionary with all attributes
+ :return:
+ A FakeResource object with name, id, etc.
+ """
+ attrs = attrs or {}
+
+ # Set default attributes.
+ trunk_attrs = {
+ 'id': 'trunk-id-' + uuid.uuid4().hex,
+ 'name': 'trunk-name-' + uuid.uuid4().hex,
+ 'description': '',
+ 'port_id': 'port-' + uuid.uuid4().hex,
+ 'admin_state_up': True,
+ 'project_id': 'project-id-' + uuid.uuid4().hex,
+ 'status': 'ACTIVE',
+ 'sub_ports': [{'port_id': 'subport-' +
+ uuid.uuid4().hex,
+ 'segmentation_type': 'vlan',
+ 'segmentation_id': 100}],
+ }
+ # Overwrite default attributes.
+ trunk_attrs.update(attrs)
+
+ trunk = _trunk.Trunk(**trunk_attrs)
+
+ return trunk
+
+
+def create_trunks(attrs=None, count=2):
+ """Create multiple fake trunks.
+
+ :param Dictionary attrs:
+ A dictionary with all attributes
+ :param int count:
+ The number of trunks to fake
+ :return:
+ A list of FakeResource objects faking the trunks
+ """
+ trunks = []
+ for i in range(0, count):
+ trunks.append(create_one_trunk(attrs))
+
+ return trunks
+
+
+def get_trunks(trunks=None, count=2):
+ """Get an iterable Mock object with a list of faked trunks.
+
+ If trunk list is provided, then initialize the Mock object
+ with the list. Otherwise create one.
+
+ :param List trunks:
+ A list of FakeResource objects faking trunks
+ :param int count:
+ The number of trunks to fake
+ :return:
+ An iterable Mock object with side_effect set to a list of faked
+ trunks
+ """
+ if trunks is None:
+ trunks = create_trunks(count)
+ return mock.Mock(side_effect=trunks)
diff --git a/openstackclient/tests/unit/network/v2/test_network_qos_rule_type.py b/openstackclient/tests/unit/network/v2/test_network_qos_rule_type.py
index 08a83fab..3aae822e 100644
--- a/openstackclient/tests/unit/network/v2/test_network_qos_rule_type.py
+++ b/openstackclient/tests/unit/network/v2/test_network_qos_rule_type.py
@@ -115,3 +115,37 @@ class TestListNetworkQosRuleType(TestNetworkQosRuleType):
self.network.qos_rule_types.assert_called_once_with(**{})
self.assertEqual(self.columns, columns)
self.assertEqual(self.data, list(data))
+
+ def test_qos_rule_type_list_all_supported(self):
+ arglist = [
+ '--all-supported'
+ ]
+ verifylist = [
+ ('all_supported', True),
+ ]
+
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+ columns, data = self.cmd.take_action(parsed_args)
+
+ self.network.qos_rule_types.assert_called_once_with(
+ **{'all_supported': True}
+ )
+ self.assertEqual(self.columns, columns)
+ self.assertEqual(self.data, list(data))
+
+ def test_qos_rule_type_list_all_rules(self):
+ arglist = [
+ '--all-rules'
+ ]
+ verifylist = [
+ ('all_rules', True),
+ ]
+
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+ columns, data = self.cmd.take_action(parsed_args)
+
+ self.network.qos_rule_types.assert_called_once_with(
+ **{'all_rules': True}
+ )
+ self.assertEqual(self.columns, columns)
+ self.assertEqual(self.data, list(data))
diff --git a/openstackclient/tests/unit/network/v2/test_network_trunk.py b/openstackclient/tests/unit/network/v2/test_network_trunk.py
new file mode 100644
index 00000000..fae70fb0
--- /dev/null
+++ b/openstackclient/tests/unit/network/v2/test_network_trunk.py
@@ -0,0 +1,851 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+import argparse
+import copy
+from unittest import mock
+from unittest.mock import call
+
+from osc_lib.cli import format_columns
+from osc_lib import exceptions
+import testtools
+
+from openstackclient.network.v2 import network_trunk
+from openstackclient.tests.unit.identity.v3 import fakes as identity_fakes_v3
+from openstackclient.tests.unit.network.v2 import fakes as network_fakes
+from openstackclient.tests.unit import utils as tests_utils
+
+
+# Tests for Neutron trunks
+#
+class TestNetworkTrunk(network_fakes.TestNetworkV2):
+
+ def setUp(self):
+ super().setUp()
+
+ # Get a shortcut to the network client
+ self.network = self.app.client_manager.network
+ # Get a shortcut to the ProjectManager Mock
+ self.projects_mock = self.app.client_manager.identity.projects
+ # Get a shortcut to the DomainManager Mock
+ self.domains_mock = self.app.client_manager.identity.domains
+
+
+class TestCreateNetworkTrunk(TestNetworkTrunk):
+ project = identity_fakes_v3.FakeProject.create_one_project()
+ domain = identity_fakes_v3.FakeDomain.create_one_domain()
+ trunk_networks = network_fakes.create_networks(count=2)
+ parent_port = network_fakes.create_one_port(
+ attrs={'project_id': project.id,
+ 'network_id': trunk_networks[0]['id']})
+ sub_port = network_fakes.create_one_port(
+ attrs={'project_id': project.id,
+ 'network_id': trunk_networks[1]['id']})
+
+ new_trunk = network_fakes.create_one_trunk(
+ attrs={'project_id': project.id,
+ 'port_id': parent_port['id'],
+ 'sub_ports': {
+ 'port_id': sub_port['id'],
+ 'segmentation_id': 42,
+ 'segmentation_type': 'vlan'}
+ })
+
+ columns = (
+ 'description',
+ 'id',
+ 'is_admin_state_up',
+ 'name',
+ 'port_id',
+ 'project_id',
+ 'status',
+ 'sub_ports',
+ 'tags'
+ )
+ data = (
+ new_trunk.description,
+ new_trunk.id,
+ new_trunk.is_admin_state_up,
+ new_trunk.name,
+ new_trunk.port_id,
+ new_trunk.project_id,
+ new_trunk.status,
+ format_columns.ListDictColumn(new_trunk.sub_ports),
+ [],
+ )
+
+ def setUp(self):
+ super().setUp()
+ self.network.create_trunk = mock.Mock(return_value=self.new_trunk)
+ self.network.find_port = mock.Mock(
+ side_effect=[self.parent_port, self.sub_port])
+
+ # Get the command object to test
+ self.cmd = network_trunk.CreateNetworkTrunk(self.app, self.namespace)
+
+ self.projects_mock.get.return_value = self.project
+ self.domains_mock.get.return_value = self.domain
+
+ def test_create_no_options(self):
+ arglist = []
+ verifylist = []
+
+ self.assertRaises(tests_utils.ParserException, self.check_parser,
+ self.cmd, arglist, verifylist)
+
+ def test_create_default_options(self):
+ arglist = [
+ "--parent-port", self.new_trunk['port_id'],
+ self.new_trunk['name'],
+ ]
+ verifylist = [
+ ('parent_port', self.new_trunk['port_id']),
+ ('name', self.new_trunk['name']),
+ ]
+
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+ columns, data = (self.cmd.take_action(parsed_args))
+
+ self.network.create_trunk.assert_called_once_with(**{
+ 'name': self.new_trunk['name'],
+ 'admin_state_up': self.new_trunk['admin_state_up'],
+ 'port_id': self.new_trunk['port_id'],
+ })
+ self.assertEqual(self.columns, columns)
+ self.assertEqual(self.data, data)
+
+ def test_create_full_options(self):
+ self.new_trunk['description'] = 'foo description'
+ subport = self.new_trunk.sub_ports[0]
+ arglist = [
+ "--disable",
+ "--description", self.new_trunk.description,
+ "--parent-port", self.new_trunk.port_id,
+ "--subport", 'port=%(port)s,segmentation-type=%(seg_type)s,'
+ 'segmentation-id=%(seg_id)s' % {
+ 'seg_id': subport['segmentation_id'],
+ 'seg_type': subport['segmentation_type'],
+ 'port': subport['port_id']},
+ self.new_trunk.name,
+ ]
+ verifylist = [
+ ('name', self.new_trunk.name),
+ ('description', self.new_trunk.description),
+ ('parent_port', self.new_trunk.port_id),
+ ('add_subports', [{
+ 'port': subport['port_id'],
+ 'segmentation-id': str(subport['segmentation_id']),
+ 'segmentation-type': subport['segmentation_type']}]),
+ ('disable', True),
+ ]
+
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ columns, data = (self.cmd.take_action(parsed_args))
+
+ self.network.create_trunk.assert_called_once_with(**{
+ 'name': self.new_trunk.name,
+ 'description': self.new_trunk.description,
+ 'admin_state_up': False,
+ 'port_id': self.new_trunk.port_id,
+ 'sub_ports': [subport],
+ })
+ self.assertEqual(self.columns, columns)
+ data_with_desc = list(self.data)
+ data_with_desc[0] = self.new_trunk['description']
+ data_with_desc = tuple(data_with_desc)
+ self.assertEqual(data_with_desc, data)
+
+ def test_create_trunk_with_subport_invalid_segmentation_id_fail(self):
+ subport = self.new_trunk.sub_ports[0]
+ arglist = [
+ "--parent-port", self.new_trunk.port_id,
+ "--subport", "port=%(port)s,segmentation-type=%(seg_type)s,"
+ "segmentation-id=boom" % {
+ 'seg_type': subport['segmentation_type'],
+ 'port': subport['port_id']},
+ self.new_trunk.name,
+ ]
+ verifylist = [
+ ('name', self.new_trunk.name),
+ ('parent_port', self.new_trunk.port_id),
+ ('add_subports', [{
+ 'port': subport['port_id'],
+ 'segmentation-id': 'boom',
+ 'segmentation-type': subport['segmentation_type']}]),
+ ]
+
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+ ex = self.assertRaises(exceptions.CommandError,
+ self.cmd.take_action, parsed_args)
+ self.assertEqual("Segmentation-id 'boom' is not an integer",
+ str(ex))
+
+ def test_create_network_trunk_subports_without_optional_keys(self):
+ subport = copy.copy(self.new_trunk.sub_ports[0])
+ # Pop out the segmentation-id and segmentation-type
+ subport.pop('segmentation_type')
+ subport.pop('segmentation_id')
+ arglist = [
+ '--parent-port', self.new_trunk.port_id,
+ '--subport', 'port=%(port)s' % {'port': subport['port_id']},
+ self.new_trunk.name,
+ ]
+ verifylist = [
+ ('name', self.new_trunk.name),
+ ('parent_port', self.new_trunk.port_id),
+ ('add_subports', [{
+ 'port': subport['port_id']}]),
+ ]
+
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+ columns, data = (self.cmd.take_action(parsed_args))
+
+ self.network.create_trunk.assert_called_once_with(**{
+ 'name': self.new_trunk.name,
+ 'admin_state_up': True,
+ 'port_id': self.new_trunk.port_id,
+ 'sub_ports': [subport],
+ })
+ self.assertEqual(self.columns, columns)
+ data_with_desc = list(self.data)
+ data_with_desc[0] = self.new_trunk['description']
+ data_with_desc = tuple(data_with_desc)
+ self.assertEqual(data_with_desc, data)
+
+ def test_create_network_trunk_subports_without_required_key_fail(self):
+ subport = self.new_trunk.sub_ports[0]
+ arglist = [
+ '--parent-port', self.new_trunk.port_id,
+ '--subport', 'segmentation-type=%(seg_type)s,'
+ 'segmentation-id=%(seg_id)s' % {
+ 'seg_id': subport['segmentation_id'],
+ 'seg_type': subport['segmentation_type']},
+ self.new_trunk.name,
+ ]
+ verifylist = [
+ ('name', self.new_trunk.name),
+ ('parent_port', self.new_trunk.port_id),
+ ('add_subports', [{
+ 'segmentation_id': str(subport['segmentation_id']),
+ 'segmentation_type': subport['segmentation_type']}]),
+ ]
+
+ with testtools.ExpectedException(argparse.ArgumentTypeError):
+ self.check_parser(self.cmd, arglist, verifylist)
+
+
+class TestDeleteNetworkTrunk(TestNetworkTrunk):
+ # The trunk to be deleted.
+ project = identity_fakes_v3.FakeProject.create_one_project()
+ domain = identity_fakes_v3.FakeDomain.create_one_domain()
+ trunk_networks = network_fakes.create_networks(count=2)
+ parent_port = network_fakes.create_one_port(
+ attrs={'project_id': project.id,
+ 'network_id': trunk_networks[0]['id']})
+ sub_port = network_fakes.create_one_port(
+ attrs={'project_id': project.id,
+ 'network_id': trunk_networks[1]['id']})
+
+ new_trunks = network_fakes.create_trunks(
+ attrs={'project_id': project.id,
+ 'port_id': parent_port['id'],
+ 'sub_ports': {
+ 'port_id': sub_port['id'],
+ 'segmentation_id': 42,
+ 'segmentation_type': 'vlan'}
+ })
+
+ def setUp(self):
+ super().setUp()
+ self.network.find_trunk = mock.Mock(
+ side_effect=[self.new_trunks[0], self.new_trunks[1]])
+ self.network.delete_trunk = mock.Mock(return_value=None)
+ self.network.find_port = mock.Mock(
+ side_effect=[self.parent_port, self.sub_port])
+
+ self.projects_mock.get.return_value = self.project
+ self.domains_mock.get.return_value = self.domain
+
+ # Get the command object to test
+ self.cmd = network_trunk.DeleteNetworkTrunk(self.app, self.namespace)
+
+ def test_delete_trunk(self):
+ arglist = [
+ self.new_trunks[0].name,
+ ]
+ verifylist = [
+ ('trunk', [self.new_trunks[0].name]),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ result = self.cmd.take_action(parsed_args)
+ self.network.delete_trunk.assert_called_once_with(
+ self.new_trunks[0].id)
+ self.assertIsNone(result)
+
+ def test_delete_trunk_multiple(self):
+ arglist = []
+ verifylist = []
+
+ for t in self.new_trunks:
+ arglist.append(t['name'])
+ verifylist = [
+ ('trunk', arglist),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ result = self.cmd.take_action(parsed_args)
+
+ calls = []
+ for t in self.new_trunks:
+ calls.append(call(t.id))
+ self.network.delete_trunk.assert_has_calls(calls)
+ self.assertIsNone(result)
+
+ def test_delete_trunk_multiple_with_exception(self):
+ arglist = [
+ self.new_trunks[0].name,
+ 'unexist_trunk',
+ ]
+ verifylist = [
+ ('trunk',
+ [self.new_trunks[0].name, 'unexist_trunk']),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ self.network.find_trunk = mock.Mock(
+ side_effect=[self.new_trunks[0], exceptions.CommandError])
+ with testtools.ExpectedException(exceptions.CommandError) as e:
+ self.cmd.take_action(parsed_args)
+ self.assertEqual('1 of 2 trunks failed to delete.', str(e))
+ self.network.delete_trunk.assert_called_once_with(
+ self.new_trunks[0].id
+ )
+
+
+class TestShowNetworkTrunk(TestNetworkTrunk):
+
+ project = identity_fakes_v3.FakeProject.create_one_project()
+ domain = identity_fakes_v3.FakeDomain.create_one_domain()
+ # The trunk to show.
+ new_trunk = network_fakes.create_one_trunk()
+ columns = (
+ 'description',
+ 'id',
+ 'is_admin_state_up',
+ 'name',
+ 'port_id',
+ 'project_id',
+ 'status',
+ 'sub_ports',
+ 'tags'
+ )
+ data = (
+ new_trunk.description,
+ new_trunk.id,
+ new_trunk.is_admin_state_up,
+ new_trunk.name,
+ new_trunk.port_id,
+ new_trunk.project_id,
+ new_trunk.status,
+ format_columns.ListDictColumn(new_trunk.sub_ports),
+ [],
+ )
+
+ def setUp(self):
+ super().setUp()
+ self.network.find_trunk = mock.Mock(return_value=self.new_trunk)
+ self.network.get_trunk = mock.Mock(return_value=self.new_trunk)
+
+ self.projects_mock.get.return_value = self.project
+ self.domains_mock.get.return_value = self.domain
+
+ # Get the command object to test
+ self.cmd = network_trunk.ShowNetworkTrunk(self.app, self.namespace)
+
+ def test_show_no_options(self):
+ arglist = []
+ verifylist = []
+
+ self.assertRaises(tests_utils.ParserException, self.check_parser,
+ self.cmd, arglist, verifylist)
+
+ def test_show_all_options(self):
+ arglist = [
+ self.new_trunk.id,
+ ]
+ verifylist = [
+ ('trunk', self.new_trunk.id),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ columns, data = self.cmd.take_action(parsed_args)
+
+ self.network.get_trunk.assert_called_once_with(self.new_trunk.id)
+ self.assertEqual(self.columns, columns)
+ self.assertEqual(self.data, data)
+
+
+class TestListNetworkTrunk(TestNetworkTrunk):
+ project = identity_fakes_v3.FakeProject.create_one_project()
+ domain = identity_fakes_v3.FakeDomain.create_one_domain()
+ # Create trunks to be listed.
+ new_trunks = network_fakes.create_trunks(
+ {'created_at': '2001-01-01 00:00:00',
+ 'updated_at': '2001-01-01 00:00:00'}, count=3)
+
+ columns = (
+ 'ID',
+ 'Name',
+ 'Parent Port',
+ 'Description'
+ )
+ columns_long = columns + (
+ 'Status',
+ 'State',
+ 'Created At',
+ 'Updated At'
+ )
+ data = []
+ for t in new_trunks:
+ data.append((
+ t['id'],
+ t['name'],
+ t['port_id'],
+ t['description']
+ ))
+ data_long = []
+ for t in new_trunks:
+ data_long.append((
+ t['id'],
+ t['name'],
+ t['port_id'],
+ t['description'],
+ t['status'],
+ network_trunk.AdminStateColumn(''),
+ '2001-01-01 00:00:00',
+ '2001-01-01 00:00:00',
+ ))
+
+ def setUp(self):
+ super().setUp()
+ self.network.trunks = mock.Mock(return_value=self.new_trunks)
+
+ self.projects_mock.get.return_value = self.project
+ self.domains_mock.get.return_value = self.domain
+
+ # Get the command object to test
+ self.cmd = network_trunk.ListNetworkTrunk(self.app, self.namespace)
+
+ def test_trunk_list_no_option(self):
+ arglist = []
+ verifylist = []
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ columns, data = self.cmd.take_action(parsed_args)
+
+ self.network.trunks.assert_called_once_with()
+ self.assertEqual(self.columns, columns)
+ self.assertEqual(self.data, list(data))
+
+ def test_trunk_list_long(self):
+ arglist = [
+ '--long',
+ ]
+ verifylist = [
+ ('long', True),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ columns, data = self.cmd.take_action(parsed_args)
+
+ self.network.trunks.assert_called_once_with()
+ self.assertEqual(self.columns_long, columns)
+ self.assertEqual(self.data_long, list(data))
+
+
+class TestSetNetworkTrunk(TestNetworkTrunk):
+
+ project = identity_fakes_v3.FakeProject.create_one_project()
+ domain = identity_fakes_v3.FakeDomain.create_one_domain()
+ trunk_networks = network_fakes.create_networks(count=2)
+ parent_port = network_fakes.create_one_port(
+ attrs={'project_id': project.id,
+ 'network_id': trunk_networks[0]['id']})
+ sub_port = network_fakes.create_one_port(
+ attrs={'project_id': project.id,
+ 'network_id': trunk_networks[1]['id']})
+ # The trunk to set.
+ _trunk = network_fakes.create_one_trunk(
+ attrs={'project_id': project.id,
+ 'port_id': parent_port['id'],
+ 'sub_ports': {
+ 'port_id': sub_port['id'],
+ 'segmentation_id': 42,
+ 'segmentation_type': 'vlan'}
+ })
+ columns = (
+ 'admin_state_up',
+ 'id',
+ 'name',
+ 'description',
+ 'port_id',
+ 'project_id',
+ 'status',
+ 'sub_ports',
+ )
+ data = (
+ _trunk.id,
+ _trunk.name,
+ _trunk.description,
+ _trunk.port_id,
+ _trunk.project_id,
+ _trunk.status,
+ format_columns.ListDictColumn(_trunk.sub_ports),
+ )
+
+ def setUp(self):
+ super().setUp()
+ self.network.update_trunk = mock.Mock(return_value=self._trunk)
+ self.network.add_trunk_subports = mock.Mock(return_value=self._trunk)
+ self.network.find_trunk = mock.Mock(return_value=self._trunk)
+ self.network.find_port = mock.Mock(
+ side_effect=[self.sub_port, self.sub_port])
+
+ self.projects_mock.get.return_value = self.project
+ self.domains_mock.get.return_value = self.domain
+
+ # Get the command object to test
+ self.cmd = network_trunk.SetNetworkTrunk(self.app, self.namespace)
+
+ def _test_set_network_trunk_attr(self, attr, value):
+ arglist = [
+ '--%s' % attr, value,
+ self._trunk[attr],
+ ]
+ verifylist = [
+ (attr, value),
+ ('trunk', self._trunk[attr]),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ result = self.cmd.take_action(parsed_args)
+
+ attrs = {
+ attr: value,
+ }
+ self.network.update_trunk.assert_called_once_with(
+ self._trunk, **attrs)
+ self.assertIsNone(result)
+
+ def test_set_network_trunk_name(self):
+ self._test_set_network_trunk_attr('name', 'trunky')
+
+ def test_set_network_trunk_description(self):
+ self._test_set_network_trunk_attr('description', 'description')
+
+ def test_set_network_trunk_admin_state_up_disable(self):
+ arglist = [
+ '--disable',
+ self._trunk['name'],
+ ]
+ verifylist = [
+ ('disable', True),
+ ('trunk', self._trunk['name']),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ result = self.cmd.take_action(parsed_args)
+
+ attrs = {
+ 'admin_state_up': False,
+ }
+ self.network.update_trunk.assert_called_once_with(
+ self._trunk, **attrs)
+ self.assertIsNone(result)
+
+ def test_set_network_trunk_admin_state_up_enable(self):
+ arglist = [
+ '--enable',
+ self._trunk['name'],
+ ]
+ verifylist = [
+ ('enable', True),
+ ('trunk', self._trunk['name']),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ result = self.cmd.take_action(parsed_args)
+
+ attrs = {
+ 'admin_state_up': True,
+ }
+ self.network.update_trunk.assert_called_once_with(
+ self._trunk, **attrs)
+ self.assertIsNone(result)
+
+ def test_set_network_trunk_nothing(self):
+ arglist = [self._trunk['name'], ]
+ verifylist = [('trunk', self._trunk['name']), ]
+
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+ result = self.cmd.take_action(parsed_args)
+
+ attrs = {}
+ self.network.update_trunk.assert_called_once_with(
+ self._trunk, **attrs)
+ self.assertIsNone(result)
+
+ def test_set_network_trunk_subports(self):
+ subport = self._trunk['sub_ports'][0]
+ arglist = [
+ '--subport', 'port=%(port)s,segmentation-type=%(seg_type)s,'
+ 'segmentation-id=%(seg_id)s' % {
+ 'seg_id': subport['segmentation_id'],
+ 'seg_type': subport['segmentation_type'],
+ 'port': subport['port_id']},
+ self._trunk['name'],
+ ]
+ verifylist = [
+ ('trunk', self._trunk['name']),
+ ('set_subports', [{
+ 'port': subport['port_id'],
+ 'segmentation-id': str(subport['segmentation_id']),
+ 'segmentation-type': subport['segmentation_type']}]),
+ ]
+
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+ result = self.cmd.take_action(parsed_args)
+
+ self.network.add_trunk_subports.assert_called_once_with(
+ self._trunk, [subport])
+ self.assertIsNone(result)
+
+ def test_set_network_trunk_subports_without_optional_keys(self):
+ subport = copy.copy(self._trunk['sub_ports'][0])
+ # Pop out the segmentation-id and segmentation-type
+ subport.pop('segmentation_type')
+ subport.pop('segmentation_id')
+ arglist = [
+ '--subport', 'port=%(port)s' % {'port': subport['port_id']},
+ self._trunk['name'],
+ ]
+ verifylist = [
+ ('trunk', self._trunk['name']),
+ ('set_subports', [{
+ 'port': subport['port_id']}]),
+ ]
+
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+ result = self.cmd.take_action(parsed_args)
+
+ self.network.add_trunk_subports.assert_called_once_with(
+ self._trunk, [subport])
+ self.assertIsNone(result)
+
+ def test_set_network_trunk_subports_without_required_key_fail(self):
+ subport = self._trunk['sub_ports'][0]
+ arglist = [
+ '--subport', 'segmentation-type=%(seg_type)s,'
+ 'segmentation-id=%(seg_id)s' % {
+ 'seg_id': subport['segmentation_id'],
+ 'seg_type': subport['segmentation_type']},
+ self._trunk['name'],
+ ]
+ verifylist = [
+ ('trunk', self._trunk['name']),
+ ('set_subports', [{
+ 'segmentation-id': str(subport['segmentation_id']),
+ 'segmentation-type': subport['segmentation_type']}]),
+ ]
+
+ with testtools.ExpectedException(argparse.ArgumentTypeError):
+ self.check_parser(self.cmd, arglist, verifylist)
+
+ self.network.add_trunk_subports.assert_not_called()
+
+ def test_set_trunk_attrs_with_exception(self):
+ arglist = [
+ '--name', 'reallylongname',
+ self._trunk['name'],
+ ]
+ verifylist = [
+ ('trunk', self._trunk['name']),
+ ('name', 'reallylongname'),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ self.network.update_trunk = (
+ mock.Mock(side_effect=exceptions.CommandError)
+ )
+ with testtools.ExpectedException(exceptions.CommandError) as e:
+ self.cmd.take_action(parsed_args)
+ self.assertEqual(
+ "Failed to set trunk '%s': " % self._trunk['name'],
+ str(e))
+ attrs = {'name': 'reallylongname'}
+ self.network.update_trunk.assert_called_once_with(
+ self._trunk, **attrs)
+ self.network.add_trunk_subports.assert_not_called()
+
+ def test_set_trunk_add_subport_with_exception(self):
+ arglist = [
+ '--subport', 'port=invalid_subport',
+ self._trunk['name'],
+ ]
+ verifylist = [
+ ('trunk', self._trunk['name']),
+ ('set_subports', [{'port': 'invalid_subport'}]),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ self.network.add_trunk_subports = (
+ mock.Mock(side_effect=exceptions.CommandError)
+ )
+ self.network.find_port = (mock.Mock(
+ return_value={'id': 'invalid_subport'}))
+ with testtools.ExpectedException(exceptions.CommandError) as e:
+ self.cmd.take_action(parsed_args)
+ self.assertEqual(
+ "Failed to add subports to trunk '%s': " % self._trunk['name'],
+ str(e))
+ self.network.update_trunk.assert_called_once_with(
+ self._trunk)
+ self.network.add_trunk_subports.assert_called_once_with(
+ self._trunk, [{'port_id': 'invalid_subport'}])
+
+
+class TestListNetworkSubport(TestNetworkTrunk):
+
+ _trunk = network_fakes.create_one_trunk()
+ _subports = _trunk['sub_ports']
+
+ columns = (
+ 'Port',
+ 'Segmentation Type',
+ 'Segmentation ID',
+ )
+ data = []
+ for s in _subports:
+ data.append((
+ s['port_id'],
+ s['segmentation_type'],
+ s['segmentation_id'],
+ ))
+
+ def setUp(self):
+ super().setUp()
+
+ self.network.find_trunk = mock.Mock(return_value=self._trunk)
+ self.network.get_trunk_subports = mock.Mock(
+ return_value={network_trunk.SUB_PORTS: self._subports})
+
+ # Get the command object to test
+ self.cmd = network_trunk.ListNetworkSubport(self.app, self.namespace)
+
+ def test_subport_list(self):
+ arglist = [
+ '--trunk', self._trunk['name'],
+ ]
+ verifylist = [
+ ('trunk', self._trunk['name']),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+ columns, data = self.cmd.take_action(parsed_args)
+
+ self.network.get_trunk_subports.assert_called_once_with(self._trunk)
+ self.assertEqual(self.columns, columns)
+ self.assertEqual(self.data, list(data))
+
+
+class TestUnsetNetworkTrunk(TestNetworkTrunk):
+ project = identity_fakes_v3.FakeProject.create_one_project()
+ domain = identity_fakes_v3.FakeDomain.create_one_domain()
+ trunk_networks = network_fakes.create_networks(count=2)
+ parent_port = network_fakes.create_one_port(
+ attrs={'project_id': project.id,
+ 'network_id': trunk_networks[0]['id']})
+ sub_port = network_fakes.create_one_port(
+ attrs={'project_id': project.id,
+ 'network_id': trunk_networks[1]['id']})
+ _trunk = network_fakes.create_one_trunk(
+ attrs={'project_id': project.id,
+ 'port_id': parent_port['id'],
+ 'sub_ports': {
+ 'port_id': sub_port['id'],
+ 'segmentation_id': 42,
+ 'segmentation_type': 'vlan'}
+ })
+
+ columns = (
+ 'admin_state_up',
+ 'id',
+ 'name',
+ 'port_id',
+ 'project_id',
+ 'status',
+ 'sub_ports',
+ )
+ data = (
+ network_trunk.AdminStateColumn(_trunk['admin_state_up']),
+ _trunk['id'],
+ _trunk['name'],
+ _trunk['port_id'],
+ _trunk['project_id'],
+ _trunk['status'],
+ format_columns.ListDictColumn(_trunk['sub_ports']),
+ )
+
+ def setUp(self):
+ super().setUp()
+
+ self.network.find_trunk = mock.Mock(return_value=self._trunk)
+ self.network.find_port = mock.Mock(
+ side_effect=[self.sub_port, self.sub_port])
+ self.network.delete_trunk_subports = mock.Mock(return_value=None)
+
+ # Get the command object to test
+ self.cmd = network_trunk.UnsetNetworkTrunk(self.app, self.namespace)
+
+ def test_unset_network_trunk_subport(self):
+ subport = self._trunk['sub_ports'][0]
+ arglist = [
+ "--subport", subport['port_id'],
+ self._trunk['name'],
+ ]
+
+ verifylist = [
+ ('trunk', self._trunk['name']),
+ ('unset_subports', [subport['port_id']]),
+ ]
+
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ result = self.cmd.take_action(parsed_args)
+
+ self.network.delete_trunk_subports.assert_called_once_with(
+ self._trunk,
+ [{'port_id': subport['port_id']}]
+ )
+ self.assertIsNone(result)
+
+ def test_unset_subport_no_arguments_fail(self):
+ arglist = [
+ self._trunk['name'],
+ ]
+ verifylist = [
+ ('trunk', self._trunk['name']),
+ ]
+ self.assertRaises(tests_utils.ParserException,
+ self.check_parser, self.cmd, arglist, verifylist)
diff --git a/openstackclient/tests/unit/volume/v3/fakes.py b/openstackclient/tests/unit/volume/v3/fakes.py
index 3e3a05fa..62383580 100644
--- a/openstackclient/tests/unit/volume/v3/fakes.py
+++ b/openstackclient/tests/unit/volume/v3/fakes.py
@@ -47,6 +47,10 @@ class FakeVolumeClient:
self.volumes.resource_class = fakes.FakeResource(None, {})
self.volume_types = mock.Mock()
self.volume_types.resource_class = fakes.FakeResource(None, {})
+ self.services = mock.Mock()
+ self.services.resource_class = fakes.FakeResource(None, {})
+ self.workers = mock.Mock()
+ self.workers.resource_class = fakes.FakeResource(None, {})
class TestVolume(utils.TestCommand):
@@ -436,3 +440,88 @@ def get_volume_attachments(attachments=None, count=2):
attachments = create_volume_attachments(count)
return mock.Mock(side_effect=attachments)
+
+
+def create_service_log_level_entry(attrs=None):
+ service_log_level_info = {
+ 'host': 'host_test',
+ 'binary': 'cinder-api',
+ 'prefix': 'cinder.api.common',
+ 'level': 'DEBUG',
+ }
+ # Overwrite default attributes if there are some attributes set
+ attrs = attrs or {}
+
+ service_log_level_info.update(attrs)
+
+ service_log_level = fakes.FakeResource(
+ None, service_log_level_info, loaded=True)
+ return service_log_level
+
+
+def create_cleanup_records():
+ """Create fake service cleanup records.
+
+ :return: A tuple of (cleaning, unavailable) lists of FakeResource objects
+ """
+ cleaning_records = []
+ unavailable_records = []
+ cleaning_work_info = {
+ 'id': 1,
+ 'host': 'devstack@fakedriver-1',
+ 'binary': 'cinder-volume',
+ 'cluster_name': 'fake_cluster',
+ }
+ unavailable_work_info = {
+ 'id': 2,
+ 'host': 'devstack@fakedriver-2',
+ 'binary': 'cinder-scheduler',
+ 'cluster_name': 'new_cluster',
+ }
+ cleaning_records.append(cleaning_work_info)
+ unavailable_records.append(unavailable_work_info)
+
+ cleaning = [fakes.FakeResource(
+ None, obj, loaded=True) for obj in cleaning_records]
+ unavailable = [fakes.FakeResource(
+ None, obj, loaded=True) for obj in unavailable_records]
+
+ return cleaning, unavailable
+
+
+def create_one_manage_record(attrs=None, snapshot=False):
+ manage_dict = {
+ 'reference': {'source-name': 'fake-volume'},
+ 'size': '1',
+ 'safe_to_manage': False,
+ 'reason_not_safe': 'already managed',
+ 'cinder_id': 'fake-volume',
+ 'extra_info': None,
+ }
+ if snapshot:
+ manage_dict['source_reference'] = {'source-name': 'fake-source'}
+
+ # Overwrite default attributes if there are some attributes set
+ attrs = attrs or {}
+
+ manage_dict.update(attrs)
+ manage_record = fakes.FakeResource(None, manage_dict, loaded=True)
+ return manage_record
+
+
+def create_volume_manage_list_records(count=2):
+ volume_manage_list = []
+ for i in range(count):
+ volume_manage_list.append(
+ create_one_manage_record({'size': str(i + 1)}))
+
+ return volume_manage_list
+
+
+def create_snapshot_manage_list_records(count=2):
+ snapshot_manage_list = []
+ for i in range(count):
+ snapshot_manage_list.append(
+ create_one_manage_record({'size': str(i + 1)}, snapshot=True))
+
+ return snapshot_manage_list
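+
+
+# Illustrative usage only (an assumption, not exercised elsewhere in this
+# patch): each factory accepts an ``attrs`` dict to override its defaults,
+# e.g. ``create_one_manage_record({'size': '3'}, snapshot=True)``.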
diff --git a/openstackclient/tests/unit/volume/v3/test_block_storage_cleanup.py b/openstackclient/tests/unit/volume/v3/test_block_storage_cleanup.py
new file mode 100644
index 00000000..b48ce2f9
--- /dev/null
+++ b/openstackclient/tests/unit/volume/v3/test_block_storage_cleanup.py
@@ -0,0 +1,178 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import uuid
+
+from cinderclient import api_versions
+from osc_lib import exceptions
+
+from openstackclient.tests.unit.volume.v3 import fakes as volume_fakes
+from openstackclient.volume.v3 import block_storage_cleanup
+
+
+class TestBlockStorage(volume_fakes.TestVolume):
+
+ def setUp(self):
+ super().setUp()
+
+ # Get a shortcut to the BlockStorageWorkerManager Mock
+ self.worker_mock = self.app.client_manager.volume.workers
+ self.worker_mock.reset_mock()
+
+
+class TestBlockStorageCleanup(TestBlockStorage):
+
+ cleaning, unavailable = volume_fakes.create_cleanup_records()
+
+ def setUp(self):
+ super().setUp()
+
+ self.worker_mock.clean.return_value = (self.cleaning, self.unavailable)
+
+ # Get the command object to test
+ self.cmd = \
+ block_storage_cleanup.BlockStorageCleanup(self.app, None)
+
+ def test_cleanup(self):
+ self.app.client_manager.volume.api_version = \
+ api_versions.APIVersion('3.24')
+
+ arglist = [
+ ]
+ verifylist = [
+ ('cluster', None),
+ ('host', None),
+ ('binary', None),
+ ('is_up', None),
+ ('disabled', None),
+ ('resource_id', None),
+ ('resource_type', None),
+ ('service_id', None),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ expected_columns = ('ID', 'Cluster Name', 'Host', 'Binary', 'Status')
+ cleaning_data = tuple(
+ (
+ obj.id,
+ obj.cluster_name,
+ obj.host,
+ obj.binary,
+ 'Cleaning'
+ ) for obj in self.cleaning
+ )
+ unavailable_data = tuple(
+ (
+ obj.id,
+ obj.cluster_name,
+ obj.host,
+ obj.binary,
+ 'Unavailable'
+ ) for obj in self.unavailable
+ )
+ expected_data = cleaning_data + unavailable_data
+ columns, data = self.cmd.take_action(parsed_args)
+
+ self.assertEqual(expected_columns, columns)
+ self.assertEqual(expected_data, tuple(data))
+
+ # Check that the proper cleanup call was made. Since parameters with a
+ # None value are ignored, no arguments are passed to the API.
+ self.worker_mock.clean.assert_called_once_with()
+
+ def test_block_storage_cleanup_pre_324(self):
+ arglist = [
+ ]
+ verifylist = [
+ ('cluster', None),
+ ('host', None),
+ ('binary', None),
+ ('is_up', None),
+ ('disabled', None),
+ ('resource_id', None),
+ ('resource_type', None),
+ ('service_id', None),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+ exc = self.assertRaises(exceptions.CommandError, self.cmd.take_action,
+ parsed_args)
+ self.assertIn(
+ '--os-volume-api-version 3.24 or greater is required', str(exc))
+
+ def test_cleanup_with_args(self):
+ self.app.client_manager.volume.api_version = \
+ api_versions.APIVersion('3.24')
+
+ fake_cluster = 'fake-cluster'
+ fake_host = 'fake-host'
+ fake_binary = 'fake-service'
+ fake_resource_id = str(uuid.uuid4())
+ fake_resource_type = 'Volume'
+ fake_service_id = 1
+ arglist = [
+ '--cluster', fake_cluster,
+ '--host', fake_host,
+ '--binary', fake_binary,
+ '--down',
+ '--enabled',
+ '--resource-id', fake_resource_id,
+ '--resource-type', fake_resource_type,
+ '--service-id', str(fake_service_id),
+ ]
+ verifylist = [
+ ('cluster', fake_cluster),
+ ('host', fake_host),
+ ('binary', fake_binary),
+ ('is_up', False),
+ ('disabled', False),
+ ('resource_id', fake_resource_id),
+ ('resource_type', fake_resource_type),
+ ('service_id', fake_service_id),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ expected_columns = ('ID', 'Cluster Name', 'Host', 'Binary', 'Status')
+ cleaning_data = tuple(
+ (
+ obj.id,
+ obj.cluster_name,
+ obj.host,
+ obj.binary,
+ 'Cleaning'
+ ) for obj in self.cleaning
+ )
+ unavailable_data = tuple(
+ (
+ obj.id,
+ obj.cluster_name,
+ obj.host,
+ obj.binary,
+ 'Unavailable'
+ ) for obj in self.unavailable
+ )
+ expected_data = cleaning_data + unavailable_data
+ columns, data = self.cmd.take_action(parsed_args)
+
+ self.assertEqual(expected_columns, columns)
+ self.assertEqual(expected_data, tuple(data))
+
+ # checking if proper call was made to cleanup resources
+ self.worker_mock.clean.assert_called_once_with(
+ cluster_name=fake_cluster,
+ host=fake_host,
+ binary=fake_binary,
+ is_up=False,
+ disabled=False,
+ resource_id=fake_resource_id,
+ resource_type=fake_resource_type,
+ service_id=fake_service_id)
diff --git a/openstackclient/tests/unit/volume/v3/test_block_storage_log_level.py b/openstackclient/tests/unit/volume/v3/test_block_storage_log_level.py
new file mode 100644
index 00000000..35ea6274
--- /dev/null
+++ b/openstackclient/tests/unit/volume/v3/test_block_storage_log_level.py
@@ -0,0 +1,233 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+from cinderclient import api_versions
+import ddt
+from osc_lib import exceptions
+
+from openstackclient.tests.unit import utils as tests_utils
+from openstackclient.tests.unit.volume.v3 import fakes as volume_fakes
+from openstackclient.volume.v3 import block_storage_log_level as service
+
+
+class TestService(volume_fakes.TestVolume):
+
+ def setUp(self):
+ super().setUp()
+
+ # Get a shortcut to the ServiceManager Mock
+ self.service_mock = self.app.client_manager.volume.services
+ self.service_mock.reset_mock()
+
+
+class TestBlockStorageLogLevelList(TestService):
+
+ service_log = volume_fakes.create_service_log_level_entry()
+
+ def setUp(self):
+ super().setUp()
+
+ self.service_mock.get_log_levels.return_value = [self.service_log]
+
+ # Get the command object to test
+ self.cmd = service.BlockStorageLogLevelList(self.app, None)
+
+ def test_block_storage_log_level_list(self):
+ self.app.client_manager.volume.api_version = \
+ api_versions.APIVersion('3.32')
+ arglist = [
+ '--host', self.service_log.host,
+ '--service', self.service_log.binary,
+ '--log-prefix', self.service_log.prefix,
+ ]
+ verifylist = [
+ ('host', self.service_log.host),
+ ('service', self.service_log.binary),
+ ('log_prefix', self.service_log.prefix),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ columns, data = self.cmd.take_action(parsed_args)
+
+ expected_columns = [
+ 'Binary',
+ 'Host',
+ 'Prefix',
+ 'Level',
+ ]
+
+ # confirming if all expected columns are present in the result.
+ self.assertEqual(expected_columns, columns)
+
+ datalist = ((
+ self.service_log.binary,
+ self.service_log.host,
+ self.service_log.prefix,
+ self.service_log.level,
+ ), )
+
+ # confirming if all expected values are present in the result.
+ self.assertEqual(datalist, tuple(data))
+
+ # checking if proper call was made to get log level of services
+ self.service_mock.get_log_levels.assert_called_with(
+ server=self.service_log.host,
+ binary=self.service_log.binary,
+ prefix=self.service_log.prefix,
+ )
+
+ def test_block_storage_log_level_list_pre_332(self):
+ arglist = [
+ '--host', self.service_log.host,
+ '--service', 'cinder-api',
+ '--log-prefix', 'cinder_test.api.common',
+ ]
+ verifylist = [
+ ('host', self.service_log.host),
+ ('service', 'cinder-api'),
+ ('log_prefix', 'cinder_test.api.common'),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ exc = self.assertRaises(exceptions.CommandError, self.cmd.take_action,
+ parsed_args)
+ self.assertIn(
+ '--os-volume-api-version 3.32 or greater is required', str(exc))
+
+ def test_block_storage_log_level_list_invalid_service_name(self):
+ self.app.client_manager.volume.api_version = \
+ api_versions.APIVersion('3.32')
+ arglist = [
+ '--host', self.service_log.host,
+ '--service', 'nova-api',
+ '--log-prefix', 'cinder_test.api.common',
+ ]
+ verifylist = [
+ ('host', self.service_log.host),
+ ('service', 'nova-api'),
+ ('log_prefix', 'cinder_test.api.common'),
+ ]
+
+ self.assertRaises(tests_utils.ParserException, self.check_parser,
+ self.cmd, arglist, verifylist)
+
+
+@ddt.ddt
+class TestBlockStorageLogLevelSet(TestService):
+
+ service_log = volume_fakes.create_service_log_level_entry()
+
+ def setUp(self):
+ super().setUp()
+
+ # Get the command object to test
+ self.cmd = service.BlockStorageLogLevelSet(self.app, None)
+
+ def test_block_storage_log_level_set(self):
+ self.app.client_manager.volume.api_version = \
+ api_versions.APIVersion('3.32')
+ arglist = [
+ 'ERROR',
+ '--host', self.service_log.host,
+ '--service', self.service_log.binary,
+ '--log-prefix', self.service_log.prefix,
+ ]
+ verifylist = [
+ ('level', 'ERROR'),
+ ('host', self.service_log.host),
+ ('service', self.service_log.binary),
+ ('log_prefix', self.service_log.prefix),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ self.cmd.take_action(parsed_args)
+
+ # checking if proper call was made to set log level of services
+ self.service_mock.set_log_levels.assert_called_with(
+ level='ERROR',
+ server=self.service_log.host,
+ binary=self.service_log.binary,
+ prefix=self.service_log.prefix,
+ )
+
+ def test_block_storage_log_level_set_pre_332(self):
+ arglist = [
+ 'ERROR',
+ '--host', self.service_log.host,
+ '--service', 'cinder-api',
+ '--log-prefix', 'cinder_test.api.common',
+ ]
+ verifylist = [
+ ('level', 'ERROR'),
+ ('host', self.service_log.host),
+ ('service', 'cinder-api'),
+ ('log_prefix', 'cinder_test.api.common'),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ exc = self.assertRaises(exceptions.CommandError, self.cmd.take_action,
+ parsed_args)
+ self.assertIn(
+ '--os-volume-api-version 3.32 or greater is required', str(exc))
+
+ def test_block_storage_log_level_set_invalid_service_name(self):
+ self.app.client_manager.volume.api_version = \
+ api_versions.APIVersion('3.32')
+ arglist = [
+ 'ERROR',
+ '--host', self.service_log.host,
+ '--service', 'nova-api',
+ '--log-prefix', 'cinder.api.common',
+ ]
+ verifylist = [
+ ('level', 'ERROR'),
+ ('host', self.service_log.host),
+ ('service', 'nova-api'),
+ ('log_prefix', 'cinder.api.common'),
+ ]
+
+ self.assertRaises(tests_utils.ParserException, self.check_parser,
+ self.cmd, arglist, verifylist)
+
+ @ddt.data('WARNING', 'info', 'Error', 'debuG', 'fake-log-level')
+ def test_block_storage_log_level_set_log_level(self, log_level):
+ self.app.client_manager.volume.api_version = \
+ api_versions.APIVersion('3.32')
+ arglist = [
+ log_level,
+ '--host', self.service_log.host,
+ '--service', 'cinder-api',
+ '--log-prefix', 'cinder.api.common',
+ ]
+ verifylist = [
+ ('level', log_level.upper()),
+ ('host', self.service_log.host),
+ ('service', 'cinder-api'),
+ ('log_prefix', 'cinder.api.common'),
+ ]
+
+ if log_level == 'fake-log-level':
+ self.assertRaises(tests_utils.ParserException, self.check_parser,
+ self.cmd, arglist, verifylist)
+ else:
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ self.cmd.take_action(parsed_args)
+
+ # checking if proper call was made to set log level of services
+ self.service_mock.set_log_levels.assert_called_with(
+ level=log_level.upper(),
+ server=self.service_log.host,
+ binary=self.service_log.binary,
+ prefix=self.service_log.prefix)
diff --git a/openstackclient/tests/unit/volume/v3/test_block_storage_manage.py b/openstackclient/tests/unit/volume/v3/test_block_storage_manage.py
new file mode 100644
index 00000000..afd0fd35
--- /dev/null
+++ b/openstackclient/tests/unit/volume/v3/test_block_storage_manage.py
@@ -0,0 +1,411 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+from cinderclient import api_versions
+from osc_lib import exceptions
+
+from openstackclient.tests.unit import utils as tests_utils
+from openstackclient.tests.unit.volume.v2 import fakes as v2_volume_fakes
+from openstackclient.tests.unit.volume.v3 import fakes as volume_fakes
+from openstackclient.volume.v3 import block_storage_manage
+
+
+class TestBlockStorageManage(v2_volume_fakes.TestVolume):
+
+ def setUp(self):
+ super().setUp()
+
+ self.volumes_mock = self.app.client_manager.volume.volumes
+ self.volumes_mock.reset_mock()
+ self.snapshots_mock = self.app.client_manager.volume.volume_snapshots
+ self.snapshots_mock.reset_mock()
+
+
+class TestBlockStorageVolumeManage(TestBlockStorageManage):
+
+ volume_manage_list = volume_fakes.create_volume_manage_list_records()
+
+ def setUp(self):
+ super().setUp()
+
+ self.volumes_mock.list_manageable.return_value = (
+ self.volume_manage_list)
+
+ # Get the command object to test
+ self.cmd = block_storage_manage.BlockStorageManageVolumes(
+ self.app, None)
+
+ def test_block_storage_volume_manage_list(self):
+ self.app.client_manager.volume.api_version = \
+ api_versions.APIVersion('3.8')
+ host = 'fake_host'
+ arglist = [
+ host,
+ ]
+ verifylist = [
+ ('host', host),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ columns, data = self.cmd.take_action(parsed_args)
+
+ expected_columns = [
+ 'reference',
+ 'size',
+ 'safe_to_manage',
+ 'reason_not_safe',
+ 'cinder_id',
+ 'extra_info',
+ ]
+
+ # confirming if all expected columns are present in the result.
+ self.assertEqual(expected_columns, columns)
+
+ datalist = []
+ for volume_record in self.volume_manage_list:
+ manage_details = (
+ volume_record.reference,
+ volume_record.size,
+ volume_record.safe_to_manage,
+ volume_record.reason_not_safe,
+ volume_record.cinder_id,
+ volume_record.extra_info,
+ )
+ datalist.append(manage_details)
+ datalist = tuple(datalist)
+
+ # confirming if all expected values are present in the result.
+ self.assertEqual(datalist, tuple(data))
+
+ # checking if proper call was made to get volume manageable list
+ self.volumes_mock.list_manageable.assert_called_with(
+ host=parsed_args.host,
+ detailed=parsed_args.detailed,
+ marker=parsed_args.marker,
+ limit=parsed_args.limit,
+ offset=parsed_args.offset,
+ sort=parsed_args.sort,
+ cluster=parsed_args.cluster,
+ )
+
+ def test_block_storage_volume_manage_pre_38(self):
+ host = 'fake_host'
+ arglist = [
+ host,
+ ]
+ verifylist = [
+ ('host', host),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ exc = self.assertRaises(exceptions.CommandError, self.cmd.take_action,
+ parsed_args)
+ self.assertIn(
+ '--os-volume-api-version 3.8 or greater is required', str(exc))
+
+ def test_block_storage_volume_manage_pre_317(self):
+ self.app.client_manager.volume.api_version = \
+ api_versions.APIVersion('3.16')
+ cluster = 'fake_cluster'
+ arglist = [
+ '--cluster', cluster,
+ ]
+ verifylist = [
+ ('cluster', cluster),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ exc = self.assertRaises(exceptions.CommandError, self.cmd.take_action,
+ parsed_args)
+ self.assertIn(
+ '--os-volume-api-version 3.17 or greater is required', str(exc))
+ self.assertIn('--cluster', str(exc))
+
+ def test_block_storage_volume_manage_host_and_cluster(self):
+ self.app.client_manager.volume.api_version = \
+ api_versions.APIVersion('3.17')
+ host = 'fake_host'
+ cluster = 'fake_cluster'
+ arglist = [
+ host,
+ '--cluster', cluster,
+ ]
+ verifylist = [
+ ('host', host),
+ ('cluster', cluster),
+ ]
+ exc = self.assertRaises(tests_utils.ParserException,
+ self.check_parser, self.cmd,
+ arglist, verifylist)
+ self.assertIn(
+ 'argument --cluster: not allowed with argument <host>', str(exc))
+
+ def test_block_storage_volume_manage_list_all_args(self):
+ self.app.client_manager.volume.api_version = \
+ api_versions.APIVersion('3.8')
+ host = 'fake_host'
+ detailed = True
+ marker = 'fake_marker'
+ limit = '5'
+ offset = '3'
+ sort = 'size:asc'
+ arglist = [
+ host,
+ '--detailed', str(detailed),
+ '--marker', marker,
+ '--limit', limit,
+ '--offset', offset,
+ '--sort', sort,
+ ]
+ verifylist = [
+ ('host', host),
+ ('detailed', str(detailed)),
+ ('marker', marker),
+ ('limit', limit),
+ ('offset', offset),
+ ('sort', sort),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ columns, data = self.cmd.take_action(parsed_args)
+
+ expected_columns = [
+ 'reference',
+ 'size',
+ 'safe_to_manage',
+ 'reason_not_safe',
+ 'cinder_id',
+ 'extra_info',
+ ]
+
+ # confirming if all expected columns are present in the result.
+ self.assertEqual(expected_columns, columns)
+
+ datalist = []
+ for volume_record in self.volume_manage_list:
+ manage_details = (
+ volume_record.reference,
+ volume_record.size,
+ volume_record.safe_to_manage,
+ volume_record.reason_not_safe,
+ volume_record.cinder_id,
+ volume_record.extra_info,
+ )
+ datalist.append(manage_details)
+ datalist = tuple(datalist)
+
+ # confirming if all expected values are present in the result.
+ self.assertEqual(datalist, tuple(data))
+
+ # checking if proper call was made to get volume manageable list
+ self.volumes_mock.list_manageable.assert_called_with(
+ host=host,
+ detailed=detailed,
+ marker=marker,
+ limit=limit,
+ offset=offset,
+ sort=sort,
+ cluster=parsed_args.cluster,
+ )
+
+
+class TestBlockStorageSnapshotManage(TestBlockStorageManage):
+
+ snapshot_manage_list = volume_fakes.create_snapshot_manage_list_records()
+
+ def setUp(self):
+ super().setUp()
+
+ self.snapshots_mock.list_manageable.return_value = (
+ self.snapshot_manage_list)
+
+ # Get the command object to test
+ self.cmd = block_storage_manage.BlockStorageManageSnapshots(
+ self.app, None)
+
+ def test_block_storage_snapshot_manage_list(self):
+ self.app.client_manager.volume.api_version = \
+ api_versions.APIVersion('3.8')
+ host = 'fake_host'
+ arglist = [
+ host,
+ ]
+ verifylist = [
+ ('host', host),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ columns, data = self.cmd.take_action(parsed_args)
+
+ expected_columns = [
+ 'reference',
+ 'size',
+ 'safe_to_manage',
+ 'source_reference',
+ 'reason_not_safe',
+ 'cinder_id',
+ 'extra_info',
+ ]
+
+ # confirming if all expected columns are present in the result.
+ self.assertEqual(expected_columns, columns)
+
+ datalist = []
+ for snapshot_record in self.snapshot_manage_list:
+ manage_details = (
+ snapshot_record.reference,
+ snapshot_record.size,
+ snapshot_record.safe_to_manage,
+ snapshot_record.source_reference,
+ snapshot_record.reason_not_safe,
+ snapshot_record.cinder_id,
+ snapshot_record.extra_info,
+ )
+ datalist.append(manage_details)
+ datalist = tuple(datalist)
+
+ # confirming if all expected values are present in the result.
+ self.assertEqual(datalist, tuple(data))
+
+ # checking if proper call was made to get snapshot manageable list
+ self.snapshots_mock.list_manageable.assert_called_with(
+ host=parsed_args.host,
+ detailed=parsed_args.detailed,
+ marker=parsed_args.marker,
+ limit=parsed_args.limit,
+ offset=parsed_args.offset,
+ sort=parsed_args.sort,
+ cluster=parsed_args.cluster,
+ )
+
+ def test_block_storage_volume_manage_pre_38(self):
+ host = 'fake_host'
+ arglist = [
+ host,
+ ]
+ verifylist = [
+ ('host', host),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ exc = self.assertRaises(exceptions.CommandError, self.cmd.take_action,
+ parsed_args)
+ self.assertIn(
+ '--os-volume-api-version 3.8 or greater is required', str(exc))
+
+ def test_block_storage_volume_manage_pre_317(self):
+ self.app.client_manager.volume.api_version = \
+ api_versions.APIVersion('3.16')
+ cluster = 'fake_cluster'
+ arglist = [
+ '--cluster', cluster,
+ ]
+ verifylist = [
+ ('cluster', cluster),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ exc = self.assertRaises(exceptions.CommandError, self.cmd.take_action,
+ parsed_args)
+ self.assertIn(
+ '--os-volume-api-version 3.17 or greater is required', str(exc))
+ self.assertIn('--cluster', str(exc))
+
+ def test_block_storage_volume_manage_host_and_cluster(self):
+ self.app.client_manager.volume.api_version = \
+ api_versions.APIVersion('3.17')
+ host = 'fake_host'
+ cluster = 'fake_cluster'
+ arglist = [
+ host,
+ '--cluster', cluster,
+ ]
+ verifylist = [
+ ('host', host),
+ ('cluster', cluster),
+ ]
+ exc = self.assertRaises(tests_utils.ParserException,
+ self.check_parser, self.cmd,
+ arglist, verifylist)
+ self.assertIn(
+ 'argument --cluster: not allowed with argument <host>', str(exc))
+
+ def test_block_storage_volume_manage_list_all_args(self):
+ self.app.client_manager.volume.api_version = \
+ api_versions.APIVersion('3.8')
+ host = 'fake_host'
+ detailed = True
+ marker = 'fake_marker'
+ limit = '5'
+ offset = '3'
+ sort = 'size:asc'
+ arglist = [
+ host,
+ '--detailed', str(detailed),
+ '--marker', marker,
+ '--limit', limit,
+ '--offset', offset,
+ '--sort', sort,
+ ]
+ verifylist = [
+ ('host', host),
+ ('detailed', str(detailed)),
+ ('marker', marker),
+ ('limit', limit),
+ ('offset', offset),
+ ('sort', sort),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ columns, data = self.cmd.take_action(parsed_args)
+
+ expected_columns = [
+ 'reference',
+ 'size',
+ 'safe_to_manage',
+ 'source_reference',
+ 'reason_not_safe',
+ 'cinder_id',
+ 'extra_info',
+ ]
+
+ # confirming if all expected columns are present in the result.
+ self.assertEqual(expected_columns, columns)
+
+ datalist = []
+ for snapshot_record in self.snapshot_manage_list:
+ manage_details = (
+ snapshot_record.reference,
+ snapshot_record.size,
+ snapshot_record.safe_to_manage,
+ snapshot_record.source_reference,
+ snapshot_record.reason_not_safe,
+ snapshot_record.cinder_id,
+ snapshot_record.extra_info,
+ )
+ datalist.append(manage_details)
+ datalist = tuple(datalist)
+
+ # confirming if all expected values are present in the result.
+ self.assertEqual(datalist, tuple(data))
+
+ # checking if proper call was made to get snapshot manageable list
+ self.snapshots_mock.list_manageable.assert_called_with(
+ host=host,
+ detailed=detailed,
+ marker=marker,
+ limit=limit,
+ offset=offset,
+ sort=sort,
+ cluster=parsed_args.cluster,
+ )
diff --git a/openstackclient/tests/unit/volume/v3/test_volume.py b/openstackclient/tests/unit/volume/v3/test_volume.py
new file mode 100644
index 00000000..ed72bfa1
--- /dev/null
+++ b/openstackclient/tests/unit/volume/v3/test_volume.py
@@ -0,0 +1,179 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+import copy
+from unittest import mock
+
+from cinderclient import api_versions
+from osc_lib.cli import format_columns
+from osc_lib import exceptions
+from osc_lib import utils
+
+from openstackclient.tests.unit.volume.v2 import fakes as volume_fakes
+from openstackclient.volume.v3 import volume
+
+
+class TestVolumeSummary(volume_fakes.TestVolume):
+
+ columns = [
+ 'Total Count',
+ 'Total Size',
+ ]
+
+ def setUp(self):
+ super().setUp()
+
+ self.volumes_mock = self.app.client_manager.volume.volumes
+ self.volumes_mock.reset_mock()
+ self.mock_vol_1 = volume_fakes.create_one_volume()
+ self.mock_vol_2 = volume_fakes.create_one_volume()
+ self.return_dict = {
+ 'volume-summary': {
+ 'total_count': 2,
+ 'total_size': self.mock_vol_1.size + self.mock_vol_2.size}}
+ self.volumes_mock.summary.return_value = self.return_dict
+
+ # Get the command object to test
+ self.cmd = volume.VolumeSummary(self.app, None)
+
+ def test_volume_summary(self):
+ self.app.client_manager.volume.api_version = \
+ api_versions.APIVersion('3.12')
+ arglist = [
+ '--all-projects',
+ ]
+ verifylist = [
+ ('all_projects', True),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ columns, data = self.cmd.take_action(parsed_args)
+
+ self.volumes_mock.summary.assert_called_once_with(
+ all_tenants=True,
+ )
+
+ self.assertEqual(self.columns, columns)
+
+ datalist = (
+ 2,
+ self.mock_vol_1.size + self.mock_vol_2.size)
+ self.assertCountEqual(datalist, tuple(data))
+
+ def test_volume_summary_pre_312(self):
+ arglist = [
+ '--all-projects',
+ ]
+ verifylist = [
+ ('all_projects', True),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ exc = self.assertRaises(
+ exceptions.CommandError,
+ self.cmd.take_action,
+ parsed_args)
+ self.assertIn(
+ '--os-volume-api-version 3.12 or greater is required',
+ str(exc))
+
+ def test_volume_summary_with_metadata(self):
+ self.app.client_manager.volume.api_version = \
+ api_versions.APIVersion('3.36')
+
+ combine_meta = {**self.mock_vol_1.metadata, **self.mock_vol_2.metadata}
+ meta_dict = copy.deepcopy(self.return_dict)
+ meta_dict['volume-summary']['metadata'] = combine_meta
+ self.volumes_mock.summary.return_value = meta_dict
+
+ new_cols = copy.deepcopy(self.columns)
+ new_cols.extend(['Metadata'])
+
+ arglist = [
+ '--all-projects',
+ ]
+ verifylist = [
+ ('all_projects', True),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ columns, data = self.cmd.take_action(parsed_args)
+
+ self.volumes_mock.summary.assert_called_once_with(
+ all_tenants=True,
+ )
+
+ self.assertEqual(new_cols, columns)
+
+ datalist = (
+ 2,
+ self.mock_vol_1.size + self.mock_vol_2.size,
+ format_columns.DictColumn(combine_meta))
+ self.assertCountEqual(datalist, tuple(data))
+
+
+class TestVolumeRevertToSnapshot(volume_fakes.TestVolume):
+
+ def setUp(self):
+ super().setUp()
+
+ self.volumes_mock = self.app.client_manager.volume.volumes
+ self.volumes_mock.reset_mock()
+ self.snapshots_mock = self.app.client_manager.volume.volume_snapshots
+ self.snapshots_mock.reset_mock()
+ self.mock_volume = volume_fakes.create_one_volume()
+ self.mock_snapshot = volume_fakes.create_one_snapshot(
+ attrs={'volume_id': self.mock_volume.id})
+
+ # Get the command object to test
+ self.cmd = volume.VolumeRevertToSnapshot(self.app, None)
+
+ def test_volume_revert_to_snapshot_pre_340(self):
+ arglist = [
+ self.mock_snapshot.id,
+ ]
+ verifylist = [
+ ('snapshot', self.mock_snapshot.id),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ exc = self.assertRaises(
+ exceptions.CommandError,
+ self.cmd.take_action,
+ parsed_args)
+ self.assertIn(
+ '--os-volume-api-version 3.40 or greater is required',
+ str(exc))
+
+ def test_volume_revert_to_snapshot(self):
+ self.app.client_manager.volume.api_version = \
+ api_versions.APIVersion('3.40')
+ arglist = [
+ self.mock_snapshot.id,
+ ]
+ verifylist = [
+ ('snapshot', self.mock_snapshot.id),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ find_mock_result = [self.mock_snapshot, self.mock_volume]
+ with mock.patch.object(utils, 'find_resource',
+ side_effect=find_mock_result) as find_mock:
+ self.cmd.take_action(parsed_args)
+
+ self.volumes_mock.revert_to_snapshot.assert_called_once_with(
+ volume=self.mock_volume,
+ snapshot=self.mock_snapshot,
+ )
+ self.assertEqual(2, find_mock.call_count)
diff --git a/openstackclient/tests/unit/volume/v3/test_volume_group.py b/openstackclient/tests/unit/volume/v3/test_volume_group.py
index 96079a08..a8338a80 100644
--- a/openstackclient/tests/unit/volume/v3/test_volume_group.py
+++ b/openstackclient/tests/unit/volume/v3/test_volume_group.py
@@ -10,9 +10,12 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
from cinderclient import api_versions
from osc_lib import exceptions
+from openstackclient.tests.unit import utils as tests_utils
from openstackclient.tests.unit.volume.v3 import fakes as volume_fakes
from openstackclient.volume.v3 import volume_group
@@ -32,6 +35,10 @@ class TestVolumeGroup(volume_fakes.TestVolume):
self.volume_types_mock = self.app.client_manager.volume.volume_types
self.volume_types_mock.reset_mock()
+ self.volume_group_snapshots_mock = \
+ self.app.client_manager.volume.group_snapshots
+ self.volume_group_snapshots_mock.reset_mock()
+
class TestVolumeGroupCreate(TestVolumeGroup):
@@ -43,6 +50,8 @@ class TestVolumeGroupCreate(TestVolumeGroup):
'volume_types': [fake_volume_type.id],
},
)
+ fake_volume_group_snapshot = \
+ volume_fakes.create_one_volume_group_snapshot()
columns = (
'ID',
@@ -79,6 +88,10 @@ class TestVolumeGroupCreate(TestVolumeGroup):
self.fake_volume_group_type
self.volume_groups_mock.create.return_value = self.fake_volume_group
self.volume_groups_mock.get.return_value = self.fake_volume_group
+ self.volume_groups_mock.create_from_src.return_value = \
+ self.fake_volume_group
+ self.volume_group_snapshots_mock.get.return_value = \
+ self.fake_volume_group_snapshot
self.cmd = volume_group.CreateVolumeGroup(self.app, None)
@@ -115,6 +128,29 @@ class TestVolumeGroupCreate(TestVolumeGroup):
self.assertEqual(self.columns, columns)
self.assertCountEqual(self.data, data)
+ def test_volume_group_create_no_volume_type(self):
+ self.app.client_manager.volume.api_version = \
+ api_versions.APIVersion('3.13')
+
+ arglist = [
+ self.fake_volume_group_type.id
+ ]
+ verifylist = [
+ ('volume_group_type', self.fake_volume_group_type.id),
+ ('name', None),
+ ('description', None),
+ ('availability_zone', None),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ exc = self.assertRaises(
+ exceptions.CommandError,
+ self.cmd.take_action,
+ parsed_args)
+ self.assertIn(
+ '<volume_types> is a required argument',
+ str(exc))
+
def test_volume_group_create_with_options(self):
self.app.client_manager.volume.api_version = \
api_versions.APIVersion('3.13')
@@ -176,6 +212,101 @@ class TestVolumeGroupCreate(TestVolumeGroup):
'--os-volume-api-version 3.13 or greater is required',
str(exc))
+ def test_volume_group_create_from_source_group(self):
+ self.app.client_manager.volume.api_version = \
+ api_versions.APIVersion('3.14')
+
+ arglist = [
+ '--source-group', self.fake_volume_group.id,
+ ]
+ verifylist = [
+ ('source_group', self.fake_volume_group.id),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ columns, data = self.cmd.take_action(parsed_args)
+
+ self.volume_groups_mock.get.assert_has_calls(
+ [mock.call(self.fake_volume_group.id),
+ mock.call(self.fake_volume_group.id)])
+ self.volume_groups_mock.create_from_src.assert_called_once_with(
+ None,
+ self.fake_volume_group.id,
+ None,
+ None,
+ )
+ self.assertEqual(self.columns, columns)
+ self.assertCountEqual(self.data, data)
+
+ def test_volume_group_create_from_group_snapshot(self):
+ self.app.client_manager.volume.api_version = \
+ api_versions.APIVersion('3.14')
+
+ arglist = [
+ '--group-snapshot', self.fake_volume_group_snapshot.id,
+ ]
+ verifylist = [
+ ('group_snapshot', self.fake_volume_group_snapshot.id),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ columns, data = self.cmd.take_action(parsed_args)
+
+ self.volume_group_snapshots_mock.get.assert_called_once_with(
+ self.fake_volume_group_snapshot.id)
+ self.volume_groups_mock.get.assert_called_once_with(
+ self.fake_volume_group.id)
+ self.volume_groups_mock.create_from_src.assert_called_once_with(
+ self.fake_volume_group_snapshot.id,
+ None,
+ None,
+ None,
+ )
+ self.assertEqual(self.columns, columns)
+ self.assertCountEqual(self.data, data)
+
+ def test_volume_group_create_from_src_pre_v314(self):
+ self.app.client_manager.volume.api_version = \
+ api_versions.APIVersion('3.13')
+
+ arglist = [
+ '--source-group', self.fake_volume_group.id,
+ ]
+ verifylist = [
+ ('source_group', self.fake_volume_group.id),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ exc = self.assertRaises(
+ exceptions.CommandError,
+ self.cmd.take_action,
+ parsed_args)
+ self.assertIn(
+ '--os-volume-api-version 3.14 or greater is required',
+ str(exc))
+
+ def test_volume_group_create_from_src_source_group_group_snapshot(self):
+ self.app.client_manager.volume.api_version = \
+ api_versions.APIVersion('3.14')
+
+ arglist = [
+ '--source-group', self.fake_volume_group.id,
+ '--group-snapshot', self.fake_volume_group_snapshot.id,
+ ]
+ verifylist = [
+ ('source_group', self.fake_volume_group.id),
+ ('group_snapshot', self.fake_volume_group_snapshot.id),
+ ]
+
+ exc = self.assertRaises(tests_utils.ParserException,
+ self.check_parser,
+ self.cmd,
+ arglist,
+ verifylist)
+ self.assertIn(
+ '--group-snapshot: not allowed with argument --source-group',
+ str(exc))
+
class TestVolumeGroupDelete(TestVolumeGroup):
diff --git a/openstackclient/volume/v3/block_storage_cleanup.py b/openstackclient/volume/v3/block_storage_cleanup.py
new file mode 100644
index 00000000..f99b8217
--- /dev/null
+++ b/openstackclient/volume/v3/block_storage_cleanup.py
@@ -0,0 +1,146 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from cinderclient import api_versions
+from osc_lib.command import command
+from osc_lib import exceptions
+
+from openstackclient.i18n import _
+
+
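+# Note: as the helper for a ``command.Lister``, this returns the
+# (column headers, row data) pair that ``BlockStorageCleanup.take_action``
+# yields; each record is tagged with its cleanup status ('Cleaning' or
+# 'Unavailable').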
+def _format_cleanup_response(cleaning, unavailable):
+ column_headers = (
+ 'ID',
+ 'Cluster Name',
+ 'Host',
+ 'Binary',
+ 'Status',
+ )
+ combined_data = []
+ for obj in cleaning:
+ details = (obj.id, obj.cluster_name, obj.host, obj.binary, 'Cleaning')
+ combined_data.append(details)
+
+ for obj in unavailable:
+ details = (obj.id, obj.cluster_name, obj.host, obj.binary,
+ 'Unavailable')
+ combined_data.append(details)
+
+ return (column_headers, combined_data)
+
+
+class BlockStorageCleanup(command.Lister):
+ """Do block storage cleanup.
+
+ This command requires ``--os-volume-api-version`` 3.24 or greater.
+ """
+
+ def get_parser(self, prog_name):
+ parser = super().get_parser(prog_name)
+ parser.add_argument(
+ '--cluster',
+ metavar='<cluster>',
+ help=_('Name of block storage cluster in which cleanup needs '
+ 'to be performed (name only)')
+ )
+ parser.add_argument(
+ "--host",
+ metavar="<host>",
+ default=None,
+ help=_("Host where the service resides. (name only)")
+ )
+ parser.add_argument(
+ '--binary',
+ metavar='<binary>',
+ default=None,
+ help=_("Name of the service binary.")
+ )
+ service_up_parser = parser.add_mutually_exclusive_group()
+ service_up_parser.add_argument(
+ '--up',
+ dest='is_up',
+ action='store_true',
+ default=None,
+ help=_(
+ 'Filter by up status. If this is set, services need to be up.'
+ )
+ )
+ service_up_parser.add_argument(
+ '--down',
+ dest='is_up',
+ action='store_false',
+ help=_(
+ 'Filter by down status. If this is set, services need to be '
+ 'down.'
+ )
+ )
+ service_disabled_parser = parser.add_mutually_exclusive_group()
+ service_disabled_parser.add_argument(
+ '--disabled',
+ dest='disabled',
+ action='store_true',
+ default=None,
+ help=_('Filter by disabled status.')
+ )
+ service_disabled_parser.add_argument(
+ '--enabled',
+ dest='disabled',
+ action='store_false',
+ help=_('Filter by enabled status.')
+ )
+ parser.add_argument(
+ '--resource-id',
+ metavar='<resource-id>',
+ default=None,
+ help=_('UUID of a resource to cleanup.')
+ )
+ parser.add_argument(
+ '--resource-type',
+ metavar='<Volume|Snapshot>',
+ choices=('Volume', 'Snapshot'),
+ help=_('Type of resource to cleanup.')
+ )
+ parser.add_argument(
+ '--service-id',
+ type=int,
+ default=None,
+ help=_(
+ 'The service ID field from the DB, not the UUID of the '
+ 'service.'
+ )
+ )
+ return parser
+
+ def take_action(self, parsed_args):
+ volume_client = self.app.client_manager.volume
+
+ if volume_client.api_version < api_versions.APIVersion('3.24'):
+ msg = _(
+ "--os-volume-api-version 3.24 or greater is required to "
+ "support the 'block storage cleanup' command"
+ )
+ raise exceptions.CommandError(msg)
+
+ filters = {
+ 'cluster_name': parsed_args.cluster,
+ 'host': parsed_args.host,
+ 'binary': parsed_args.binary,
+ 'is_up': parsed_args.is_up,
+ 'disabled': parsed_args.disabled,
+ 'resource_id': parsed_args.resource_id,
+ 'resource_type': parsed_args.resource_type,
+ 'service_id': parsed_args.service_id
+ }
+
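+ # Drop any filter the user did not supply so that only explicitly
+ # requested criteria are forwarded to the workers API.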
+ filters = {k: v for k, v in filters.items() if v is not None}
+ cleaning, unavailable = volume_client.workers.clean(**filters)
+ return _format_cleanup_response(cleaning, unavailable)
diff --git a/openstackclient/volume/v3/block_storage_log_level.py b/openstackclient/volume/v3/block_storage_log_level.py
new file mode 100644
index 00000000..d5286cdd
--- /dev/null
+++ b/openstackclient/volume/v3/block_storage_log_level.py
@@ -0,0 +1,147 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+"""Block Storage Service action implementations"""
+
+from cinderclient import api_versions
+from osc_lib.command import command
+from osc_lib import exceptions
+from osc_lib import utils
+
+from openstackclient.i18n import _
+
+
+class BlockStorageLogLevelList(command.Lister):
+ """List log levels of block storage service.
+
+ Supported by --os-volume-api-version 3.32 or greater.
+ """
+
+ def get_parser(self, prog_name):
+ parser = super().get_parser(prog_name)
+ parser.add_argument(
+ "--host",
+ metavar="<host>",
+ default="",
+ help=_("List block storage service log level of specified host "
+ "(name only)")
+ )
+ parser.add_argument(
+ "--service",
+ metavar="<service>",
+ default="",
+ choices=(
+ '',
+ '*',
+ 'cinder-api',
+ 'cinder-volume',
+ 'cinder-scheduler',
+ 'cinder-backup'),
+ help=_("List block storage service log level of the specified "
+ "service (name only)")
+ )
+ parser.add_argument(
+ "--log-prefix",
+ metavar="<log-prefix>",
+ default="",
+ help="Prefix for the log, e.g. 'sqlalchemy'"
+ )
+ return parser
+
+ def take_action(self, parsed_args):
+ service_client = self.app.client_manager.volume
+ columns = [
+ "Binary",
+ "Host",
+ "Prefix",
+ "Level",
+ ]
+
+ if service_client.api_version < api_versions.APIVersion('3.32'):
+ msg = _(
+ "--os-volume-api-version 3.32 or greater is required to "
+ "support the 'block storage log level list' command"
+ )
+ raise exceptions.CommandError(msg)
+
+ data = service_client.services.get_log_levels(
+ binary=parsed_args.service,
+ server=parsed_args.host,
+ prefix=parsed_args.log_prefix)
+
+ return (columns,
+ (utils.get_item_properties(
+ s, columns,
+ ) for s in data))
+
+
+class BlockStorageLogLevelSet(command.Command):
+ """Set log level of block storage service
+
+ Supported by --os-volume-api-version 3.32 or greater.
+ """
+
+ def get_parser(self, prog_name):
+ parser = super().get_parser(prog_name)
+ parser.add_argument(
+ "level",
+ metavar="<log-level>",
+ choices=('INFO', 'WARNING', 'ERROR', 'DEBUG'),
+ type=str.upper,
+ help=_("Desired log level.")
+ )
+ parser.add_argument(
+ "--host",
+ metavar="<host>",
+ default="",
+ help=_("Set block storage service log level of specified host "
+ "(name only)")
+ )
+ parser.add_argument(
+ "--service",
+ metavar="<service>",
+ default="",
+ choices=(
+ '',
+ '*',
+ 'cinder-api',
+ 'cinder-volume',
+ 'cinder-scheduler',
+ 'cinder-backup'),
+ help=_("Set block storage service log level of specified service "
+ "(name only)")
+ )
+ parser.add_argument(
+ "--log-prefix",
+ metavar="<log-prefix>",
+ default="",
+ help="Prefix for the log, e.g. 'sqlalchemy'"
+ )
+ return parser
+
+ def take_action(self, parsed_args):
+ service_client = self.app.client_manager.volume
+
+ if service_client.api_version < api_versions.APIVersion('3.32'):
+ msg = _(
+ "--os-volume-api-version 3.32 or greater is required to "
+ "support the 'block storage log level set' command"
+ )
+ raise exceptions.CommandError(msg)
+
+ service_client.services.set_log_levels(
+ level=parsed_args.level,
+ binary=parsed_args.service,
+ server=parsed_args.host,
+ prefix=parsed_args.log_prefix)
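For context, the Lister command above hands cliff a (columns, rows) pair, where each row is built by osc_lib's get_item_properties pulling same-named attributes off the returned records. A small sketch under the assumption that get_log_levels() returns objects with binary/host/prefix/level attributes; FakeLogLevel is an invented stand-in:

    from osc_lib import utils

    class FakeLogLevel:
        # Invented stand-in for a cinderclient log-level record.
        def __init__(self, binary, host, prefix, level):
            self.binary = binary
            self.host = host
            self.prefix = prefix
            self.level = level

    columns = ["Binary", "Host", "Prefix", "Level"]
    data = [FakeLogLevel("cinder-volume", "node-1", "sqlalchemy", "DEBUG")]
    # "Binary" maps to the attribute "binary", and so on.
    rows = [utils.get_item_properties(s, columns) for s in data]
    print(rows)  # [('cinder-volume', 'node-1', 'sqlalchemy', 'DEBUG')]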
diff --git a/openstackclient/volume/v3/block_storage_manage.py b/openstackclient/volume/v3/block_storage_manage.py
new file mode 100644
index 00000000..9015f44d
--- /dev/null
+++ b/openstackclient/volume/v3/block_storage_manage.py
@@ -0,0 +1,258 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+"""Block Storage Volume/Snapshot Management implementations"""
+
+from cinderclient import api_versions
+from osc_lib.command import command
+from osc_lib import exceptions
+from osc_lib import utils
+from oslo_utils import strutils
+
+from openstackclient.i18n import _
+
+
+SORT_MANAGEABLE_KEY_VALUES = ('size', 'reference')
+
+
+class BlockStorageManageVolumes(command.Lister):
+ """List manageable volumes.
+
+ Supported by --os-volume-api-version 3.8 or greater.
+ """
+
+ def get_parser(self, prog_name):
+ parser = super().get_parser(prog_name)
+ host_group = parser.add_mutually_exclusive_group()
+ host_group.add_argument(
+ "host",
+ metavar="<host>",
+ nargs='?',
+ help=_('Cinder host on which to list manageable volumes. '
+ 'Takes the form: host@backend-name#pool')
+ )
+ host_group.add_argument(
+ "--cluster",
+ metavar="<cluster>",
+ help=_('Cinder cluster on which to list manageable volumes. '
+ 'Takes the form: cluster@backend-name#pool. '
+ '(supported by --os-volume-api-version 3.17 or later)')
+ )
+ parser.add_argument(
+ '--detailed',
+ metavar='<detailed>',
+ default=True,
+ help=_('Returns detailed information (Default=True).')
+ )
+ parser.add_argument(
+ '--marker',
+ metavar='<marker>',
+ default=None,
+ help=_('Begin returning volumes that appear later in the volume '
+ 'list than that represented by this reference. This '
+ 'reference should be JSON-like. Default=None.')
+ )
+ parser.add_argument(
+ '--limit',
+ metavar='<limit>',
+ default=None,
+ help=_('Maximum number of volumes to return. Default=None.')
+ )
+ parser.add_argument(
+ '--offset',
+ metavar='<offset>',
+ default=None,
+ help=_('Number of volumes to skip after marker. Default=None.')
+ )
+ parser.add_argument(
+ '--sort',
+ metavar='<key>[:<direction>]',
+ default=None,
+ help=(_('Comma-separated list of sort keys and directions in the '
+ 'form of <key>[:<asc|desc>]. '
+ 'Valid keys: %s. '
+ 'Default=None.') % ', '.join(SORT_MANAGEABLE_KEY_VALUES))
+ )
+ return parser
+
+ def take_action(self, parsed_args):
+ volume_client = self.app.client_manager.volume
+
+ if parsed_args.host is None and parsed_args.cluster is None:
+ msg = _(
+ "Either <host> or '--cluster <cluster>' needs to be provided "
+ "to run the 'block storage volume manageable list' command"
+ )
+ raise exceptions.CommandError(msg)
+
+ if volume_client.api_version < api_versions.APIVersion('3.8'):
+ msg = _(
+ "--os-volume-api-version 3.8 or greater is required to "
+ "support the 'block storage volume manageable list' command"
+ )
+ raise exceptions.CommandError(msg)
+
+ if parsed_args.cluster:
+ if volume_client.api_version < api_versions.APIVersion('3.17'):
+ msg = _(
+ "--os-volume-api-version 3.17 or greater is required to "
+ "support the '--cluster' option"
+ )
+ raise exceptions.CommandError(msg)
+
+ detailed = strutils.bool_from_string(parsed_args.detailed)
+ cluster = getattr(parsed_args, 'cluster', None)
+
+ columns = [
+ 'reference',
+ 'size',
+ 'safe_to_manage',
+ ]
+ if detailed:
+ columns.extend([
+ 'reason_not_safe',
+ 'cinder_id',
+ 'extra_info',
+ ])
+
+ data = volume_client.volumes.list_manageable(
+ host=parsed_args.host,
+ detailed=detailed,
+ marker=parsed_args.marker,
+ limit=parsed_args.limit,
+ offset=parsed_args.offset,
+ sort=parsed_args.sort,
+ cluster=cluster)
+
+ return (columns,
+ (utils.get_item_properties(
+ s, columns,
+ ) for s in data))
+
+
+class BlockStorageManageSnapshots(command.Lister):
+ """List manageable snapshots.
+
+ Supported by --os-volume-api-version 3.8 or greater.
+ """
+
+ def get_parser(self, prog_name):
+ parser = super().get_parser(prog_name)
+ host_group = parser.add_mutually_exclusive_group()
+ host_group.add_argument(
+ "host",
+ metavar="<host>",
+ nargs='?',
+ help=_('Cinder host on which to list manageable snapshots. '
+ 'Takes the form: host@backend-name#pool')
+ )
+ host_group.add_argument(
+ "--cluster",
+ metavar="<cluster>",
+ help=_('Cinder cluster on which to list manageable snapshots. '
+ 'Takes the form: cluster@backend-name#pool. '
+ '(supported by --os-volume-api-version 3.17 or later)')
+ )
+ parser.add_argument(
+ '--detailed',
+ metavar='<detailed>',
+ default=True,
+ help=_('Returns detailed information (Default=True).')
+ )
+ parser.add_argument(
+ '--marker',
+ metavar='<marker>',
+ default=None,
+ help=_('Begin returning snapshots that appear later in the '
+ 'snapshot list than that represented by this reference. '
+ 'This reference should be JSON-like. Default=None.')
+ )
+ parser.add_argument(
+ '--limit',
+ metavar='<limit>',
+ default=None,
+ help=_('Maximum number of snapshots to return. Default=None.')
+ )
+ parser.add_argument(
+ '--offset',
+ metavar='<offset>',
+ default=None,
+ help=_('Number of snapshots to skip after marker. Default=None.')
+ )
+ parser.add_argument(
+ '--sort',
+ metavar='<key>[:<direction>]',
+ default=None,
+ help=(_('Comma-separated list of sort keys and directions in the '
+ 'form of <key>[:<asc|desc>]. '
+ 'Valid keys: %s. '
+ 'Default=None.') % ', '.join(SORT_MANAGEABLE_KEY_VALUES))
+ )
+ return parser
+
+ def take_action(self, parsed_args):
+ volume_client = self.app.client_manager.volume
+
+ if parsed_args.host is None and parsed_args.cluster is None:
+ msg = _(
+ "Either <host> or '--cluster <cluster>' needs to be provided "
+ "to run the 'block storage volume snapshot manageable list' "
+ "command"
+ )
+ raise exceptions.CommandError(msg)
+
+ if volume_client.api_version < api_versions.APIVersion('3.8'):
+ msg = _(
+ "--os-volume-api-version 3.8 or greater is required to "
+ "support the 'block storage volume snapshot manageable list' "
+ "command"
+ )
+ raise exceptions.CommandError(msg)
+
+ if parsed_args.cluster:
+ if volume_client.api_version < api_versions.APIVersion('3.17'):
+ msg = _(
+ "--os-volume-api-version 3.17 or greater is required to "
+ "support the '--cluster' option"
+ )
+ raise exceptions.CommandError(msg)
+
+ detailed = strutils.bool_from_string(parsed_args.detailed)
+ cluster = getattr(parsed_args, 'cluster', None)
+
+ columns = [
+ 'reference',
+ 'size',
+ 'safe_to_manage',
+ 'source_reference',
+ ]
+ if detailed:
+ columns.extend([
+ 'reason_not_safe',
+ 'cinder_id',
+ 'extra_info',
+ ])
+
+ data = volume_client.volume_snapshots.list_manageable(
+ host=parsed_args.host,
+ detailed=detailed,
+ marker=parsed_args.marker,
+ limit=parsed_args.limit,
+ offset=parsed_args.offset,
+ sort=parsed_args.sort,
+ cluster=cluster)
+
+ return (columns,
+ (utils.get_item_properties(
+ s, columns,
+ ) for s in data))
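Both manageable-list commands run --detailed through oslo.utils' bool_from_string because the value is the Python bool True when defaulted but a plain string when typed on the command line. A quick sketch of that helper's behaviour:

    from oslo_utils import strutils

    print(strutils.bool_from_string(True))     # True  (bools pass through)
    print(strutils.bool_from_string('False'))  # False
    print(strutils.bool_from_string('yes'))    # True
    print(strutils.bool_from_string('bogus'))  # False (non-strict default)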
diff --git a/openstackclient/volume/v3/volume.py b/openstackclient/volume/v3/volume.py
new file mode 100644
index 00000000..4b159688
--- /dev/null
+++ b/openstackclient/volume/v3/volume.py
@@ -0,0 +1,114 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+"""Volume V3 Volume action implementations"""
+
+import logging
+
+from cinderclient import api_versions
+from osc_lib.cli import format_columns
+from osc_lib.command import command
+from osc_lib import exceptions
+from osc_lib import utils
+
+from openstackclient.i18n import _
+
+
+LOG = logging.getLogger(__name__)
+
+
+class VolumeSummary(command.ShowOne):
+ _description = _("Show a summary of all volumes in this deployment.")
+
+ def get_parser(self, prog_name):
+ parser = super().get_parser(prog_name)
+ parser.add_argument(
+ '--all-projects',
+ action='store_true',
+ default=False,
+ help=_('Include all projects (admin only)'),
+ )
+ return parser
+
+ def take_action(self, parsed_args):
+
+ volume_client = self.app.client_manager.volume
+
+ if volume_client.api_version < api_versions.APIVersion('3.12'):
+ msg = _(
+ "--os-volume-api-version 3.12 or greater is required to "
+ "support the 'volume summary' command"
+ )
+ raise exceptions.CommandError(msg)
+
+ columns = [
+ 'total_count',
+ 'total_size',
+ ]
+ column_headers = [
+ 'Total Count',
+ 'Total Size',
+ ]
+ if volume_client.api_version.matches('3.36'):
+ columns.append('metadata')
+ column_headers.append('Metadata')
+
+ # map the --all-projects option onto the API's 'all_tenants' filter
+ all_projects = parsed_args.all_projects
+
+ vol_summary = volume_client.volumes.summary(
+ all_tenants=all_projects,
+ )
+
+ return (
+ column_headers,
+ utils.get_dict_properties(
+ vol_summary['volume-summary'],
+ columns,
+ formatters={'metadata': format_columns.DictColumn},
+ ),
+ )
+
+
+class VolumeRevertToSnapshot(command.Command):
+ _description = _("Revert a volume to a snapshot.")
+
+ def get_parser(self, prog_name):
+ parser = super().get_parser(prog_name)
+ parser.add_argument(
+ 'snapshot',
+ metavar="<snapshot>",
+ help=_('Name or ID of the snapshot to restore. The snapshot must '
+ 'be the most recent one known to cinder.'),
+ )
+ return parser
+
+ def take_action(self, parsed_args):
+
+ volume_client = self.app.client_manager.volume
+
+ if volume_client.api_version < api_versions.APIVersion('3.40'):
+ msg = _(
+ "--os-volume-api-version 3.40 or greater is required to "
+ "support the 'volume revert snapshot' command"
+ )
+ raise exceptions.CommandError(msg)
+
+ snapshot = utils.find_resource(
+ volume_client.volume_snapshots, parsed_args.snapshot)
+ volume = utils.find_resource(
+ volume_client.volumes, snapshot.volume_id)
+
+ volume_client.volumes.revert_to_snapshot(
+ volume=volume, snapshot=snapshot)
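The summary output above is assembled with get_dict_properties, which reads the listed keys out of the 'volume-summary' dict and wraps the metadata cell in a DictColumn so cliff can format it. A sketch with an invented payload (the real response comes from volume_client.volumes.summary()):

    from osc_lib.cli import format_columns
    from osc_lib import utils

    summary = {'total_count': 3, 'total_size': 40, 'metadata': {'env': 'dev'}}
    columns = ['total_count', 'total_size', 'metadata']
    row = utils.get_dict_properties(
        summary, columns,
        formatters={'metadata': format_columns.DictColumn},
    )
    # The metadata cell comes back as a DictColumn that cliff renders later.
    print(row)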
diff --git a/openstackclient/volume/v3/volume_group.py b/openstackclient/volume/v3/volume_group.py
index db4e9a94..69b18ceb 100644
--- a/openstackclient/volume/v3/volume_group.py
+++ b/openstackclient/volume/v3/volume_group.py
@@ -82,15 +82,17 @@ class CreateVolumeGroup(command.ShowOne):
def get_parser(self, prog_name):
parser = super().get_parser(prog_name)
- parser.add_argument(
+ source_parser = parser.add_mutually_exclusive_group()
+ source_parser.add_argument(
'volume_group_type',
metavar='<volume_group_type>',
+ nargs='?',
help=_('Name or ID of volume group type to use.'),
)
parser.add_argument(
'volume_types',
metavar='<volume_type>',
- nargs='+',
+ nargs='*',
default=[],
help=_('Name or ID of volume type(s) to use.'),
)
@@ -107,44 +109,101 @@ class CreateVolumeGroup(command.ShowOne):
parser.add_argument(
'--availability-zone',
metavar='<availability-zone>',
- help=_('Availability zone for volume group.'),
+ help=_('Availability zone for volume group. '
+ '(not available if creating group from source)'),
+ )
+ source_parser.add_argument(
+ '--source-group',
+ metavar='<source-group>',
+ help=_('Existing volume group (name or ID) '
+ '(supported by --os-volume-api-version 3.14 or later)'),
+ )
+ source_parser.add_argument(
+ '--group-snapshot',
+ metavar='<group-snapshot>',
+ help=_('Existing group snapshot (name or ID) '
+ '(supported by --os-volume-api-version 3.14 or later)'),
)
return parser
def take_action(self, parsed_args):
volume_client = self.app.client_manager.volume
- if volume_client.api_version < api_versions.APIVersion('3.13'):
- msg = _(
- "--os-volume-api-version 3.13 or greater is required to "
- "support the 'volume group create' command"
- )
- raise exceptions.CommandError(msg)
-
- volume_group_type = utils.find_resource(
- volume_client.group_types,
- parsed_args.volume_group_type,
- )
-
- volume_types = []
- for volume_type in parsed_args.volume_types:
- volume_types.append(
- utils.find_resource(
- volume_client.volume_types,
- volume_type,
+ if parsed_args.volume_group_type:
+ if volume_client.api_version < api_versions.APIVersion('3.13'):
+ msg = _(
+ "--os-volume-api-version 3.13 or greater is required to "
+ "support the 'volume group create' command"
)
+ raise exceptions.CommandError(msg)
+ if not parsed_args.volume_types:
+ msg = _(
+ "<volume_types> is a required argument when creating a "
+ "group from group type."
+ )
+ raise exceptions.CommandError(msg)
+
+ volume_group_type = utils.find_resource(
+ volume_client.group_types,
+ parsed_args.volume_group_type,
)
+ volume_types = []
+ for volume_type in parsed_args.volume_types:
+ volume_types.append(
+ utils.find_resource(
+ volume_client.volume_types,
+ volume_type,
+ )
+ )
- group = volume_client.groups.create(
- volume_group_type.id,
- ','.join(x.id for x in volume_types),
- parsed_args.name,
- parsed_args.description,
- availability_zone=parsed_args.availability_zone)
+ group = volume_client.groups.create(
+ volume_group_type.id,
+ ','.join(x.id for x in volume_types),
+ parsed_args.name,
+ parsed_args.description,
+ availability_zone=parsed_args.availability_zone)
- group = volume_client.groups.get(group.id)
+ group = volume_client.groups.get(group.id)
+ return _format_group(group)
- return _format_group(group)
+ else:
+ if volume_client.api_version < api_versions.APIVersion('3.14'):
+ msg = _(
+ "--os-volume-api-version 3.14 or greater is required to "
+ "support the 'volume group create "
+ "[--source-group|--group-snapshot]' command"
+ )
+ raise exceptions.CommandError(msg)
+ if (parsed_args.source_group is None and
+ parsed_args.group_snapshot is None):
+ msg = _(
+ "Either --source-group <source_group> or "
+ "'--group-snapshot <group_snapshot>' needs to be "
+ "provided to run the 'volume group create "
+ "[--source-group|--group-snapshot]' command"
+ )
+ raise exceptions.CommandError(msg)
+ if parsed_args.availability_zone:
+ msg = _("'--availability-zone' option will not work "
+ "if creating group from source.")
+ LOG.warning(msg)
+
+ source_group = None
+ if parsed_args.source_group:
+ source_group = utils.find_resource(volume_client.groups,
+ parsed_args.source_group)
+ group_snapshot = None
+ if parsed_args.group_snapshot:
+ group_snapshot = utils.find_resource(
+ volume_client.group_snapshots,
+ parsed_args.group_snapshot)
+ group = volume_client.groups.create_from_src(
+ group_snapshot.id if group_snapshot else None,
+ source_group.id if source_group else None,
+ parsed_args.name,
+ parsed_args.description)
+ group = volume_client.groups.get(group.id)
+ return _format_group(group)
class DeleteVolumeGroup(command.Command):
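The parser change above puts an optional positional (nargs='?') and the two new source flags into one mutually exclusive group, so a group is created either from a group type or from a source group/snapshot, never both. A minimal argparse sketch of the same shape (names are illustrative):

    import argparse

    parser = argparse.ArgumentParser(prog='volume group create')
    source = parser.add_mutually_exclusive_group()
    source.add_argument('volume_group_type', nargs='?')
    source.add_argument('--source-group')
    source.add_argument('--group-snapshot')
    parser.add_argument('volume_types', nargs='*', default=[])

    print(parser.parse_args(['my-type', 'type-a', 'type-b']))
    print(parser.parse_args(['--source-group', 'grp-1']))
    # Supplying both would exit with a "not allowed with argument" error:
    # parser.parse_args(['my-type', '--source-group', 'grp-1'])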
diff --git a/releasenotes/notes/add-baremetal-agent-type-7c46365e8d457ac8.yaml b/releasenotes/notes/add-baremetal-agent-type-7c46365e8d457ac8.yaml
new file mode 100644
index 00000000..a9a3a0df
--- /dev/null
+++ b/releasenotes/notes/add-baremetal-agent-type-7c46365e8d457ac8.yaml
@@ -0,0 +1,5 @@
+---
+features:
+ - |
+ Add ``baremetal`` agent type to ``--agent-type`` option for
+ ``network agent list`` command.
diff --git a/releasenotes/notes/add-block-storage-manage-commands-6ebf029bd7a67bb3.yaml b/releasenotes/notes/add-block-storage-manage-commands-6ebf029bd7a67bb3.yaml
new file mode 100644
index 00000000..7b40a341
--- /dev/null
+++ b/releasenotes/notes/add-block-storage-manage-commands-6ebf029bd7a67bb3.yaml
@@ -0,0 +1,7 @@
+---
+features:
+ - |
+ Added ``block storage volume manageable list`` and
+ ``block storage snapshot manageable list`` commands that
+ allow operators to list the volumes and snapshots on a
+ particular host or cluster for management under OpenStack.
diff --git a/releasenotes/notes/add-create-group-from-src-options-6fcb0c87f617ca91.yaml b/releasenotes/notes/add-create-group-from-src-options-6fcb0c87f617ca91.yaml
new file mode 100644
index 00000000..9a4f1cb3
--- /dev/null
+++ b/releasenotes/notes/add-create-group-from-src-options-6fcb0c87f617ca91.yaml
@@ -0,0 +1,6 @@
+---
+features:
+ - |
+ Added ``--source-group`` and ``--group-snapshot`` options to the
+ ``volume group create`` command to allow creating group from
+ a source group or a group snapshot.
diff --git a/releasenotes/notes/add-vol-service-get-set-log-commands-f9420e5061d994b5.yaml b/releasenotes/notes/add-vol-service-get-set-log-commands-f9420e5061d994b5.yaml
new file mode 100644
index 00000000..ccaf69c1
--- /dev/null
+++ b/releasenotes/notes/add-vol-service-get-set-log-commands-f9420e5061d994b5.yaml
@@ -0,0 +1,6 @@
+---
+features:
+ - |
+ Added ``block storage log level list`` and ``block storage log level set``
+ commands that allow operators to list and set log levels for cinder
+ services.
diff --git a/releasenotes/notes/add-volume-revert-command-1c8f695420acbe7e.yaml b/releasenotes/notes/add-volume-revert-command-1c8f695420acbe7e.yaml
new file mode 100644
index 00000000..2832b888
--- /dev/null
+++ b/releasenotes/notes/add-volume-revert-command-1c8f695420acbe7e.yaml
@@ -0,0 +1,5 @@
+---
+features:
+ - |
+ Added ``volume revert`` command that reverts
+ a volume to the given snapshot.
diff --git a/releasenotes/notes/add-volume-summary-command-b2175b48af3ccab1.yaml b/releasenotes/notes/add-volume-summary-command-b2175b48af3ccab1.yaml
new file mode 100644
index 00000000..1c5cdf18
--- /dev/null
+++ b/releasenotes/notes/add-volume-summary-command-b2175b48af3ccab1.yaml
@@ -0,0 +1,5 @@
+---
+features:
+ - |
+ Added ``volume summary`` command to show the total size,
+ total count and metadata of volumes.
diff --git a/releasenotes/notes/add-workers-cleanup-command-720573c0f642efe9.yaml b/releasenotes/notes/add-workers-cleanup-command-720573c0f642efe9.yaml
new file mode 100644
index 00000000..7406cd62
--- /dev/null
+++ b/releasenotes/notes/add-workers-cleanup-command-720573c0f642efe9.yaml
@@ -0,0 +1,6 @@
+---
+features:
+ - |
+ Added ``block storage cleanup`` command that allows cleanup
+ of resources (volumes and snapshots) by services on other nodes
+ of a cluster in an Active-Active deployment.
diff --git a/releasenotes/notes/image-metadef-namespace-b940206bece64f97.yaml b/releasenotes/notes/image-metadef-namespace-b940206bece64f97.yaml
new file mode 100644
index 00000000..361e57fe
--- /dev/null
+++ b/releasenotes/notes/image-metadef-namespace-b940206bece64f97.yaml
@@ -0,0 +1,10 @@
+---
+features:
+ - Add ``openstack image metadef namespace create`` command
+ to create a metadef namespace for the image service.
+ - Add ``openstack image metadef namespace delete`` command
+ to delete an image metadef namespace.
+ - Add ``openstack image metadef namespace set`` command
+ to update a metadef namespace for the image service.
+ - Add ``openstack image metadef namespace show`` command
+ to show a metadef namespace for the image service.
diff --git a/releasenotes/notes/network-qos-rule-type-filters-47f4911a02011501.yaml b/releasenotes/notes/network-qos-rule-type-filters-47f4911a02011501.yaml
new file mode 100644
index 00000000..d0503f59
--- /dev/null
+++ b/releasenotes/notes/network-qos-rule-type-filters-47f4911a02011501.yaml
@@ -0,0 +1,8 @@
+---
+features:
+ - |
+ Added two new filter flags to ``openstack network qos rule type list``:
+ ``--all-supported``, to return any QoS rule type supported by at least
+ one loaded driver; ``--all-rules``, to return all QoS rule types
+ supported by the current version of Neutron server, regardless of the
+ loaded drivers.
diff --git a/releasenotes/notes/switch-hypervisor-to-sdk-f6495f070b034718.yaml b/releasenotes/notes/switch-hypervisor-to-sdk-f6495f070b034718.yaml
new file mode 100644
index 00000000..6f1721b1
--- /dev/null
+++ b/releasenotes/notes/switch-hypervisor-to-sdk-f6495f070b034718.yaml
@@ -0,0 +1,3 @@
+---
+features:
+ - Switch hypervisor operations to consume the OpenStack SDK.
diff --git a/releasenotes/notes/switch-server-migration-to-sdk-4e4530f787f90fd2.yaml b/releasenotes/notes/switch-server-migration-to-sdk-4e4530f787f90fd2.yaml
new file mode 100644
index 00000000..318ac097
--- /dev/null
+++ b/releasenotes/notes/switch-server-migration-to-sdk-4e4530f787f90fd2.yaml
@@ -0,0 +1,5 @@
+---
+features:
+ - |
+ The ``server migration *`` commands now use the OpenStack SDK instead of
+ novaclient.
diff --git a/releasenotes/notes/switch-server-show-to-sdk-44a614aebf2c6da6.yaml b/releasenotes/notes/switch-server-show-to-sdk-44a614aebf2c6da6.yaml
new file mode 100644
index 00000000..c116f6e0
--- /dev/null
+++ b/releasenotes/notes/switch-server-show-to-sdk-44a614aebf2c6da6.yaml
@@ -0,0 +1,7 @@
+---
+features:
+ - |
+ The ``server show`` command now uses the OpenStack SDK instead of the
+ Python nova bindings. The command prints data fields under both the
+ novaclient names used in previous releases and the names used in the
+ SDK.
diff --git a/requirements.txt b/requirements.txt
index c787ef76..1ae8cec4 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -6,7 +6,7 @@ pbr!=2.1.0,>=2.0.0 # Apache-2.0
cliff>=3.5.0 # Apache-2.0
iso8601>=0.1.11 # MIT
-openstacksdk>=0.102.0 # Apache-2.0
+openstacksdk>=0.103.0 # Apache-2.0
osc-lib>=2.3.0 # Apache-2.0
oslo.i18n>=3.15.3 # Apache-2.0
oslo.utils>=3.33.0 # Apache-2.0
diff --git a/setup.cfg b/setup.cfg
index fa3d30fe..c3c99ccd 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -386,7 +386,12 @@ openstack.image.v2 =
image_stage = openstackclient.image.v2.image:StageImage
image_task_show = openstackclient.image.v2.task:ShowTask
image_task_list = openstackclient.image.v2.task:ListTask
+
+ image_metadef_namespace_create = openstackclient.image.v2.metadef_namespaces:CreateMetadefNameSpace
+ image_metadef_namespace_delete = openstackclient.image.v2.metadef_namespaces:DeleteMetadefNameSpace
image_metadef_namespace_list = openstackclient.image.v2.metadef_namespaces:ListMetadefNameSpaces
+ image_metadef_namespace_set = openstackclient.image.v2.metadef_namespaces:SetMetadefNameSpace
+ image_metadef_namespace_show = openstackclient.image.v2.metadef_namespaces:ShowMetadefNameSpace
openstack.network.v2 =
address_group_create = openstackclient.network.v2.address_group:CreateAddressGroup
@@ -514,6 +519,14 @@ openstack.network.v2 =
network_service_provider_list = openstackclient.network.v2.network_service_provider:ListNetworkServiceProvider
+ network_subport_list = openstackclient.network.v2.network_trunk:ListNetworkSubport
+ network_trunk_create = openstackclient.network.v2.network_trunk:CreateNetworkTrunk
+ network_trunk_delete = openstackclient.network.v2.network_trunk:DeleteNetworkTrunk
+ network_trunk_list = openstackclient.network.v2.network_trunk:ListNetworkTrunk
+ network_trunk_set = openstackclient.network.v2.network_trunk:SetNetworkTrunk
+ network_trunk_show = openstackclient.network.v2.network_trunk:ShowNetworkTrunk
+ network_trunk_unset = openstackclient.network.v2.network_trunk:UnsetNetworkTrunk
+
port_create = openstackclient.network.v2.port:CreatePort
port_delete = openstackclient.network.v2.port:DeletePort
port_list = openstackclient.network.v2.port:ListPort
@@ -806,3 +819,11 @@ openstack.volume.v3 =
volume_transfer_request_delete = openstackclient.volume.v2.volume_transfer_request:DeleteTransferRequest
volume_transfer_request_list = openstackclient.volume.v2.volume_transfer_request:ListTransferRequest
volume_transfer_request_show = openstackclient.volume.v2.volume_transfer_request:ShowTransferRequest
+
+ volume_summary = openstackclient.volume.v3.volume:VolumeSummary
+ volume_revert = openstackclient.volume.v3.volume:VolumeRevertToSnapshot
+ block_storage_log_level_list = openstackclient.volume.v3.block_storage_log_level:BlockStorageLogLevelList
+ block_storage_log_level_set = openstackclient.volume.v3.block_storage_log_level:BlockStorageLogLevelSet
+ block_storage_cleanup = openstackclient.volume.v3.block_storage_cleanup:BlockStorageCleanup
+ block_storage_volume_manageable_list = openstackclient.volume.v3.block_storage_manage:BlockStorageManageVolumes
+ block_storage_snapshot_manageable_list = openstackclient.volume.v3.block_storage_manage:BlockStorageManageSnapshots
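These setup.cfg entries are what make the new commands discoverable: the client looks up a command name in the matching entry-point group and loads the mapped class. Roughly (a simplified sketch, not cliff's actual discovery code; assumes Python 3.10+ and an installed openstackclient):

    from importlib.metadata import entry_points

    for ep in entry_points(group='openstack.volume.v3'):
        if ep.name == 'block_storage_cleanup':
            # Resolves the dotted path from setup.cfg to the command class.
            print(ep.name, '->', ep.load())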
diff --git a/tox.ini b/tox.ini
index 5f02e7c2..3de7dd38 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,7 +1,7 @@
[tox]
minversion = 3.18.0
-envlist = py38,pep8
-skipsdist = True
+envlist = py3,pep8
+#skipsdist = True
# Automatic envs (pyXX) will only use the python version appropriate to that
# env and ignore basepython inherited from [testenv] if we set
# ignore_basepython_conflict.
@@ -10,19 +10,21 @@ ignore_basepython_conflict = True
[testenv]
usedevelop = True
basepython = python3
-setenv = OS_STDOUT_CAPTURE=1
- OS_STDERR_CAPTURE=1
- OS_TEST_TIMEOUT=60
+setenv =
+ OS_STDOUT_CAPTURE=1
+ OS_STDERR_CAPTURE=1
+ OS_TEST_TIMEOUT=60
deps =
- -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master}
- -r{toxinidir}/test-requirements.txt
- -r{toxinidir}/requirements.txt
+ -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master}
+ -r{toxinidir}/test-requirements.txt
+ -r{toxinidir}/requirements.txt
commands = stestr run {posargs}
allowlist_externals = stestr
[testenv:fast8]
# Use same environment directory as pep8 env to save space and install time
-setenv = VIRTUAL_ENV={envdir}
+setenv =
+ VIRTUAL_ENV={envdir}
envdir = {toxworkdir}/pep8
commands =
{toxinidir}/tools/fast8.sh
@@ -74,14 +76,18 @@ commands =
allowlist_externals = stestr
[testenv:functional]
-setenv = OS_TEST_PATH=./openstackclient/tests/functional
-passenv = OS_*
+setenv =
+ OS_TEST_PATH=./openstackclient/tests/functional
+passenv =
+ OS_*
commands =
stestr run {posargs}
[testenv:functional-tips]
-setenv = OS_TEST_PATH=./openstackclient/tests/functional
-passenv = OS_*
+setenv =
+ OS_TEST_PATH=./openstackclient/tests/functional
+passenv =
+ OS_*
commands =
python -m pip install -q -U -e "git+file://{toxinidir}/../cliff#egg=cliff"
python -m pip install -q -U -e "git+file://{toxinidir}/../keystoneauth#egg=keystoneauth1"
@@ -108,7 +114,8 @@ commands =
coverage xml -o cover/coverage.xml
[testenv:debug]
-passenv = OS_*
+passenv =
+ OS_*
commands =
oslo_debug_helper -t openstackclient/tests {posargs}