Diffstat (limited to 'openstackclient')
-rw-r--r-- openstackclient/common/configuration.py | 6
-rw-r--r-- openstackclient/common/progressbar.py | 2
-rw-r--r-- openstackclient/common/quota.py | 791
-rw-r--r-- openstackclient/compute/v2/flavor.py | 2
-rw-r--r-- openstackclient/compute/v2/hypervisor.py | 137
-rw-r--r-- openstackclient/compute/v2/hypervisor_stats.py | 40
-rw-r--r-- openstackclient/compute/v2/server.py | 584
-rw-r--r-- openstackclient/compute/v2/server_event.py | 6
-rw-r--r-- openstackclient/compute/v2/server_group.py | 103
-rw-r--r-- openstackclient/compute/v2/server_migration.py | 130
-rw-r--r-- openstackclient/compute/v2/server_volume.py | 73
-rw-r--r-- openstackclient/compute/v2/usage.py | 69
-rw-r--r-- openstackclient/identity/v3/endpoint_group.py | 2
-rw-r--r-- openstackclient/identity/v3/identity_provider.py | 45
-rw-r--r-- openstackclient/identity/v3/trust.py | 87
-rw-r--r-- openstackclient/image/v2/image.py | 1225
-rw-r--r-- openstackclient/image/v2/metadef_namespaces.py | 312
-rw-r--r-- openstackclient/image/v2/task.py | 179
-rw-r--r-- openstackclient/network/v2/floating_ip.py | 2
-rw-r--r-- openstackclient/network/v2/floating_ip_port_forwarding.py | 2
-rw-r--r-- openstackclient/network/v2/l3_conntrack_helper.py | 2
-rw-r--r-- openstackclient/network/v2/local_ip.py | 2
-rw-r--r-- openstackclient/network/v2/local_ip_association.py | 2
-rw-r--r-- openstackclient/network/v2/ndp_proxy.py | 269
-rw-r--r-- openstackclient/network/v2/network.py | 17
-rw-r--r-- openstackclient/network/v2/network_agent.py | 9
-rw-r--r-- openstackclient/network/v2/network_flavor.py | 2
-rw-r--r-- openstackclient/network/v2/network_flavor_profile.py | 4
-rw-r--r-- openstackclient/network/v2/network_meter.py | 2
-rw-r--r-- openstackclient/network/v2/network_meter_rule.py | 2
-rw-r--r-- openstackclient/network/v2/network_qos_policy.py | 2
-rw-r--r-- openstackclient/network/v2/network_qos_rule.py | 42
-rw-r--r-- openstackclient/network/v2/network_qos_rule_type.py | 29
-rw-r--r-- openstackclient/network/v2/network_rbac.py | 17
-rw-r--r-- openstackclient/network/v2/network_segment.py | 2
-rw-r--r-- openstackclient/network/v2/network_segment_range.py | 2
-rw-r--r-- openstackclient/network/v2/network_trunk.py | 402
-rw-r--r-- openstackclient/network/v2/port.py | 2
-rw-r--r-- openstackclient/network/v2/router.py | 41
-rw-r--r-- openstackclient/network/v2/security_group.py | 1
-rw-r--r-- openstackclient/network/v2/security_group_rule.py | 2
-rw-r--r-- openstackclient/network/v2/subnet.py | 9
-rw-r--r-- openstackclient/network/v2/subnet_pool.py | 2
-rw-r--r-- openstackclient/tests/functional/base.py | 83
-rw-r--r-- openstackclient/tests/functional/common/test_args.py | 25
-rw-r--r-- openstackclient/tests/functional/common/test_availability_zone.py | 8
-rw-r--r-- openstackclient/tests/functional/common/test_configuration.py | 24
-rw-r--r-- openstackclient/tests/functional/common/test_extension.py | 56
-rw-r--r-- openstackclient/tests/functional/common/test_help.py | 8
-rw-r--r-- openstackclient/tests/functional/common/test_module.py | 16
-rw-r--r-- openstackclient/tests/functional/common/test_quota.py | 108
-rw-r--r-- openstackclient/tests/functional/common/test_versions.py | 6
-rw-r--r-- openstackclient/tests/functional/compute/v2/common.py | 43
-rw-r--r-- openstackclient/tests/functional/compute/v2/test_aggregate.py | 88
-rw-r--r-- openstackclient/tests/functional/compute/v2/test_flavor.py | 114
-rw-r--r-- openstackclient/tests/functional/compute/v2/test_hypervisor.py | 52
-rw-r--r-- openstackclient/tests/functional/compute/v2/test_keypair.py | 8
-rw-r--r-- openstackclient/tests/functional/compute/v2/test_server.py | 660
-rw-r--r-- openstackclient/tests/functional/compute/v2/test_server_event.py | 44
-rw-r--r-- openstackclient/tests/functional/compute/v2/test_server_group.py | 79
-rw-r--r-- openstackclient/tests/functional/identity/v3/test_project.py | 14
-rw-r--r-- openstackclient/tests/functional/image/v1/test_image.py | 63
-rw-r--r-- openstackclient/tests/functional/image/v2/test_image.py | 186
-rw-r--r-- openstackclient/tests/functional/network/v2/common.py | 28
-rw-r--r-- openstackclient/tests/functional/network/v2/test_address_group.py | 89
-rw-r--r-- openstackclient/tests/functional/network/v2/test_address_scope.py | 74
-rw-r--r-- openstackclient/tests/functional/network/v2/test_floating_ip.py | 125
-rw-r--r-- openstackclient/tests/functional/network/v2/test_ip_availability.py | 20
-rw-r--r-- openstackclient/tests/functional/network/v2/test_l3_conntrack_helper.py | 48
-rw-r--r-- openstackclient/tests/functional/network/v2/test_local_ip.py | 83
-rw-r--r-- openstackclient/tests/functional/network/v2/test_network.py | 174
-rw-r--r-- openstackclient/tests/functional/network/v2/test_network_agent.py | 70
-rw-r--r-- openstackclient/tests/functional/network/v2/test_network_flavor.py | 89
-rw-r--r-- openstackclient/tests/functional/network/v2/test_network_flavor_profile.py | 68
-rw-r--r-- openstackclient/tests/functional/network/v2/test_network_meter.py | 62
-rw-r--r-- openstackclient/tests/functional/network/v2/test_network_meter_rule.py | 60
-rw-r--r-- openstackclient/tests/functional/network/v2/test_network_ndp_proxy.py | 217
-rw-r--r-- openstackclient/tests/functional/network/v2/test_network_qos_policy.py | 46
-rw-r--r-- openstackclient/tests/functional/network/v2/test_network_qos_rule.py | 200
-rw-r--r-- openstackclient/tests/functional/network/v2/test_network_qos_rule_type.py | 43
-rw-r--r-- openstackclient/tests/functional/network/v2/test_network_rbac.py | 35
-rw-r--r-- openstackclient/tests/functional/network/v2/test_network_segment.py | 58
-rw-r--r-- openstackclient/tests/functional/network/v2/test_network_segment_range.py | 54
-rw-r--r-- openstackclient/tests/functional/network/v2/test_network_service_provider.py | 14
-rw-r--r-- openstackclient/tests/functional/network/v2/test_network_trunk.py | 149
-rw-r--r-- openstackclient/tests/functional/network/v2/test_port.py | 154
-rw-r--r-- openstackclient/tests/functional/network/v2/test_router.py | 154
-rw-r--r-- openstackclient/tests/functional/network/v2/test_security_group.py | 24
-rw-r--r-- openstackclient/tests/functional/network/v2/test_security_group_rule.py | 31
-rw-r--r-- openstackclient/tests/functional/network/v2/test_subnet.py | 92
-rw-r--r-- openstackclient/tests/functional/network/v2/test_subnet_pool.py | 59
-rw-r--r-- openstackclient/tests/functional/volume/base.py | 11
-rw-r--r-- openstackclient/tests/functional/volume/v1/test_qos.py | 53
-rw-r--r-- openstackclient/tests/functional/volume/v1/test_service.py | 35
-rw-r--r-- openstackclient/tests/functional/volume/v1/test_snapshot.py | 109
-rw-r--r-- openstackclient/tests/functional/volume/v1/test_transfer_request.py | 57
-rw-r--r-- openstackclient/tests/functional/volume/v1/test_volume.py | 145
-rw-r--r-- openstackclient/tests/functional/volume/v1/test_volume_type.py | 110
-rw-r--r-- openstackclient/tests/functional/volume/v2/test_qos.py | 116
-rw-r--r-- openstackclient/tests/functional/volume/v2/test_service.py | 44
-rw-r--r-- openstackclient/tests/functional/volume/v2/test_transfer_request.py | 62
-rw-r--r-- openstackclient/tests/functional/volume/v2/test_volume.py | 136
-rw-r--r-- openstackclient/tests/functional/volume/v2/test_volume_backup.py | 29
-rw-r--r-- openstackclient/tests/functional/volume/v2/test_volume_snapshot.py | 118
-rw-r--r-- openstackclient/tests/functional/volume/v2/test_volume_type.py | 124
-rw-r--r-- openstackclient/tests/functional/volume/v3/test_qos.py | 116
-rw-r--r-- openstackclient/tests/functional/volume/v3/test_transfer_request.py | 66
-rw-r--r-- openstackclient/tests/functional/volume/v3/test_volume.py | 130
-rw-r--r-- openstackclient/tests/functional/volume/v3/test_volume_snapshot.py | 109
-rw-r--r-- openstackclient/tests/functional/volume/v3/test_volume_type.py | 124
-rw-r--r-- openstackclient/tests/unit/common/test_availability_zone.py | 7
-rw-r--r-- openstackclient/tests/unit/common/test_configuration.py | 57
-rw-r--r-- openstackclient/tests/unit/common/test_extension.py | 8
-rw-r--r-- openstackclient/tests/unit/common/test_limits.py | 4
-rw-r--r-- openstackclient/tests/unit/common/test_project_purge.py | 10
-rw-r--r-- openstackclient/tests/unit/common/test_quota.py | 304
-rw-r--r-- openstackclient/tests/unit/compute/v2/fakes.py | 620
-rw-r--r-- openstackclient/tests/unit/compute/v2/test_flavor.py | 57
-rw-r--r-- openstackclient/tests/unit/compute/v2/test_hypervisor.py | 230
-rw-r--r-- openstackclient/tests/unit/compute/v2/test_hypervisor_stats.py | 51
-rw-r--r-- openstackclient/tests/unit/compute/v2/test_server.py | 1015
-rw-r--r-- openstackclient/tests/unit/compute/v2/test_server_group.py | 285
-rw-r--r-- openstackclient/tests/unit/compute/v2/test_server_migration.py | 277
-rw-r--r-- openstackclient/tests/unit/compute/v2/test_server_volume.py | 186
-rw-r--r-- openstackclient/tests/unit/compute/v2/test_usage.py | 45
-rw-r--r-- openstackclient/tests/unit/fakes.py | 40
-rw-r--r-- openstackclient/tests/unit/identity/v3/test_identity_provider.py | 170
-rw-r--r-- openstackclient/tests/unit/identity/v3/test_trust.py | 108
-rw-r--r-- openstackclient/tests/unit/image/v2/fakes.py | 160
-rw-r--r-- openstackclient/tests/unit/image/v2/test_image.py | 644
-rw-r--r-- openstackclient/tests/unit/image/v2/test_metadef_namespaces.py | 215
-rw-r--r-- openstackclient/tests/unit/image/v2/test_task.py | 187
-rw-r--r-- openstackclient/tests/unit/network/v2/fakes.py | 700
-rw-r--r-- openstackclient/tests/unit/network/v2/test_floating_ip_network.py | 8
-rw-r--r-- openstackclient/tests/unit/network/v2/test_floating_ip_port_forwarding.py | 2
-rw-r--r-- openstackclient/tests/unit/network/v2/test_local_ip.py | 16
-rw-r--r-- openstackclient/tests/unit/network/v2/test_local_ip_association.py | 2
-rw-r--r-- openstackclient/tests/unit/network/v2/test_ndp_proxy.py | 454
-rw-r--r-- openstackclient/tests/unit/network/v2/test_network.py | 5
-rw-r--r-- openstackclient/tests/unit/network/v2/test_network_agent.py | 53
-rw-r--r-- openstackclient/tests/unit/network/v2/test_network_flavor.py | 6
-rw-r--r-- openstackclient/tests/unit/network/v2/test_network_flavor_profile.py | 54
-rw-r--r-- openstackclient/tests/unit/network/v2/test_network_qos_rule.py | 327
-rw-r--r-- openstackclient/tests/unit/network/v2/test_network_qos_rule_type.py | 34
-rw-r--r-- openstackclient/tests/unit/network/v2/test_network_rbac.py | 63
-rw-r--r-- openstackclient/tests/unit/network/v2/test_network_trunk.py | 851
-rw-r--r-- openstackclient/tests/unit/network/v2/test_port.py | 94
-rw-r--r-- openstackclient/tests/unit/network/v2/test_router.py | 9
-rw-r--r-- openstackclient/tests/unit/network/v2/test_subnet.py | 4
-rw-r--r-- openstackclient/tests/unit/volume/v1/fakes.py | 1131
-rw-r--r-- openstackclient/tests/unit/volume/v1/test_qos_specs.py | 44
-rw-r--r-- openstackclient/tests/unit/volume/v1/test_service.py | 14
-rw-r--r-- openstackclient/tests/unit/volume/v1/test_transfer_request.py | 47
-rw-r--r-- openstackclient/tests/unit/volume/v1/test_type.py | 69
-rw-r--r-- openstackclient/tests/unit/volume/v1/test_volume.py | 38
-rw-r--r-- openstackclient/tests/unit/volume/v1/test_volume_backup.py | 91
-rw-r--r-- openstackclient/tests/unit/volume/v2/fakes.py | 2183
-rw-r--r-- openstackclient/tests/unit/volume/v2/test_backup_record.py | 20
-rw-r--r-- openstackclient/tests/unit/volume/v2/test_consistency_group.py | 74
-rw-r--r-- openstackclient/tests/unit/volume/v2/test_consistency_group_snapshot.py | 26
-rw-r--r-- openstackclient/tests/unit/volume/v2/test_qos_specs.py | 26
-rw-r--r-- openstackclient/tests/unit/volume/v2/test_service.py | 14
-rw-r--r-- openstackclient/tests/unit/volume/v2/test_type.py | 94
-rw-r--r-- openstackclient/tests/unit/volume/v2/test_volume.py | 170
-rw-r--r-- openstackclient/tests/unit/volume/v2/test_volume_backend.py | 8
-rw-r--r-- openstackclient/tests/unit/volume/v2/test_volume_backup.py | 112
-rw-r--r-- openstackclient/tests/unit/volume/v2/test_volume_host.py | 14
-rw-r--r-- openstackclient/tests/unit/volume/v2/test_volume_snapshot.py | 18
-rw-r--r-- openstackclient/tests/unit/volume/v2/test_volume_transfer_request.py | 47
-rw-r--r-- openstackclient/tests/unit/volume/v3/fakes.py | 766
-rw-r--r-- openstackclient/tests/unit/volume/v3/test_block_storage_cleanup.py | 178
-rw-r--r-- openstackclient/tests/unit/volume/v3/test_block_storage_cluster.py | 434
-rw-r--r-- openstackclient/tests/unit/volume/v3/test_block_storage_log_level.py | 233
-rw-r--r-- openstackclient/tests/unit/volume/v3/test_block_storage_manage.py | 411
-rw-r--r-- openstackclient/tests/unit/volume/v3/test_block_storage_resource_filter.py | 142
-rw-r--r-- openstackclient/tests/unit/volume/v3/test_volume.py | 179
-rw-r--r-- openstackclient/tests/unit/volume/v3/test_volume_attachment.py | 23
-rw-r--r-- openstackclient/tests/unit/volume/v3/test_volume_group.py | 199
-rw-r--r-- openstackclient/tests/unit/volume/v3/test_volume_group_snapshot.py | 9
-rw-r--r-- openstackclient/tests/unit/volume/v3/test_volume_group_type.py | 29
-rw-r--r-- openstackclient/tests/unit/volume/v3/test_volume_message.py | 14
195 files changed, 18404 insertions, 7586 deletions
diff --git a/openstackclient/common/configuration.py b/openstackclient/common/configuration.py
index 49ef0e05..cb415505 100644
--- a/openstackclient/common/configuration.py
+++ b/openstackclient/common/configuration.py
@@ -45,7 +45,6 @@ class ShowConfiguration(command.ShowOne):
return parser
def take_action(self, parsed_args):
-
info = self.app.client_manager.get_configuration()
# Assume a default secret list in case we do not have an auth_plugin
@@ -63,4 +62,9 @@ class ShowConfiguration(command.ShowOne):
value = REDACTED
info['auth.' + key] = value
+ if parsed_args.mask:
+ for secret_opt in secret_opts:
+ if secret_opt in info:
+ info[secret_opt] = REDACTED
+
return zip(*sorted(info.items()))
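For illustration, the secret-masking logic added above reduces to the following standalone sketch. The REDACTED value, the option names, and the `mask` flag are made-up stand-ins; only the masking pattern mirrors the change.

    # Hypothetical stand-ins for the module's REDACTED constant and secret
    # option list; the loop mirrors the masking added in the hunk above.
    REDACTED = '<redacted>'
    secret_opts = ['password', 'token', 'secret']
    mask = True  # plays the role of parsed_args.mask

    info = {'auth_type': 'password', 'password': 'hunter2', 'region_name': 'RegionOne'}
    if mask:
        for secret_opt in secret_opts:
            if secret_opt in info:
                info[secret_opt] = REDACTED

    print(sorted(info.items()))
    # [('auth_type', 'password'), ('password', '<redacted>'), ('region_name', 'RegionOne')]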
diff --git a/openstackclient/common/progressbar.py b/openstackclient/common/progressbar.py
index ef767a9c..7678aceb 100644
--- a/openstackclient/common/progressbar.py
+++ b/openstackclient/common/progressbar.py
@@ -17,7 +17,7 @@ import sys
class _ProgressBarBase(object):
- """A progress bar provider for a wrapped obect.
+ """A progress bar provider for a wrapped object.
Base abstract class used by specific class wrapper to show
a progress bar when the wrapped object are consumed.
diff --git a/openstackclient/common/quota.py b/openstackclient/common/quota.py
index e096f186..246e44b3 100644
--- a/openstackclient/common/quota.py
+++ b/openstackclient/common/quota.py
@@ -15,6 +15,7 @@
"""Quota action implementations"""
+import argparse
import itertools
import logging
import sys
@@ -25,7 +26,6 @@ from osc_lib import utils
from openstackclient.i18n import _
from openstackclient.network import common
-
LOG = logging.getLogger(__name__)
# List the quota items, map the internal argument name to the option
@@ -78,9 +78,17 @@ NETWORK_QUOTAS = {
'subnetpool': 'subnetpools',
}
-NETWORK_KEYS = ['floating_ips', 'networks', 'rbac_policies', 'routers',
- 'ports', 'security_group_rules', 'security_groups',
- 'subnet_pools', 'subnets']
+NETWORK_KEYS = [
+ 'floating_ips',
+ 'networks',
+ 'rbac_policies',
+ 'routers',
+ 'ports',
+ 'security_group_rules',
+ 'security_groups',
+ 'subnet_pools',
+ 'subnets',
+]
def _xform_get_quota(data, value, keys):
@@ -94,168 +102,158 @@ def _xform_get_quota(data, value, keys):
return res
-class BaseQuota(object):
- def _get_project(self, parsed_args):
- if parsed_args.project is not None:
- identity_client = self.app.client_manager.identity
- project = utils.find_resource(
- identity_client.projects,
- parsed_args.project,
- )
- project_id = project.id
- project_name = project.name
- elif self.app.client_manager.auth_ref:
- # Get the project from the current auth
- project = self.app.client_manager.auth_ref
- project_id = project.project_id
- project_name = project.project_name
+def get_project(app, project):
+ if project is not None:
+ identity_client = app.client_manager.identity
+ project = utils.find_resource(
+ identity_client.projects,
+ project,
+ )
+ project_id = project.id
+ project_name = project.name
+ elif app.client_manager.auth_ref:
+ # Get the project from the current auth
+ project = app.client_manager.auth_ref
+ project_id = project.project_id
+ project_name = project.project_name
+ else:
+ project_id = None
+ project_name = None
+
+ return {
+ 'id': project_id,
+ 'name': project_name,
+ }
+
+
+def get_compute_quotas(
+ app,
+ project_id,
+ *,
+ quota_class=False,
+ detail=False,
+ default=False,
+):
+ try:
+ client = app.client_manager.compute
+ if quota_class:
+ # NOTE(stephenfin): The 'project' argument here could be anything
+ # as the nova API doesn't care what you pass in. We only pass the
+ # project in to avoid weirding people out :)
+ quota = client.quota_classes.get(project_id)
+ elif default:
+ quota = client.quotas.defaults(project_id)
+ else:
+ quota = client.quotas.get(project_id, detail=detail)
+ except Exception as e:
+ if type(e).__name__ == 'EndpointNotFound':
+ return {}
+ raise
+ return quota._info
+
+
+def get_volume_quotas(
+ app,
+ project_id,
+ *,
+ quota_class=False,
+ detail=False,
+ default=False,
+):
+ try:
+ client = app.client_manager.volume
+ if quota_class:
+ quota = client.quota_classes.get(project_id)
+ elif default:
+ quota = client.quotas.defaults(project_id)
else:
- project = None
- project_id = None
- project_name = None
- project_info = {}
- project_info['id'] = project_id
- project_info['name'] = project_name
- return project_info
-
- def get_compute_quota(self, client, parsed_args):
- quota_class = (
- parsed_args.quota_class if 'quota_class' in parsed_args else False)
- detail = parsed_args.detail if 'detail' in parsed_args else False
- default = parsed_args.default if 'default' in parsed_args else False
- try:
- if quota_class:
- quota = client.quota_classes.get(parsed_args.project)
- else:
- project_info = self._get_project(parsed_args)
- project = project_info['id']
- if default:
- quota = client.quotas.defaults(project)
- else:
- quota = client.quotas.get(project, detail=detail)
- except Exception as e:
- if type(e).__name__ == 'EndpointNotFound':
- return {}
- else:
- raise
- return quota._info
-
- def get_volume_quota(self, client, parsed_args):
- quota_class = (
- parsed_args.quota_class if 'quota_class' in parsed_args else False)
- default = parsed_args.default if 'default' in parsed_args else False
- try:
- if quota_class:
- quota = client.quota_classes.get(parsed_args.project)
- else:
- project_info = self._get_project(parsed_args)
- project = project_info['id']
- if default:
- quota = client.quotas.defaults(project)
- else:
- quota = client.quotas.get(project)
- except Exception as e:
- if type(e).__name__ == 'EndpointNotFound':
- return {}
- else:
- raise
- return quota._info
-
- def _network_quota_to_dict(self, network_quota):
+ quota = client.quotas.get(project_id, usage=detail)
+ except Exception as e:
+ if type(e).__name__ == 'EndpointNotFound':
+ return {}
+ else:
+ raise
+ return quota._info
+
+
+def get_network_quotas(
+ app,
+ project_id,
+ *,
+ quota_class=False,
+ detail=False,
+ default=False,
+):
+ def _network_quota_to_dict(network_quota, detail=False):
if type(network_quota) is not dict:
dict_quota = network_quota.to_dict()
else:
dict_quota = network_quota
- return {k: v for k, v in dict_quota.items() if v is not None}
- def get_network_quota(self, parsed_args):
- quota_class = (
- parsed_args.quota_class if 'quota_class' in parsed_args else False)
- detail = parsed_args.detail if 'detail' in parsed_args else False
- default = parsed_args.default if 'default' in parsed_args else False
- if quota_class:
- return {}
- if self.app.client_manager.is_network_endpoint_enabled():
- project_info = self._get_project(parsed_args)
- project = project_info['id']
- client = self.app.client_manager.network
- if default:
- network_quota = client.get_quota_default(project)
- network_quota = self._network_quota_to_dict(network_quota)
- else:
- network_quota = client.get_quota(project,
- details=detail)
- network_quota = self._network_quota_to_dict(network_quota)
- if detail:
- # NOTE(slaweq): Neutron returns values with key "used" but
- # Nova for example returns same data with key "in_use"
- # instead.
- # Because of that we need to convert Neutron key to
- # the same as is returned from Nova to make result
- # more consistent
- for key, values in network_quota.items():
- if type(values) is dict and "used" in values:
- values[u'in_use'] = values.pop("used")
- network_quota[key] = values
- return network_quota
- else:
- return {}
+ result = {}
+ for key, values in dict_quota.items():
+ if values is None:
+ continue
-class ListQuota(command.Lister, BaseQuota):
- _description = _(
- "List quotas for all projects with non-default quota values or "
- "list detailed quota informations for requested project")
+ # NOTE(slaweq): Neutron returns values with key "used" but Nova for
+ # example returns same data with key "in_use" instead. Because of
+ # that we need to convert Neutron key to the same as is returned
+ # from Nova to make result more consistent
+ if isinstance(values, dict) and 'used' in values:
+ values['in_use'] = values.pop("used")
- def _get_detailed_quotas(self, parsed_args):
- columns = (
- 'resource',
- 'in_use',
- 'reserved',
- 'limit'
- )
- column_headers = (
- 'Resource',
- 'In Use',
- 'Reserved',
- 'Limit'
- )
- quotas = {}
- if parsed_args.compute:
- quotas.update(self.get_compute_quota(
- self.app.client_manager.compute, parsed_args))
- if parsed_args.network:
- quotas.update(self.get_network_quota(parsed_args))
+ result[key] = values
- result = []
- for resource, values in quotas.items():
- # NOTE(slaweq): there is no detailed quotas info for some resources
- # and it should't be displayed here
- if type(values) is dict:
- result.append({
- 'resource': resource,
- 'in_use': values.get('in_use'),
- 'reserved': values.get('reserved'),
- 'limit': values.get('limit')
- })
- return (column_headers,
- (utils.get_dict_properties(
- s, columns,
- ) for s in result))
+ return result
+
+ # neutron doesn't have the concept of quota classes and if we're using
+ # nova-network we already fetched this
+ if quota_class:
+ return {}
+
+ # we have nothing to return if we are not using neutron
+ if not app.client_manager.is_network_endpoint_enabled():
+ return {}
+
+ client = app.client_manager.network
+ if default:
+ network_quota = client.get_quota_default(project_id)
+ network_quota = _network_quota_to_dict(network_quota)
+ else:
+ network_quota = client.get_quota(project_id, details=detail)
+ network_quota = _network_quota_to_dict(network_quota, detail=detail)
+ return network_quota
+
+
+class ListQuota(command.Lister):
+ _description = _(
+ "List quotas for all projects with non-default quota values or "
+ "list detailed quota information for requested project"
+ )
def get_parser(self, prog_name):
- parser = super(ListQuota, self).get_parser(prog_name)
+ parser = super().get_parser(prog_name)
+ # TODO(stephenfin): Remove in OSC 8.0
parser.add_argument(
'--project',
metavar='<project>',
- help=_('List quotas for this project <project> (name or ID)'),
+ help=_(
+ "**Deprecated** List quotas for this project <project> "
+ "(name or ID). "
+ "Use 'quota show' instead."
+ ),
)
+ # TODO(stephenfin): Remove in OSC 8.0
parser.add_argument(
'--detail',
dest='detail',
action='store_true',
default=False,
- help=_('Show details about quotas usage')
+ help=_(
+ "**Deprecated** Show details about quotas usage. "
+ "Use 'quota show --usage' instead."
+ ),
)
option = parser.add_mutually_exclusive_group(required=True)
option.add_argument(
@@ -278,7 +276,85 @@ class ListQuota(command.Lister, BaseQuota):
)
return parser
+ def _get_detailed_quotas(self, parsed_args):
+ project_info = get_project(self.app, parsed_args.project)
+ project = project_info['id']
+
+ quotas = {}
+
+ if parsed_args.compute:
+ quotas.update(
+ get_compute_quotas(
+ self.app,
+ project,
+ detail=parsed_args.detail,
+ )
+ )
+
+ if parsed_args.network:
+ quotas.update(
+ get_network_quotas(
+ self.app,
+ project,
+ detail=parsed_args.detail,
+ )
+ )
+
+ if parsed_args.volume:
+ quotas.update(
+ get_volume_quotas(
+ self.app,
+ project,
+ detail=parsed_args.detail,
+ ),
+ )
+
+ result = []
+ for resource, values in quotas.items():
+ # NOTE(slaweq): there is no detailed quotas info for some resources
+ # and it shouldn't be displayed here
+ if isinstance(values, dict):
+ result.append(
+ {
+ 'resource': resource,
+ 'in_use': values.get('in_use'),
+ 'reserved': values.get('reserved'),
+ 'limit': values.get('limit'),
+ }
+ )
+
+ columns = (
+ 'resource',
+ 'in_use',
+ 'reserved',
+ 'limit',
+ )
+ column_headers = (
+ 'Resource',
+ 'In Use',
+ 'Reserved',
+ 'Limit',
+ )
+
+ return (
+ column_headers,
+ (utils.get_dict_properties(s, columns) for s in result),
+ )
+
def take_action(self, parsed_args):
+ if parsed_args.detail:
+ msg = _(
+ "The --detail option has been deprecated. "
+ "Use 'openstack quota show --usage' instead."
+ )
+ self.log.warning(msg)
+ elif parsed_args.project: # elif to avoid being too noisy
+ msg = _(
+ "The --project option has been deprecated. "
+ "Use 'openstack quota show' instead."
+ )
+ self.log.warning(msg)
+
result = []
project_ids = []
if parsed_args.project is None:
@@ -295,14 +371,16 @@ class ListQuota(command.Lister, BaseQuota):
if parsed_args.compute:
if parsed_args.detail:
return self._get_detailed_quotas(parsed_args)
+
compute_client = self.app.client_manager.compute
for p in project_ids:
try:
data = compute_client.quotas.get(p)
except Exception as ex:
if (
- type(ex).__name__ == 'NotFound' or
- ex.http_status >= 400 and ex.http_status <= 499
+ type(ex).__name__ == 'NotFound'
+ or ex.http_status >= 400
+ and ex.http_status <= 499
):
# Project not found, move on to next one
LOG.warning("Project %s not found: %s" % (p, ex))
@@ -352,15 +430,15 @@ class ListQuota(command.Lister, BaseQuota):
'Server Groups',
'Server Group Members',
)
- return (column_headers,
- (utils.get_dict_properties(
- s, columns,
- ) for s in result))
+ return (
+ column_headers,
+ (utils.get_dict_properties(s, columns) for s in result),
+ )
if parsed_args.volume:
if parsed_args.detail:
- LOG.warning("Volume service doesn't provide detailed quota"
- " information")
+ return self._get_detailed_quotas(parsed_args)
+
volume_client = self.app.client_manager.volume
for p in project_ids:
try:
@@ -405,14 +483,16 @@ class ListQuota(command.Lister, BaseQuota):
'Snapshots',
'Volumes',
)
- return (column_headers,
- (utils.get_dict_properties(
- s, columns,
- ) for s in result))
+
+ return (
+ column_headers,
+ (utils.get_dict_properties(s, columns) for s in result),
+ )
if parsed_args.network:
if parsed_args.detail:
return self._get_detailed_quotas(parsed_args)
+
client = self.app.client_manager.network
for p in project_ids:
try:
@@ -461,12 +541,13 @@ class ListQuota(command.Lister, BaseQuota):
'Security Groups',
'Security Group Rules',
'Subnets',
- 'Subnet Pools'
+ 'Subnet Pools',
+ )
+
+ return (
+ column_headers,
+ (utils.get_dict_properties(s, columns) for s in result),
)
- return (column_headers,
- (utils.get_dict_properties(
- s, columns,
- ) for s in result))
return ((), ())
@@ -477,10 +558,13 @@ class SetQuota(common.NetDetectionMixin, command.Command):
def _build_options_list(self):
help_fmt = _('New value for the %s quota')
# Compute and volume quota options are always the same
- rets = [(k, v, help_fmt % v) for k, v in itertools.chain(
- COMPUTE_QUOTAS.items(),
- VOLUME_QUOTAS.items(),
- )]
+ rets = [
+ (k, v, help_fmt % v)
+ for k, v in itertools.chain(
+ COMPUTE_QUOTAS.items(),
+ VOLUME_QUOTAS.items(),
+ )
+ ]
# For docs build, we want to produce helps for both neutron and
# nova-network options. They overlap, so we have to figure out which
# need to be tagged as specific to one network type or the other.
@@ -497,10 +581,12 @@ class SetQuota(common.NetDetectionMixin, command.Command):
rets.append((k, v, _help))
elif self.is_neutron:
rets.extend(
- [(k, v, help_fmt % v) for k, v in NETWORK_QUOTAS.items()])
+ [(k, v, help_fmt % v) for k, v in NETWORK_QUOTAS.items()]
+ )
elif self.is_nova_network:
rets.extend(
- [(k, v, help_fmt % v) for k, v in NOVA_NETWORK_QUOTAS.items()])
+ [(k, v, help_fmt % v) for k, v in NOVA_NETWORK_QUOTAS.items()]
+ )
return rets
def get_parser(self, prog_name):
@@ -508,14 +594,20 @@ class SetQuota(common.NetDetectionMixin, command.Command):
parser.add_argument(
'project',
metavar='<project/class>',
- help=_('Set quotas for this project or class (name/ID)'),
+ help=_('Set quotas for this project or class (name or ID)'),
)
+ # TODO(stephenfin): Remove in OSC 8.0
parser.add_argument(
'--class',
dest='quota_class',
action='store_true',
default=False,
- help=_('Set quotas for <class>'),
+ help=_(
+ '**Deprecated** Set quotas for <class>. '
+ 'Deprecated as quota classes were never fully implemented '
+ 'and only the default class is supported. '
+ '(compute and volume only)'
+ ),
)
for k, v, h in self._build_options_list():
parser.add_argument(
@@ -530,21 +622,49 @@ class SetQuota(common.NetDetectionMixin, command.Command):
metavar='<volume-type>',
help=_('Set quotas for a specific <volume-type>'),
)
- parser.add_argument(
+ force_group = parser.add_mutually_exclusive_group()
+ force_group.add_argument(
'--force',
action='store_true',
- help=_('Force quota update (only supported by compute and '
- 'network)')
+ dest='force',
+ # TODO(stephenfin): Change the default to False in Z or later
+ default=None,
+ help=_(
+ 'Force quota update (only supported by compute and network) '
+ '(default for network)'
+ ),
)
- parser.add_argument(
+ force_group.add_argument(
+ '--no-force',
+ action='store_false',
+ dest='force',
+ default=None,
+ help=_(
+ 'Do not force quota update '
+ '(only supported by compute and network) '
+ '(default for compute)'
+ ),
+ )
+ # kept here for backwards compatibility/to keep the neutron folks happy
+ force_group.add_argument(
'--check-limit',
- action='store_true',
- help=_('Check quota limit when updating (only supported by '
- 'network)')
+ action='store_false',
+ dest='force',
+ default=None,
+ help=argparse.SUPPRESS,
)
return parser
def take_action(self, parsed_args):
+ if parsed_args.quota_class:
+ msg = _(
+ "The '--class' option has been deprecated. Quota classes were "
+ "never fully implemented and the compute and volume services "
+ "only support a single 'default' quota class while the "
+ "network service does not support quota classes at all. "
+ "Please use 'openstack quota show --default' instead."
+ )
+ self.log.warning(msg)
identity_client = self.app.client_manager.identity
compute_client = self.app.client_manager.compute
@@ -555,23 +675,33 @@ class SetQuota(common.NetDetectionMixin, command.Command):
if value is not None:
compute_kwargs[k] = value
- if parsed_args.force:
- compute_kwargs['force'] = True
+ if parsed_args.force is not None:
+ compute_kwargs['force'] = parsed_args.force
volume_kwargs = {}
for k, v in VOLUME_QUOTAS.items():
value = getattr(parsed_args, k, None)
if value is not None:
- if (parsed_args.volume_type and
- k in IMPACT_VOLUME_TYPE_QUOTAS):
+ if parsed_args.volume_type and k in IMPACT_VOLUME_TYPE_QUOTAS:
k = k + '_%s' % parsed_args.volume_type
volume_kwargs[k] = value
network_kwargs = {}
- if parsed_args.check_limit:
- network_kwargs['check_limit'] = True
- if parsed_args.force:
+ if parsed_args.force is True:
+ # Unlike compute, network doesn't provide a simple boolean option.
+ # Instead, it provides two options: 'force' and 'check_limit'
+ # (a.k.a. 'not force')
network_kwargs['force'] = True
+ elif parsed_args.force is False:
+ network_kwargs['check_limit'] = True
+ else:
+ msg = _(
+ "This command currently defaults to '--force' when modifying "
+ "network quotas. This behavior will change in a future "
+ "release. Consider explicitly providing '--force' or "
+ "'--no-force' options to avoid changes in behavior."
+ )
+ self.log.warning(msg)
if self.app.client_manager.is_network_endpoint_enabled():
for k, v in NETWORK_QUOTAS.items():
@@ -588,87 +718,170 @@ class SetQuota(common.NetDetectionMixin, command.Command):
if compute_kwargs:
compute_client.quota_classes.update(
parsed_args.project,
- **compute_kwargs)
+ **compute_kwargs,
+ )
if volume_kwargs:
volume_client.quota_classes.update(
parsed_args.project,
- **volume_kwargs)
+ **volume_kwargs,
+ )
if network_kwargs:
- sys.stderr.write("Network quotas are ignored since quota class"
- " is not supported.")
+ sys.stderr.write(
+ "Network quotas are ignored since quota classes are not "
+ "supported."
+ )
else:
project = utils.find_resource(
identity_client.projects,
parsed_args.project,
).id
+
if compute_kwargs:
- compute_client.quotas.update(
- project,
- **compute_kwargs)
+ compute_client.quotas.update(project, **compute_kwargs)
if volume_kwargs:
- volume_client.quotas.update(
- project,
- **volume_kwargs)
+ volume_client.quotas.update(project, **volume_kwargs)
if (
- network_kwargs and
- self.app.client_manager.is_network_endpoint_enabled()
+ network_kwargs
+ and self.app.client_manager.is_network_endpoint_enabled()
):
network_client = self.app.client_manager.network
- network_client.update_quota(
- project,
- **network_kwargs)
+ network_client.update_quota(project, **network_kwargs)
-class ShowQuota(command.ShowOne, BaseQuota):
+class ShowQuota(command.Lister):
_description = _(
- "Show quotas for project or class. Specify "
- "``--os-compute-api-version 2.50`` or higher to see ``server-groups`` "
- "and ``server-group-members`` output for a given quota class.")
+ "Show quotas for project or class. "
+ "Specify ``--os-compute-api-version 2.50`` or higher to see "
+ "``server-groups`` and ``server-group-members`` output for a given "
+ "quota class."
+ )
def get_parser(self, prog_name):
- parser = super(ShowQuota, self).get_parser(prog_name)
+ parser = super().get_parser(prog_name)
parser.add_argument(
'project',
metavar='<project/class>',
nargs='?',
- help=_('Show quotas for this project or class (name or ID)'),
+ help=_(
+ 'Show quotas for this project or class (name or ID) '
+ '(defaults to current project)'
+ ),
)
type_group = parser.add_mutually_exclusive_group()
+ # TODO(stephenfin): Remove in OSC 8.0
type_group.add_argument(
'--class',
dest='quota_class',
action='store_true',
default=False,
- help=_('Show quotas for <class>'),
+ help=_(
+ '**Deprecated** Show quotas for <class>. '
+ 'Deprecated as quota classes were never fully implemented '
+ 'and only the default class is supported. '
+ 'Use --default instead which is also supported by the network '
+ 'service. '
+ '(compute and volume only)'
+ ),
)
type_group.add_argument(
'--default',
dest='default',
action='store_true',
default=False,
- help=_('Show default quotas for <project>')
+ help=_('Show default quotas for <project>'),
+ )
+ type_group.add_argument(
+ '--usage',
+ dest='usage',
+ action='store_true',
+ default=False,
+ help=_('Show details about quotas usage'),
+ )
+ service_group = parser.add_mutually_exclusive_group()
+ service_group.add_argument(
+ '--all',
+ action='store_const',
+ const='all',
+ dest='service',
+ default='all',
+ help=_('Show quotas for all services'),
+ )
+ service_group.add_argument(
+ '--compute',
+ action='store_const',
+ const='compute',
+ dest='service',
+ default='all',
+ help=_('Show compute quota'),
+ )
+ service_group.add_argument(
+ '--volume',
+ action='store_const',
+ const='volume',
+ dest='service',
+ default='all',
+ help=_('Show volume quota'),
+ )
+ service_group.add_argument(
+ '--network',
+ action='store_const',
+ const='network',
+ dest='service',
+ default='all',
+ help=_('Show network quota'),
)
+
return parser
def take_action(self, parsed_args):
+ project = parsed_args.project
- compute_client = self.app.client_manager.compute
- volume_client = self.app.client_manager.volume
- # NOTE(dtroyer): These quota API calls do not validate the project
- # or class arguments and return what appears to be
- # the default quota values if the project or class
- # does not exist. If this is determined to be the
- # intended behaviour of the API we will validate
- # the argument with Identity ourselves later.
- compute_quota_info = self.get_compute_quota(compute_client,
- parsed_args)
- volume_quota_info = self.get_volume_quota(volume_client,
- parsed_args)
- network_quota_info = self.get_network_quota(parsed_args)
- # NOTE(reedip): Remove the below check once requirement for
- # Openstack SDK is fixed to version 0.9.12 and above
- if type(network_quota_info) is not dict:
- network_quota_info = network_quota_info.to_dict()
+ if parsed_args.quota_class:
+ msg = _(
+ "The '--class' option has been deprecated. Quota classes were "
+ "never fully implemented and the compute and volume services "
+ "only support a single 'default' quota class while the "
+ "network service does not support quota classes at all. "
+ "Please use 'openstack quota show --default' instead."
+ )
+ self.log.warning(msg)
+ else:
+ project_info = get_project(self.app, parsed_args.project)
+ project = project_info['id']
+
+ compute_quota_info = {}
+ volume_quota_info = {}
+ network_quota_info = {}
+
+ # NOTE(stephenfin): These quota API calls do not validate the project
+ # or class arguments and return what appears to be the default quota
+ # values if the project or class does not exist. This is expected
+ # behavior. However, we have already checked for the presence of the
+ # project above so it shouldn't be an issue.
+ if parsed_args.service in {'all', 'compute'}:
+ compute_quota_info = get_compute_quotas(
+ self.app,
+ project,
+ detail=parsed_args.usage,
+ quota_class=parsed_args.quota_class,
+ default=parsed_args.default,
+ )
+ if parsed_args.service in {'all', 'volume'}:
+ volume_quota_info = get_volume_quotas(
+ self.app,
+ project,
+ detail=parsed_args.usage,
+ quota_class=parsed_args.quota_class,
+ default=parsed_args.default,
+ )
+ if parsed_args.service in {'all', 'network'}:
+ network_quota_info = get_network_quotas(
+ self.app,
+ project,
+ detail=parsed_args.usage,
+ quota_class=parsed_args.quota_class,
+ default=parsed_args.default,
+ )
info = {}
info.update(compute_quota_info)
@@ -681,19 +894,127 @@ class ShowQuota(command.ShowOne, BaseQuota):
# neutron is enabled, quotas of these three resources
# in nova will be replaced by neutron's.
for k, v in itertools.chain(
- COMPUTE_QUOTAS.items(), NOVA_NETWORK_QUOTAS.items(),
- VOLUME_QUOTAS.items(), NETWORK_QUOTAS.items()):
+ COMPUTE_QUOTAS.items(),
+ NOVA_NETWORK_QUOTAS.items(),
+ VOLUME_QUOTAS.items(),
+ NETWORK_QUOTAS.items(),
+ ):
if not k == v and info.get(k) is not None:
info[v] = info[k]
info.pop(k)
- # Handle project ID special as it only appears in output
+ # Remove the 'id' field since it's not very useful
if 'id' in info:
- info['project'] = info.pop('id')
- if 'project_id' in info:
- del info['project_id']
- project_info = self._get_project(parsed_args)
- project_name = project_info['name']
- info['project_name'] = project_name
-
- return zip(*sorted(info.items()))
+ del info['id']
+
+ # Remove the 'location' field for resources from openstacksdk
+ if 'location' in info:
+ del info['location']
+
+ if not parsed_args.usage:
+ result = [{'resource': k, 'limit': v} for k, v in info.items()]
+ else:
+ result = [{'resource': k, **v} for k, v in info.items()]
+
+ columns = (
+ 'resource',
+ 'limit',
+ )
+ column_headers = (
+ 'Resource',
+ 'Limit',
+ )
+
+ if parsed_args.usage:
+ columns += (
+ 'in_use',
+ 'reserved',
+ )
+ column_headers += (
+ 'In Use',
+ 'Reserved',
+ )
+
+ return (
+ column_headers,
+ (utils.get_dict_properties(s, columns) for s in result),
+ )
+
+
+class DeleteQuota(command.Command):
+ _description = _(
+ "Delete configured quota for a project and revert to defaults."
+ )
+
+ def get_parser(self, prog_name):
+ parser = super().get_parser(prog_name)
+ parser.add_argument(
+ 'project',
+ metavar='<project>',
+ help=_('Delete quotas for this project (name or ID)'),
+ )
+ option = parser.add_mutually_exclusive_group()
+ option.add_argument(
+ '--all',
+ action='store_const',
+ const='all',
+ dest='service',
+ default='all',
+ help=_('Delete project quotas for all services (default)'),
+ )
+ option.add_argument(
+ '--compute',
+ action='store_const',
+ const='compute',
+ dest='service',
+ default='all',
+ help=_(
+ 'Delete compute quotas for the project '
+ '(including network quotas when using nova-network)'
+ ),
+ )
+ option.add_argument(
+ '--volume',
+ action='store_const',
+ const='volume',
+ dest='service',
+ default='all',
+ help=_('Delete volume quotas for the project'),
+ )
+ option.add_argument(
+ '--network',
+ action='store_const',
+ const='network',
+ dest='service',
+ default='all',
+ help=_('Delete network quotas for the project'),
+ )
+ return parser
+
+ def take_action(self, parsed_args):
+ identity_client = self.app.client_manager.identity
+ project = utils.find_resource(
+ identity_client.projects,
+ parsed_args.project,
+ )
+
+ # compute quotas
+ if parsed_args.service in {'all', 'compute'}:
+ compute_client = self.app.client_manager.compute
+ compute_client.quotas.delete(project.id)
+
+ # volume quotas
+ if parsed_args.service in {'all', 'volume'}:
+ volume_client = self.app.client_manager.volume
+ volume_client.quotas.delete(project.id)
+
+ # network quotas (but only if we're not using nova-network, otherwise
+ # we already deleted the quotas in the compute step)
+ if (
+ parsed_args.service in {'all', 'network'}
+ and self.app.client_manager.is_network_endpoint_enabled()
+ ):
+ network_client = self.app.client_manager.network
+ network_client.delete_quota(project.id)
+
+ return None
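As a reference point for the refactor above: the former BaseQuota methods are now module-level helpers (get_project, get_compute_quotas, get_volume_quotas, get_network_quotas) with keyword-only flags, and the Neutron-to-Nova key normalization they perform can be reduced to the standalone sketch below (the sample payload is invented for illustration):

    # Simplified, self-contained version of the normalization done by the
    # _network_quota_to_dict helper nested in get_network_quotas(): drop
    # None values and rename Neutron's 'used' to Nova's 'in_use'.
    def normalize_network_quota(dict_quota):
        result = {}
        for key, values in dict_quota.items():
            if values is None:
                continue
            if isinstance(values, dict) and 'used' in values:
                values['in_use'] = values.pop('used')
            result[key] = values
        return result

    sample = {
        'networks': {'limit': 100, 'used': 7, 'reserved': 0},
        'ports': {'limit': 500, 'used': 23, 'reserved': 0},
        'project_id': None,  # dropped, mirroring the None filter above
    }
    print(normalize_network_quota(sample))
    # {'networks': {'limit': 100, 'reserved': 0, 'in_use': 7},
    #  'ports': {'limit': 500, 'reserved': 0, 'in_use': 23}}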
diff --git a/openstackclient/compute/v2/flavor.py b/openstackclient/compute/v2/flavor.py
index 8a9eb07a..bc8f758b 100644
--- a/openstackclient/compute/v2/flavor.py
+++ b/openstackclient/compute/v2/flavor.py
@@ -333,7 +333,7 @@ class ListFlavor(command.Lister):
# Even if server supports 2.61 some policy might stop it sending us
# extra_specs. So try to fetch them if they are absent
for f in data:
- if not f.extra_specs:
+ if parsed_args.long and not f.extra_specs:
compute_client.fetch_flavor_extra_specs(f)
columns = (
diff --git a/openstackclient/compute/v2/hypervisor.py b/openstackclient/compute/v2/hypervisor.py
index 5f7497b5..d4b4003b 100644
--- a/openstackclient/compute/v2/hypervisor.py
+++ b/openstackclient/compute/v2/hypervisor.py
@@ -18,8 +18,8 @@
import json
import re
-from novaclient import api_versions
from novaclient import exceptions as nova_exceptions
+from openstack import utils as sdk_utils
from osc_lib.cli import format_columns
from osc_lib.command import command
from osc_lib import exceptions
@@ -28,11 +28,44 @@ from osc_lib import utils
from openstackclient.i18n import _
+def _get_hypervisor_columns(item, client):
+ column_map = {'name': 'hypervisor_hostname'}
+ hidden_columns = ['location', 'servers']
+
+ if sdk_utils.supports_microversion(client, '2.88'):
+ hidden_columns.extend([
+ 'current_workload',
+ 'disk_available',
+ 'local_disk_free',
+ 'local_disk_size',
+ 'local_disk_used',
+ 'memory_free',
+ 'memory_size',
+ 'memory_used',
+ 'running_vms',
+ 'vcpus_used',
+ 'vcpus',
+ ])
+ else:
+ column_map.update({
+ 'disk_available': 'disk_available_least',
+ 'local_disk_free': 'free_disk_gb',
+ 'local_disk_size': 'local_gb',
+ 'local_disk_used': 'local_gb_used',
+ 'memory_free': 'free_ram_mb',
+ 'memory_used': 'memory_mb_used',
+ 'memory_size': 'memory_mb',
+ })
+
+ return utils.get_osc_show_columns_for_sdk_resource(
+ item, column_map, hidden_columns)
+
+
class ListHypervisor(command.Lister):
_description = _("List hypervisors")
def get_parser(self, prog_name):
- parser = super(ListHypervisor, self).get_parser(prog_name)
+ parser = super().get_parser(prog_name)
parser.add_argument(
'--matching',
metavar='<hostname>',
@@ -67,7 +100,7 @@ class ListHypervisor(command.Lister):
return parser
def take_action(self, parsed_args):
- compute_client = self.app.client_manager.compute
+ compute_client = self.app.client_manager.sdk_connection.compute
list_opts = {}
@@ -78,7 +111,7 @@ class ListHypervisor(command.Lister):
raise exceptions.CommandError(msg)
if parsed_args.marker:
- if compute_client.api_version < api_versions.APIVersion('2.33'):
+ if not sdk_utils.supports_microversion(compute_client, '2.33'):
msg = _(
'--os-compute-api-version 2.33 or greater is required to '
'support the --marker option'
@@ -87,7 +120,7 @@ class ListHypervisor(command.Lister):
list_opts['marker'] = parsed_args.marker
if parsed_args.limit:
- if compute_client.api_version < api_versions.APIVersion('2.33'):
+ if not sdk_utils.supports_microversion(compute_client, '2.33'):
msg = _(
'--os-compute-api-version 2.33 or greater is required to '
'support the --limit option'
@@ -95,23 +128,43 @@ class ListHypervisor(command.Lister):
raise exceptions.CommandError(msg)
list_opts['limit'] = parsed_args.limit
- columns = (
+ column_headers = (
"ID",
"Hypervisor Hostname",
"Hypervisor Type",
"Host IP",
"State"
)
+ columns = (
+ 'id',
+ 'name',
+ 'hypervisor_type',
+ 'host_ip',
+ 'state'
+ )
if parsed_args.long:
- columns += ("vCPUs Used", "vCPUs", "Memory MB Used", "Memory MB")
+ if not sdk_utils.supports_microversion(compute_client, '2.88'):
+ column_headers += (
+ 'vCPUs Used',
+ 'vCPUs',
+ 'Memory MB Used',
+ 'Memory MB'
+ )
+ columns += (
+ 'vcpus_used',
+ 'vcpus',
+ 'memory_used',
+ 'memory_size'
+ )
if parsed_args.matching:
- data = compute_client.hypervisors.search(parsed_args.matching)
+ data = compute_client.find_hypervisor(
+ parsed_args.matching, ignore_missing=False)
else:
- data = compute_client.hypervisors.list(**list_opts)
+ data = compute_client.hypervisors(**list_opts, details=True)
return (
- columns,
+ column_headers,
(utils.get_item_properties(s, columns) for s in data),
)
@@ -120,7 +173,7 @@ class ShowHypervisor(command.ShowOne):
_description = _("Display hypervisor details")
def get_parser(self, prog_name):
- parser = super(ShowHypervisor, self).get_parser(prog_name)
+ parser = super().get_parser(prog_name)
parser.add_argument(
"hypervisor",
metavar="<hypervisor>",
@@ -129,20 +182,25 @@ class ShowHypervisor(command.ShowOne):
return parser
def take_action(self, parsed_args):
- compute_client = self.app.client_manager.compute
- hypervisor = utils.find_resource(compute_client.hypervisors,
- parsed_args.hypervisor)._info.copy()
+ compute_client = self.app.client_manager.sdk_connection.compute
+ hypervisor = compute_client.find_hypervisor(
+ parsed_args.hypervisor, ignore_missing=False).copy()
+
+ # Some of the properties in the hypervisor object need to be processed
+ # before they get reported to the user. We spend this section
+ # extracting the relevant details to be reported by modifying our
+ # copy of the hypervisor object.
+ aggregates = compute_client.aggregates()
+ hypervisor['aggregates'] = list()
+ service_details = hypervisor['service_details']
- aggregates = compute_client.aggregates.list()
- hypervisor["aggregates"] = list()
if aggregates:
# Hypervisors in nova cells are prefixed by "<cell>@"
- if "@" in hypervisor['service']['host']:
- cell, service_host = hypervisor['service']['host'].split(
- '@', 1)
+ if "@" in service_details['host']:
+ cell, service_host = service_details['host'].split('@', 1)
else:
cell = None
- service_host = hypervisor['service']['host']
+ service_host = service_details['host']
if cell:
# The host aggregates are also prefixed by "<cell>@"
@@ -154,42 +212,45 @@ class ShowHypervisor(command.ShowOne):
member_of = [aggregate.name
for aggregate in aggregates
if service_host in aggregate.hosts]
- hypervisor["aggregates"] = member_of
+ hypervisor['aggregates'] = member_of
try:
- uptime = compute_client.hypervisors.uptime(hypervisor['id'])._info
+ if sdk_utils.supports_microversion(compute_client, '2.88'):
+ uptime = hypervisor['uptime'] or ''
+ del hypervisor['uptime']
+ else:
+ del hypervisor['uptime']
+ uptime = compute_client.get_hypervisor_uptime(
+ hypervisor['id'])['uptime']
# Extract data from uptime value
# format: 0 up 0, 0 users, load average: 0, 0, 0
# example: 17:37:14 up 2:33, 3 users,
# load average: 0.33, 0.36, 0.34
m = re.match(
r"\s*(.+)\sup\s+(.+),\s+(.+)\susers?,\s+load average:\s(.+)",
- uptime['uptime'])
+ uptime)
if m:
- hypervisor["host_time"] = m.group(1)
- hypervisor["uptime"] = m.group(2)
- hypervisor["users"] = m.group(3)
- hypervisor["load_average"] = m.group(4)
+ hypervisor['host_time'] = m.group(1)
+ hypervisor['uptime'] = m.group(2)
+ hypervisor['users'] = m.group(3)
+ hypervisor['load_average'] = m.group(4)
except nova_exceptions.HTTPNotImplemented:
pass
- hypervisor["service_id"] = hypervisor["service"]["id"]
- hypervisor["service_host"] = hypervisor["service"]["host"]
- del hypervisor["service"]
+ hypervisor['service_id'] = service_details['id']
+ hypervisor['service_host'] = service_details['host']
+ del hypervisor['service_details']
- if compute_client.api_version < api_versions.APIVersion('2.28'):
+ if not sdk_utils.supports_microversion(compute_client, '2.28'):
# microversion 2.28 transformed this to a JSON blob rather than a
# string; on earlier fields, do this manually
- if hypervisor['cpu_info']:
- hypervisor['cpu_info'] = json.loads(hypervisor['cpu_info'])
- else:
- hypervisor['cpu_info'] = {}
-
- columns = tuple(sorted(hypervisor))
+ hypervisor['cpu_info'] = json.loads(hypervisor['cpu_info'] or '{}')
+ display_columns, columns = _get_hypervisor_columns(
+ hypervisor, compute_client)
data = utils.get_dict_properties(
hypervisor, columns,
formatters={
'cpu_info': format_columns.DictColumn,
})
- return (columns, data)
+ return display_columns, data
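The uptime handling above parses the hypervisor's uptime string with a regular expression; the following self-contained snippet shows that parse on an invented sample string (the regex is the one used in the patch):

    import re

    # Sample string following the format noted in the patch:
    # "<host time> up <uptime>, <n> users, load average: <l1>, <l2>, <l3>"
    uptime = '17:37:14 up  2:33,  3 users,  load average: 0.33, 0.36, 0.34'
    m = re.match(
        r"\s*(.+)\sup\s+(.+),\s+(.+)\susers?,\s+load average:\s(.+)",
        uptime)
    if m:
        print('host_time   :', m.group(1))  # 17:37:14
        print('uptime      :', m.group(2))  # 2:33
        print('users       :', m.group(3))  # 3
        print('load_average:', m.group(4))  # 0.33, 0.36, 0.34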
diff --git a/openstackclient/compute/v2/hypervisor_stats.py b/openstackclient/compute/v2/hypervisor_stats.py
index 4493e080..cb63a800 100644
--- a/openstackclient/compute/v2/hypervisor_stats.py
+++ b/openstackclient/compute/v2/hypervisor_stats.py
@@ -11,19 +11,49 @@
# under the License.
#
-
"""Hypervisor Stats action implementations"""
from osc_lib.command import command
+from osc_lib import utils
from openstackclient.i18n import _
+def _get_hypervisor_stat_columns(item):
+ column_map = {
+ # NOTE(gtema): If we decide to use SDK names - empty this
+ 'disk_available': 'disk_available_least',
+ 'local_disk_free': 'free_disk_gb',
+ 'local_disk_size': 'local_gb',
+ 'local_disk_used': 'local_gb_used',
+ 'memory_free': 'free_ram_mb',
+ 'memory_size': 'memory_mb',
+ 'memory_used': 'memory_mb_used',
+
+ }
+ hidden_columns = ['id', 'links', 'location', 'name']
+ return utils.get_osc_show_columns_for_sdk_resource(
+ item, column_map, hidden_columns)
+
+
class ShowHypervisorStats(command.ShowOne):
_description = _("Display hypervisor stats details")
def take_action(self, parsed_args):
- compute_client = self.app.client_manager.compute
- hypervisor_stats = compute_client.hypervisors.statistics().to_dict()
-
- return zip(*sorted(hypervisor_stats.items()))
+ # The command is deprecated since it is being dropped in Nova.
+ self.log.warning(
+ _("This command is deprecated.")
+ )
+ compute_client = self.app.client_manager.sdk_connection.compute
+ # We make the API request directly because this deprecated method is not
+ # and will not be supported by the OpenStack SDK.
+ response = compute_client.get(
+ '/os-hypervisors/statistics',
+ microversion='2.1')
+ hypervisor_stats = response.json().get('hypervisor_statistics')
+
+ display_columns, columns = _get_hypervisor_stat_columns(
+ hypervisor_stats)
+ data = utils.get_dict_properties(
+ hypervisor_stats, columns)
+ return (display_columns, data)
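The column handling above delegates to osc_lib's get_osc_show_columns_for_sdk_resource; as a loose illustration only (not the osc_lib implementation), the renaming and hiding behave roughly like this sketch with an invented statistics payload:

    # Hypothetical helper that mimics the idea: skip hidden keys and map SDK
    # attribute names to the legacy display names used by the command.
    def map_columns(item, column_map, hidden_columns):
        display_columns = []
        attr_columns = []
        for attr in sorted(item):
            if attr in hidden_columns:
                continue
            display_columns.append(column_map.get(attr, attr))
            attr_columns.append(attr)
        return tuple(display_columns), tuple(attr_columns)

    stats = {'count': 2, 'memory_free': 2048, 'memory_size': 4096, 'location': None}
    column_map = {'memory_free': 'free_ram_mb', 'memory_size': 'memory_mb'}
    print(map_columns(stats, column_map, ['location']))
    # (('count', 'free_ram_mb', 'memory_mb'), ('count', 'memory_free', 'memory_size'))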
diff --git a/openstackclient/compute/v2/server.py b/openstackclient/compute/v2/server.py
index 69aaa3c5..e2d8112d 100644
--- a/openstackclient/compute/v2/server.py
+++ b/openstackclient/compute/v2/server.py
@@ -66,6 +66,32 @@ class PowerStateColumn(cliff_columns.FormattableColumn):
return 'N/A'
+class AddressesColumn(cliff_columns.FormattableColumn):
+ """Generate a formatted string of a server's addresses."""
+
+ def human_readable(self):
+ try:
+ return utils.format_dict_of_list({
+ k: [i['addr'] for i in v if 'addr' in i]
+ for k, v in self._value.items()})
+ except Exception:
+ return 'N/A'
+
+ def machine_readable(self):
+ return {k: [i['addr'] for i in v if 'addr' in i]
+ for k, v in self._value.items()}
+
+
+class HostColumn(cliff_columns.FormattableColumn):
+ """Generate a formatted string of a hostname."""
+
+ def human_readable(self):
+ if self._value is None:
+ return ''
+
+ return self._value
+
+
def _get_ip_address(addresses, address_type, ip_address_family):
# Old style addresses
if address_type in addresses:
@@ -111,14 +137,61 @@ def _prep_server_detail(compute_client, image_client, server, refresh=True):
the latest details of a server after creating it.
:rtype: a dict of server details
"""
+ # Note: Some callers of this routine pass a novaclient server, and others
+ # pass an SDK server. Column names may be different across those cases.
info = server.to_dict()
if refresh:
server = utils.find_resource(compute_client.servers, info['id'])
info.update(server.to_dict())
+ # Some commands using this routine were originally implemented with the
+ # nova python wrappers, and were later migrated to use the SDK. Map the
+ # SDK's property names to the original property names to maintain backward
+ # compatibility for existing users. Data is duplicated under both the old
+ # and new name so users can consume the data by either name.
+ column_map = {
+ 'access_ipv4': 'accessIPv4',
+ 'access_ipv6': 'accessIPv6',
+ 'admin_password': 'adminPass',
+ 'volumes': 'os-extended-volumes:volumes_attached',
+ 'availability_zone': 'OS-EXT-AZ:availability_zone',
+ 'block_device_mapping': 'block_device_mapping_v2',
+ 'compute_host': 'OS-EXT-SRV-ATTR:host',
+ 'created_at': 'created',
+ 'disk_config': 'OS-DCF:diskConfig',
+ 'flavor_id': 'flavorRef',
+ 'has_config_drive': 'config_drive',
+ 'host_id': 'hostId',
+ 'fault': 'fault',
+ 'hostname': 'OS-EXT-SRV-ATTR:hostname',
+ 'hypervisor_hostname': 'OS-EXT-SRV-ATTR:hypervisor_hostname',
+ 'image_id': 'imageRef',
+ 'instance_name': 'OS-EXT-SRV-ATTR:instance_name',
+ 'is_locked': 'locked',
+ 'kernel_id': 'OS-EXT-SRV-ATTR:kernel_id',
+ 'launch_index': 'OS-EXT-SRV-ATTR:launch_index',
+ 'launched_at': 'OS-SRV-USG:launched_at',
+ 'power_state': 'OS-EXT-STS:power_state',
+ 'project_id': 'tenant_id',
+ 'ramdisk_id': 'OS-EXT-SRV-ATTR:ramdisk_id',
+ 'reservation_id': 'OS-EXT-SRV-ATTR:reservation_id',
+ 'root_device_name': 'OS-EXT-SRV-ATTR:root_device_name',
+ 'scheduler_hints': 'OS-SCH-HNT:scheduler_hints',
+ 'task_state': 'OS-EXT-STS:task_state',
+ 'terminated_at': 'OS-SRV-USG:terminated_at',
+ 'updated_at': 'updated',
+ 'user_data': 'OS-EXT-SRV-ATTR:user_data',
+ 'vm_state': 'OS-EXT-STS:vm_state',
+ }
+
+ info.update({
+ column_map[column]: data for column, data in info.items()
+ if column in column_map})
+
# Convert the image blob to a name
image_info = info.get('image', {})
- if image_info:
+ if image_info and any(image_info.values()):
image_id = image_info.get('id', '')
try:
image = image_client.get_image(image_id)
@@ -166,7 +239,9 @@ def _prep_server_detail(compute_client, image_client, server, refresh=True):
# NOTE(dtroyer): novaclient splits these into separate entries...
# Format addresses in a useful way
- info['addresses'] = format_columns.DictListColumn(server.networks)
+ info['addresses'] = (
+ AddressesColumn(info['addresses']) if 'addresses' in info
+ else format_columns.DictListColumn(info.get('networks')))
# Map 'metadata' field to 'properties'
info['properties'] = format_columns.DictColumn(info.pop('metadata'))
@@ -268,9 +343,11 @@ class AddFixedIP(command.ShowOne):
return ((), ())
kwargs = {
- 'net_id': net_id,
- 'fixed_ip': parsed_args.fixed_ip_address,
+ 'net_id': net_id
}
+ if parsed_args.fixed_ip_address:
+ kwargs['fixed_ips'] = [
+ {"ip_address": parsed_args.fixed_ip_address}]
if parsed_args.tag:
kwargs['tag'] = parsed_args.tag
@@ -409,8 +486,8 @@ class AddPort(command.Command):
'--tag',
metavar='<tag>',
help=_(
- "Tag for the attached interface. "
- "(Supported by API versions '2.49' - '2.latest')"
+ 'Tag for the attached interface '
+ '(supported by --os-compute-api-version 2.49 or later)'
)
)
return parser
@@ -429,8 +506,7 @@ class AddPort(command.Command):
port_id = parsed_args.port
kwargs = {
- 'port_id': port_id,
- 'fixed_ip': None,
+ 'port_id': port_id
}
if parsed_args.tag:
@@ -484,8 +560,7 @@ class AddNetwork(command.Command):
net_id = parsed_args.network
kwargs = {
- 'net_id': net_id,
- 'fixed_ip': None,
+ 'net_id': net_id
}
if parsed_args.tag:
@@ -652,29 +727,68 @@ class AddServerVolume(command.ShowOne):
)
-# TODO(stephenfin): Replace with 'MultiKeyValueAction' when we no longer
-# support '--nic=auto' and '--nic=none'
+class NoneNICAction(argparse.Action):
+
+ def __init__(self, option_strings, dest, help=None):
+ super().__init__(
+ option_strings=option_strings,
+ dest=dest,
+ nargs=0,
+ default=[],
+ required=False,
+ help=help,
+ )
+
+ def __call__(self, parser, namespace, values, option_string=None):
+ # Make sure we have an empty list rather than None
+ if getattr(namespace, self.dest, None) is None:
+ setattr(namespace, self.dest, [])
+
+ getattr(namespace, self.dest).append('none')
+
+
+class AutoNICAction(argparse.Action):
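+ """Append 'auto' to the NIC list; shorthand for '--nic auto'."""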
+
+ def __init__(self, option_strings, dest, help=None):
+ super().__init__(
+ option_strings=option_strings,
+ dest=dest,
+ nargs=0,
+ default=[],
+ required=False,
+ help=help,
+ )
+
+ def __call__(self, parser, namespace, values, option_string=None):
+ # Make sure we have an empty list rather than None
+ if getattr(namespace, self.dest, None) is None:
+ setattr(namespace, self.dest, [])
+
+ getattr(namespace, self.dest).append('auto')
+
+
class NICAction(argparse.Action):
def __init__(
self,
option_strings,
dest,
- nargs=None,
- const=None,
- default=None,
- type=None,
- choices=None,
- required=False,
help=None,
metavar=None,
key=None,
):
self.key = key
super().__init__(
- option_strings=option_strings, dest=dest, nargs=nargs, const=const,
- default=default, type=type, choices=choices, required=required,
- help=help, metavar=metavar,
+ option_strings=option_strings,
+ dest=dest,
+ nargs=None,
+ const=None,
+ default=[],
+ type=None,
+ choices=None,
+ required=False,
+ help=help,
+ metavar=metavar,
)
def __call__(self, parser, namespace, values, option_string=None):
@@ -682,11 +796,6 @@ class NICAction(argparse.Action):
if getattr(namespace, self.dest, None) is None:
setattr(namespace, self.dest, [])
- # Handle the special auto/none cases
- if values in ('auto', 'none'):
- getattr(namespace, self.dest).append(values)
- return
-
if self.key:
if ',' in values or '=' in values:
msg = _(
@@ -696,6 +805,12 @@ class NICAction(argparse.Action):
raise argparse.ArgumentTypeError(msg % values)
values = '='.join([self.key, values])
+ else:
+ # Handle the special auto/none cases but only when a key isn't set
+ # (otherwise those could be valid values for the key)
+ if values in ('auto', 'none'):
+ getattr(namespace, self.dest).append(values)
+ return
# We don't include 'tag' here by default since that requires a
# particular microversion
@@ -707,7 +822,7 @@ class NICAction(argparse.Action):
}
for kv_str in values.split(','):
- k, sep, v = kv_str.partition("=")
+ k, sep, v = kv_str.partition('=')
if k not in list(info) + ['tag'] or not v:
msg = _(
@@ -851,9 +966,7 @@ class CreateServer(command.ShowOne):
required=True,
help=_('Create server with this flavor (name or ID)'),
)
- disk_group = parser.add_mutually_exclusive_group(
- required=True,
- )
+ disk_group = parser.add_mutually_exclusive_group()
disk_group.add_argument(
'--image',
metavar='<image>',
@@ -998,28 +1111,23 @@ class CreateServer(command.ShowOne):
)
parser.add_argument(
'--network',
- metavar="<network>",
+ metavar='<network>',
dest='nics',
- default=[],
action=NICAction,
key='net-id',
- # NOTE(RuiChen): Add '\n' to the end of line to improve formatting;
- # see cliff's _SmartHelpFormatter for more details.
help=_(
"Create a NIC on the server and connect it to network. "
"Specify option multiple times to create multiple NICs. "
"This is a wrapper for the '--nic net-id=<network>' "
"parameter that provides simple syntax for the standard "
"use case of connecting a new server to a given network. "
- "For more advanced use cases, refer to the '--nic' "
- "parameter."
+ "For more advanced use cases, refer to the '--nic' parameter."
),
)
parser.add_argument(
'--port',
- metavar="<port>",
+ metavar='<port>',
dest='nics',
- default=[],
action=NICAction,
key='port-id',
help=_(
@@ -1032,12 +1140,40 @@ class CreateServer(command.ShowOne):
),
)
parser.add_argument(
+ '--no-network',
+ dest='nics',
+ action=NoneNICAction,
+ help=_(
+ "Do not attach a network to the server. "
+ "This is a wrapper for the '--nic none' option that provides "
+ "a simple syntax for disabling network connectivity for a new "
+ "server. "
+ "For more advanced use cases, refer to the '--nic' parameter. "
+ "(supported by --os-compute-api-version 2.37 or above)"
+ ),
+ )
+ parser.add_argument(
+ '--auto-network',
+ dest='nics',
+ action=AutoNICAction,
+ help=_(
+ "Automatically allocate a network to the server. "
+ "This is the default network allocation policy. "
+ "This is a wrapper for the '--nic auto' option that provides "
+ "a simple syntax for enabling automatic configuration of "
+ "network connectivity for a new server. "
+ "For more advanced use cases, refer to the '--nic' parameter. "
+ "(supported by --os-compute-api-version 2.37 or above)"
+ ),
+ )
+ parser.add_argument(
'--nic',
metavar="<net-id=net-uuid,port-id=port-uuid,v4-fixed-ip=ip-addr,"
"v6-fixed-ip=ip-addr,tag=tag,auto,none>",
- action=NICAction,
dest='nics',
- default=[],
+ action=NICAction,
+ # NOTE(RuiChen): Add '\n' to the end of line to improve formatting;
+ # see cliff's _SmartHelpFormatter for more details.
help=_(
"Create a NIC on the server.\n"
"NIC in the format:\n"
@@ -1388,14 +1524,14 @@ class CreateServer(command.ShowOne):
if volume:
block_device_mapping_v2 = [{
'uuid': volume,
- 'boot_index': '0',
+ 'boot_index': 0,
'source_type': 'volume',
'destination_type': 'volume'
}]
elif snapshot:
block_device_mapping_v2 = [{
'uuid': snapshot,
- 'boot_index': '0',
+ 'boot_index': 0,
'source_type': 'snapshot',
'destination_type': 'volume',
'delete_on_termination': False
@@ -1404,7 +1540,7 @@ class CreateServer(command.ShowOne):
# Tell nova to create a root volume from the image provided.
block_device_mapping_v2 = [{
'uuid': image.id,
- 'boot_index': '0',
+ 'boot_index': 0,
'source_type': 'image',
'destination_type': 'volume',
'volume_size': parsed_args.boot_from_volume
@@ -1541,6 +1677,15 @@ class CreateServer(command.ShowOne):
block_device_mapping_v2.append(mapping)
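+ # A boot source is mandatory: either an image or a block device mapping
+ # with boot index 0 (e.g. one produced by --volume or --snapshot above).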
+ if not image and not any(
+ [bdm.get('boot_index') == 0 for bdm in block_device_mapping_v2]
+ ):
+ msg = _(
+ 'An image (--image, --image-property) or bootable volume '
+ '(--volume, --snapshot, --block-device) is required'
+ )
+ raise exceptions.CommandError(msg)
+
nics = parsed_args.nics
if 'auto' in nics or 'none' in nics:
@@ -1552,6 +1697,14 @@ class CreateServer(command.ShowOne):
)
raise exceptions.CommandError(msg)
+ if compute_client.api_version < api_versions.APIVersion('2.37'):
+ msg = _(
+ '--os-compute-api-version 2.37 or greater is '
+ 'required to support explicit auto-allocation of a '
+ 'network or to disable network allocation'
+ )
+ raise exceptions.CommandError(msg)
+
nics = nics[0]
else:
for nic in nics:
@@ -1756,8 +1909,9 @@ class CreateServerDump(command.Command):
Trigger crash dump in server(s) with features like kdump in Linux.
It will create a dump file in the server(s) dumping the server(s)'
- memory, and also crash the server(s). OSC sees the dump file
- (server dump) as a kind of resource.
+ memory, and also crash the server(s). This is contingent on guest operating
+ system support, and the location of the dump file inside the guest will
+ depend on the exact guest operating system.
This command requires ``--os-compute-api-version`` 2.17 or greater.
"""
@@ -1773,12 +1927,10 @@ class CreateServerDump(command.Command):
return parser
def take_action(self, parsed_args):
- compute_client = self.app.client_manager.compute
- for server in parsed_args.server:
- utils.find_resource(
- compute_client.servers,
- server,
- ).trigger_crash_dump()
+ compute_client = self.app.client_manager.sdk_connection.compute
+ for name_or_id in parsed_args.server:
+ server = compute_client.find_server(name_or_id)
+ server.trigger_crash_dump(compute_client)
class DeleteServer(command.Command):
@@ -2216,7 +2368,7 @@ class ListServer(command.Lister):
return parser
def take_action(self, parsed_args):
- compute_client = self.app.client_manager.compute
+ compute_client = self.app.client_manager.sdk_connection.compute
identity_client = self.app.client_manager.identity
image_client = self.app.client_manager.image
@@ -2241,10 +2393,11 @@ class ListServer(command.Lister):
# flavor name is given, map it to ID.
flavor_id = None
if parsed_args.flavor:
- flavor_id = utils.find_resource(
- compute_client.flavors,
- parsed_args.flavor,
- ).id
+ flavor = compute_client.find_flavor(parsed_args.flavor)
+ if flavor is None:
+ msg = _('Unable to find flavor: %s') % parsed_args.flavor
+ raise exceptions.CommandError(msg)
+ flavor_id = flavor.id
# Nova only supports list servers searching by image ID. So if a
# image name is given, map it to ID.
@@ -2260,19 +2413,21 @@ class ListServer(command.Lister):
'ip': parsed_args.ip,
'ip6': parsed_args.ip6,
'name': parsed_args.name,
- 'instance_name': parsed_args.instance_name,
'status': parsed_args.status,
'flavor': flavor_id,
'image': image_id,
'host': parsed_args.host,
- 'tenant_id': project_id,
- 'all_tenants': parsed_args.all_projects,
+ 'project_id': project_id,
+ 'all_projects': parsed_args.all_projects,
'user_id': user_id,
'deleted': parsed_args.deleted,
'changes-before': parsed_args.changes_before,
'changes-since': parsed_args.changes_since,
}
+ if parsed_args.instance_name is not None:
+ search_opts['instance_name'] = parsed_args.instance_name
+
if parsed_args.availability_zone:
search_opts['availability_zone'] = parsed_args.availability_zone
@@ -2304,7 +2459,7 @@ class ListServer(command.Lister):
search_opts['power_state'] = power_state
if parsed_args.tags:
- if compute_client.api_version < api_versions.APIVersion('2.26'):
+ if not sdk_utils.supports_microversion(compute_client, '2.26'):
msg = _(
'--os-compute-api-version 2.26 or greater is required to '
'support the --tag option'
@@ -2314,7 +2469,7 @@ class ListServer(command.Lister):
search_opts['tags'] = ','.join(parsed_args.tags)
if parsed_args.not_tags:
- if compute_client.api_version < api_versions.APIVersion('2.26'):
+ if not sdk_utils.supports_microversion(compute_client, '2.26'):
msg = _(
'--os-compute-api-version 2.26 or greater is required to '
'support the --not-tag option'
@@ -2324,7 +2479,7 @@ class ListServer(command.Lister):
search_opts['not-tags'] = ','.join(parsed_args.not_tags)
if parsed_args.locked:
- if compute_client.api_version < api_versions.APIVersion('2.73'):
+ if not sdk_utils.supports_microversion(compute_client, '2.73'):
msg = _(
'--os-compute-api-version 2.73 or greater is required to '
'support the --locked option'
@@ -2333,7 +2488,7 @@ class ListServer(command.Lister):
search_opts['locked'] = True
elif parsed_args.unlocked:
- if compute_client.api_version < api_versions.APIVersion('2.73'):
+ if not sdk_utils.supports_microversion(compute_client, '2.73'):
msg = _(
'--os-compute-api-version 2.73 or greater is required to '
'support the --unlocked option'
@@ -2342,10 +2497,14 @@ class ListServer(command.Lister):
search_opts['locked'] = False
+ if parsed_args.limit is not None:
+ search_opts['limit'] = parsed_args.limit
+ search_opts['paginated'] = False
+
LOG.debug('search options: %s', search_opts)
if search_opts['changes-before']:
- if compute_client.api_version < api_versions.APIVersion('2.66'):
+ if not sdk_utils.supports_microversion(compute_client, '2.66'):
msg = _('--os-compute-api-version 2.66 or later is required')
raise exceptions.CommandError(msg)
@@ -2379,15 +2538,15 @@ class ListServer(command.Lister):
if parsed_args.long:
columns += (
- 'OS-EXT-STS:task_state',
- 'OS-EXT-STS:power_state',
+ 'task_state',
+ 'power_state',
)
column_headers += (
'Task State',
'Power State',
)
- columns += ('networks',)
+ columns += ('addresses',)
column_headers += ('Networks',)
if parsed_args.long:
@@ -2409,7 +2568,7 @@ class ListServer(command.Lister):
# microversion 2.47 puts the embedded flavor into the server response
# body but omits the id, so if not present we just expose the original
# flavor name in the output
- if compute_client.api_version >= api_versions.APIVersion('2.47'):
+ if sdk_utils.supports_microversion(compute_client, '2.47'):
columns += ('flavor_name',)
column_headers += ('Flavor',)
else:
@@ -2431,8 +2590,8 @@ class ListServer(command.Lister):
if parsed_args.long:
columns += (
- 'OS-EXT-AZ:availability_zone',
- 'OS-EXT-SRV-ATTR:host',
+ 'availability_zone',
+ 'hypervisor_hostname',
'metadata',
)
column_headers += (
@@ -2441,29 +2600,48 @@ class ListServer(command.Lister):
'Properties',
)
- marker_id = None
-
# support for additional columns
if parsed_args.columns:
for c in parsed_args.columns:
if c in ('Project ID', 'project_id'):
- columns += ('tenant_id',)
+ columns += ('project_id',)
column_headers += ('Project ID',)
if c in ('User ID', 'user_id'):
columns += ('user_id',)
column_headers += ('User ID',)
if c in ('Created At', 'created_at'):
- columns += ('created',)
+ columns += ('created_at',)
column_headers += ('Created At',)
if c in ('Security Groups', 'security_groups'):
columns += ('security_groups_name',)
column_headers += ('Security Groups',)
-
- # convert back to tuple
- column_headers = tuple(column_headers)
- columns = tuple(columns)
-
- if parsed_args.marker:
+ if c in ("Task State", "task_state"):
+ columns += ('task_state',)
+ column_headers += ('Task State',)
+ if c in ("Power State", "power_state"):
+ columns += ('power_state',)
+ column_headers += ('Power State',)
+ if c in ("Image ID", "image_id"):
+ columns += ('Image ID',)
+ column_headers += ('Image ID',)
+ if c in ("Flavor ID", "flavor_id"):
+ columns += ('flavor_id',)
+ column_headers += ('Flavor ID',)
+ if c in ('Availability Zone', "availability_zone"):
+ columns += ('availability_zone',)
+ column_headers += ('Availability Zone',)
+ if c in ('Host', "host"):
+ columns += ('hypervisor_hostname',)
+ column_headers += ('Host',)
+ if c in ('Properties', "properties"):
+ columns += ('Metadata',)
+ column_headers += ('Properties',)
+
+ # remove duplicates
+ column_headers = tuple(dict.fromkeys(column_headers))
+ columns = tuple(dict.fromkeys(columns))
+
+ if parsed_args.marker is not None:
# Check if both "--marker" and "--deleted" are used.
# In that scenario a lookup is not needed as the marker
# needs to be an ID, because find_resource does not
@@ -2471,36 +2649,47 @@ class ListServer(command.Lister):
if parsed_args.deleted:
marker_id = parsed_args.marker
else:
- marker_id = utils.find_resource(
- compute_client.servers,
- parsed_args.marker,
- ).id
+ marker_id = compute_client.find_server(parsed_args.marker).id
+ search_opts['marker'] = marker_id
- data = compute_client.servers.list(
- search_opts=search_opts,
- marker=marker_id,
- limit=parsed_args.limit)
+ data = list(compute_client.servers(**search_opts))
images = {}
flavors = {}
if data and not parsed_args.no_name_lookup:
+ # partial responses from down cells will not have an image
+ # attribute so we use getattr
+ image_ids = {
+ s.image['id'] for s in data
+ if getattr(s, 'image', None) and s.image.get('id')
+ }
+
# create a dict that maps image_id to image object, which is used
# to display the "Image Name" column. Note that 'image.id' can be
# empty for BFV instances and 'image' can be missing entirely if
# there are infra failures
if parsed_args.name_lookup_one_by_one or image_id:
- for i_id in set(
- s.image['id'] for s in data
- if s.image and s.image.get('id')
- ):
+ for image_id in image_ids:
# "Image Name" is not crucial, so we swallow any exceptions
try:
- images[i_id] = image_client.get_image(i_id)
+ images[image_id] = image_client.get_image(image_id)
except Exception:
pass
else:
try:
- images_list = image_client.images()
+ # some deployments can have *loads* of images so we only
+ # want to list the ones we care about. It would be better
+ # to only return the *fields* we care about (name) but
+ # glance doesn't support that
+ # NOTE(stephenfin): This could result in super long URLs
+ # but it seems unlikely to cause issues. Apache supports
+ # URL lengths of up to 8190 characters by default, which
+ # should allow for more than 220 unique image IDs (different
+ # servers are likely to use the same image ID) in the filter.
+ # Who'd need more than that in a single command?
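+ # The resulting request is roughly
+ # GET /v2/images?id=in:<uuid1>,<uuid2>,...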
+ images_list = image_client.images(
+ id=f"in:{','.join(image_ids)}"
+ )
for i in images_list:
images[i.id] = i
except Exception:
@@ -2518,12 +2707,12 @@ class ListServer(command.Lister):
# "Flavor Name" is not crucial, so we swallow any
# exceptions
try:
- flavors[f_id] = compute_client.flavors.get(f_id)
+ flavors[f_id] = compute_client.find_flavor(f_id)
except Exception:
pass
else:
try:
- flavors_list = compute_client.flavors.list(is_public=None)
+ flavors_list = compute_client.flavors(is_public=None)
for i in flavors_list:
flavors[i.id] = i
except Exception:
@@ -2532,16 +2721,16 @@ class ListServer(command.Lister):
# Populate image_name, image_id, flavor_name and flavor_id attributes
# of server objects so that we can display those columns.
for s in data:
- if compute_client.api_version >= api_versions.APIVersion('2.69'):
+ if sdk_utils.supports_microversion(compute_client, '2.69'):
# NOTE(tssurya): From 2.69, we will have the keys 'flavor'
# and 'image' missing in the server response during
# infrastructure failure situations.
# For those servers with partial constructs we just skip the
- # processing of the image and flavor informations.
+ # processing of the image and flavor information.
if not hasattr(s, 'image') or not hasattr(s, 'flavor'):
continue
- if 'id' in s.image:
+ if 'id' in s.image and s.image.id is not None:
image = images.get(s.image['id'])
if image:
s.image_name = image.name
@@ -2554,7 +2743,7 @@ class ListServer(command.Lister):
s.image_name = IMAGE_STRING_FOR_BFV
s.image_id = IMAGE_STRING_FOR_BFV
- if compute_client.api_version < api_versions.APIVersion('2.47'):
+ if not sdk_utils.supports_microversion(compute_client, '2.47'):
flavor = flavors.get(s.flavor['id'])
if flavor:
s.flavor_name = flavor.name
@@ -2564,27 +2753,43 @@ class ListServer(command.Lister):
# Add a list with security group name as attribute
for s in data:
- if hasattr(s, 'security_groups'):
+ if hasattr(s, 'security_groups') and s.security_groups is not None:
s.security_groups_name = [x["name"] for x in s.security_groups]
else:
s.security_groups_name = []
+ # The host_status field contains the status of the compute host the
+ # server is on. It is only returned by the API when the nova-api
+ # policy allows. Users can look at the host_status field when, for
+ # example, their server has status ACTIVE but is unresponsive. The
+ # host_status field can indicate a possible problem on the host
+ # it's on, providing useful information to a user in this
+ # situation.
+ if (
+ sdk_utils.supports_microversion(compute_client, '2.16') and
+ parsed_args.long
+ ):
+ if any([s.host_status is not None for s in data]):
+ columns += ('Host Status',)
+ column_headers += ('Host Status',)
+
table = (
column_headers,
(
utils.get_item_properties(
s, columns,
mixed_case_fields=(
- 'OS-EXT-STS:task_state',
- 'OS-EXT-STS:power_state',
- 'OS-EXT-AZ:availability_zone',
- 'OS-EXT-SRV-ATTR:host',
+ 'task_state',
+ 'power_state',
+ 'availability_zone',
+ 'host',
),
formatters={
- 'OS-EXT-STS:power_state': PowerStateColumn,
- 'networks': format_columns.DictListColumn,
+ 'power_state': PowerStateColumn,
+ 'addresses': AddressesColumn,
'metadata': format_columns.DictColumn,
'security_groups_name': format_columns.ListColumn,
+ 'hypervisor_hostname': HostColumn,
},
) for s in data
),
@@ -2594,8 +2799,9 @@ class ListServer(command.Lister):
class LockServer(command.Command):
- _description = _("Lock server(s). A non-admin user will not be able to "
- "execute actions")
+ _description = _("""Lock server(s)
+
+A non-admin user will not be able to execute actions.""")
def get_parser(self, prog_name):
parser = super(LockServer, self).get_parser(prog_name)
@@ -3056,6 +3262,28 @@ class RebuildServer(command.ShowOne):
),
)
parser.add_argument(
+ '--reimage-boot-volume',
+ action='store_true',
+ dest='reimage_boot_volume',
+ default=None,
+ help=_(
+ 'Rebuild a volume-backed server. This will wipe the root '
+ 'volume data and overwrite it with the provided image. '
+ 'Defaults to False. '
+ '(supported by --os-compute-api-version 2.93 or above)'
+ ),
+ )
+ parser.add_argument(
+ '--no-reimage-boot-volume',
+ action='store_false',
+ dest='reimage_boot_volume',
+ default=None,
+ help=_(
+ 'Do not rebuild a volume-backed server. '
+ '(supported by --os-compute-api-version 2.93 or above)'
+ ),
+ )
+ parser.add_argument(
'--wait',
action='store_true',
help=_('Wait for rebuild to complete'),
@@ -3075,13 +3303,21 @@ class RebuildServer(command.ShowOne):
server = utils.find_resource(
compute_client.servers, parsed_args.server)
- # If parsed_args.image is not set, default to the currently used one.
+ # If parsed_args.image is not set and the instance is image-backed,
+ # default to the currently used image. If the instance is volume-backed,
+ # it is not trivial to fetch the current image, so it is better to error
+ # out in this case and ask the user to supply an image.
if parsed_args.image:
image = image_client.find_image(
parsed_args.image, ignore_missing=False)
else:
- image_id = server.to_dict().get('image', {}).get('id')
- image = image_client.get_image(image_id)
+ if not server.image:
+ msg = _(
+ 'The --image option is required when rebuilding a '
+ 'volume-backed server'
+ )
+ raise exceptions.CommandError(msg)
+ image = image_client.get_image(server.image['id'])
kwargs = {}
@@ -3182,6 +3418,41 @@ class RebuildServer(command.ShowOne):
kwargs['hostname'] = parsed_args.hostname
+ v2_93 = api_versions.APIVersion('2.93')
+ if parsed_args.reimage_boot_volume:
+ if compute_client.api_version < v2_93:
+ msg = _(
+ '--os-compute-api-version 2.93 or greater is required to '
+ 'support the --reimage-boot-volume option'
+ )
+ raise exceptions.CommandError(msg)
+ else:
+ # force user to explicitly request reimaging of volume-backed
+ # server
+ if not server.image:
+ if compute_client.api_version >= v2_93:
+ msg = _(
+ '--reimage-boot-volume is required to rebuild a '
+ 'volume-backed server'
+ )
+ raise exceptions.CommandError(msg)
+ else: # microversion < 2.93
+ # attempts to rebuild a volume-backed server before API
+ # microversion 2.93 will fail in all cases except one: if
+ # the user attempts the rebuild with the exact same image
+ # that the server was initially built with. We can't check
+ # for this since we don't have the original image ID to
+ # hand, so we simply warn the user.
+ # TODO(stephenfin): Make this a failure in a future
+ # version
+ self.log.warning(
+ 'Attempting to rebuild a volume-backed server using '
+ '--os-compute-api-version 2.92 or earlier, which '
+ 'will only succeed if the image is identical to the '
+ 'one initially used. This will be an error in a '
+ 'future release.'
+ )
+
try:
server = server.rebuild(image, parsed_args.password, **kwargs)
finally:
@@ -3299,7 +3570,7 @@ host.""")
server = utils.find_resource(
compute_client.servers, parsed_args.server)
- server = server.evacuate(**kwargs)
+ server.evacuate(**kwargs)
if parsed_args.wait:
if utils.wait_for_status(
@@ -3314,7 +3585,7 @@ host.""")
raise SystemExit
details = _prep_server_detail(
- compute_client, image_client, server, refresh=False)
+ compute_client, image_client, server, refresh=True)
return zip(*sorted(details.items()))
@@ -3527,7 +3798,11 @@ class RemoveServerVolume(command.Command):
class RescueServer(command.Command):
- _description = _("Put server in rescue mode")
+ _description = _(
+ "Put server in rescue mode. "
+ "Specify ``--os-compute-api-version 2.87`` or higher to rescue a "
+ "server booted from a volume."
+ )
def get_parser(self, prog_name):
parser = super(RescueServer, self).get_parser(prog_name)
@@ -3683,9 +3958,7 @@ Confirm (verify) success of resize operation and release the old server.""")
# TODO(stephenfin): Remove in OSC 7.0
class MigrateConfirm(ResizeConfirm):
- _description = _("""DEPRECATED: Confirm server migration.
-
-Use 'server migration confirm' instead.""")
+ _description = _("DEPRECATED: Use 'server migration confirm' instead.")
def take_action(self, parsed_args):
msg = _(
@@ -3731,9 +4004,7 @@ one.""")
# TODO(stephenfin): Remove in OSC 7.0
class MigrateRevert(ResizeRevert):
- _description = _("""Revert server migration.
-
-Use 'server migration revert' instead.""")
+ _description = _("DEPRECATED: Use 'server migration revert' instead.")
def take_action(self, parsed_args):
msg = _(
@@ -4020,7 +4291,7 @@ class ShelveServer(command.Command):
server_obj.shelve()
- # if we don't hav to wait, either because it was requested explicitly
+ # if we don't have to wait, either because it was requested explicitly
# or is required implicitly, then our job is done
if not parsed_args.wait and not parsed_args.offload:
return
@@ -4103,32 +4374,34 @@ class ShowServer(command.ShowOne):
return parser
def take_action(self, parsed_args):
- compute_client = self.app.client_manager.compute
- server = utils.find_resource(
- compute_client.servers, parsed_args.server)
+ compute_client = self.app.client_manager.sdk_connection.compute
+
+ # Find by name or ID, then get the full details of the server
+ server = compute_client.find_server(
+ parsed_args.server, ignore_missing=False)
+ server = compute_client.get_server(server)
if parsed_args.diagnostics:
- (resp, data) = server.diagnostics()
- if not resp.status_code == 200:
- self.app.stderr.write(_(
- "Error retrieving diagnostics data\n"
- ))
- return ({}, {})
+ data = compute_client.get_server_diagnostics(server)
return zip(*sorted(data.items()))
topology = None
if parsed_args.topology:
- if compute_client.api_version < api_versions.APIVersion('2.78'):
+ if not sdk_utils.supports_microversion(compute_client, '2.78'):
msg = _(
'--os-compute-api-version 2.78 or greater is required to '
'support the --topology option'
)
raise exceptions.CommandError(msg)
- topology = server.topology()
+ topology = server.fetch_topology(compute_client)
data = _prep_server_detail(
- compute_client, self.app.client_manager.image, server,
+ # TODO(dannosliwcd): Replace these clients with SDK clients after
+ # all callers of _prep_server_detail() are using the SDK.
+ self.app.client_manager.compute,
+ self.app.client_manager.image,
+ server,
refresh=False)
if topology:
@@ -4293,7 +4566,7 @@ class SshServer(command.Command):
class StartServer(command.Command):
- _description = _("Start server(s).")
+ _description = _("Start server(s)")
def get_parser(self, prog_name):
parser = super(StartServer, self).get_parser(prog_name)
@@ -4325,7 +4598,7 @@ class StartServer(command.Command):
class StopServer(command.Command):
- _description = _("Stop server(s).")
+ _description = _("Stop server(s)")
def get_parser(self, prog_name):
parser = super(StopServer, self).get_parser(prog_name)
@@ -4528,13 +4801,30 @@ class UnshelveServer(command.Command):
nargs='+',
help=_('Server(s) to unshelve (name or ID)'),
)
- parser.add_argument(
+ group = parser.add_mutually_exclusive_group()
+ group.add_argument(
'--availability-zone',
default=None,
help=_('Name of the availability zone in which to unshelve a '
'SHELVED_OFFLOADED server (supported by '
'--os-compute-api-version 2.77 or above)'),
)
+ group.add_argument(
+ '--no-availability-zone',
+ action='store_true',
+ default=False,
+ help=_('Unpin the availability zone of a SHELVED_OFFLOADED '
+ 'server. The server will be unshelved on a host without '
+ 'an availability zone constraint (supported by '
+ '--os-compute-api-version 2.91 or above)'),
+ )
+ parser.add_argument(
+ '--host',
+ default=None,
+ help=_('Name of the destination host on which to unshelve a '
+ 'SHELVED_OFFLOADED server (supported by '
+ '--os-compute-api-version 2.91 or above)'),
+ )
parser.add_argument(
'--wait',
action='store_true',
@@ -4563,6 +4853,26 @@ class UnshelveServer(command.Command):
kwargs['availability_zone'] = parsed_args.availability_zone
+ if parsed_args.host:
+ if compute_client.api_version < api_versions.APIVersion('2.91'):
+ msg = _(
+ '--os-compute-api-version 2.91 or greater is required '
+ 'to support the --host option'
+ )
+ raise exceptions.CommandError(msg)
+
+ kwargs['host'] = parsed_args.host
+
+ if parsed_args.no_availability_zone:
+ if compute_client.api_version < api_versions.APIVersion('2.91'):
+ msg = _(
+ '--os-compute-api-version 2.91 or greater is required '
+ 'to support the --no-availability-zone option'
+ )
+ raise exceptions.CommandError(msg)
+
+ kwargs['availability_zone'] = None
+
for server in parsed_args.server:
server_obj = utils.find_resource(
compute_client.servers,
diff --git a/openstackclient/compute/v2/server_event.py b/openstackclient/compute/v2/server_event.py
index 7ab0cd2f..ebf0d526 100644
--- a/openstackclient/compute/v2/server_event.py
+++ b/openstackclient/compute/v2/server_event.py
@@ -19,10 +19,10 @@ import logging
import iso8601
from novaclient import api_versions
-import openstack.cloud._utils
from osc_lib.command import command
from osc_lib import exceptions
from osc_lib import utils
+from oslo_utils import uuidutils
from openstackclient.i18n import _
@@ -152,7 +152,7 @@ class ListServerEvent(command.Lister):
# If we fail to find the resource, it is possible the server is
# deleted. Try once more using the <server> arg directly if it is a
# UUID.
- if openstack.cloud._utils._is_uuid_like(parsed_args.server):
+ if uuidutils.is_uuid_like(parsed_args.server):
server_id = parsed_args.server
else:
raise
@@ -224,7 +224,7 @@ class ShowServerEvent(command.ShowOne):
# If we fail to find the resource, it is possible the server is
# deleted. Try once more using the <server> arg directly if it is a
# UUID.
- if openstack.cloud._utils._is_uuid_like(parsed_args.server):
+ if uuidutils.is_uuid_like(parsed_args.server):
server_id = parsed_args.server
else:
raise
diff --git a/openstackclient/compute/v2/server_group.py b/openstackclient/compute/v2/server_group.py
index 32dd1937..eadc3ffb 100644
--- a/openstackclient/compute/v2/server_group.py
+++ b/openstackclient/compute/v2/server_group.py
@@ -17,7 +17,7 @@
import logging
-from novaclient import api_versions
+from openstack import utils as sdk_utils
from osc_lib.cli import format_columns
from osc_lib.cli import parseractions
from osc_lib.command import command
@@ -31,19 +31,24 @@ LOG = logging.getLogger(__name__)
_formatters = {
- 'members': format_columns.ListColumn,
+ 'member_ids': format_columns.ListColumn,
'policies': format_columns.ListColumn,
'rules': format_columns.DictColumn,
}
-def _get_columns(info):
- columns = list(info.keys())
- if 'metadata' in columns:
- # NOTE(RuiChen): The metadata of server group is always empty since API
- # compatible, so hide it in order to avoid confusion.
- columns.remove('metadata')
- return tuple(sorted(columns))
+def _get_server_group_columns(item, client):
+ column_map = {'member_ids': 'members'}
+ hidden_columns = ['metadata', 'location']
+
+ if sdk_utils.supports_microversion(client, '2.64'):
+ hidden_columns.append('policies')
+ else:
+ hidden_columns.append('policy')
+ hidden_columns.append('rules')
+
+ return utils.get_osc_show_columns_for_sdk_resource(
+ item, column_map, hidden_columns)
class CreateServerGroup(command.ShowOne):
@@ -54,7 +59,7 @@ class CreateServerGroup(command.ShowOne):
parser.add_argument(
'name',
metavar='<name>',
- help=_("New server group name")
+ help=_("New server group name"),
)
parser.add_argument(
'--policy',
@@ -87,11 +92,10 @@ class CreateServerGroup(command.ShowOne):
return parser
def take_action(self, parsed_args):
- compute_client = self.app.client_manager.compute
- info = {}
+ compute_client = self.app.client_manager.sdk_connection.compute
if parsed_args.policy in ('soft-affinity', 'soft-anti-affinity'):
- if compute_client.api_version < api_versions.APIVersion('2.15'):
+ if not sdk_utils.supports_microversion(compute_client, '2.15'):
msg = _(
'--os-compute-api-version 2.15 or greater is required to '
'support the %s policy'
@@ -99,30 +103,39 @@ class CreateServerGroup(command.ShowOne):
raise exceptions.CommandError(msg % parsed_args.policy)
if parsed_args.rules:
- if compute_client.api_version < api_versions.APIVersion('2.64'):
+ if not sdk_utils.supports_microversion(compute_client, '2.64'):
msg = _(
'--os-compute-api-version 2.64 or greater is required to '
'support the --rule option'
)
raise exceptions.CommandError(msg)
- if compute_client.api_version < api_versions.APIVersion('2.64'):
- kwargs = {'policies': [parsed_args.policy]}
+ if not sdk_utils.supports_microversion(compute_client, '2.64'):
+ kwargs = {
+ 'name': parsed_args.name,
+ 'policies': [parsed_args.policy],
+ }
else:
kwargs = {
+ 'name': parsed_args.name,
'policy': parsed_args.policy,
- 'rules': parsed_args.rules or None,
}
- server_group = compute_client.server_groups.create(
- name=parsed_args.name, **kwargs)
+ if parsed_args.rules:
+ kwargs['rules'] = parsed_args.rules
- info.update(server_group._info)
+ server_group = compute_client.create_server_group(**kwargs)
- columns = _get_columns(info)
- data = utils.get_dict_properties(
- info, columns, formatters=_formatters)
- return columns, data
+ display_columns, columns = _get_server_group_columns(
+ server_group,
+ compute_client,
+ )
+ data = utils.get_item_properties(
+ server_group,
+ columns,
+ formatters=_formatters,
+ )
+ return display_columns, data
class DeleteServerGroup(command.Command):
@@ -134,18 +147,17 @@ class DeleteServerGroup(command.Command):
'server_group',
metavar='<server-group>',
nargs='+',
- help=_("server group(s) to delete (name or ID)")
+ help=_("server group(s) to delete (name or ID)"),
)
return parser
def take_action(self, parsed_args):
- compute_client = self.app.client_manager.compute
+ compute_client = self.app.client_manager.sdk_connection.compute
result = 0
for group in parsed_args.server_group:
try:
- group_obj = utils.find_resource(compute_client.server_groups,
- group)
- compute_client.server_groups.delete(group_obj.id)
+ group_obj = compute_client.find_server_group(group)
+ compute_client.delete_server_group(group_obj.id)
# Catch all exceptions in order to avoid to block the next deleting
except Exception as e:
result += 1
@@ -169,13 +181,13 @@ class ListServerGroup(command.Lister):
'--all-projects',
action='store_true',
default=False,
- help=_("Display information from all projects (admin only)")
+ help=_("Display information from all projects (admin only)"),
)
parser.add_argument(
'--long',
action='store_true',
default=False,
- help=_("List additional fields in output")
+ help=_("List additional fields in output"),
)
# TODO(stephenfin): This should really be a --marker option, but alas
# the API doesn't support that for some reason
@@ -204,7 +216,7 @@ class ListServerGroup(command.Lister):
return parser
def take_action(self, parsed_args):
- compute_client = self.app.client_manager.compute
+ compute_client = self.app.client_manager.sdk_connection.compute
kwargs = {}
@@ -217,10 +229,10 @@ class ListServerGroup(command.Lister):
if parsed_args.limit:
kwargs['limit'] = parsed_args.limit
- data = compute_client.server_groups.list(**kwargs)
+ data = compute_client.server_groups(**kwargs)
policy_key = 'Policies'
- if compute_client.api_version >= api_versions.APIVersion("2.64"):
+ if sdk_utils.supports_microversion(compute_client, '2.64'):
policy_key = 'Policy'
columns = (
@@ -235,7 +247,7 @@ class ListServerGroup(command.Lister):
)
if parsed_args.long:
columns += (
- 'members',
+ 'member_ids',
'project_id',
'user_id',
)
@@ -263,17 +275,18 @@ class ShowServerGroup(command.ShowOne):
parser.add_argument(
'server_group',
metavar='<server-group>',
- help=_("server group to display (name or ID)")
+ help=_("server group to display (name or ID)"),
)
return parser
def take_action(self, parsed_args):
- compute_client = self.app.client_manager.compute
- group = utils.find_resource(compute_client.server_groups,
- parsed_args.server_group)
- info = {}
- info.update(group._info)
- columns = _get_columns(info)
- data = utils.get_dict_properties(
- info, columns, formatters=_formatters)
- return columns, data
+ compute_client = self.app.client_manager.sdk_connection.compute
+ group = compute_client.find_server_group(parsed_args.server_group)
+ display_columns, columns = _get_server_group_columns(
+ group,
+ compute_client,
+ )
+ data = utils.get_item_properties(
+ group, columns, formatters=_formatters
+ )
+ return display_columns, data
diff --git a/openstackclient/compute/v2/server_migration.py b/openstackclient/compute/v2/server_migration.py
index 919b67bd..91575c1e 100644
--- a/openstackclient/compute/v2/server_migration.py
+++ b/openstackclient/compute/v2/server_migration.py
@@ -14,7 +14,7 @@
import uuid
-from novaclient import api_versions
+from openstack import utils as sdk_utils
from osc_lib.command import command
from osc_lib import exceptions
from osc_lib import utils
@@ -130,22 +130,22 @@ class ListMigration(command.Lister):
# the same as the column header names.
columns = [
'source_node', 'dest_node', 'source_compute', 'dest_compute',
- 'dest_host', 'status', 'instance_uuid', 'old_instance_type_id',
- 'new_instance_type_id', 'created_at', 'updated_at',
+ 'dest_host', 'status', 'server_id', 'old_flavor_id',
+ 'new_flavor_id', 'created_at', 'updated_at',
]
# Insert migrations UUID after ID
- if compute_client.api_version >= api_versions.APIVersion("2.59"):
+ if sdk_utils.supports_microversion(compute_client, "2.59"):
column_headers.insert(0, "UUID")
columns.insert(0, "uuid")
- if compute_client.api_version >= api_versions.APIVersion("2.23"):
+ if sdk_utils.supports_microversion(compute_client, "2.23"):
column_headers.insert(0, "Id")
columns.insert(0, "id")
column_headers.insert(len(column_headers) - 2, "Type")
columns.insert(len(columns) - 2, "migration_type")
- if compute_client.api_version >= api_versions.APIVersion("2.80"):
+ if sdk_utils.supports_microversion(compute_client, "2.80"):
if parsed_args.project:
column_headers.insert(len(column_headers) - 2, "Project")
columns.insert(len(columns) - 2, "project_id")
@@ -159,19 +159,23 @@ class ListMigration(command.Lister):
)
def take_action(self, parsed_args):
- compute_client = self.app.client_manager.compute
+ compute_client = self.app.client_manager.sdk_connection.compute
identity_client = self.app.client_manager.identity
- search_opts = {
- 'host': parsed_args.host,
- 'status': parsed_args.status,
- }
+ search_opts = {}
+
+ if parsed_args.host is not None:
+ search_opts['host'] = parsed_args.host
+
+ if parsed_args.status is not None:
+ search_opts['status'] = parsed_args.status
if parsed_args.server:
- search_opts['instance_uuid'] = utils.find_resource(
- compute_client.servers,
- parsed_args.server,
- ).id
+ server = compute_client.find_server(parsed_args.server)
+ if server is None:
+ msg = _('Unable to find server: %s') % parsed_args.server
+ raise exceptions.CommandError(msg)
+ search_opts['instance_uuid'] = server.id
if parsed_args.type:
migration_type = parsed_args.type
@@ -181,7 +185,7 @@ class ListMigration(command.Lister):
search_opts['migration_type'] = migration_type
if parsed_args.marker:
- if compute_client.api_version < api_versions.APIVersion('2.59'):
+ if not sdk_utils.supports_microversion(compute_client, "2.59"):
msg = _(
'--os-compute-api-version 2.59 or greater is required to '
'support the --marker option'
@@ -190,16 +194,17 @@ class ListMigration(command.Lister):
search_opts['marker'] = parsed_args.marker
if parsed_args.limit:
- if compute_client.api_version < api_versions.APIVersion('2.59'):
+ if not sdk_utils.supports_microversion(compute_client, "2.59"):
msg = _(
'--os-compute-api-version 2.59 or greater is required to '
'support the --limit option'
)
raise exceptions.CommandError(msg)
search_opts['limit'] = parsed_args.limit
+ search_opts['paginated'] = False
if parsed_args.changes_since:
- if compute_client.api_version < api_versions.APIVersion('2.59'):
+ if not sdk_utils.supports_microversion(compute_client, "2.59"):
msg = _(
'--os-compute-api-version 2.59 or greater is required to '
'support the --changes-since option'
@@ -208,7 +213,7 @@ class ListMigration(command.Lister):
search_opts['changes_since'] = parsed_args.changes_since
if parsed_args.changes_before:
- if compute_client.api_version < api_versions.APIVersion('2.66'):
+ if not sdk_utils.supports_microversion(compute_client, "2.66"):
msg = _(
'--os-compute-api-version 2.66 or greater is required to '
'support the --changes-before option'
@@ -217,7 +222,7 @@ class ListMigration(command.Lister):
search_opts['changes_before'] = parsed_args.changes_before
if parsed_args.project:
- if compute_client.api_version < api_versions.APIVersion('2.80'):
+ if not sdk_utils.supports_microversion(compute_client, "2.80"):
msg = _(
'--os-compute-api-version 2.80 or greater is required to '
'support the --project option'
@@ -231,7 +236,7 @@ class ListMigration(command.Lister):
).id
if parsed_args.user:
- if compute_client.api_version < api_versions.APIVersion('2.80'):
+ if not sdk_utils.supports_microversion(compute_client, "2.80"):
msg = _(
'--os-compute-api-version 2.80 or greater is required to '
'support the --user option'
@@ -244,13 +249,13 @@ class ListMigration(command.Lister):
parsed_args.user_domain,
).id
- migrations = compute_client.migrations.list(**search_opts)
+ migrations = list(compute_client.migrations(**search_opts))
return self.print_migrations(parsed_args, compute_client, migrations)
def _get_migration_by_uuid(compute_client, server_id, migration_uuid):
- for migration in compute_client.server_migrations.list(server_id):
+ for migration in compute_client.server_migrations(server_id):
if migration.uuid == migration_uuid:
return migration
break
@@ -284,9 +289,9 @@ class ShowMigration(command.ShowOne):
return parser
def take_action(self, parsed_args):
- compute_client = self.app.client_manager.compute
+ compute_client = self.app.client_manager.sdk_connection.compute
- if compute_client.api_version < api_versions.APIVersion('2.24'):
+ if not sdk_utils.supports_microversion(compute_client, '2.24'):
msg = _(
'--os-compute-api-version 2.24 or greater is required to '
'support the server migration show command'
@@ -302,16 +307,16 @@ class ShowMigration(command.ShowOne):
)
raise exceptions.CommandError(msg)
- if compute_client.api_version < api_versions.APIVersion('2.59'):
+ if not sdk_utils.supports_microversion(compute_client, '2.59'):
msg = _(
'--os-compute-api-version 2.59 or greater is required to '
'retrieve server migrations by UUID'
)
raise exceptions.CommandError(msg)
- server = utils.find_resource(
- compute_client.servers,
+ server = compute_client.find_server(
parsed_args.server,
+ ignore_missing=False,
)
# the nova API doesn't currently allow retrieval by UUID but it's a
@@ -322,11 +327,13 @@ class ShowMigration(command.ShowOne):
compute_client, server.id, parsed_args.migration,
)
else:
- server_migration = compute_client.server_migrations.get(
- server.id, parsed_args.migration,
+ server_migration = compute_client.get_server_migration(
+ server.id,
+ parsed_args.migration,
+ ignore_missing=False,
)
- columns = (
+ column_headers = (
'ID',
'Server UUID',
'Status',
@@ -345,14 +352,35 @@ class ShowMigration(command.ShowOne):
'Updated At',
)
- if compute_client.api_version >= api_versions.APIVersion('2.59'):
- columns += ('UUID',)
+ columns = (
+ 'id',
+ 'server_id',
+ 'status',
+ 'source_compute',
+ 'source_node',
+ 'dest_compute',
+ 'dest_host',
+ 'dest_node',
+ 'memory_total_bytes',
+ 'memory_processed_bytes',
+ 'memory_remaining_bytes',
+ 'disk_total_bytes',
+ 'disk_processed_bytes',
+ 'disk_remaining_bytes',
+ 'created_at',
+ 'updated_at',
+ )
+
+ if sdk_utils.supports_microversion(compute_client, '2.59'):
+ column_headers += ('UUID',)
+ columns += ('uuid',)
- if compute_client.api_version >= api_versions.APIVersion('2.80'):
- columns += ('User ID', 'Project ID')
+ if sdk_utils.supports_microversion(compute_client, '2.80'):
+ column_headers += ('User ID', 'Project ID')
+ columns += ('user_id', 'project_id')
data = utils.get_item_properties(server_migration, columns)
- return columns, data
+ return column_headers, data
class AbortMigration(command.Command):
@@ -376,9 +404,9 @@ class AbortMigration(command.Command):
return parser
def take_action(self, parsed_args):
- compute_client = self.app.client_manager.compute
+ compute_client = self.app.client_manager.sdk_connection.compute
- if compute_client.api_version < api_versions.APIVersion('2.24'):
+ if not sdk_utils.supports_microversion(compute_client, '2.24'):
msg = _(
'--os-compute-api-version 2.24 or greater is required to '
'support the server migration abort command'
@@ -394,16 +422,16 @@ class AbortMigration(command.Command):
)
raise exceptions.CommandError(msg)
- if compute_client.api_version < api_versions.APIVersion('2.59'):
+ if not sdk_utils.supports_microversion(compute_client, '2.59'):
msg = _(
'--os-compute-api-version 2.59 or greater is required to '
'abort server migrations by UUID'
)
raise exceptions.CommandError(msg)
- server = utils.find_resource(
- compute_client.servers,
+ server = compute_client.find_server(
parsed_args.server,
+ ignore_missing=False,
)
# the nova API doesn't currently allow retrieval by UUID but it's a
@@ -415,8 +443,10 @@ class AbortMigration(command.Command):
compute_client, server.id, parsed_args.migration,
).id
- compute_client.server_migrations.live_migration_abort(
- server.id, migration_id,
+ compute_client.abort_server_migration(
+ migration_id,
+ server.id,
+ ignore_missing=False,
)
@@ -441,9 +471,9 @@ class ForceCompleteMigration(command.Command):
return parser
def take_action(self, parsed_args):
- compute_client = self.app.client_manager.compute
+ compute_client = self.app.client_manager.sdk_connection.compute
- if compute_client.api_version < api_versions.APIVersion('2.22'):
+ if not sdk_utils.supports_microversion(compute_client, '2.22'):
msg = _(
'--os-compute-api-version 2.22 or greater is required to '
'support the server migration force complete command'
@@ -459,16 +489,16 @@ class ForceCompleteMigration(command.Command):
)
raise exceptions.CommandError(msg)
- if compute_client.api_version < api_versions.APIVersion('2.59'):
+ if not sdk_utils.supports_microversion(compute_client, '2.59'):
msg = _(
'--os-compute-api-version 2.59 or greater is required to '
'abort server migrations by UUID'
)
raise exceptions.CommandError(msg)
- server = utils.find_resource(
- compute_client.servers,
+ server = compute_client.find_server(
parsed_args.server,
+ ignore_missing=False,
)
# the nova API doesn't currently allow retrieval by UUID but it's a
@@ -480,6 +510,6 @@ class ForceCompleteMigration(command.Command):
compute_client, server.id, parsed_args.migration,
).id
- compute_client.server_migrations.live_migrate_force_complete(
- server.id, migration_id,
+ compute_client.force_complete_server_migration(
+ migration_id, server.id
)
diff --git a/openstackclient/compute/v2/server_volume.py b/openstackclient/compute/v2/server_volume.py
index d53cec93..b4322c0b 100644
--- a/openstackclient/compute/v2/server_volume.py
+++ b/openstackclient/compute/v2/server_volume.py
@@ -14,7 +14,7 @@
"""Compute v2 Server action implementations"""
-from novaclient import api_versions
+from openstack import utils as sdk_utils
from osc_lib.command import command
from osc_lib import exceptions
from osc_lib import utils
@@ -34,27 +34,25 @@ class ListServerVolume(command.Lister):
return parser
def take_action(self, parsed_args):
+ compute_client = self.app.client_manager.sdk_connection.compute
- compute_client = self.app.client_manager.compute
-
- server = utils.find_resource(
- compute_client.servers,
+ server = compute_client.find_server(
parsed_args.server,
+ ignore_missing=False,
)
-
- volumes = compute_client.volumes.get_server_volumes(server.id)
+ volumes = compute_client.volume_attachments(server)
columns = ()
column_headers = ()
- if compute_client.api_version < api_versions.APIVersion('2.89'):
+ if not sdk_utils.supports_microversion(compute_client, '2.89'):
columns += ('id',)
column_headers += ('ID',)
columns += (
'device',
- 'serverId',
- 'volumeId',
+ 'server_id',
+ 'volume_id',
)
column_headers += (
'Device',
@@ -62,40 +60,36 @@ class ListServerVolume(command.Lister):
'Volume ID',
)
- if compute_client.api_version >= api_versions.APIVersion('2.70'):
+ if sdk_utils.supports_microversion(compute_client, '2.70'):
columns += ('tag',)
column_headers += ('Tag',)
- if compute_client.api_version >= api_versions.APIVersion('2.79'):
+ if sdk_utils.supports_microversion(compute_client, '2.79'):
columns += ('delete_on_termination',)
column_headers += ('Delete On Termination?',)
- if compute_client.api_version >= api_versions.APIVersion('2.89'):
- columns += ('attachment_id', 'bdm_uuid')
+ if sdk_utils.supports_microversion(compute_client, '2.89'):
+ columns += ('attachment_id', 'bdm_id')
column_headers += ('Attachment ID', 'BlockDeviceMapping UUID')
return (
column_headers,
- (
- utils.get_item_properties(
- s, columns, mixed_case_fields=('serverId', 'volumeId')
- ) for s in volumes
- ),
+ (utils.get_item_properties(s, columns) for s in volumes),
)
-class UpdateServerVolume(command.Command):
+class SetServerVolume(command.Command):
"""Update a volume attachment on the server."""
def get_parser(self, prog_name):
- parser = super(UpdateServerVolume, self).get_parser(prog_name)
+ parser = super().get_parser(prog_name)
parser.add_argument(
'server',
help=_('Server to update volume for (name or ID)'),
)
parser.add_argument(
'volume',
- help=_('Volume (ID)'),
+ help=_('Volume to update attachment for (name or ID)'),
)
termination_group = parser.add_mutually_exclusive_group()
termination_group.add_argument(
@@ -120,31 +114,34 @@ class UpdateServerVolume(command.Command):
return parser
def take_action(self, parsed_args):
-
- compute_client = self.app.client_manager.compute
+ compute_client = self.app.client_manager.sdk_connection.compute
+ volume_client = self.app.client_manager.sdk_connection.volume
if parsed_args.delete_on_termination is not None:
- if compute_client.api_version < api_versions.APIVersion('2.85'):
+ if not sdk_utils.supports_microversion(compute_client, '2.85'):
msg = _(
'--os-compute-api-version 2.85 or greater is required to '
- 'support the --(no-)delete-on-termination option'
+ 'support the --delete-on-termination or '
+ '--preserve-on-termination option'
)
raise exceptions.CommandError(msg)
- server = utils.find_resource(
- compute_client.servers,
+ server = compute_client.find_server(
parsed_args.server,
+ ignore_missing=False,
)
-
- # NOTE(stephenfin): This may look silly, and that's because it is.
- # This API was originally used only for the swapping volumes, which
- # is an internal operation that should only be done by
- # orchestration software rather than a human. We're not going to
- # expose that, but we are going to expose the ability to change the
- # delete on termination behavior.
- compute_client.volumes.update_server_volume(
- server.id,
- parsed_args.volume,
+ volume = volume_client.find_volume(
parsed_args.volume,
+ ignore_missing=False,
+ )
+
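+ # Only the delete-on-termination flag is exposed here; swapping the
+ # attached volume remains an orchestration-only operation and is
+ # deliberately not supported by this command.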
+ compute_client.update_volume_attachment(
+ server,
+ volume,
delete_on_termination=parsed_args.delete_on_termination,
)
+
+
+# Legacy alias
+class UpdateServerVolume(SetServerVolume):
+ """DEPRECATED: Use 'server volume set' instead."""
diff --git a/openstackclient/compute/v2/usage.py b/openstackclient/compute/v2/usage.py
index 69fa04e8..86f538a7 100644
--- a/openstackclient/compute/v2/usage.py
+++ b/openstackclient/compute/v2/usage.py
@@ -15,12 +15,10 @@
"""Usage action implementations"""
-import collections
import datetime
import functools
from cliff import columns as cliff_columns
-from novaclient import api_versions
from osc_lib.command import command
from osc_lib import utils
@@ -58,7 +56,7 @@ class ProjectColumn(cliff_columns.FormattableColumn):
class CountColumn(cliff_columns.FormattableColumn):
def human_readable(self):
- return len(self._value)
+ return len(self._value) if self._value is not None else None
class FloatColumn(cliff_columns.FormattableColumn):
@@ -69,7 +67,7 @@ class FloatColumn(cliff_columns.FormattableColumn):
def _formatters(project_cache):
return {
- 'tenant_id': functools.partial(
+ 'project_id': functools.partial(
ProjectColumn, project_cache=project_cache),
'server_usages': CountColumn,
'total_memory_mb_usage': FloatColumn,
@@ -102,10 +100,10 @@ def _merge_usage(usage, next_usage):
def _merge_usage_list(usages, next_usage_list):
for next_usage in next_usage_list:
- if next_usage.tenant_id in usages:
- _merge_usage(usages[next_usage.tenant_id], next_usage)
+ if next_usage.project_id in usages:
+ _merge_usage(usages[next_usage.project_id], next_usage)
else:
- usages[next_usage.tenant_id] = next_usage
+ usages[next_usage.project_id] = next_usage
class ListUsage(command.Lister):
@@ -138,9 +136,9 @@ class ListUsage(command.Lister):
else:
return project
- compute_client = self.app.client_manager.compute
+ compute_client = self.app.client_manager.sdk_connection.compute
columns = (
- "tenant_id",
+ "project_id",
"server_usages",
"total_memory_mb_usage",
"total_vcpus_usage",
@@ -154,36 +152,25 @@ class ListUsage(command.Lister):
"Disk GB-Hours"
)
- dateformat = "%Y-%m-%d"
+ date_cli_format = "%Y-%m-%d"
+ date_api_format = "%Y-%m-%dT%H:%M:%S"
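+ # e.g. '2023-01-31' on the CLI becomes '2023-01-31T00:00:00' in the API call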
now = datetime.datetime.utcnow()
if parsed_args.start:
- start = datetime.datetime.strptime(parsed_args.start, dateformat)
+ start = datetime.datetime.strptime(
+ parsed_args.start, date_cli_format)
else:
start = now - datetime.timedelta(weeks=4)
if parsed_args.end:
- end = datetime.datetime.strptime(parsed_args.end, dateformat)
+ end = datetime.datetime.strptime(parsed_args.end, date_cli_format)
else:
end = now + datetime.timedelta(days=1)
- if compute_client.api_version < api_versions.APIVersion("2.40"):
- usage_list = compute_client.usage.list(start, end, detailed=True)
- else:
- # If the number of instances used to calculate the usage is greater
- # than CONF.api.max_limit, the usage will be split across multiple
- # requests and the responses will need to be merged back together.
- usages = collections.OrderedDict()
- usage_list = compute_client.usage.list(start, end, detailed=True)
- _merge_usage_list(usages, usage_list)
- marker = _get_usage_list_marker(usage_list)
- while marker:
- next_usage_list = compute_client.usage.list(
- start, end, detailed=True, marker=marker)
- marker = _get_usage_list_marker(next_usage_list)
- if marker:
- _merge_usage_list(usages, next_usage_list)
- usage_list = list(usages.values())
+ usage_list = list(compute_client.usages(
+ start=start.strftime(date_api_format),
+ end=end.strftime(date_api_format),
+ detailed=True))
# Cache the project list
project_cache = {}
@@ -196,8 +183,8 @@ class ListUsage(command.Lister):
if parsed_args.formatter == 'table' and len(usage_list) > 0:
self.app.stdout.write(_("Usage from %(start)s to %(end)s: \n") % {
- "start": start.strftime(dateformat),
- "end": end.strftime(dateformat),
+ "start": start.strftime(date_cli_format),
+ "end": end.strftime(date_cli_format),
})
return (
@@ -239,17 +226,19 @@ class ShowUsage(command.ShowOne):
def take_action(self, parsed_args):
identity_client = self.app.client_manager.identity
- compute_client = self.app.client_manager.compute
- dateformat = "%Y-%m-%d"
+ compute_client = self.app.client_manager.sdk_connection.compute
+ date_cli_format = "%Y-%m-%d"
+ date_api_format = "%Y-%m-%dT%H:%M:%S"
now = datetime.datetime.utcnow()
if parsed_args.start:
- start = datetime.datetime.strptime(parsed_args.start, dateformat)
+ start = datetime.datetime.strptime(
+ parsed_args.start, date_cli_format)
else:
start = now - datetime.timedelta(weeks=4)
if parsed_args.end:
- end = datetime.datetime.strptime(parsed_args.end, dateformat)
+ end = datetime.datetime.strptime(parsed_args.end, date_cli_format)
else:
end = now + datetime.timedelta(days=1)
@@ -262,19 +251,21 @@ class ShowUsage(command.ShowOne):
# Get the project from the current auth
project = self.app.client_manager.auth_ref.project_id
- usage = compute_client.usage.get(project, start, end)
+ usage = compute_client.get_usage(
+ project=project, start=start.strftime(date_api_format),
+ end=end.strftime(date_api_format))
if parsed_args.formatter == 'table':
self.app.stdout.write(_(
"Usage from %(start)s to %(end)s on project %(project)s: \n"
) % {
- "start": start.strftime(dateformat),
- "end": end.strftime(dateformat),
+ "start": start.strftime(date_cli_format),
+ "end": end.strftime(date_cli_format),
"project": project,
})
columns = (
- "tenant_id",
+ "project_id",
"server_usages",
"total_memory_mb_usage",
"total_vcpus_usage",
diff --git a/openstackclient/identity/v3/endpoint_group.py b/openstackclient/identity/v3/endpoint_group.py
index cbe27edb..9bb026a9 100644
--- a/openstackclient/identity/v3/endpoint_group.py
+++ b/openstackclient/identity/v3/endpoint_group.py
@@ -268,7 +268,7 @@ class SetEndpointGroup(command.Command, _FiltersReader):
parser.add_argument(
'--name',
metavar='<name>',
- help=_('New enpoint group name'),
+ help=_('New endpoint group name'),
)
parser.add_argument(
'--filters',
diff --git a/openstackclient/identity/v3/identity_provider.py b/openstackclient/identity/v3/identity_provider.py
index 7307cea0..19a62144 100644
--- a/openstackclient/identity/v3/identity_provider.py
+++ b/openstackclient/identity/v3/identity_provider.py
@@ -63,6 +63,16 @@ class CreateIdentityProvider(command.ShowOne):
'specified, a domain will be created automatically. '
'(Name or ID)'),
)
+ parser.add_argument(
+ '--authorization-ttl',
+ metavar='<authorization-ttl>',
+ type=int,
+ help=_('Time to keep the role assignments for users '
+ 'authenticating via this identity provider. '
+ 'When not provided, global default configured in the '
+ 'Identity service will be used. '
+ 'Available since Identity API version 3.14 (Ussuri).'),
+ )
enable_identity_provider = parser.add_mutually_exclusive_group()
enable_identity_provider.add_argument(
'--enable',
@@ -95,12 +105,23 @@ class CreateIdentityProvider(command.ShowOne):
domain_id = common.find_domain(identity_client,
parsed_args.domain).id
+ # TODO(pas-ha) actually check for 3.14 microversion
+ kwargs = {}
+ auth_ttl = parsed_args.authorization_ttl
+ if auth_ttl is not None:
+ if auth_ttl < 0:
+                msg = (_("%(param)s must be a positive integer or zero."
+ ) % {"param": "authorization-ttl"})
+ raise exceptions.CommandError(msg)
+ kwargs['authorization_ttl'] = auth_ttl
+
idp = identity_client.federation.identity_providers.create(
id=parsed_args.identity_provider_id,
remote_ids=remote_ids,
description=parsed_args.description,
domain_id=domain_id,
- enabled=parsed_args.enabled)
+ enabled=parsed_args.enabled,
+ **kwargs)
idp._info.pop('links', None)
remote_ids = format_columns.ListColumn(idp._info.pop('remote_ids', []))
@@ -205,6 +226,14 @@ class SetIdentityProvider(command.Command):
help=_('Name of a file that contains many remote IDs to associate '
'with the identity provider, one per line'),
)
+ parser.add_argument(
+ '--authorization-ttl',
+ metavar='<authorization-ttl>',
+ type=int,
+ help=_('Time to keep the role assignments for users '
+ 'authenticating via this identity provider. '
+ 'Available since Identity API version 3.14 (Ussuri).'),
+ )
enable_identity_provider = parser.add_mutually_exclusive_group()
enable_identity_provider.add_argument(
'--enable',
@@ -241,6 +270,20 @@ class SetIdentityProvider(command.Command):
if parsed_args.remote_id_file or parsed_args.remote_id:
kwargs['remote_ids'] = remote_ids
+ # TODO(pas-ha) actually check for 3.14 microversion
+ # TODO(pas-ha) make it possible to reset authorization_ttl
+ # back to None value.
+ # Currently not possible as filter_kwargs decorator in
+ # keystoneclient/base.py explicitly drops the None-valued keys
+ # from kwargs, and 'update' method is wrapped in this decorator.
+ auth_ttl = parsed_args.authorization_ttl
+ if auth_ttl is not None:
+ if auth_ttl < 0:
+                msg = (_("%(param)s must be a positive integer or zero."
+ ) % {"param": "authorization-ttl"})
+ raise exceptions.CommandError(msg)
+ kwargs['authorization_ttl'] = auth_ttl
+
federation_client.identity_providers.update(
parsed_args.identity_provider,
**kwargs
diff --git a/openstackclient/identity/v3/trust.py b/openstackclient/identity/v3/trust.py
index cd3a65d0..61273f41 100644
--- a/openstackclient/identity/v3/trust.py
+++ b/openstackclient/identity/v3/trust.py
@@ -176,10 +176,95 @@ class DeleteTrust(command.Command):
class ListTrust(command.Lister):
_description = _("List trusts")
+ def get_parser(self, prog_name):
+ parser = super().get_parser(prog_name)
+ parser.add_argument(
+ '--trustor',
+ metavar='<trustor-user>',
+ help=_('Trustor user to filter (name or ID)'),
+ )
+ parser.add_argument(
+ '--trustee',
+ metavar='<trustee-user>',
+ help=_('Trustee user to filter (name or ID)'),
+ )
+ parser.add_argument(
+ '--trustor-domain',
+ metavar='<trustor-domain>',
+ help=_('Domain that contains <trustor> (name or ID)'),
+ )
+ parser.add_argument(
+ '--trustee-domain',
+ metavar='<trustee-domain>',
+ help=_('Domain that contains <trustee> (name or ID)'),
+ )
+ parser.add_argument(
+ '--auth-user',
+ action="store_true",
+ dest='authuser',
+ help=_('Only list trusts related to the authenticated user'),
+ )
+ return parser
+
def take_action(self, parsed_args):
+ identity_client = self.app.client_manager.identity
+ auth_ref = self.app.client_manager.auth_ref
+
+ if parsed_args.authuser and any([
+ parsed_args.trustor,
+ parsed_args.trustor_domain,
+ parsed_args.trustee,
+ parsed_args.trustee_domain,
+ ]):
+            msg = _("--auth-user cannot be used with --trustor or --trustee")
+ raise exceptions.CommandError(msg)
+
+ if parsed_args.trustee_domain and not parsed_args.trustee:
+ msg = _("Using --trustee-domain mandates the use of --trustee")
+ raise exceptions.CommandError(msg)
+
+ if parsed_args.trustor_domain and not parsed_args.trustor:
+ msg = _("Using --trustor-domain mandates the use of --trustor")
+ raise exceptions.CommandError(msg)
+
+ if parsed_args.authuser:
+ if auth_ref:
+ user = common.find_user(
+ identity_client,
+ auth_ref.user_id
+ )
+            # We need two calls here as we want trusts with either the
+            # trustor or the trustee set to the current user; a single
+            # call would only return trusts with both the trustee and
+            # the trustor set to the current user
+ data1 = identity_client.trusts.list(trustor_user=user)
+ data2 = identity_client.trusts.list(trustee_user=user)
+ data = set(data1 + data2)
+ else:
+ trustor = None
+ if parsed_args.trustor:
+ trustor = common.find_user(
+ identity_client,
+ parsed_args.trustor,
+ parsed_args.trustor_domain,
+ )
+
+ trustee = None
+ if parsed_args.trustee:
+ trustee = common.find_user(
+ identity_client,
+                    parsed_args.trustee,
+                    parsed_args.trustee_domain,
+ )
+
+ data = self.app.client_manager.identity.trusts.list(
+ trustor_user=trustor,
+ trustee_user=trustee,
+ )
+
columns = ('ID', 'Expires At', 'Impersonation', 'Project ID',
'Trustee User ID', 'Trustor User ID')
- data = self.app.client_manager.identity.trusts.list()
+
return (columns,
(utils.get_item_properties(
s, columns,
diff --git a/openstackclient/image/v2/image.py b/openstackclient/image/v2/image.py
index 407c1292..4adaadda 100644
--- a/openstackclient/image/v2/image.py
+++ b/openstackclient/image/v2/image.py
@@ -21,7 +21,8 @@ import logging
import os
import sys
-import openstack.cloud._utils
+from cinderclient import api_versions
+from openstack import exceptions as sdk_exceptions
from openstack.image import image_signer
from osc_lib.api import utils as api_utils
from osc_lib.cli import format_columns
@@ -32,7 +33,7 @@ from osc_lib import utils
from openstackclient.common import progressbar
from openstackclient.i18n import _
-from openstackclient.identity import common
+from openstackclient.identity import common as identity_common
if os.name == "nt":
import msvcrt
@@ -43,8 +44,19 @@ else:
CONTAINER_CHOICES = ["ami", "ari", "aki", "bare", "docker", "ova", "ovf"]
DEFAULT_CONTAINER_FORMAT = 'bare'
DEFAULT_DISK_FORMAT = 'raw'
-DISK_CHOICES = ["ami", "ari", "aki", "vhd", "vmdk", "raw", "qcow2", "vhdx",
- "vdi", "iso", "ploop"]
+DISK_CHOICES = [
+ "ami",
+ "ari",
+ "aki",
+ "vhd",
+ "vmdk",
+ "raw",
+ "qcow2",
+ "vhdx",
+ "vdi",
+ "iso",
+ "ploop",
+]
MEMBER_STATUS_CHOICES = ["accepted", "pending", "rejected", "all"]
@@ -58,10 +70,25 @@ def _format_image(image, human_readable=False):
properties = {}
# the only fields we're not including is "links", "tags" and the properties
- fields_to_show = ['status', 'name', 'container_format', 'created_at',
- 'size', 'disk_format', 'updated_at', 'visibility',
- 'min_disk', 'protected', 'id', 'file', 'checksum',
- 'owner', 'virtual_size', 'min_ram', 'schema']
+ fields_to_show = [
+ 'status',
+ 'name',
+ 'container_format',
+ 'created_at',
+ 'size',
+ 'disk_format',
+ 'updated_at',
+ 'visibility',
+ 'min_disk',
+ 'protected',
+ 'id',
+ 'file',
+ 'checksum',
+ 'owner',
+ 'virtual_size',
+ 'min_ram',
+ 'schema',
+ ]
# TODO(gtema/anybody): actually it should be possible to drop this method,
# since SDK already delivers a proper object
@@ -98,50 +125,48 @@ _formatters = {
def _get_member_columns(item):
- column_map = {
- 'image_id': 'image_id'
- }
+ column_map = {'image_id': 'image_id'}
hidden_columns = ['id', 'location', 'name']
return utils.get_osc_show_columns_for_sdk_resource(
- item.to_dict(), column_map, hidden_columns,
+ item.to_dict(),
+ column_map,
+ hidden_columns,
)
-def get_data_file(args):
- if args.file:
- return (open(args.file, 'rb'), args.file)
+def get_data_from_stdin():
+ # distinguish cases where:
+ # (1) stdin is not valid (as in cron jobs):
+ # openstack ... <&-
+ # (2) image data is provided through stdin:
+ # openstack ... < /tmp/file
+ # (3) no image data provided
+ # openstack ...
+ try:
+ os.fstat(0)
+ except OSError:
+ # (1) stdin is not valid
+ return None
+
+ if not sys.stdin.isatty():
+ # (2) image data is provided through stdin
+ image = sys.stdin
+ if hasattr(sys.stdin, 'buffer'):
+ image = sys.stdin.buffer
+ if msvcrt:
+ msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY)
+
+ return image
else:
- # distinguish cases where:
- # (1) stdin is not valid (as in cron jobs):
- # openstack ... <&-
- # (2) image data is provided through stdin:
- # openstack ... < /tmp/file
- # (3) no image data provided
- # openstack ...
- try:
- os.fstat(0)
- except OSError:
- # (1) stdin is not valid
- return (None, None)
- if not sys.stdin.isatty():
- # (2) image data is provided through stdin
- image = sys.stdin
- if hasattr(sys.stdin, 'buffer'):
- image = sys.stdin.buffer
- if msvcrt:
- msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY)
-
- return (image, None)
- else:
- # (3)
- return (None, None)
+ # (3)
+ return None
class AddProjectToImage(command.ShowOne):
_description = _("Associate project with image")
def get_parser(self, prog_name):
- parser = super(AddProjectToImage, self).get_parser(prog_name)
+ parser = super().get_parser(prog_name)
parser.add_argument(
"image",
metavar="<image>",
@@ -152,23 +177,23 @@ class AddProjectToImage(command.ShowOne):
metavar="<project>",
help=_("Project to associate with image (ID)"),
)
- common.add_project_domain_option_to_parser(parser)
+ identity_common.add_project_domain_option_to_parser(parser)
return parser
def take_action(self, parsed_args):
image_client = self.app.client_manager.image
identity_client = self.app.client_manager.identity
- if openstack.cloud._utils._is_uuid_like(parsed_args.project):
- project_id = parsed_args.project
- else:
- project_id = common.find_project(
- identity_client,
- parsed_args.project,
- parsed_args.project_domain).id
+ project_id = identity_common.find_project(
+ identity_client,
+ parsed_args.project,
+ parsed_args.project_domain,
+ ).id
- image = image_client.find_image(parsed_args.image,
- ignore_missing=False)
+ image = image_client.find_image(
+ parsed_args.image,
+ ignore_missing=False,
+ )
obj = image_client.add_member(
image=image.id,
@@ -187,7 +212,7 @@ class CreateImage(command.ShowOne):
deadopts = ('size', 'location', 'copy-from', 'checksum', 'store')
def get_parser(self, prog_name):
- parser = super(CreateImage, self).get_parser(prog_name)
+ parser = super().get_parser(prog_name)
# TODO(bunting): There are additional arguments that v1 supported
# that v2 either doesn't support or supports weirdly.
# --checksum - could be faked clientside perhaps?
@@ -210,19 +235,28 @@ class CreateImage(command.ShowOne):
default=DEFAULT_CONTAINER_FORMAT,
choices=CONTAINER_CHOICES,
metavar="<container-format>",
- help=(_("Image container format. "
+ help=(
+ _(
+ "Image container format. "
"The supported options are: %(option_list)s. "
- "The default format is: %(default_opt)s") %
- {'option_list': ', '.join(CONTAINER_CHOICES),
- 'default_opt': DEFAULT_CONTAINER_FORMAT})
+ "The default format is: %(default_opt)s"
+ )
+ % {
+ 'option_list': ', '.join(CONTAINER_CHOICES),
+ 'default_opt': DEFAULT_CONTAINER_FORMAT,
+ }
+ ),
)
parser.add_argument(
"--disk-format",
default=DEFAULT_DISK_FORMAT,
choices=DISK_CHOICES,
metavar="<disk-format>",
- help=_("Image disk format. The supported options are: %s. "
- "The default format is: raw") % ', '.join(DISK_CHOICES)
+ help=_(
+ "Image disk format. The supported options are: %s. "
+ "The default format is: raw"
+ )
+ % ', '.join(DISK_CHOICES),
)
parser.add_argument(
"--min-disk",
@@ -239,6 +273,7 @@ class CreateImage(command.ShowOne):
source_group = parser.add_mutually_exclusive_group()
source_group.add_argument(
"--file",
+ dest="filename",
metavar="<file>",
help=_("Upload image from local file"),
)
@@ -252,61 +287,82 @@ class CreateImage(command.ShowOne):
dest='force',
action='store_true',
default=False,
- help=_("Force image creation if volume is in use "
- "(only meaningful with --volume)"),
+ help=_(
+ "Force image creation if volume is in use "
+ "(only meaningful with --volume)"
+ ),
)
parser.add_argument(
"--progress",
action="store_true",
default=False,
- help=_("Show upload progress bar."),
+ help=_(
+ "Show upload progress bar "
+ "(ignored if passing data via stdin)"
+ ),
)
parser.add_argument(
'--sign-key-path',
metavar="<sign-key-path>",
default=[],
- help=_("Sign the image using the specified private key. "
- "Only use in combination with --sign-cert-id")
+ help=_(
+ "Sign the image using the specified private key. "
+ "Only use in combination with --sign-cert-id"
+ ),
)
parser.add_argument(
'--sign-cert-id',
metavar="<sign-cert-id>",
default=[],
- help=_("The specified certificate UUID is a reference to "
- "the certificate in the key manager that corresponds "
- "to the public key and is used for signature validation. "
- "Only use in combination with --sign-key-path")
+ help=_(
+ "The specified certificate UUID is a reference to "
+ "the certificate in the key manager that corresponds "
+ "to the public key and is used for signature validation. "
+ "Only use in combination with --sign-key-path"
+ ),
)
protected_group = parser.add_mutually_exclusive_group()
protected_group.add_argument(
"--protected",
action="store_true",
+ dest="is_protected",
+ default=None,
help=_("Prevent image from being deleted"),
)
protected_group.add_argument(
"--unprotected",
- action="store_true",
+ action="store_false",
+ dest="is_protected",
+ default=None,
help=_("Allow image to be deleted (default)"),
)
public_group = parser.add_mutually_exclusive_group()
public_group.add_argument(
"--public",
- action="store_true",
+ action="store_const",
+ const="public",
+ dest="visibility",
help=_("Image is accessible to the public"),
)
public_group.add_argument(
"--private",
- action="store_true",
+ action="store_const",
+ const="private",
+ dest="visibility",
help=_("Image is inaccessible to the public (default)"),
)
public_group.add_argument(
"--community",
- action="store_true",
+ action="store_const",
+ const="community",
+ dest="visibility",
help=_("Image is accessible to the community"),
)
public_group.add_argument(
"--shared",
- action="store_true",
+ action="store_const",
+ const="shared",
+ dest="visibility",
help=_("Image can be shared"),
)
parser.add_argument(
@@ -314,16 +370,20 @@ class CreateImage(command.ShowOne):
dest="properties",
metavar="<key=value>",
action=parseractions.KeyValueAction,
- help=_("Set a property on this image "
- "(repeat option to set multiple properties)"),
+ help=_(
+ "Set a property on this image "
+ "(repeat option to set multiple properties)"
+ ),
)
parser.add_argument(
"--tag",
dest="tags",
metavar="<tag>",
action='append',
- help=_("Set a tag on this image "
- "(repeat option to set multiple tags)"),
+ help=_(
+ "Set a tag on this image "
+ "(repeat option to set multiple tags)"
+ ),
)
parser.add_argument(
"--project",
@@ -335,10 +395,10 @@ class CreateImage(command.ShowOne):
dest="use_import",
action="store_true",
help=_(
- "Force the use of glance image import instead of"
- " direct upload")
+ "Force the use of glance image import instead of direct upload"
+ ),
)
- common.add_project_domain_option_to_parser(parser)
+ identity_common.add_project_domain_option_to_parser(parser)
for deadopt in self.deadopts:
parser.add_argument(
"--%s" % deadopt,
@@ -348,22 +408,23 @@ class CreateImage(command.ShowOne):
)
return parser
- def take_action(self, parsed_args):
+ def _take_action_image(self, parsed_args):
identity_client = self.app.client_manager.identity
image_client = self.app.client_manager.image
- for deadopt in self.deadopts:
- if getattr(parsed_args, deadopt.replace('-', '_'), None):
- raise exceptions.CommandError(
- _("ERROR: --%s was given, which is an Image v1 option"
- " that is no longer supported in Image v2") % deadopt)
-
# Build an attribute dict from the parsed args, only include
# attributes that were actually set on the command line
kwargs = {'allow_duplicates': True}
- copy_attrs = ('name', 'id',
- 'container_format', 'disk_format',
- 'min_disk', 'min_ram', 'tags', 'visibility')
+ copy_attrs = (
+ 'name',
+ 'id',
+ 'container_format',
+ 'disk_format',
+ 'min_disk',
+ 'min_ram',
+ 'tags',
+ 'visibility',
+ )
for attr in copy_attrs:
if attr in parsed_args:
val = getattr(parsed_args, attr, None)
@@ -383,20 +444,14 @@ class CreateImage(command.ShowOne):
# a single value for the pair of options because the default must be
# to do nothing when no options are present as opposed to always
# setting a default.
- if parsed_args.protected:
- kwargs['is_protected'] = True
- if parsed_args.unprotected:
- kwargs['is_protected'] = False
- if parsed_args.public:
- kwargs['visibility'] = 'public'
- if parsed_args.private:
- kwargs['visibility'] = 'private'
- if parsed_args.community:
- kwargs['visibility'] = 'community'
- if parsed_args.shared:
- kwargs['visibility'] = 'shared'
+ if parsed_args.is_protected is not None:
+ kwargs['is_protected'] = parsed_args.is_protected
+
+ if parsed_args.visibility is not None:
+ kwargs['visibility'] = parsed_args.visibility
+
if parsed_args.project:
- kwargs['owner_id'] = common.find_project(
+ kwargs['owner_id'] = identity_common.find_project(
identity_client,
parsed_args.project,
parsed_args.project_domain,
@@ -408,100 +463,184 @@ class CreateImage(command.ShowOne):
# open the file first to ensure any failures are handled before the
# image is created. Get the file name (if it is file, and not stdin)
# for easier further handling.
- (fp, fname) = get_data_file(parsed_args)
- info = {}
+ if parsed_args.filename:
+ try:
+ fp = open(parsed_args.filename, 'rb')
+ except FileNotFoundError:
+ raise exceptions.CommandError(
+ '%r is not a valid file' % parsed_args.filename,
+ )
+ else:
+ fp = get_data_from_stdin()
if fp is not None and parsed_args.volume:
- raise exceptions.CommandError(_("Uploading data and using "
- "container are not allowed at "
- "the same time"))
- if fp is None and parsed_args.file:
- LOG.warning(_("Failed to get an image file."))
- return {}, {}
- if fp is not None and parsed_args.progress:
- filesize = os.path.getsize(fname)
+ msg = _(
+ "Uploading data and using container are not allowed at "
+ "the same time"
+ )
+ raise exceptions.CommandError(msg)
+
+ if parsed_args.progress and parsed_args.filename:
+ # NOTE(stephenfin): we only show a progress bar if the user
+ # requested it *and* we're reading from a file (not stdin)
+ filesize = os.path.getsize(parsed_args.filename)
if filesize is not None:
kwargs['validate_checksum'] = False
kwargs['data'] = progressbar.VerboseFileWrapper(fp, filesize)
- elif fname:
- kwargs['filename'] = fname
+ else:
+ kwargs['data'] = fp
+ elif parsed_args.filename:
+ kwargs['filename'] = parsed_args.filename
elif fp:
kwargs['validate_checksum'] = False
kwargs['data'] = fp
# sign an image using a given local private key file
if parsed_args.sign_key_path or parsed_args.sign_cert_id:
- if not parsed_args.file:
- msg = (_("signing an image requires the --file option, "
- "passing files via stdin when signing is not "
- "supported."))
+ if not parsed_args.filename:
+ msg = _(
+ "signing an image requires the --file option, "
+ "passing files via stdin when signing is not "
+ "supported."
+ )
raise exceptions.CommandError(msg)
- if (len(parsed_args.sign_key_path) < 1 or
- len(parsed_args.sign_cert_id) < 1):
- msg = (_("'sign-key-path' and 'sign-cert-id' must both be "
- "specified when attempting to sign an image."))
+
+ if (
+ len(parsed_args.sign_key_path) < 1 or
+ len(parsed_args.sign_cert_id) < 1
+ ):
+ msg = _(
+ "'sign-key-path' and 'sign-cert-id' must both be "
+ "specified when attempting to sign an image."
+ )
raise exceptions.CommandError(msg)
- else:
- sign_key_path = parsed_args.sign_key_path
- sign_cert_id = parsed_args.sign_cert_id
- signer = image_signer.ImageSigner()
- try:
- pw = utils.get_password(
- self.app.stdin,
- prompt=("Please enter private key password, leave "
- "empty if none: "),
- confirm=False)
-
- if not pw or len(pw) < 1:
- pw = None
- else:
- # load_private_key() requires the password to be
- # passed as bytes
- pw = pw.encode()
-
- signer.load_private_key(
- sign_key_path,
- password=pw)
- except Exception:
- msg = (_("Error during sign operation: private key "
- "could not be loaded."))
- raise exceptions.CommandError(msg)
-
- signature = signer.generate_signature(fp)
- signature_b64 = b64encode(signature)
- kwargs['img_signature'] = signature_b64
- kwargs['img_signature_certificate_uuid'] = sign_cert_id
- kwargs['img_signature_hash_method'] = signer.hash_method
- if signer.padding_method:
- kwargs['img_signature_key_type'] = \
- signer.padding_method
-
- # If a volume is specified.
- if parsed_args.volume:
- volume_client = self.app.client_manager.volume
- source_volume = utils.find_resource(
- volume_client.volumes,
- parsed_args.volume,
- )
- response, body = volume_client.volumes.upload_to_image(
- source_volume.id,
- parsed_args.force,
- parsed_args.name,
- parsed_args.container_format,
- parsed_args.disk_format,
- visibility=kwargs.get('visibility', 'private'),
- protected=True if parsed_args.protected else False
- )
- info = body['os-volume_upload_image']
+
+ sign_key_path = parsed_args.sign_key_path
+ sign_cert_id = parsed_args.sign_cert_id
+ signer = image_signer.ImageSigner()
try:
- info['volume_type'] = info['volume_type']['name']
- except TypeError:
- info['volume_type'] = None
+ pw = utils.get_password(
+ self.app.stdin,
+ prompt=(
+ "Please enter private key password, leave "
+ "empty if none: "
+ ),
+ confirm=False,
+ )
+
+ if not pw or len(pw) < 1:
+ pw = None
+ else:
+ # load_private_key() requires the password to be
+ # passed as bytes
+ pw = pw.encode()
+
+ signer.load_private_key(sign_key_path, password=pw)
+ except Exception:
+ msg = _(
+ "Error during sign operation: private key "
+ "could not be loaded."
+ )
+ raise exceptions.CommandError(msg)
+
+ signature = signer.generate_signature(fp)
+ signature_b64 = b64encode(signature)
+ kwargs['img_signature'] = signature_b64
+ kwargs['img_signature_certificate_uuid'] = sign_cert_id
+ kwargs['img_signature_hash_method'] = signer.hash_method
+ if signer.padding_method:
+ kwargs['img_signature_key_type'] = signer.padding_method
+
+ image = image_client.create_image(**kwargs)
+
+ if parsed_args.filename:
+ fp.close()
+
+ return _format_image(image)
+
+ def _take_action_volume(self, parsed_args):
+ volume_client = self.app.client_manager.volume
+
+ unsupported_opts = {
+ # 'name', # 'name' is a positional argument and will always exist
+ 'id',
+ 'min_disk',
+ 'min_ram',
+            'filename',
+ 'force',
+ 'progress',
+ 'sign_key_path',
+ 'sign_cert_id',
+ 'properties',
+ 'tags',
+ 'project',
+ 'use_import',
+ }
+ for unsupported_opt in unsupported_opts:
+ if getattr(parsed_args, unsupported_opt, None):
+                opt_name = unsupported_opt.replace('_', '-')
+                if unsupported_opt == 'filename':
+                    opt_name = 'file'
+                if unsupported_opt == 'use_import':
+                    opt_name = 'import'
+ msg = _(
+ "'--%s' was given, which is not supported when "
+ "creating an image from a volume. "
+ "This will be an error in a future version."
+ )
+ # TODO(stephenfin): These should be an error in a future
+ # version
+ LOG.warning(msg % opt_name)
+
+ source_volume = utils.find_resource(
+ volume_client.volumes,
+ parsed_args.volume,
+ )
+ kwargs = {}
+ if volume_client.api_version < api_versions.APIVersion('3.1'):
+ if (
+ parsed_args.visibility or
+ parsed_args.is_protected is not None
+ ):
+ msg = _(
+ '--os-volume-api-version 3.1 or greater is required '
+ 'to support the --public, --private, --community, '
+ '--shared or --protected option.'
+ )
+ raise exceptions.CommandError(msg)
else:
- image = image_client.create_image(**kwargs)
+ kwargs.update(
+ visibility=parsed_args.visibility or 'private',
+ protected=parsed_args.is_protected or False,
+ )
- if not info:
- info = _format_image(image)
+ response, body = volume_client.volumes.upload_to_image(
+ source_volume.id,
+ parsed_args.force,
+ parsed_args.name,
+ parsed_args.container_format,
+ parsed_args.disk_format,
+ **kwargs
+ )
+ info = body['os-volume_upload_image']
+ try:
+ info['volume_type'] = info['volume_type']['name']
+ except TypeError:
+ info['volume_type'] = None
+
+ return info
+
+ def take_action(self, parsed_args):
+ for deadopt in self.deadopts:
+ if getattr(parsed_args, deadopt.replace('-', '_'), None):
+ msg = _(
+ "ERROR: --%s was given, which is an Image v1 option "
+ "that is no longer supported in Image v2"
+ )
+ raise exceptions.CommandError(msg % deadopt)
+
+ if parsed_args.volume:
+ info = self._take_action_volume(parsed_args)
+ else:
+ info = self._take_action_image(parsed_args)
return zip(*sorted(info.items()))
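
As a rough standalone illustration of the file-upload path handled by _take_action_image above (SDK call name and keyword arguments are taken from the diff; the connection setup, image name and file name are assumptions, not part of the change):

    import openstack

    conn = openstack.connect()
    # Create the image record and upload data from a local file; mirrors the
    # kwargs (name, formats, filename, allow_duplicates) built in the command.
    image = conn.image.create_image(
        name='example-image',
        container_format='bare',
        disk_format='qcow2',
        filename='example-image.qcow2',
        allow_duplicates=True,
    )
    print(image.id, image.status)
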
@@ -510,7 +649,7 @@ class DeleteImage(command.Command):
_description = _("Delete image(s)")
def get_parser(self, prog_name):
- parser = super(DeleteImage, self).get_parser(prog_name)
+ parser = super().get_parser(prog_name)
parser.add_argument(
"images",
metavar="<image>",
@@ -525,19 +664,25 @@ class DeleteImage(command.Command):
image_client = self.app.client_manager.image
for image in parsed_args.images:
try:
- image_obj = image_client.find_image(image,
- ignore_missing=False)
+ image_obj = image_client.find_image(
+ image,
+ ignore_missing=False,
+ )
image_client.delete_image(image_obj.id)
except Exception as e:
del_result += 1
- LOG.error(_("Failed to delete image with name or "
- "ID '%(image)s': %(e)s"),
- {'image': image, 'e': e})
+ msg = _(
+ "Failed to delete image with name or "
+ "ID '%(image)s': %(e)s"
+ )
+ LOG.error(msg, {'image': image, 'e': e})
total = len(parsed_args.images)
- if (del_result > 0):
- msg = (_("Failed to delete %(dresult)s of %(total)s images.")
- % {'dresult': del_result, 'total': total})
+ if del_result > 0:
+ msg = _("Failed to delete %(dresult)s of %(total)s images.") % {
+ 'dresult': del_result,
+ 'total': total,
+ }
raise exceptions.CommandError(msg)
@@ -545,54 +690,63 @@ class ListImage(command.Lister):
_description = _("List available images")
def get_parser(self, prog_name):
- parser = super(ListImage, self).get_parser(prog_name)
+ parser = super().get_parser(prog_name)
public_group = parser.add_mutually_exclusive_group()
public_group.add_argument(
"--public",
- dest="public",
- action="store_true",
- default=False,
+ action="store_const",
+ const="public",
+ dest="visibility",
help=_("List only public images"),
)
public_group.add_argument(
"--private",
- dest="private",
- action="store_true",
- default=False,
+ action="store_const",
+ const="private",
+ dest="visibility",
help=_("List only private images"),
)
public_group.add_argument(
"--community",
- dest="community",
- action="store_true",
- default=False,
+ action="store_const",
+ const="community",
+ dest="visibility",
help=_("List only community images"),
)
public_group.add_argument(
"--shared",
- dest="shared",
- action="store_true",
- default=False,
+ action="store_const",
+ const="shared",
+ dest="visibility",
help=_("List only shared images"),
)
+ public_group.add_argument(
+ "--all",
+ action="store_const",
+ const="all",
+ dest="visibility",
+ help=_("List all images"),
+ )
parser.add_argument(
'--property',
metavar='<key=value>',
action=parseractions.KeyValueAction,
- help=_('Filter output based on property '
- '(repeat option to filter on multiple properties)'),
+ help=_(
+ 'Filter output based on property '
+ '(repeat option to filter on multiple properties)'
+ ),
)
parser.add_argument(
'--name',
metavar='<name>',
default=None,
- help=_("Filter images based on name.")
+ help=_("Filter images based on name."),
)
parser.add_argument(
'--status',
metavar='<status>',
default=None,
- help=_("Filter images based on status.")
+ help=_("Filter images based on status."),
)
parser.add_argument(
'--member-status',
@@ -600,25 +754,34 @@ class ListImage(command.Lister):
default=None,
type=lambda s: s.lower(),
choices=MEMBER_STATUS_CHOICES,
- help=(_("Filter images based on member status. "
- "The supported options are: %s. ") %
- ', '.join(MEMBER_STATUS_CHOICES))
+ help=(
+ _(
+ "Filter images based on member status. "
+ "The supported options are: %s. "
+ )
+ % ', '.join(MEMBER_STATUS_CHOICES)
+ ),
)
parser.add_argument(
'--project',
metavar='<project>',
- help=_("Search by project (admin only) (name or ID)")
+ help=_("Search by project (admin only) (name or ID)"),
)
- common.add_project_domain_option_to_parser(parser)
+ identity_common.add_project_domain_option_to_parser(parser)
parser.add_argument(
'--tag',
metavar='<tag>',
- default=None,
- help=_('Filter images based on tag.'),
+ action='append',
+ default=[],
+ help=_(
+ 'Filter images based on tag. '
+ '(repeat option to filter on multiple tags)'
+ ),
)
parser.add_argument(
'--hidden',
action='store_true',
+ dest='is_hidden',
default=False,
help=_('List hidden images'),
)
@@ -640,9 +803,11 @@ class ListImage(command.Lister):
'--sort',
metavar="<key>[:<direction>]",
default='name:asc',
- help=_("Sort output by selected keys and directions(asc or desc) "
- "(default: name:asc), multiple keys and directions can be "
- "specified separated by comma"),
+ help=_(
+ "Sort output by selected keys and directions (asc or desc) "
+ "(default: name:asc), multiple keys and directions can be "
+ "specified separated by comma"
+ ),
)
parser.add_argument(
"--limit",
@@ -654,9 +819,11 @@ class ListImage(command.Lister):
'--marker',
metavar='<image>',
default=None,
- help=_("The last image of the previous page. Display "
- "list of images after marker. Display all images if not "
- "specified. (name or ID)"),
+ help=_(
+ "The last image of the previous page. Display "
+ "list of images after marker. Display all images if not "
+ "specified. (name or ID)"
+ ),
)
return parser
@@ -665,18 +832,15 @@ class ListImage(command.Lister):
image_client = self.app.client_manager.image
kwargs = {}
- if parsed_args.public:
- kwargs['visibility'] = 'public'
- if parsed_args.private:
- kwargs['visibility'] = 'private'
- if parsed_args.community:
- kwargs['visibility'] = 'community'
- if parsed_args.shared:
- kwargs['visibility'] = 'shared'
+ if parsed_args.visibility is not None:
+ kwargs['visibility'] = parsed_args.visibility
if parsed_args.limit:
kwargs['limit'] = parsed_args.limit
if parsed_args.marker:
- kwargs['marker'] = image_client.find_image(parsed_args.marker).id
+ kwargs['marker'] = image_client.find_image(
+ parsed_args.marker,
+ ignore_missing=False,
+ ).id
if parsed_args.name:
kwargs['name'] = parsed_args.name
if parsed_args.status:
@@ -687,14 +851,14 @@ class ListImage(command.Lister):
kwargs['tag'] = parsed_args.tag
project_id = None
if parsed_args.project:
- project_id = common.find_project(
+ project_id = identity_common.find_project(
identity_client,
parsed_args.project,
parsed_args.project_domain,
).id
kwargs['owner'] = project_id
- if parsed_args.hidden:
- kwargs['is_hidden'] = True
+ if parsed_args.is_hidden:
+ kwargs['is_hidden'] = parsed_args.is_hidden
if parsed_args.long:
columns = (
'ID',
@@ -745,11 +909,14 @@ class ListImage(command.Lister):
return (
column_headers,
- (utils.get_item_properties(
- s,
- columns,
- formatters=_formatters,
- ) for s in data)
+ (
+ utils.get_item_properties(
+ s,
+ columns,
+ formatters=_formatters,
+ )
+ for s in data
+ ),
)
@@ -757,38 +924,43 @@ class ListImageProjects(command.Lister):
_description = _("List projects associated with image")
def get_parser(self, prog_name):
- parser = super(ListImageProjects, self).get_parser(prog_name)
+ parser = super().get_parser(prog_name)
parser.add_argument(
"image",
metavar="<image>",
help=_("Image (name or ID)"),
)
- common.add_project_domain_option_to_parser(parser)
+ identity_common.add_project_domain_option_to_parser(parser)
return parser
def take_action(self, parsed_args):
image_client = self.app.client_manager.image
- columns = (
- "Image ID",
- "Member ID",
- "Status"
- )
+ columns = ("Image ID", "Member ID", "Status")
- image_id = image_client.find_image(parsed_args.image).id
+ image_id = image_client.find_image(
+ parsed_args.image,
+ ignore_missing=False,
+ ).id
data = image_client.members(image=image_id)
- return (columns,
- (utils.get_item_properties(
- s, columns,
- ) for s in data))
+ return (
+ columns,
+ (
+ utils.get_item_properties(
+ s,
+ columns,
+ )
+ for s in data
+ ),
+ )
class RemoveProjectImage(command.Command):
_description = _("Disassociate project with image")
def get_parser(self, prog_name):
- parser = super(RemoveProjectImage, self).get_parser(prog_name)
+ parser = super().get_parser(prog_name)
parser.add_argument(
"image",
metavar="<image>",
@@ -799,33 +971,36 @@ class RemoveProjectImage(command.Command):
metavar="<project>",
help=_("Project to disassociate with image (name or ID)"),
)
- common.add_project_domain_option_to_parser(parser)
+ identity_common.add_project_domain_option_to_parser(parser)
return parser
def take_action(self, parsed_args):
image_client = self.app.client_manager.image
identity_client = self.app.client_manager.identity
- project_id = common.find_project(identity_client,
- parsed_args.project,
- parsed_args.project_domain).id
+ project_id = identity_common.find_project(
+ identity_client,
+ parsed_args.project,
+ parsed_args.project_domain,
+ ).id
- image = image_client.find_image(parsed_args.image,
- ignore_missing=False)
+ image = image_client.find_image(
+ parsed_args.image,
+ ignore_missing=False,
+ )
- image_client.remove_member(
- member=project_id,
- image=image.id)
+ image_client.remove_member(member=project_id, image=image.id)
class SaveImage(command.Command):
_description = _("Save an image locally")
def get_parser(self, prog_name):
- parser = super(SaveImage, self).get_parser(prog_name)
+ parser = super().get_parser(prog_name)
parser.add_argument(
"--file",
metavar="<filename>",
+ dest="filename",
help=_("Downloaded image save filename (default: stdout)"),
)
parser.add_argument(
@@ -837,9 +1012,12 @@ class SaveImage(command.Command):
def take_action(self, parsed_args):
image_client = self.app.client_manager.image
- image = image_client.find_image(parsed_args.image)
+ image = image_client.find_image(
+ parsed_args.image,
+ ignore_missing=False,
+ )
- output_file = parsed_args.file
+ output_file = parsed_args.filename
if output_file is None:
output_file = getattr(sys.stdout, "buffer", sys.stdout)
@@ -852,7 +1030,7 @@ class SetImage(command.Command):
deadopts = ('visibility',)
def get_parser(self, prog_name):
- parser = super(SetImage, self).get_parser(prog_name)
+ parser = super().get_parser(prog_name)
# TODO(bunting): There are additional arguments that v1 supported
# --size - does not exist in v2
# --store - does not exist in v2
@@ -864,20 +1042,16 @@ class SetImage(command.Command):
# --checksum - maybe could be done client side
# --stdin - could be implemented
parser.add_argument(
- "image",
- metavar="<image>",
- help=_("Image to modify (name or ID)")
+ "image", metavar="<image>", help=_("Image to modify (name or ID)")
)
parser.add_argument(
- "--name",
- metavar="<name>",
- help=_("New image name")
+ "--name", metavar="<name>", help=_("New image name")
)
parser.add_argument(
"--min-disk",
type=int,
metavar="<disk-gb>",
- help=_("Minimum disk size needed to boot image, in gigabytes")
+ help=_("Minimum disk size needed to boot image, in gigabytes"),
)
parser.add_argument(
"--min-ram",
@@ -889,46 +1063,58 @@ class SetImage(command.Command):
"--container-format",
metavar="<container-format>",
choices=CONTAINER_CHOICES,
- help=_("Image container format. The supported options are: %s") %
- ', '.join(CONTAINER_CHOICES)
+ help=_("Image container format. The supported options are: %s")
+ % ', '.join(CONTAINER_CHOICES),
)
parser.add_argument(
"--disk-format",
metavar="<disk-format>",
choices=DISK_CHOICES,
- help=_("Image disk format. The supported options are: %s") %
- ', '.join(DISK_CHOICES)
+ help=_("Image disk format. The supported options are: %s")
+ % ', '.join(DISK_CHOICES),
)
protected_group = parser.add_mutually_exclusive_group()
protected_group.add_argument(
"--protected",
action="store_true",
+ dest="is_protected",
+ default=None,
help=_("Prevent image from being deleted"),
)
protected_group.add_argument(
"--unprotected",
- action="store_true",
+ action="store_false",
+ dest="is_protected",
+ default=None,
help=_("Allow image to be deleted (default)"),
)
public_group = parser.add_mutually_exclusive_group()
public_group.add_argument(
"--public",
- action="store_true",
+ action="store_const",
+ const="public",
+ dest="visibility",
help=_("Image is accessible to the public"),
)
public_group.add_argument(
"--private",
- action="store_true",
+ action="store_const",
+ const="private",
+ dest="visibility",
help=_("Image is inaccessible to the public (default)"),
)
public_group.add_argument(
"--community",
- action="store_true",
+ action="store_const",
+ const="community",
+ dest="visibility",
help=_("Image is accessible to the community"),
)
public_group.add_argument(
"--shared",
- action="store_true",
+ action="store_const",
+ const="shared",
+ dest="visibility",
help=_("Image can be shared"),
)
parser.add_argument(
@@ -936,8 +1122,10 @@ class SetImage(command.Command):
dest="properties",
metavar="<key=value>",
action=parseractions.KeyValueAction,
- help=_("Set a property on this image "
- "(repeat option to set multiple properties)"),
+ help=_(
+ "Set a property on this image "
+ "(repeat option to set multiple properties)"
+ ),
)
parser.add_argument(
"--tag",
@@ -945,8 +1133,10 @@ class SetImage(command.Command):
metavar="<tag>",
default=None,
action='append',
- help=_("Set a tag on this image "
- "(repeat option to set multiple tags)"),
+ help=_(
+ "Set a tag on this image "
+ "(repeat option to set multiple tags)"
+ ),
)
parser.add_argument(
"--architecture",
@@ -1000,12 +1190,12 @@ class SetImage(command.Command):
metavar="<project>",
help=_("Set an alternate project on this image (name or ID)"),
)
- common.add_project_domain_option_to_parser(parser)
+ identity_common.add_project_domain_option_to_parser(parser)
for deadopt in self.deadopts:
parser.add_argument(
"--%s" % deadopt,
metavar="<%s>" % deadopt,
- dest=deadopt.replace('-', '_'),
+ dest=f"dead_{deadopt.replace('-', '_')}",
help=argparse.SUPPRESS,
)
@@ -1038,14 +1228,14 @@ class SetImage(command.Command):
hidden_group = parser.add_mutually_exclusive_group()
hidden_group.add_argument(
"--hidden",
- dest='hidden',
+ dest="is_hidden",
default=None,
action="store_true",
help=_("Hide the image"),
)
hidden_group.add_argument(
"--unhidden",
- dest='hidden',
+ dest="is_hidden",
default=None,
action="store_false",
help=_("Unhide the image"),
@@ -1057,17 +1247,22 @@ class SetImage(command.Command):
image_client = self.app.client_manager.image
for deadopt in self.deadopts:
- if getattr(parsed_args, deadopt.replace('-', '_'), None):
+ if getattr(parsed_args, f"dead_{deadopt.replace('-', '_')}", None):
raise exceptions.CommandError(
- _("ERROR: --%s was given, which is an Image v1 option"
- " that is no longer supported in Image v2") % deadopt)
+ _(
+ "ERROR: --%s was given, which is an Image v1 option"
+ " that is no longer supported in Image v2"
+ )
+ % deadopt
+ )
image = image_client.find_image(
- parsed_args.image, ignore_missing=False,
+ parsed_args.image,
+ ignore_missing=False,
)
project_id = None
if parsed_args.project:
- project_id = common.find_project(
+ project_id = identity_common.find_project(
identity_client,
parsed_args.project,
parsed_args.project_domain,
@@ -1100,10 +1295,25 @@ class SetImage(command.Command):
# handle everything else
kwargs = {}
- copy_attrs = ('architecture', 'container_format', 'disk_format',
- 'file', 'instance_id', 'kernel_id', 'locations',
- 'min_disk', 'min_ram', 'name', 'os_distro', 'os_version',
- 'prefix', 'progress', 'ramdisk_id', 'tags', 'visibility')
+ copy_attrs = (
+ 'architecture',
+ 'container_format',
+ 'disk_format',
+ 'file',
+ 'instance_id',
+ 'kernel_id',
+ 'locations',
+ 'min_disk',
+ 'min_ram',
+ 'name',
+ 'os_distro',
+ 'os_version',
+ 'prefix',
+ 'progress',
+ 'ramdisk_id',
+ 'tags',
+ 'visibility',
+ )
for attr in copy_attrs:
if attr in parsed_args:
val = getattr(parsed_args, attr, None)
@@ -1123,33 +1333,31 @@ class SetImage(command.Command):
# a single value for the pair of options because the default must be
# to do nothing when no options are present as opposed to always
# setting a default.
- if parsed_args.protected:
- kwargs['is_protected'] = True
- if parsed_args.unprotected:
- kwargs['is_protected'] = False
- if parsed_args.public:
- kwargs['visibility'] = 'public'
- if parsed_args.private:
- kwargs['visibility'] = 'private'
- if parsed_args.community:
- kwargs['visibility'] = 'community'
- if parsed_args.shared:
- kwargs['visibility'] = 'shared'
+ if parsed_args.is_protected is not None:
+ kwargs['is_protected'] = parsed_args.is_protected
+
+ if parsed_args.visibility is not None:
+ kwargs['visibility'] = parsed_args.visibility
+
if parsed_args.project:
# We already did the project lookup above
kwargs['owner_id'] = project_id
+
if parsed_args.tags:
# Tags should be extended, but duplicates removed
kwargs['tags'] = list(set(image.tags).union(set(parsed_args.tags)))
- if parsed_args.hidden is not None:
- kwargs['is_hidden'] = parsed_args.hidden
+
+ if parsed_args.is_hidden is not None:
+ kwargs['is_hidden'] = parsed_args.is_hidden
try:
image = image_client.update_image(image.id, **kwargs)
except Exception:
if activation_status is not None:
- LOG.info(_("Image %(id)s was %(status)s."),
- {'id': image.id, 'status': activation_status})
+ LOG.info(
+ _("Image %(id)s was %(status)s."),
+ {'id': image.id, 'status': activation_status},
+ )
raise
@@ -1157,7 +1365,7 @@ class ShowImage(command.ShowOne):
_description = _("Display image details")
def get_parser(self, prog_name):
- parser = super(ShowImage, self).get_parser(prog_name)
+ parser = super().get_parser(prog_name)
parser.add_argument(
"--human-readable",
default=False,
@@ -1174,8 +1382,10 @@ class ShowImage(command.ShowOne):
def take_action(self, parsed_args):
image_client = self.app.client_manager.image
- image = image_client.find_image(parsed_args.image,
- ignore_missing=False)
+ image = image_client.find_image(
+ parsed_args.image,
+ ignore_missing=False,
+ )
info = _format_image(image, parsed_args.human_readable)
return zip(*sorted(info.items()))
@@ -1185,7 +1395,7 @@ class UnsetImage(command.Command):
_description = _("Unset image tags and properties")
def get_parser(self, prog_name):
- parser = super(UnsetImage, self).get_parser(prog_name)
+ parser = super().get_parser(prog_name)
parser.add_argument(
"image",
metavar="<image>",
@@ -1197,8 +1407,10 @@ class UnsetImage(command.Command):
metavar="<tag>",
default=[],
action='append',
- help=_("Unset a tag on this image "
- "(repeat option to unset multiple tags)"),
+ help=_(
+ "Unset a tag on this image "
+ "(repeat option to unset multiple tags)"
+ ),
)
parser.add_argument(
"--property",
@@ -1206,15 +1418,19 @@ class UnsetImage(command.Command):
metavar="<property-key>",
default=[],
action='append',
- help=_("Unset a property on this image "
- "(repeat option to unset multiple properties)"),
+ help=_(
+ "Unset a property on this image "
+ "(repeat option to unset multiple properties)"
+ ),
)
return parser
def take_action(self, parsed_args):
image_client = self.app.client_manager.image
- image = image_client.find_image(parsed_args.image,
- ignore_missing=False)
+ image = image_client.find_image(
+ parsed_args.image,
+ ignore_missing=False,
+ )
kwargs = {}
tagret = 0
@@ -1224,8 +1440,9 @@ class UnsetImage(command.Command):
try:
image_client.remove_tag(image.id, k)
except Exception:
- LOG.error(_("tag unset failed, '%s' is a "
- "nonexistent tag "), k)
+            LOG.error(
+                _("tag unset failed, '%s' is a nonexistent tag"), k
+            )
tagret += 1
if parsed_args.properties:
@@ -1237,35 +1454,365 @@ class UnsetImage(command.Command):
# pass modified properties object, so that SDK can figure
# out, what was changed inside
# NOTE: ping gtema to improve that in SDK
- new_props = kwargs.get('properties',
- image.get('properties').copy())
+ new_props = kwargs.get(
+ 'properties', image.get('properties').copy()
+ )
new_props.pop(k, None)
kwargs['properties'] = new_props
else:
- LOG.error(_("property unset failed, '%s' is a "
- "nonexistent property "), k)
+ LOG.error(
+ _(
+ "property unset failed, '%s' is a "
+ "nonexistent property "
+ ),
+ k,
+ )
propret += 1
# We must give to update a current image for the reference on what
# has changed
- image_client.update_image(
- image,
- **kwargs)
+ image_client.update_image(image, **kwargs)
tagtotal = len(parsed_args.tags)
proptotal = len(parsed_args.properties)
- if (tagret > 0 and propret > 0):
- msg = (_("Failed to unset %(tagret)s of %(tagtotal)s tags,"
- "Failed to unset %(propret)s of %(proptotal)s properties.")
- % {'tagret': tagret, 'tagtotal': tagtotal,
- 'propret': propret, 'proptotal': proptotal})
+ if tagret > 0 and propret > 0:
+            msg = _(
+                "Failed to unset %(tagret)s of %(tagtotal)s tags, "
+                "failed to unset %(propret)s of %(proptotal)s properties."
+ ) % {
+ 'tagret': tagret,
+ 'tagtotal': tagtotal,
+ 'propret': propret,
+ 'proptotal': proptotal,
+ }
raise exceptions.CommandError(msg)
elif tagret > 0:
- msg = (_("Failed to unset %(tagret)s of %(tagtotal)s tags.")
- % {'tagret': tagret, 'tagtotal': tagtotal})
+ msg = _("Failed to unset %(tagret)s of %(tagtotal)s tags.") % {
+ 'tagret': tagret,
+ 'tagtotal': tagtotal,
+ }
raise exceptions.CommandError(msg)
elif propret > 0:
- msg = (_("Failed to unset %(propret)s of %(proptotal)s"
- " properties.")
- % {'propret': propret, 'proptotal': proptotal})
+            msg = _(
+                "Failed to unset %(propret)s of %(proptotal)s properties."
+ ) % {'propret': propret, 'proptotal': proptotal}
+ raise exceptions.CommandError(msg)
+
+
+class StageImage(command.Command):
+ _description = _(
+ "Upload data for a specific image to staging.\n"
+ "This requires support for the interoperable image import process, "
+ "which was first introduced in Image API version 2.6 "
+ "(Glance 16.0.0 (Queens))"
+ )
+
+ def get_parser(self, prog_name):
+ parser = super().get_parser(prog_name)
+
+ parser.add_argument(
+ '--file',
+ metavar='<file>',
+ dest='filename',
+ help=_(
+ 'Local file that contains disk image to be uploaded. '
+ 'Alternatively, images can be passed via stdin.'
+ ),
+ )
+ # NOTE(stephenfin): glanceclient had a --size argument but it didn't do
+ # anything so we have chosen not to port this
+ parser.add_argument(
+ '--progress',
+ action='store_true',
+ default=False,
+ help=_(
+ 'Show upload progress bar '
+ '(ignored if passing data via stdin)'
+ ),
+ )
+ parser.add_argument(
+ 'image',
+ metavar='<image>',
+ help=_('Image to upload data for (name or ID)'),
+ )
+
+ return parser
+
+ def take_action(self, parsed_args):
+ image_client = self.app.client_manager.image
+
+ image = image_client.find_image(
+ parsed_args.image,
+ ignore_missing=False,
+ )
+ # open the file first to ensure any failures are handled before the
+ # image is created. Get the file name (if it is file, and not stdin)
+ # for easier further handling.
+ if parsed_args.filename:
+ try:
+ fp = open(parsed_args.filename, 'rb')
+ except FileNotFoundError:
+ raise exceptions.CommandError(
+ '%r is not a valid file' % parsed_args.filename,
+ )
+ else:
+ fp = get_data_from_stdin()
+
+ kwargs = {}
+
+ if parsed_args.progress and parsed_args.filename:
+ # NOTE(stephenfin): we only show a progress bar if the user
+ # requested it *and* we're reading from a file (not stdin)
+ filesize = os.path.getsize(parsed_args.filename)
+ if filesize is not None:
+ kwargs['data'] = progressbar.VerboseFileWrapper(fp, filesize)
+ else:
+ kwargs['data'] = fp
+ elif parsed_args.filename:
+ kwargs['filename'] = parsed_args.filename
+ elif fp:
+ kwargs['data'] = fp
+
+ image_client.stage_image(image, **kwargs)
+
+
+class ImportImage(command.ShowOne):
+ _description = _(
+ "Initiate the image import process.\n"
+ "This requires support for the interoperable image import process, "
+ "which was first introduced in Image API version 2.6 "
+ "(Glance 16.0.0 (Queens))"
+ )
+
+ def get_parser(self, prog_name):
+ parser = super().get_parser(prog_name)
+
+ parser.add_argument(
+ 'image',
+ metavar='<image>',
+ help=_('Image to initiate import process for (name or ID)'),
+ )
+ # TODO(stephenfin): Uncomment help text when we have this command
+ # implemented
+ parser.add_argument(
+ '--method',
+ metavar='<method>',
+ default='glance-direct',
+ dest='import_method',
+ choices=[
+ 'glance-direct',
+ 'web-download',
+ 'glance-download',
+ 'copy-image',
+ ],
+ help=_(
+ "Import method used for image import process. "
+ "Not all deployments will support all methods. "
+ # "Valid values can be retrieved with the 'image import "
+ # "methods' command. "
+ "The 'glance-direct' method (default) requires images be "
+ "first staged using the 'image-stage' command."
+ ),
+ )
+ parser.add_argument(
+ '--uri',
+ metavar='<uri>',
+ help=_(
+ "URI to download the external image "
+ "(only valid with the 'web-download' import method)"
+ ),
+ )
+ parser.add_argument(
+ '--remote-image',
+ metavar='<REMOTE_IMAGE>',
+            help=_(
+                "The ID of the image in the remote Glance to be imported "
+                "(only valid with the 'glance-download' import method)"
+ ),
+ )
+ parser.add_argument(
+ '--remote-region',
+ metavar='<REMOTE_GLANCE_REGION>',
+ help=_(
+ "The remote Glance region to download the image from "
+ "(only valid with the 'glance-download' import method)"
+ ),
+ )
+ parser.add_argument(
+ '--remote-service-interface',
+ metavar='<REMOTE_SERVICE_INTERFACE>',
+ help=_(
+ "The remote Glance service interface to use when importing "
+ "images "
+ "(only valid with the 'glance-download' import method)"
+ ),
+ )
+ stores_group = parser.add_mutually_exclusive_group()
+ stores_group.add_argument(
+ '--store',
+ metavar='<STORE>',
+ dest='stores',
+ nargs='*',
+ help=_(
+ "Backend store to upload image to "
+ "(specify multiple times to upload to multiple stores) "
+ "(either '--store' or '--all-stores' required with the "
+ "'copy-image' import method)"
+ ),
+ )
+ stores_group.add_argument(
+ '--all-stores',
+ help=_(
+ "Make image available to all stores "
+ "(either '--store' or '--all-stores' required with the "
+ "'copy-image' import method)"
+ ),
+ )
+ parser.add_argument(
+ '--allow-failure',
+ action='store_true',
+ dest='allow_failure',
+ default=True,
+ help=_(
+ 'When uploading to multiple stores, indicate that the import '
+                'should continue should any of the uploads fail. '
+ 'Only usable with --stores or --all-stores'
+ ),
+ )
+ parser.add_argument(
+ '--disallow-failure',
+            action='store_false',
+ dest='allow_failure',
+ default=True,
+ help=_(
+ 'When uploading to multiple stores, indicate that the import '
+ 'should be reverted should any of the uploads fail. '
+ 'Only usable with --stores or --all-stores'
+ ),
+ )
+ parser.add_argument(
+ '--wait',
+ action='store_true',
+ help=_('Wait for operation to complete'),
+ )
+ return parser
+
+ def take_action(self, parsed_args):
+ image_client = self.app.client_manager.image
+
+ try:
+ import_info = image_client.get_import_info()
+ except sdk_exceptions.ResourceNotFound:
+ msg = _(
+ 'The Image Import feature is not supported by this deployment'
+ )
+ raise exceptions.CommandError(msg)
+
+ import_methods = import_info.import_methods['value']
+
+ if parsed_args.import_method not in import_methods:
+ msg = _(
+ "The '%s' import method is not supported by this deployment. "
+ "Supported: %s"
+ )
+ raise exceptions.CommandError(
+ msg % (parsed_args.import_method, ', '.join(import_methods)),
+ )
+
+ if parsed_args.import_method == 'web-download':
+ if not parsed_args.uri:
+ msg = _(
+ "The '--uri' option is required when using "
+ "'--method=web-download'"
+ )
+ raise exceptions.CommandError(msg)
+ else:
+ if parsed_args.uri:
+ msg = _(
+ "The '--uri' option is only supported when using "
+ "'--method=web-download'"
+ )
+ raise exceptions.CommandError(msg)
+
+ if parsed_args.import_method == 'glance-download':
+ if not (parsed_args.remote_region and parsed_args.remote_image):
+                msg = _(
+                    "The '--remote-region' and '--remote-image' options are "
+                    "required when using '--method=glance-download'"
+ )
+ raise exceptions.CommandError(msg)
+ else:
+ if parsed_args.remote_region:
+ msg = _(
+ "The '--remote-region' option is only supported when "
+ "using '--method=glance-download'"
+ )
+ raise exceptions.CommandError(msg)
+
+ if parsed_args.remote_image:
+ msg = _(
+ "The '--remote-image' option is only supported when using "
+ "'--method=glance-download'"
+ )
+ raise exceptions.CommandError(msg)
+
+ if parsed_args.remote_service_interface:
+ msg = _(
+ "The '--remote-service-interface' option is only "
+ "supported when using '--method=glance-download'"
+ )
+ raise exceptions.CommandError(msg)
+
+ if parsed_args.import_method == 'copy-image':
+ if not (parsed_args.stores or parsed_args.all_stores):
+ msg = _(
+ "The '--stores' or '--all-stores' options are required "
+ "when using '--method=copy-image'"
+ )
+ raise exceptions.CommandError(msg)
+
+        image = image_client.find_image(
+            parsed_args.image,
+            ignore_missing=False,
+        )
+
+ if not image.container_format and not image.disk_format:
+ msg = _(
+ "The 'container_format' and 'disk_format' properties "
+ "must be set on an image before it can be imported"
+ )
raise exceptions.CommandError(msg)
+
+ if parsed_args.import_method == 'glance-direct':
+ if image.status != 'uploading':
+ msg = _(
+ "The 'glance-direct' import method can only be used with "
+ "an image in status 'uploading'"
+ )
+ raise exceptions.CommandError(msg)
+ elif parsed_args.import_method == 'web-download':
+ if image.status != 'queued':
+ msg = _(
+ "The 'web-download' import method can only be used with "
+ "an image in status 'queued'"
+ )
+ raise exceptions.CommandError(msg)
+ elif parsed_args.import_method == 'copy-image':
+ if image.status != 'active':
+ msg = _(
+ "The 'copy-image' import method can only be used with "
+ "an image in status 'active'"
+ )
+ raise exceptions.CommandError(msg)
+
+ image_client.import_image(
+ image,
+ method=parsed_args.import_method,
+ uri=parsed_args.uri,
+ remote_region=parsed_args.remote_region,
+ remote_image=parsed_args.remote_image,
+ remote_service_interface=parsed_args.remote_service_interface,
+ stores=parsed_args.stores,
+ all_stores=parsed_args.all_stores,
+ all_stores_must_succeed=not parsed_args.allow_failure,
+ )
+
+ info = _format_image(image)
+ return zip(*sorted(info.items()))
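
For orientation, the staging-then-import flow these two new commands drive, sketched against the SDK calls used above (the connection setup, image name and data file are assumptions; the 'glance-direct' method requires the data to be staged first):

    import openstack

    conn = openstack.connect()
    image = conn.image.find_image('example-image', ignore_missing=False)
    # Stage the local data for the image, then start the interoperable import
    with open('example-image.qcow2', 'rb') as fp:
        conn.image.stage_image(image, data=fp)
    conn.image.import_image(image, method='glance-direct')
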
diff --git a/openstackclient/image/v2/metadef_namespaces.py b/openstackclient/image/v2/metadef_namespaces.py
new file mode 100644
index 00000000..f09f2002
--- /dev/null
+++ b/openstackclient/image/v2/metadef_namespaces.py
@@ -0,0 +1,312 @@
+# Copyright 2012-2013 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+"""Image V2 Action Implementations"""
+
+import logging
+
+from osc_lib.cli import format_columns
+from osc_lib.command import command
+from osc_lib import exceptions
+from osc_lib import utils
+
+from openstackclient.i18n import _
+
+_formatters = {
+ 'tags': format_columns.ListColumn,
+}
+
+LOG = logging.getLogger(__name__)
+
+
+def _format_namespace(namespace):
+ info = {}
+
+ fields_to_show = [
+ 'created_at',
+ 'description',
+ 'display_name',
+ 'namespace',
+ 'owner',
+ 'protected',
+ 'schema',
+ 'visibility',
+ ]
+
+ namespace = namespace.to_dict(ignore_none=True, original_names=True)
+
+ # split out the usual key and the properties which are top-level
+ for key in namespace:
+ if key in fields_to_show:
+ info[key] = namespace.get(key)
+ elif key == "resource_type_associations":
+ info[key] = [resource_type['name']
+ for resource_type in namespace.get(key)]
+ elif key == 'properties':
+ info['properties'] = list(namespace.get(key).keys())
+
+ return info
+
+
+class CreateMetadefNameSpace(command.ShowOne):
+ _description = _("Create a metadef namespace")
+
+ def get_parser(self, prog_name):
+ parser = super().get_parser(prog_name)
+ parser.add_argument(
+ "namespace",
+ metavar="<namespace>",
+ help=_("New metadef namespace name"),
+ )
+ parser.add_argument(
+ "--display-name",
+ metavar="<display_name>",
+ help=_("A user-friendly name for the namespace."),
+ )
+ parser.add_argument(
+ "--description",
+ metavar="<description>",
+ help=_("A description of the namespace"),
+ )
+ visibility_group = parser.add_mutually_exclusive_group()
+ visibility_group.add_argument(
+ "--public",
+ action="store_const",
+ const="public",
+ dest="visibility",
+ help=_("Set namespace visibility 'public'"),
+ )
+ visibility_group.add_argument(
+ "--private",
+ action="store_const",
+ const="private",
+ dest="visibility",
+ help=_("Set namespace visibility 'private'"),
+ )
+ protected_group = parser.add_mutually_exclusive_group()
+ protected_group.add_argument(
+ "--protected",
+ action="store_const",
+ const=True,
+ dest="is_protected",
+ help=_("Prevent metadef namespace from being deleted"),
+ )
+ protected_group.add_argument(
+ "--unprotected",
+ action="store_const",
+ const=False,
+ dest="is_protected",
+ help=_("Allow metadef namespace to be deleted (default)"),
+ )
+ return parser
+
+ def take_action(self, parsed_args):
+ image_client = self.app.client_manager.image
+ filter_keys = [
+ 'namespace',
+ 'display_name',
+ 'description'
+ ]
+ kwargs = {}
+
+ for key in filter_keys:
+ argument = getattr(parsed_args, key, None)
+ if argument is not None:
+ kwargs[key] = argument
+
+ if parsed_args.is_protected is not None:
+ kwargs['protected'] = parsed_args.is_protected
+
+ if parsed_args.visibility is not None:
+ kwargs['visibility'] = parsed_args.visibility
+
+ data = image_client.create_metadef_namespace(**kwargs)
+
+ return zip(*sorted(data.items()))
+
+
+class DeleteMetadefNameSpace(command.Command):
+ _description = _("Delete metadef namespace")
+
+ def get_parser(self, prog_name):
+ parser = super().get_parser(prog_name)
+ parser.add_argument(
+ "namespace_name",
+ metavar="<namespace_name>",
+ nargs="+",
+ help=_("An identifier (a name) for the namespace"),
+ )
+ return parser
+
+ def take_action(self, parsed_args):
+ image_client = self.app.client_manager.image
+
+ result = 0
+ for i in parsed_args.namespace_name:
+ try:
+ namespace = image_client.get_metadef_namespace(i)
+ image_client.delete_metadef_namespace(namespace.id)
+ except Exception as e:
+ result += 1
+ LOG.error(_("Failed to delete namespace with name or "
+ "ID '%(namespace)s': %(e)s"),
+ {'namespace': i, 'e': e}
+ )
+
+ if result > 0:
+ total = len(parsed_args.namespace_name)
+ msg = (_("%(result)s of %(total)s namespace failed "
+ "to delete.") % {'result': result, 'total': total})
+ raise exceptions.CommandError(msg)
+
+
+class ListMetadefNameSpaces(command.Lister):
+ _description = _("List metadef namespaces")
+
+ def get_parser(self, prog_name):
+ parser = super().get_parser(prog_name)
+ parser.add_argument(
+ "--resource-types",
+ metavar="<resource_types>",
+ help=_("filter resource types"),
+ )
+ parser.add_argument(
+ "--visibility",
+ metavar="<visibility>",
+ help=_("filter on visibility"),
+ )
+ return parser
+
+ def take_action(self, parsed_args):
+ image_client = self.app.client_manager.image
+ filter_keys = ['resource_types', 'visibility']
+ kwargs = {}
+ for key in filter_keys:
+ argument = getattr(parsed_args, key, None)
+ if argument is not None:
+ kwargs[key] = argument
+ # List of namespace data received
+ data = list(image_client.metadef_namespaces(**kwargs))
+ columns = ['namespace']
+ column_headers = columns
+ return (
+ column_headers,
+ (utils.get_item_properties(
+ s,
+ columns,
+ formatters=_formatters,
+ ) for s in data)
+ )
+
+
+class SetMetadefNameSpace(command.Command):
+ _description = _("Set metadef namespace properties")
+
+ def get_parser(self, prog_name):
+ parser = super().get_parser(prog_name)
+ parser.add_argument(
+ "namespace",
+ metavar="<namespace>",
+ help=_("Namespace (name) for the namespace"),
+ )
+ parser.add_argument(
+ "--display-name",
+ metavar="<display_name>",
+ help=_("Set a user-friendly name for the namespace."),
+ )
+ parser.add_argument(
+ "--description",
+ metavar="<description>",
+ help=_("Set the description of the namespace"),
+ )
+ visibility_group = parser.add_mutually_exclusive_group()
+ visibility_group.add_argument(
+ "--public",
+ action="store_const",
+ const="public",
+ dest="visibility",
+ help=_("Set namespace visibility 'public'"),
+ )
+ visibility_group.add_argument(
+ "--private",
+ action="store_const",
+ const="private",
+ dest="visibility",
+ help=_("Set namespace visibility 'private'"),
+ )
+ protected_group = parser.add_mutually_exclusive_group()
+ protected_group.add_argument(
+ "--protected",
+ action="store_const",
+ const=True,
+ dest="is_protected",
+ help=_("Prevent metadef namespace from being deleted"),
+ )
+ protected_group.add_argument(
+ "--unprotected",
+ action="store_const",
+ const=False,
+ dest="is_protected",
+ help=_("Allow metadef namespace to be deleted (default)"),
+ )
+ return parser
+
+ def take_action(self, parsed_args):
+ image_client = self.app.client_manager.image
+
+ namespace = parsed_args.namespace
+
+ filter_keys = [
+ 'namespace',
+ 'display_name',
+ 'description'
+ ]
+ kwargs = {}
+
+ for key in filter_keys:
+ argument = getattr(parsed_args, key, None)
+ if argument is not None:
+ kwargs[key] = argument
+
+ if parsed_args.is_protected is not None:
+ kwargs['protected'] = parsed_args.is_protected
+
+ if parsed_args.visibility is not None:
+ kwargs['visibility'] = parsed_args.visibility
+
+ image_client.update_metadef_namespace(namespace, **kwargs)
+
+
+class ShowMetadefNameSpace(command.ShowOne):
+ _description = _("Show a metadef namespace")
+
+ def get_parser(self, prog_name):
+ parser = super().get_parser(prog_name)
+ parser.add_argument(
+ "namespace_name",
+ metavar="<namespace_name>",
+ help=_("Namespace (name) for the namespace"),
+ )
+ return parser
+
+ def take_action(self, parsed_args):
+ image_client = self.app.client_manager.image
+
+ namespace_name = parsed_args.namespace_name
+
+ data = image_client.get_metadef_namespace(namespace_name)
+ info = _format_namespace(data)
+
+ return zip(*sorted(info.items()))
diff --git a/openstackclient/image/v2/task.py b/openstackclient/image/v2/task.py
new file mode 100644
index 00000000..924eaaf1
--- /dev/null
+++ b/openstackclient/image/v2/task.py
@@ -0,0 +1,179 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from osc_lib.cli import format_columns
+from osc_lib.command import command
+from osc_lib import utils
+
+from openstackclient.i18n import _
+
+_formatters = {
+ 'tags': format_columns.ListColumn,
+}
+
+
+def _format_task(task):
+ """Format an task to make it more consistent with OSC operations."""
+
+ info = {}
+ properties = {}
+
+ # the only fields we're not including are "links", "tags" and the properties
+ fields_to_show = [
+ 'created_at',
+ 'expires_at',
+ 'id',
+ 'input',
+ 'message',
+ 'owner_id',
+ 'result',
+ 'status',
+ 'type',
+ 'updated_at',
+ ]
+
+ # split out the usual key and the properties which are top-level
+ for field in fields_to_show:
+ info[field] = task.get(field)
+
+ for key in task:
+ if key in fields_to_show:
+ continue
+
+ if key in {'location', 'name', 'schema'}:
+ continue
+
+ properties[key] = task.get(key)
+
+ # add properties back into the dictionary as a top-level key
+ info['properties'] = format_columns.DictColumn(properties)
+
+ return info
+
+
+class ShowTask(command.ShowOne):
+ _description = _('Display task details')
+
+ def get_parser(self, prog_name):
+ parser = super().get_parser(prog_name)
+
+ parser.add_argument(
+ 'task',
+ metavar='<Task ID>',
+ help=_('Task to display (ID)'),
+ )
+
+ return parser
+
+ def take_action(self, parsed_args):
+ image_client = self.app.client_manager.image
+
+ task = image_client.get_task(parsed_args.task)
+ info = _format_task(task)
+
+ return zip(*sorted(info.items()))
+
+
+class ListTask(command.Lister):
+ _description = _('List tasks')
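+ # Illustrative invocation, assuming this lands under an 'image task'
+ # command group (the actual entry point name is defined elsewhere):
+ #
+ # openstack image task list --status success --limit 10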
+
+ def get_parser(self, prog_name):
+ parser = super().get_parser(prog_name)
+
+ parser.add_argument(
+ '--sort-key',
+ metavar='<key>[:<field>]',
+ help=_(
+ 'Sorts the response by one of the following attributes: '
+ 'created_at, expires_at, id, status, type, updated_at. '
+ '(default is created_at) '
+ '(multiple keys and directions can be specified separated '
+ 'by comma)'
+ ),
+ )
+ parser.add_argument(
+ '--sort-dir',
+ metavar='<key>[:<direction>]',
+ help=_(
+ 'Sort output by selected keys and directions (asc or desc) '
+ '(default: name:desc) '
+ '(multiple keys and directions can be specified separated '
+ 'by comma)'
+ ),
+ )
+ parser.add_argument(
+ '--limit',
+ metavar='<num-tasks>',
+ type=int,
+ help=_('Maximum number of tasks to display.'),
+ )
+ parser.add_argument(
+ '--marker',
+ metavar='<task>',
+ help=_(
+ 'The last task of the previous page. '
+ 'Display list of tasks after marker. '
+ 'Display all tasks if not specified. '
+ '(name or ID)'
+ ),
+ )
+ parser.add_argument(
+ '--type',
+ metavar='<type>',
+ choices=['import'],
+ help=_('Filters the response by a task type.'),
+ )
+ parser.add_argument(
+ '--status',
+ metavar='<status>',
+ choices=[
+ 'pending',
+ 'processing',
+ 'success',
+ 'failure',
+ ],
+ help=_('Filter tasks based on status.'),
+ )
+
+ return parser
+
+ def take_action(self, parsed_args):
+ image_client = self.app.client_manager.image
+
+ columns = ('id', 'type', 'status', 'owner_id')
+ column_headers = ('ID', 'Type', 'Status', 'Owner')
+
+ kwargs = {}
+ copy_attrs = {
+ 'sort_key',
+ 'sort_dir',
+ 'limit',
+ 'marker',
+ 'type',
+ 'status',
+ }
+ for attr in copy_attrs:
+ val = getattr(parsed_args, attr, None)
+ if val is not None:
+ # Only include a value in kwargs for attributes that are
+ # actually present on the command line
+ kwargs[attr] = val
+
+ data = image_client.tasks(**kwargs)
+
+ return (
+ column_headers,
+ (
+ utils.get_item_properties(s, columns, formatters=_formatters)
+ for s in data
+ ),
+ )
diff --git a/openstackclient/network/v2/floating_ip.py b/openstackclient/network/v2/floating_ip.py
index 4c03074d..22096bc4 100644
--- a/openstackclient/network/v2/floating_ip.py
+++ b/openstackclient/network/v2/floating_ip.py
@@ -28,7 +28,7 @@ _formatters = {
def _get_network_columns(item):
column_map = {}
- hidden_columns = ['location']
+ hidden_columns = ['location', 'tenant_id']
return utils.get_osc_show_columns_for_sdk_resource(
item,
column_map,
diff --git a/openstackclient/network/v2/floating_ip_port_forwarding.py b/openstackclient/network/v2/floating_ip_port_forwarding.py
index cd71c05a..0156af8e 100644
--- a/openstackclient/network/v2/floating_ip_port_forwarding.py
+++ b/openstackclient/network/v2/floating_ip_port_forwarding.py
@@ -82,7 +82,7 @@ def validate_port(port):
def _get_columns(item):
column_map = {}
- hidden_columns = ['location']
+ hidden_columns = ['location', 'tenant_id']
return utils.get_osc_show_columns_for_sdk_resource(
item,
column_map,
diff --git a/openstackclient/network/v2/l3_conntrack_helper.py b/openstackclient/network/v2/l3_conntrack_helper.py
index 1de5b785..df153dd8 100644
--- a/openstackclient/network/v2/l3_conntrack_helper.py
+++ b/openstackclient/network/v2/l3_conntrack_helper.py
@@ -26,7 +26,7 @@ LOG = logging.getLogger(__name__)
def _get_columns(item):
column_map = {}
- hidden_columns = ['location']
+ hidden_columns = ['location', 'tenant_id']
return utils.get_osc_show_columns_for_sdk_resource(
item,
column_map,
diff --git a/openstackclient/network/v2/local_ip.py b/openstackclient/network/v2/local_ip.py
index 08735553..e8fb5f8a 100644
--- a/openstackclient/network/v2/local_ip.py
+++ b/openstackclient/network/v2/local_ip.py
@@ -29,7 +29,7 @@ LOG = logging.getLogger(__name__)
def _get_columns(item):
column_map = {}
- hidden_columns = ['location']
+ hidden_columns = ['location', 'tenant_id']
return utils.get_osc_show_columns_for_sdk_resource(
item, column_map, hidden_columns)
diff --git a/openstackclient/network/v2/local_ip_association.py b/openstackclient/network/v2/local_ip_association.py
index 9e123f05..4cd7707a 100644
--- a/openstackclient/network/v2/local_ip_association.py
+++ b/openstackclient/network/v2/local_ip_association.py
@@ -29,7 +29,7 @@ LOG = logging.getLogger(__name__)
def _get_columns(item):
column_map = {}
- hidden_columns = ['location', 'name', 'id']
+ hidden_columns = ['location', 'name', 'id', 'tenant_id']
return utils.get_osc_show_columns_for_sdk_resource(
item, column_map, hidden_columns)
diff --git a/openstackclient/network/v2/ndp_proxy.py b/openstackclient/network/v2/ndp_proxy.py
new file mode 100644
index 00000000..25b287f3
--- /dev/null
+++ b/openstackclient/network/v2/ndp_proxy.py
@@ -0,0 +1,269 @@
+# Copyright (c) 2020 Troila.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Router NDP proxy action implementations"""
+import logging
+
+from osc_lib.command import command
+from osc_lib import exceptions
+from osc_lib import utils
+
+from openstackclient.i18n import _
+from openstackclient.identity import common as identity_common
+
+
+LOG = logging.getLogger(__name__)
+
+
+def _get_columns(item):
+ column_map = {}
+ hidden_columns = ['location']
+ return utils.get_osc_show_columns_for_sdk_resource(
+ item, column_map, hidden_columns)
+
+
+class CreateNDPProxy(command.ShowOne):
+ _description = _("Create NDP proxy")
+
+ def get_parser(self, prog_name):
+ parser = super().get_parser(prog_name)
+ parser.add_argument(
+ 'router',
+ metavar='<router>',
+ help=_("The name or ID of a router"))
+ parser.add_argument(
+ '--name',
+ metavar='<name>',
+ help=_("New NDP proxy name")
+ )
+ parser.add_argument(
+ '--port',
+ metavar='<port>',
+ required=True,
+ help=_("The name or ID of the network port associated "
+ "to the NDP proxy"))
+ parser.add_argument(
+ '--ip-address',
+ metavar='<ip-address>',
+ help=_("The IPv6 address that is to be proxied. In case the port "
+ "has multiple addresses assigned, use this option to "
+ "select which address is to be used."))
+ parser.add_argument(
+ '--description',
+ metavar='<description>',
+ help=_("A text to describe/contextualize the use of the "
+ "NDP proxy configuration")
+ )
+
+ return parser
+
+ def take_action(self, parsed_args):
+ attrs = {'name': parsed_args.name}
+ client = self.app.client_manager.network
+ router = client.find_router(
+ parsed_args.router,
+ ignore_missing=False,
+ )
+ attrs['router_id'] = router.id
+
+ if parsed_args.ip_address:
+ attrs['ip_address'] = parsed_args.ip_address
+
+ port = client.find_port(parsed_args.port,
+ ignore_missing=False)
+ attrs['port_id'] = port.id
+
+ if parsed_args.description is not None:
+ attrs['description'] = parsed_args.description
+
+ obj = client.create_ndp_proxy(**attrs)
+ display_columns, columns = _get_columns(obj)
+ data = utils.get_item_properties(obj, columns)
+ return (display_columns, data)
+
+
+class DeleteNDPProxy(command.Command):
+ _description = _("Delete NDP proxy")
+
+ def get_parser(self, prog_name):
+ parser = super().get_parser(prog_name)
+ parser.add_argument(
+ 'ndp_proxy',
+ nargs="+",
+ metavar="<ndp-proxy>",
+ help=_("NDP proxy(s) to delete (name or ID)")
+ )
+ return parser
+
+ def take_action(self, parsed_args):
+ client = self.app.client_manager.network
+ result = 0
+
+ for ndp_proxy in parsed_args.ndp_proxy:
+ try:
+ obj = client.find_ndp_proxy(ndp_proxy, ignore_missing=False)
+ client.delete_ndp_proxy(obj)
+ except Exception as e:
+ result += 1
+ LOG.error(_("Failed to delete NDP proxy "
+ "'%(ndp_proxy)s': %(e)s"),
+ {'ndp_proxy': ndp_proxy, 'e': e})
+ if result > 0:
+ total = len(parsed_args.ndp_proxy)
+ msg = (_("%(result)s of %(total)s NDP Proxy failed "
+ "to delete.") % {'result': result, 'total': total})
+ raise exceptions.CommandError(msg)
+
+
+class ListNDPProxy(command.Lister):
+ _description = _("List NDP proxies")
+
+ def get_parser(self, prog_name):
+ parser = super().get_parser(prog_name)
+ parser.add_argument(
+ '--router',
+ metavar='<router>',
+ help=_("List only NDP proxies belong to this router (name or ID)")
+ )
+ parser.add_argument(
+ '--port',
+ metavar='<port>',
+ help=_("List only NDP proxies assocate to this port (name or ID)")
+ )
+ parser.add_argument(
+ '--ip-address',
+ metavar='<ip-address>',
+ help=_("List only NDP proxies according to their IPv6 address")
+ )
+ parser.add_argument(
+ '--project',
+ metavar='<project>',
+ help=_("List NDP proxies according to their project (name or ID)")
+ )
+ parser.add_argument(
+ '--name',
+ metavar='<name>',
+ help=_("List NDP proxies according to their name")
+ )
+
+ identity_common.add_project_domain_option_to_parser(parser)
+
+ return parser
+
+ def take_action(self, parsed_args):
+ client = self.app.client_manager.network
+ identity_client = self.app.client_manager.identity
+
+ columns = (
+ 'id',
+ 'name',
+ 'router_id',
+ 'ip_address',
+ 'project_id',
+ )
+ headers = (
+ 'ID',
+ 'Name',
+ 'Router ID',
+ 'IP Address',
+ 'Project',
+ )
+
+ query = {}
+
+ if parsed_args.router:
+ router = client.find_router(parsed_args.router,
+ ignore_missing=False)
+ query['router_id'] = router.id
+ if parsed_args.port:
+ port = client.find_port(parsed_args.port,
+ ignore_missing=False)
+ query['port_id'] = port.id
+ if parsed_args.ip_address is not None:
+ query['ip_address'] = parsed_args.ip_address
+ if parsed_args.project:
+ project_id = identity_common.find_project(
+ identity_client,
+ parsed_args.project,
+ parsed_args.project_domain,
+ ).id
+ query['project_id'] = project_id
+ if parsed_args.name:
+ query['name'] = parsed_args.name
+
+ data = client.ndp_proxies(**query)
+
+ return (headers,
+ (utils.get_item_properties(
+ s, columns,
+ formatters={},
+ ) for s in data))
+
+
+class SetNDPProxy(command.Command):
+ _description = _("Set NDP proxy properties")
+
+ def get_parser(self, prog_name):
+ parser = super().get_parser(prog_name)
+ parser.add_argument(
+ 'ndp_proxy',
+ metavar='<ndp-proxy>',
+ help=_("The ID or name of the NDP proxy to update")
+ )
+ parser.add_argument(
+ '--name',
+ metavar='<name>',
+ help=_("Set NDP proxy name")
+ )
+ parser.add_argument(
+ '--description',
+ metavar='<description>',
+ help=_("A text to describe/contextualize the use of "
+ "the NDP proxy configuration")
+ )
+ return parser
+
+ def take_action(self, parsed_args):
+ client = self.app.client_manager.network
+ attrs = {}
+ if parsed_args.description is not None:
+ attrs['description'] = parsed_args.description
+ if parsed_args.name is not None:
+ attrs['name'] = parsed_args.name
+
+ obj = client.find_ndp_proxy(
+ parsed_args.ndp_proxy, ignore_missing=False)
+ client.update_ndp_proxy(obj, **attrs)
+
+
+class ShowNDPProxy(command.ShowOne):
+ _description = _("Display NDP proxy details")
+
+ def get_parser(self, prog_name):
+ parser = super().get_parser(prog_name)
+ parser.add_argument(
+ 'ndp_proxy',
+ metavar="<ndp-proxy>",
+ help=_("The ID or name of the NDP proxy")
+ )
+ return parser
+
+ def take_action(self, parsed_args):
+ client = self.app.client_manager.network
+ obj = client.find_ndp_proxy(parsed_args.ndp_proxy,
+ ignore_missing=False)
+ display_columns, columns = _get_columns(obj)
+ data = utils.get_item_properties(obj, columns)
+ return (display_columns, data)
diff --git a/openstackclient/network/v2/network.py b/openstackclient/network/v2/network.py
index a239e0fe..54e2821c 100644
--- a/openstackclient/network/v2/network.py
+++ b/openstackclient/network/v2/network.py
@@ -60,6 +60,6 @@ def _get_columns_network(item):
'ipv6_address_scope_id': 'ipv6_address_scope',
'tags': 'tags',
}
- hidden_columns = ['location']
+ hidden_columns = ['location', 'tenant_id']
return utils.get_osc_show_columns_for_sdk_resource(
item,
@@ -270,14 +271,16 @@ class CreateNetwork(common.NetworkAndComputeShowOne,
'--external',
action='store_true',
help=self.enhance_help_neutron(
- _("Set this network as an external network "
+ _("The network has an external routing facility that's not "
+ "managed by Neutron and can be used as in: "
+ "openstack router set --external-gateway NETWORK "
"(external-net extension required)"))
)
external_router_grp.add_argument(
'--internal',
action='store_true',
help=self.enhance_help_neutron(
- _("Set this network as an internal network (default)"))
+ _("Opposite of '--external' (default)"))
)
default_router_grp = parser.add_mutually_exclusive_group()
default_router_grp.add_argument(
@@ -664,12 +667,12 @@ class SetNetwork(common.NeutronCommandWithExtraArgs):
)
parser.add_argument(
'--description',
- metavar="<description",
+ metavar="<description>",
help=_("Set network description")
)
parser.add_argument(
'--mtu',
- metavar="<mtu",
+ metavar="<mtu>",
help=_("Set network mtu")
)
port_security_group = parser.add_mutually_exclusive_group()
@@ -689,13 +692,15 @@ class SetNetwork(common.NeutronCommandWithExtraArgs):
external_router_grp.add_argument(
'--external',
action='store_true',
- help=_("Set this network as an external network "
+ help=_("The network has an external routing facility that's not "
+ "managed by Neutron and can be used as in: "
+ "openstack router set --external-gateway NETWORK "
"(external-net extension required)")
)
external_router_grp.add_argument(
'--internal',
action='store_true',
- help=_("Set this network as an internal network")
+ help=_("Opposite of '--external'")
)
default_router_grp = parser.add_mutually_exclusive_group()
default_router_grp.add_argument(
diff --git a/openstackclient/network/v2/network_agent.py b/openstackclient/network/v2/network_agent.py
index 3024d026..f67f67bd 100644
--- a/openstackclient/network/v2/network_agent.py
+++ b/openstackclient/network/v2/network_agent.py
@@ -50,7 +50,7 @@ def _get_network_columns(item):
'is_admin_state_up': 'admin_state_up',
'is_alive': 'alive',
}
- hidden_columns = ['location']
+ hidden_columns = ['location', 'name', 'tenant_id']
return utils.get_osc_show_columns_for_sdk_resource(
item,
column_map,
@@ -168,11 +168,11 @@ class ListNetworkAgent(command.Lister):
metavar='<agent-type>',
choices=["bgp", "dhcp", "open-vswitch", "linux-bridge", "ofa",
"l3", "loadbalancer", "metering", "metadata", "macvtap",
- "nic"],
+ "nic", "baremetal"],
help=_("List only agents with the specified agent type. "
"The supported agent types are: bgp, dhcp, open-vswitch, "
"linux-bridge, ofa, l3, loadbalancer, metering, "
- "metadata, macvtap, nic.")
+ "metadata, macvtap, nic, baremetal.")
)
parser.add_argument(
'--host',
@@ -231,7 +231,8 @@ class ListNetworkAgent(command.Lister):
'metering': 'Metering agent',
'metadata': 'Metadata agent',
'macvtap': 'Macvtap agent',
- 'nic': 'NIC Switch agent'
+ 'nic': 'NIC Switch agent',
+ 'baremetal': 'Baremetal Node'
}
filters = {}
diff --git a/openstackclient/network/v2/network_flavor.py b/openstackclient/network/v2/network_flavor.py
index 862155ce..864184c0 100644
--- a/openstackclient/network/v2/network_flavor.py
+++ b/openstackclient/network/v2/network_flavor.py
@@ -31,7 +31,7 @@ def _get_columns(item):
'is_enabled': 'enabled',
}
- hidden_columns = ['location']
+ hidden_columns = ['location', 'tenant_id']
return utils.get_osc_show_columns_for_sdk_resource(
item,
column_map,
diff --git a/openstackclient/network/v2/network_flavor_profile.py b/openstackclient/network/v2/network_flavor_profile.py
index 719f955c..66c6dcff 100644
--- a/openstackclient/network/v2/network_flavor_profile.py
+++ b/openstackclient/network/v2/network_flavor_profile.py
@@ -29,7 +29,7 @@ def _get_columns(item):
'is_enabled': 'enabled',
}
- hidden_columns = ['location']
+ hidden_columns = ['location', 'name', 'tenant_id']
return utils.get_osc_show_columns_for_sdk_resource(
item,
column_map,
@@ -166,7 +166,7 @@ class ListNetworkFlavorProfile(command.Lister):
'id',
'driver',
'is_enabled',
- 'metainfo',
+ 'meta_info',
'description',
)
column_headers = (
diff --git a/openstackclient/network/v2/network_meter.py b/openstackclient/network/v2/network_meter.py
index b7b77fb1..99b0bdd4 100644
--- a/openstackclient/network/v2/network_meter.py
+++ b/openstackclient/network/v2/network_meter.py
@@ -30,7 +30,7 @@ def _get_columns(item):
column_map = {
'is_shared': 'shared',
}
- hidden_columns = ['location']
+ hidden_columns = ['location', 'tenant_id']
return utils.get_osc_show_columns_for_sdk_resource(
item,
column_map,
diff --git a/openstackclient/network/v2/network_meter_rule.py b/openstackclient/network/v2/network_meter_rule.py
index 0f427275..2c50e5a6 100644
--- a/openstackclient/network/v2/network_meter_rule.py
+++ b/openstackclient/network/v2/network_meter_rule.py
@@ -28,7 +28,7 @@ LOG = logging.getLogger(__name__)
def _get_columns(item):
column_map = {}
- hidden_columns = ['location']
+ hidden_columns = ['location', 'tenant_id']
return utils.get_osc_show_columns_for_sdk_resource(
item,
column_map,
diff --git a/openstackclient/network/v2/network_qos_policy.py b/openstackclient/network/v2/network_qos_policy.py
index bc257901..d77e5db9 100644
--- a/openstackclient/network/v2/network_qos_policy.py
+++ b/openstackclient/network/v2/network_qos_policy.py
@@ -30,7 +30,7 @@ def _get_columns(item):
column_map = {
'is_shared': 'shared',
}
- hidden_columns = ['location']
+ hidden_columns = ['location', 'tenant_id']
return utils.get_osc_show_columns_for_sdk_resource(
item,
column_map,
diff --git a/openstackclient/network/v2/network_qos_rule.py b/openstackclient/network/v2/network_qos_rule.py
index a4129b83..cb2d2339 100644
--- a/openstackclient/network/v2/network_qos_rule.py
+++ b/openstackclient/network/v2/network_qos_rule.py
@@ -25,16 +25,20 @@ from openstackclient.network import common
RULE_TYPE_BANDWIDTH_LIMIT = 'bandwidth-limit'
RULE_TYPE_DSCP_MARKING = 'dscp-marking'
RULE_TYPE_MINIMUM_BANDWIDTH = 'minimum-bandwidth'
+RULE_TYPE_MINIMUM_PACKET_RATE = 'minimum-packet-rate'
MANDATORY_PARAMETERS = {
RULE_TYPE_MINIMUM_BANDWIDTH: {'min_kbps', 'direction'},
+ RULE_TYPE_MINIMUM_PACKET_RATE: {'min_kpps', 'direction'},
RULE_TYPE_DSCP_MARKING: {'dscp_mark'},
RULE_TYPE_BANDWIDTH_LIMIT: {'max_kbps'}}
OPTIONAL_PARAMETERS = {
RULE_TYPE_MINIMUM_BANDWIDTH: set(),
+ RULE_TYPE_MINIMUM_PACKET_RATE: set(),
RULE_TYPE_DSCP_MARKING: set(),
RULE_TYPE_BANDWIDTH_LIMIT: {'direction', 'max_burst_kbps'}}
DIRECTION_EGRESS = 'egress'
DIRECTION_INGRESS = 'ingress'
+DIRECTION_ANY = 'any'
DSCP_VALID_MARKS = [0, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32,
34, 36, 38, 40, 46, 48, 56]
@@ -47,7 +51,7 @@ ACTION_SHOW = 'get'
def _get_columns(item):
column_map = {}
- hidden_columns = ['location']
+ hidden_columns = ['location', 'tenant_id']
return utils.get_osc_show_columns_for_sdk_resource(
item,
column_map,
@@ -98,10 +102,20 @@ def _get_attrs(network_client, parsed_args, is_create=False):
attrs['dscp_mark'] = parsed_args.dscp_mark
if parsed_args.min_kbps is not None:
attrs['min_kbps'] = parsed_args.min_kbps
+ if parsed_args.min_kpps is not None:
+ attrs['min_kpps'] = parsed_args.min_kpps
if parsed_args.ingress:
- attrs['direction'] = 'ingress'
+ attrs['direction'] = DIRECTION_INGRESS
if parsed_args.egress:
- attrs['direction'] = 'egress'
+ attrs['direction'] = DIRECTION_EGRESS
+ if parsed_args.any:
+ if rule_type == RULE_TYPE_MINIMUM_PACKET_RATE:
+ attrs['direction'] = DIRECTION_ANY
+ else:
+ msg = (_('Direction "any" can only be used with '
+ '%(rule_type_min_pps)s rule type') %
+ {'rule_type_min_pps': RULE_TYPE_MINIMUM_PACKET_RATE})
+ raise exceptions.CommandError(msg)
_check_type_parameters(attrs, rule_type, is_create)
return attrs
@@ -160,6 +174,13 @@ def _add_rule_arguments(parser):
type=int,
help=_('Minimum guaranteed bandwidth in kbps')
)
+ parser.add_argument(
+ '--min-kpps',
+ dest='min_kpps',
+ metavar='<min-kpps>',
+ type=int,
+ help=_('Minimum guaranteed packet rate in kpps')
+ )
direction_group = parser.add_mutually_exclusive_group()
direction_group.add_argument(
'--ingress',
@@ -171,6 +192,12 @@ def _add_rule_arguments(parser):
action='store_true',
help=_("Egress traffic direction from the project point of view")
)
+ direction_group.add_argument(
+ '--any',
+ action='store_true',
+ help=_("Any traffic direction from the project point of view. Can be "
+ "used only with minimum packet rate rule.")
+ )
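+
+ # Illustrative invocation for the new rule type (assuming the existing
+ # 'network qos rule create' command, with the QoS policy as the
+ # positional argument):
+ #
+ # openstack network qos rule create --type minimum-packet-rate \
+ # --min-kpps 1000 --any QOS_POLICY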
class CreateNetworkQosRule(command.ShowOne,
@@ -190,6 +217,7 @@ class CreateNetworkQosRule(command.ShowOne,
metavar='<type>',
required=True,
choices=[RULE_TYPE_MINIMUM_BANDWIDTH,
+ RULE_TYPE_MINIMUM_PACKET_RATE,
RULE_TYPE_DSCP_MARKING,
RULE_TYPE_BANDWIDTH_LIMIT],
help=(_('QoS rule type (%s)') %
@@ -200,10 +228,10 @@ class CreateNetworkQosRule(command.ShowOne,
def take_action(self, parsed_args):
network_client = self.app.client_manager.network
- attrs = _get_attrs(network_client, parsed_args, is_create=True)
- attrs.update(
- self._parse_extra_properties(parsed_args.extra_properties))
try:
+ attrs = _get_attrs(network_client, parsed_args, is_create=True)
+ attrs.update(
+ self._parse_extra_properties(parsed_args.extra_properties))
obj = _rule_action_call(
network_client, ACTION_CREATE, parsed_args.type)(
attrs.pop('qos_policy_id'), **attrs)
@@ -270,6 +298,7 @@ class ListNetworkQosRule(command.Lister):
'max_kbps',
'max_burst_kbps',
'min_kbps',
+ 'min_kpps',
'dscp_mark',
'direction',
)
@@ -280,6 +309,7 @@ class ListNetworkQosRule(command.Lister):
'Max Kbps',
'Max Burst Kbits',
'Min Kbps',
+ 'Min Kpps',
'DSCP mark',
'Direction',
)
diff --git a/openstackclient/network/v2/network_qos_rule_type.py b/openstackclient/network/v2/network_qos_rule_type.py
index 1bcfda82..3f4f6a19 100644
--- a/openstackclient/network/v2/network_qos_rule_type.py
+++ b/openstackclient/network/v2/network_qos_rule_type.py
@@ -24,14 +24,31 @@ def _get_columns(item):
"type": "rule_type_name",
"drivers": "drivers",
}
- invisible_columns = ["id", "location", "name"]
+ hidden_columns = ["id", "location", "name", 'tenant_id']
return utils.get_osc_show_columns_for_sdk_resource(
- item, column_map, invisible_columns)
+ item, column_map, hidden_columns)
class ListNetworkQosRuleType(command.Lister):
_description = _("List QoS rule types")
+ def get_parser(self, prog_name):
+ parser = super().get_parser(prog_name)
+ supported = parser.add_mutually_exclusive_group()
+ supported.add_argument(
+ '--all-supported',
+ action='store_true',
+ help=_("List all the QoS rule types supported by any loaded "
+ "mechanism drivers (the union of all sets of supported "
+ "rules)")
+ )
+ supported.add_argument(
+ '--all-rules',
+ action='store_true',
+ help=_("List all QoS rule types implemented in Neutron QoS driver")
+ )
+ return parser
+
def take_action(self, parsed_args):
client = self.app.client_manager.network
columns = (
@@ -40,7 +57,13 @@ class ListNetworkQosRuleType(command.Lister):
column_headers = (
'Type',
)
- data = client.qos_rule_types()
+
+ args = {}
+ if parsed_args.all_supported:
+ args['all_supported'] = True
+ elif parsed_args.all_rules:
+ args['all_rules'] = True
+ data = client.qos_rule_types(**args)
return (column_headers,
(utils.get_item_properties(
diff --git a/openstackclient/network/v2/network_rbac.py b/openstackclient/network/v2/network_rbac.py
index 00667395..fa4fca7c 100644
--- a/openstackclient/network/v2/network_rbac.py
+++ b/openstackclient/network/v2/network_rbac.py
@@ -30,7 +30,7 @@ def _get_columns(item):
column_map = {
'target_tenant': 'target_project_id',
}
- hidden_columns = ['location']
+ hidden_columns = ['location', 'name', 'tenant_id']
return utils.get_osc_show_columns_for_sdk_resource(
item,
column_map,
@@ -218,6 +218,11 @@ class ListNetworkRBAC(command.Lister):
'action ("access_as_external" or "access_as_shared")')
)
parser.add_argument(
+ '--target-project',
+ metavar='<target-project>',
+ help=_('List network RBAC policies for a specific target project')
+ )
+ parser.add_argument(
'--long',
action='store_true',
default=False,
@@ -247,6 +252,16 @@ class ListNetworkRBAC(command.Lister):
query['object_type'] = parsed_args.type
if parsed_args.action is not None:
query['action'] = parsed_args.action
+ if parsed_args.target_project is not None:
+ project_id = "*"
+
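+ # '*' targets all projects and is passed through as-is; any other
+ # value is resolved to a concrete project ID first.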
+ if parsed_args.target_project != "*":
+ identity_client = self.app.client_manager.identity
+ project_id = identity_common.find_project(
+ identity_client,
+ parsed_args.target_project,
+ ).id
+ query['target_project_id'] = project_id
data = client.rbac_policies(**query)
diff --git a/openstackclient/network/v2/network_segment.py b/openstackclient/network/v2/network_segment.py
index 0f64bd86..c6c88e30 100644
--- a/openstackclient/network/v2/network_segment.py
+++ b/openstackclient/network/v2/network_segment.py
@@ -27,7 +27,7 @@ LOG = logging.getLogger(__name__)
def _get_columns(item):
column_map = {}
- hidden_columns = ['location']
+ hidden_columns = ['location', 'tenant_id']
return utils.get_osc_show_columns_for_sdk_resource(
item,
column_map,
diff --git a/openstackclient/network/v2/network_segment_range.py b/openstackclient/network/v2/network_segment_range.py
index a95adb0a..1291d9d8 100644
--- a/openstackclient/network/v2/network_segment_range.py
+++ b/openstackclient/network/v2/network_segment_range.py
@@ -33,7 +33,7 @@ LOG = logging.getLogger(__name__)
def _get_columns(item):
column_map = {}
- hidden_columns = ['location']
+ hidden_columns = ['location', 'tenant_id']
return utils.get_osc_show_columns_for_sdk_resource(
item,
column_map,
diff --git a/openstackclient/network/v2/network_trunk.py b/openstackclient/network/v2/network_trunk.py
new file mode 100644
index 00000000..c5f62901
--- /dev/null
+++ b/openstackclient/network/v2/network_trunk.py
@@ -0,0 +1,402 @@
+# Copyright 2016 ZTE Corporation.
+# All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+"""Network trunk and subports action implementations"""
+import logging
+
+from cliff import columns as cliff_columns
+from osc_lib.cli import format_columns
+from osc_lib.cli import identity as identity_utils
+from osc_lib.cli import parseractions
+from osc_lib.command import command
+from osc_lib import exceptions
+from osc_lib import utils as osc_utils
+
+from openstackclient.i18n import _
+
+LOG = logging.getLogger(__name__)
+
+TRUNK = 'trunk'
+TRUNKS = 'trunks'
+SUB_PORTS = 'sub_ports'
+
+
+class AdminStateColumn(cliff_columns.FormattableColumn):
+ def human_readable(self):
+ return 'UP' if self._value else 'DOWN'
+
+
+class CreateNetworkTrunk(command.ShowOne):
+ """Create a network trunk for a given project"""
+
+ def get_parser(self, prog_name):
+ parser = super(CreateNetworkTrunk, self).get_parser(prog_name)
+ parser.add_argument(
+ 'name',
+ metavar='<name>',
+ help=_("Name of the trunk to create")
+ )
+ parser.add_argument(
+ '--description',
+ metavar='<description>',
+ help=_("A description of the trunk")
+ )
+ parser.add_argument(
+ '--parent-port',
+ metavar='<parent-port>',
+ required=True,
+ help=_("Parent port belonging to this trunk (name or ID)")
+ )
+ parser.add_argument(
+ '--subport',
+ metavar='<port=,segmentation-type=,segmentation-id=>',
+ action=parseractions.MultiKeyValueAction, dest='add_subports',
+ optional_keys=['segmentation-id', 'segmentation-type'],
+ required_keys=['port'],
+ help=_("Subport to add. Subport is of form "
+ "\'port=<name or ID>,segmentation-type=<segmentation-type>,"
+ "segmentation-id=<segmentation-ID>\' (--subport) option "
+ "can be repeated")
+ )
+ admin_group = parser.add_mutually_exclusive_group()
+ admin_group.add_argument(
+ '--enable',
+ action='store_true',
+ default=True,
+ help=_("Enable trunk (default)")
+ )
+ admin_group.add_argument(
+ '--disable',
+ action='store_true',
+ help=_("Disable trunk")
+ )
+ identity_utils.add_project_owner_option_to_parser(parser)
+ return parser
+
+ def take_action(self, parsed_args):
+ client = self.app.client_manager.network
+ attrs = _get_attrs_for_trunk(self.app.client_manager,
+ parsed_args)
+ obj = client.create_trunk(**attrs)
+ display_columns, columns = _get_columns(obj)
+ data = osc_utils.get_dict_properties(obj, columns,
+ formatters=_formatters)
+ return display_columns, data
+
+
+class DeleteNetworkTrunk(command.Command):
+ """Delete a given network trunk"""
+
+ def get_parser(self, prog_name):
+ parser = super(DeleteNetworkTrunk, self).get_parser(prog_name)
+ parser.add_argument(
+ 'trunk',
+ metavar="<trunk>",
+ nargs="+",
+ help=_("Trunk(s) to delete (name or ID)")
+ )
+ return parser
+
+ def take_action(self, parsed_args):
+ client = self.app.client_manager.network
+ result = 0
+ for trunk in parsed_args.trunk:
+ try:
+ trunk_id = client.find_trunk(trunk).id
+ client.delete_trunk(trunk_id)
+ except Exception as e:
+ result += 1
+ LOG.error(_("Failed to delete trunk with name "
+ "or ID '%(trunk)s': %(e)s"),
+ {'trunk': trunk, 'e': e})
+ if result > 0:
+ total = len(parsed_args.trunk)
+ msg = (_("%(result)s of %(total)s trunks failed "
+ "to delete.") % {'result': result, 'total': total})
+ raise exceptions.CommandError(msg)
+
+
+class ListNetworkTrunk(command.Lister):
+ """List all network trunks"""
+
+ def get_parser(self, prog_name):
+ parser = super(ListNetworkTrunk, self).get_parser(prog_name)
+ parser.add_argument(
+ '--long',
+ action='store_true',
+ default=False,
+ help=_("List additional fields in output")
+ )
+ return parser
+
+ def take_action(self, parsed_args):
+ client = self.app.client_manager.network
+ data = client.trunks()
+ headers = (
+ 'ID',
+ 'Name',
+ 'Parent Port',
+ 'Description'
+ )
+ columns = (
+ 'id',
+ 'name',
+ 'port_id',
+ 'description'
+ )
+ if parsed_args.long:
+ headers += (
+ 'Status',
+ 'State',
+ 'Created At',
+ 'Updated At',
+ )
+ columns += (
+ 'status',
+ 'admin_state_up',
+ 'created_at',
+ 'updated_at'
+ )
+ return (headers,
+ (osc_utils.get_item_properties(
+ s, columns,
+ formatters=_formatters,
+ ) for s in data))
+
+
+class SetNetworkTrunk(command.Command):
+ """Set network trunk properties"""
+
+ def get_parser(self, prog_name):
+ parser = super(SetNetworkTrunk, self).get_parser(prog_name)
+ parser.add_argument(
+ 'trunk',
+ metavar="<trunk>",
+ help=_("Trunk to modify (name or ID)")
+ )
+ parser.add_argument(
+ '--name',
+ metavar="<name>",
+ help=_("Set trunk name")
+ )
+ parser.add_argument(
+ '--description',
+ metavar='<description>',
+ help=_("A description of the trunk")
+ )
+ parser.add_argument(
+ '--subport',
+ metavar='<port=,segmentation-type=,segmentation-id=>',
+ action=parseractions.MultiKeyValueAction, dest='set_subports',
+ optional_keys=['segmentation-id', 'segmentation-type'],
+ required_keys=['port'],
+ help=_("Subport to add. Subport is of form "
+ "\'port=<name or ID>,segmentation-type=<segmentation-type>"
+ ",segmentation-id=<segmentation-ID>\' (--subport) option "
+ "can be repeated")
+ )
+ admin_group = parser.add_mutually_exclusive_group()
+ admin_group.add_argument(
+ '--enable',
+ action='store_true',
+ help=_("Enable trunk")
+ )
+ admin_group.add_argument(
+ '--disable',
+ action='store_true',
+ help=_("Disable trunk")
+ )
+ return parser
+
+ def take_action(self, parsed_args):
+ client = self.app.client_manager.network
+ trunk_id = client.find_trunk(parsed_args.trunk)
+ attrs = _get_attrs_for_trunk(self.app.client_manager, parsed_args)
+ try:
+ client.update_trunk(trunk_id, **attrs)
+ except Exception as e:
+ msg = (_("Failed to set trunk '%(t)s': %(e)s")
+ % {'t': parsed_args.trunk, 'e': e})
+ raise exceptions.CommandError(msg)
+ if parsed_args.set_subports:
+ subport_attrs = _get_attrs_for_subports(self.app.client_manager,
+ parsed_args)
+ try:
+ client.add_trunk_subports(trunk_id, subport_attrs)
+ except Exception as e:
+ msg = (_("Failed to add subports to trunk '%(t)s': %(e)s")
+ % {'t': parsed_args.trunk, 'e': e})
+ raise exceptions.CommandError(msg)
+
+
+class ShowNetworkTrunk(command.ShowOne):
+ """Show information of a given network trunk"""
+ def get_parser(self, prog_name):
+ parser = super(ShowNetworkTrunk, self).get_parser(prog_name)
+ parser.add_argument(
+ 'trunk',
+ metavar="<trunk>",
+ help=_("Trunk to display (name or ID)")
+ )
+ return parser
+
+ def take_action(self, parsed_args):
+ client = self.app.client_manager.network
+ trunk_id = client.find_trunk(parsed_args.trunk).id
+ obj = client.get_trunk(trunk_id)
+ display_columns, columns = _get_columns(obj)
+ data = osc_utils.get_dict_properties(obj, columns,
+ formatters=_formatters)
+ return display_columns, data
+
+
+class ListNetworkSubport(command.Lister):
+ """List all subports for a given network trunk"""
+
+ def get_parser(self, prog_name):
+ parser = super(ListNetworkSubport, self).get_parser(prog_name)
+ parser.add_argument(
+ '--trunk',
+ required=True,
+ metavar="<trunk>",
+ help=_("List subports belonging to this trunk (name or ID)")
+ )
+ return parser
+
+ def take_action(self, parsed_args):
+ client = self.app.client_manager.network
+ trunk_id = client.find_trunk(parsed_args.trunk)
+ data = client.get_trunk_subports(trunk_id)
+ headers = ('Port', 'Segmentation Type', 'Segmentation ID')
+ columns = ('port_id', 'segmentation_type', 'segmentation_id')
+ return (headers,
+ (osc_utils.get_dict_properties(
+ s, columns,
+ ) for s in data[SUB_PORTS]))
+
+
+class UnsetNetworkTrunk(command.Command):
+ """Unset subports from a given network trunk"""
+
+ def get_parser(self, prog_name):
+ parser = super(UnsetNetworkTrunk, self).get_parser(prog_name)
+ parser.add_argument(
+ 'trunk',
+ metavar="<trunk>",
+ help=_("Unset subports from this trunk (name or ID)")
+ )
+ parser.add_argument(
+ '--subport',
+ metavar="<subport>",
+ required=True,
+ action='append', dest='unset_subports',
+ help=_("Subport to delete (name or ID of the port) "
+ "(--subport) option can be repeated")
+ )
+ return parser
+
+ def take_action(self, parsed_args):
+ client = self.app.client_manager.network
+ attrs = _get_attrs_for_subports(self.app.client_manager, parsed_args)
+ trunk_id = client.find_trunk(parsed_args.trunk)
+ client.delete_trunk_subports(trunk_id, attrs)
+
+
+_formatters = {
+ 'admin_state_up': AdminStateColumn,
+ 'sub_ports': format_columns.ListDictColumn,
+}
+
+
+def _get_columns(item):
+ column_map = {}
+ hidden_columns = ['location', 'tenant_id']
+ return osc_utils.get_osc_show_columns_for_sdk_resource(
+ item,
+ column_map,
+ hidden_columns
+ )
+
+
+def _get_attrs_for_trunk(client_manager, parsed_args):
+ attrs = {}
+ if parsed_args.name is not None:
+ attrs['name'] = str(parsed_args.name)
+ if parsed_args.description is not None:
+ attrs['description'] = str(parsed_args.description)
+ if parsed_args.enable:
+ attrs['admin_state_up'] = True
+ if parsed_args.disable:
+ attrs['admin_state_up'] = False
+ if 'parent_port' in parsed_args and parsed_args.parent_port is not None:
+ port_id = client_manager.network.find_port(
+ parsed_args.parent_port)['id']
+ attrs['port_id'] = port_id
+ if 'add_subports' in parsed_args and parsed_args.add_subports is not None:
+ attrs[SUB_PORTS] = _format_subports(client_manager,
+ parsed_args.add_subports)
+
+ # "trunk set" command doesn't support setting project.
+ if 'project' in parsed_args and parsed_args.project is not None:
+ identity_client = client_manager.identity
+ project_id = identity_utils.find_project(
+ identity_client,
+ parsed_args.project,
+ parsed_args.project_domain,
+ ).id
+ attrs['tenant_id'] = project_id
+
+ return attrs
+
+
+def _format_subports(client_manager, subports):
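+ # 'subports' comes from the --subport MultiKeyValueAction, e.g.
+ # [{'port': 'child-port', 'segmentation-type': 'vlan',
+ # 'segmentation-id': '100'}]; translate each entry into the
+ # port_id/segmentation_type/segmentation_id keys the API expects.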
+ attrs = []
+ for subport in subports:
+ subport_attrs = {}
+ if subport.get('port'):
+ port_id = client_manager.network.find_port(subport['port'])['id']
+ subport_attrs['port_id'] = port_id
+ if subport.get('segmentation-id'):
+ try:
+ subport_attrs['segmentation_id'] = int(
+ subport['segmentation-id'])
+ except ValueError:
+ msg = (_("Segmentation-id '%s' is not an integer") %
+ subport['segmentation-id'])
+ raise exceptions.CommandError(msg)
+ if subport.get('segmentation-type'):
+ subport_attrs['segmentation_type'] = subport['segmentation-type']
+ attrs.append(subport_attrs)
+ return attrs
+
+
+def _get_attrs_for_subports(client_manager, parsed_args):
+ attrs = {}
+ if 'set_subports' in parsed_args and parsed_args.set_subports is not None:
+ attrs = _format_subports(client_manager,
+ parsed_args.set_subports)
+ if ('unset_subports' in parsed_args and
+ parsed_args.unset_subports is not None):
+ subports_list = []
+ for subport in parsed_args.unset_subports:
+ port_id = client_manager.network.find_port(subport)['id']
+ subports_list.append({'port_id': port_id})
+ attrs = subports_list
+ return attrs
+
+
+def _get_id(client, id_or_name, resource):
+ return client.find_resource(resource, str(id_or_name))['id']
diff --git a/openstackclient/network/v2/port.py b/openstackclient/network/v2/port.py
index 16072bc0..8bf14d6a 100644
--- a/openstackclient/network/v2/port.py
+++ b/openstackclient/network/v2/port.py
@@ -64,7 +64,7 @@ def _get_columns(item):
'is_admin_state_up': 'admin_state_up',
'is_port_security_enabled': 'port_security_enabled',
}
- hidden_columns = ['location']
+ hidden_columns = ['location', 'tenant_id']
return utils.get_osc_show_columns_for_sdk_resource(
item,
column_map,
diff --git a/openstackclient/network/v2/router.py b/openstackclient/network/v2/router.py
index f1fce298..8302ee01 100644
--- a/openstackclient/network/v2/router.py
+++ b/openstackclient/network/v2/router.py
@@ -369,6 +369,21 @@ class CreateRouter(command.ShowOne, common.NeutronCommandWithExtraArgs):
action='store_true',
help=_("Disable Source NAT on external gateway")
)
+ ndp_proxy_group = parser.add_mutually_exclusive_group()
+ ndp_proxy_group.add_argument(
+ '--enable-ndp-proxy',
+ dest='enable_ndp_proxy',
+ default=None,
+ action='store_true',
+ help=_("Enable IPv6 NDP proxy on external gateway")
+ )
+ ndp_proxy_group.add_argument(
+ '--disable-ndp-proxy',
+ dest='enable_ndp_proxy',
+ default=None,
+ action='store_false',
+ help=_("Disable IPv6 NDP proxy on external gateway")
+ )
return parser
@@ -383,6 +398,14 @@ class CreateRouter(command.ShowOne, common.NeutronCommandWithExtraArgs):
attrs.update(
self._parse_extra_properties(parsed_args.extra_properties))
+ if parsed_args.enable_ndp_proxy and not parsed_args.external_gateway:
+ msg = (_("You must specify '--external-gateway' in order "
+ "to enable router's NDP proxy"))
+ raise exceptions.CommandError(msg)
+
+ if parsed_args.enable_ndp_proxy is not None:
+ attrs['enable_ndp_proxy'] = parsed_args.enable_ndp_proxy
+
obj = client.create_router(**attrs)
# tags cannot be set when created, so tags need to be set later.
_tag.update_tags_for_set(client, obj, parsed_args)
@@ -737,6 +760,21 @@ class SetRouter(common.NeutronCommandWithExtraArgs):
action='store_true',
help=_("Disable Source NAT on external gateway")
)
+ ndp_proxy_group = parser.add_mutually_exclusive_group()
+ ndp_proxy_group.add_argument(
+ '--enable-ndp-proxy',
+ dest='enable_ndp_proxy',
+ default=None,
+ action='store_true',
+ help=_("Enable IPv6 NDP proxy on external gateway")
+ )
+ ndp_proxy_group.add_argument(
+ '--disable-ndp-proxy',
+ dest='enable_ndp_proxy',
+ default=None,
+ action='store_false',
+ help=_("Disable IPv6 NDP proxy on external gateway")
+ )
qos_policy_group = parser.add_mutually_exclusive_group()
qos_policy_group.add_argument(
'--qos-policy',
@@ -804,6 +842,9 @@ class SetRouter(common.NeutronCommandWithExtraArgs):
attrs.update(
self._parse_extra_properties(parsed_args.extra_properties))
+ if parsed_args.enable_ndp_proxy is not None:
+ attrs['enable_ndp_proxy'] = parsed_args.enable_ndp_proxy
+
if attrs:
client.update_router(obj, **attrs)
# tags is a subresource and it needs to be updated separately.
diff --git a/openstackclient/network/v2/security_group.py b/openstackclient/network/v2/security_group.py
index d8c38f45..ffad9988 100644
--- a/openstackclient/network/v2/security_group.py
+++ b/openstackclient/network/v2/security_group.py
@@ -35,6 +35,7 @@ def _format_network_security_group_rules(sg_rules):
for key in empty_keys:
sg_rule.pop(key)
sg_rule.pop('security_group_id', None)
+ sg_rule.pop('tenant_id', None)
sg_rule.pop('project_id', None)
return utils.format_list_of_dicts(sg_rules)
diff --git a/openstackclient/network/v2/security_group_rule.py b/openstackclient/network/v2/security_group_rule.py
index a1122616..2179f33d 100644
--- a/openstackclient/network/v2/security_group_rule.py
+++ b/openstackclient/network/v2/security_group_rule.py
@@ -72,7 +72,7 @@ def _format_remote_ip_prefix(rule):
def _get_columns(item):
column_map = {}
- hidden_columns = ['location']
+ hidden_columns = ['location', 'tenant_id']
return utils.get_osc_show_columns_for_sdk_resource(
item,
column_map,
diff --git a/openstackclient/network/v2/subnet.py b/openstackclient/network/v2/subnet.py
index bf6a46d4..8e3a877f 100644
--- a/openstackclient/network/v2/subnet.py
+++ b/openstackclient/network/v2/subnet.py
@@ -138,16 +138,17 @@ def _get_columns(item):
'is_dhcp_enabled': 'enable_dhcp',
'subnet_pool_id': 'subnetpool_id',
}
- # Do not show this column when displaying a subnet
- invisible_columns = [
+ # Do not show these columns when displaying a subnet
+ hidden_columns = [
'location',
'use_default_subnet_pool',
- 'prefix_length'
+ 'prefix_length',
+ 'tenant_id',
]
return utils.get_osc_show_columns_for_sdk_resource(
item,
column_map,
- invisible_columns
+ hidden_columns
)
diff --git a/openstackclient/network/v2/subnet_pool.py b/openstackclient/network/v2/subnet_pool.py
index b4142f37..2369960e 100644
--- a/openstackclient/network/v2/subnet_pool.py
+++ b/openstackclient/network/v2/subnet_pool.py
@@ -37,7 +37,7 @@ def _get_columns(item):
'maximum_prefix_length': 'max_prefixlen',
'minimum_prefix_length': 'min_prefixlen',
}
- hidden_columns = ['location']
+ hidden_columns = ['location', 'tenant_id']
return utils.get_osc_show_columns_for_sdk_resource(
item,
column_map,
diff --git a/openstackclient/tests/functional/base.py b/openstackclient/tests/functional/base.py
index 0ed7dff8..0c430267 100644
--- a/openstackclient/tests/functional/base.py
+++ b/openstackclient/tests/functional/base.py
@@ -10,6 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
+import json
+import logging
import os
import shlex
import subprocess
@@ -18,54 +20,81 @@ from tempest.lib.cli import output_parser
from tempest.lib import exceptions
import testtools
-
ADMIN_CLOUD = os.environ.get('OS_ADMIN_CLOUD', 'devstack-admin')
+LOG = logging.getLogger(__name__)
def execute(cmd, fail_ok=False, merge_stderr=False):
"""Executes specified command for the given action."""
+ LOG.debug('Executing: %s', cmd)
cmdlist = shlex.split(cmd)
stdout = subprocess.PIPE
stderr = subprocess.STDOUT if merge_stderr else subprocess.PIPE
+
proc = subprocess.Popen(cmdlist, stdout=stdout, stderr=stderr)
- result, result_err = proc.communicate()
- result = result.decode('utf-8')
+
+ result_out, result_err = proc.communicate()
+ result_out = result_out.decode('utf-8')
+ LOG.debug('stdout: %s', result_out)
+ LOG.debug('stderr: %s', result_err)
+
if not fail_ok and proc.returncode != 0:
- raise exceptions.CommandFailed(proc.returncode, cmd, result,
- result_err)
- return result
+ raise exceptions.CommandFailed(
+ proc.returncode, cmd, result_out, result_err,
+ )
+
+ return result_out
class TestCase(testtools.TestCase):
@classmethod
- def openstack(cls, cmd, cloud=ADMIN_CLOUD, fail_ok=False):
+ def openstack(
+ cls,
+ cmd,
+ *,
+ cloud=ADMIN_CLOUD,
+ fail_ok=False,
+ parse_output=False,
+ ):
"""Executes openstackclient command for the given action
- NOTE(dtroyer): There is a subtle distinction between pasing
- cloud=None and cloud='': for compatibility reasons passing
- cloud=None continues to include the option '--os-auth-type none'
- in the command while passing cloud='' omits the '--os-auth-type'
- option completely to let the default handlers be invoked.
+ :param cmd: A string representation of the command to execute.
+ :param cloud: The cloud to execute against. This can be a string, empty
+ string, or None. A string results in '--os-cloud $cloud', an
+ empty string omits the '--os-cloud'/'--os-auth-type' options
+ entirely, and None results in '--os-auth-type none' for legacy
+ reasons.
+ :param fail_ok: If failure is permitted. If False (default), a command
+ failure will result in `~tempest.lib.exceptions.CommandFailed`
+ being raised.
+ :param parse_output: If true, pass the '-f json' parameter and decode
+ the output.
+ :returns: The output from the command.
+ :raises: `~tempest.lib.exceptions.CommandFailed` if the command failed
+ and ``fail_ok`` was ``False``.
"""
+ auth_args = []
if cloud is None:
# Execute command with no auth
- return execute(
- 'openstack --os-auth-type none ' + cmd,
- fail_ok=fail_ok
- )
- elif cloud == '':
- # Execute command with no auth options at all
- return execute(
- 'openstack ' + cmd,
- fail_ok=fail_ok
- )
+ auth_args.append('--os-auth-type none')
+ elif cloud != '':
+ # Execute command with an explicit cloud specified
+ auth_args.append(f'--os-cloud {cloud}')
+
+ format_args = []
+ if parse_output:
+ format_args.append('-f json')
+
+ output = execute(
+ ' '.join(['openstack'] + auth_args + [cmd] + format_args),
+ fail_ok=fail_ok,
+ )
+
+ if parse_output:
+ return json.loads(output)
else:
- # Execure command with an explicit cloud specified
- return execute(
- 'openstack --os-cloud=' + cloud + ' ' + cmd,
- fail_ok=fail_ok
- )
+ return output
@classmethod
def is_service_enabled(cls, service, version=None):
diff --git a/openstackclient/tests/functional/common/test_args.py b/openstackclient/tests/functional/common/test_args.py
index 02cad6c1..1f5ecc1c 100644
--- a/openstackclient/tests/functional/common/test_args.py
+++ b/openstackclient/tests/functional/common/test_args.py
@@ -10,8 +10,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import json
-
from tempest.lib import exceptions as tempest_exc
from openstackclient.tests.functional import base
@@ -21,10 +19,11 @@ class ArgumentTests(base.TestCase):
"""Functional tests for command line arguments"""
def test_default_auth_type(self):
- cmd_output = json.loads(self.openstack(
- 'configuration show -f json',
+ cmd_output = self.openstack(
+ 'configuration show',
cloud='',
- ))
+ parse_output=True,
+ )
self.assertIsNotNone(cmd_output)
self.assertIn(
'auth_type',
@@ -36,10 +35,11 @@ class ArgumentTests(base.TestCase):
)
def test_auth_type_none(self):
- cmd_output = json.loads(self.openstack(
- 'configuration show -f json',
+ cmd_output = self.openstack(
+ 'configuration show',
cloud=None,
- ))
+ parse_output=True,
+ )
self.assertIsNotNone(cmd_output)
self.assertIn(
'auth_type',
@@ -54,7 +54,7 @@ class ArgumentTests(base.TestCase):
# Make sure token_endpoint is really gone
try:
self.openstack(
- 'configuration show -f json --os-auth-type token_endpoint',
+ 'configuration show --os-auth-type token_endpoint',
cloud=None,
)
except tempest_exc.CommandFailed as e:
@@ -64,10 +64,11 @@ class ArgumentTests(base.TestCase):
self.fail('CommandFailed should be raised')
def test_auth_type_password_opt(self):
- cmd_output = json.loads(self.openstack(
- 'configuration show -f json --os-auth-type password',
+ cmd_output = self.openstack(
+ 'configuration show --os-auth-type password',
cloud=None,
- ))
+ parse_output=True,
+ )
self.assertIsNotNone(cmd_output)
self.assertIn(
'auth_type',
diff --git a/openstackclient/tests/functional/common/test_availability_zone.py b/openstackclient/tests/functional/common/test_availability_zone.py
index 025da95c..f319ffc5 100644
--- a/openstackclient/tests/functional/common/test_availability_zone.py
+++ b/openstackclient/tests/functional/common/test_availability_zone.py
@@ -10,8 +10,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import json
-
from openstackclient.tests.functional import base
@@ -19,8 +17,10 @@ class AvailabilityZoneTests(base.TestCase):
"""Functional tests for availability zone. """
def test_availability_zone_list(self):
- cmd_output = json.loads(self.openstack(
- 'availability zone list -f json'))
+ cmd_output = self.openstack(
+ 'availability zone list',
+ parse_output=True,
+ )
zones = [x['Zone Name'] for x in cmd_output]
self.assertIn(
'internal',
diff --git a/openstackclient/tests/functional/common/test_configuration.py b/openstackclient/tests/functional/common/test_configuration.py
index 17e0f45d..614b3e46 100644
--- a/openstackclient/tests/functional/common/test_configuration.py
+++ b/openstackclient/tests/functional/common/test_configuration.py
@@ -10,7 +10,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import json
import os
from openstackclient.common import configuration
@@ -30,9 +29,7 @@ class ConfigurationTests(base.TestCase):
items = self.parse_listing(raw_output)
self.assert_table_structure(items, BASIC_CONFIG_HEADERS)
- cmd_output = json.loads(self.openstack(
- 'configuration show -f json'
- ))
+ cmd_output = self.openstack('configuration show', parse_output=True)
self.assertEqual(
configuration.REDACTED,
cmd_output['auth.password']
@@ -43,18 +40,18 @@ class ConfigurationTests(base.TestCase):
)
# Test show --mask
- cmd_output = json.loads(self.openstack(
- 'configuration show --mask -f json'
- ))
+ cmd_output = self.openstack(
+ 'configuration show --mask', parse_output=True,
+ )
self.assertEqual(
configuration.REDACTED,
cmd_output['auth.password']
)
# Test show --unmask
- cmd_output = json.loads(self.openstack(
- 'configuration show --unmask -f json'
- ))
+ cmd_output = self.openstack(
+ 'configuration show --unmask', parse_output=True,
+ )
# If we are using os-client-config, this will not be set. Rather than
# parse clouds.yaml to get the right value, just make sure
# we are not getting redacted.
@@ -84,10 +81,11 @@ class ConfigurationTestsNoAuth(base.TestCase):
items = self.parse_listing(raw_output)
self.assert_table_structure(items, BASIC_CONFIG_HEADERS)
- cmd_output = json.loads(self.openstack(
- 'configuration show -f json',
+ cmd_output = self.openstack(
+ 'configuration show',
cloud=None,
- ))
+ parse_output=True,
+ )
self.assertNotIn(
'auth.password',
cmd_output,
diff --git a/openstackclient/tests/functional/common/test_extension.py b/openstackclient/tests/functional/common/test_extension.py
index 92efabef..8784c55b 100644
--- a/openstackclient/tests/functional/common/test_extension.py
+++ b/openstackclient/tests/functional/common/test_extension.py
@@ -13,8 +13,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import json
-
from tempest.lib import exceptions as tempest_exc
from openstackclient.tests.functional import base
@@ -30,11 +28,11 @@ class ExtensionTests(base.TestCase):
def test_extension_list_compute(self):
"""Test compute extension list"""
- json_output = json.loads(self.openstack(
- 'extension list -f json ' +
- '--compute'
- ))
- name_list = [item.get('Name') for item in json_output]
+ output = self.openstack(
+ 'extension list --compute',
+ parse_output=True,
+ )
+ name_list = [item.get('Name') for item in output]
self.assertIn(
'ImageSize',
name_list,
@@ -42,11 +40,11 @@ class ExtensionTests(base.TestCase):
def test_extension_list_volume(self):
"""Test volume extension list"""
- json_output = json.loads(self.openstack(
- 'extension list -f json ' +
- '--volume'
- ))
- name_list = [item.get('Name') for item in json_output]
+ output = self.openstack(
+ 'extension list --volume',
+ parse_output=True,
+ )
+ name_list = [item.get('Name') for item in output]
self.assertIn(
'TypesManage',
name_list,
@@ -57,43 +55,29 @@ class ExtensionTests(base.TestCase):
if not self.haz_network:
self.skipTest("No Network service present")
- json_output = json.loads(self.openstack(
- 'extension list -f json ' +
- '--network'
- ))
- name_list = [item.get('Name') for item in json_output]
+ output = self.openstack(
+ 'extension list --network',
+ parse_output=True,
+ )
+ name_list = [item.get('Name') for item in output]
self.assertIn(
'Default Subnetpools',
name_list,
)
- # NOTE(dtroyer): Only network extensions are currently supported but
- # I am going to leave this here anyway as a reminder
- # fix that.
- # def test_extension_show_compute(self):
- # """Test compute extension show"""
- # json_output = json.loads(self.openstack(
- # 'extension show -f json ' +
- # 'ImageSize'
- # ))
- # self.assertEqual(
- # 'OS-EXT-IMG-SIZE',
- # json_output.get('Alias'),
- # )
-
def test_extension_show_network(self):
"""Test network extension show"""
if not self.haz_network:
self.skipTest("No Network service present")
name = 'agent'
- json_output = json.loads(self.openstack(
- 'extension show -f json ' +
- name
- ))
+ output = self.openstack(
+ 'extension show ' + name,
+ parse_output=True,
+ )
self.assertEqual(
name,
- json_output.get('alias'),
+ output.get('alias'),
)
def test_extension_show_not_exist(self):
diff --git a/openstackclient/tests/functional/common/test_help.py b/openstackclient/tests/functional/common/test_help.py
index c55741f1..e84c22e0 100644
--- a/openstackclient/tests/functional/common/test_help.py
+++ b/openstackclient/tests/functional/common/test_help.py
@@ -30,9 +30,7 @@ class HelpTests(base.TestCase):
('server image create',
'Create a new server disk image from an existing server'),
('server list', 'List servers'),
- ('server lock',
- 'Lock server(s). '
- 'A non-admin user will not be able to execute actions'),
+ ('server lock', 'Lock server(s)'),
('server migrate', 'Migrate server to different host'),
('server pause', 'Pause server(s)'),
('server reboot', 'Perform a hard or soft server reboot'),
@@ -46,8 +44,8 @@ class HelpTests(base.TestCase):
('server shelve', 'Shelve and optionally offload server(s)'),
('server show', 'Show server details'),
('server ssh', 'SSH to server'),
- ('server start', 'Start server(s).'),
- ('server stop', 'Stop server(s).'),
+ ('server start', 'Start server(s)'),
+ ('server stop', 'Stop server(s)'),
('server suspend', 'Suspend server(s)'),
('server unlock', 'Unlock server(s)'),
('server unpause', 'Unpause server(s)'),
diff --git a/openstackclient/tests/functional/common/test_module.py b/openstackclient/tests/functional/common/test_module.py
index 41aabb7f..967d3b49 100644
--- a/openstackclient/tests/functional/common/test_module.py
+++ b/openstackclient/tests/functional/common/test_module.py
@@ -11,9 +11,6 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
-#
-
-import json
from openstackclient.tests.functional import base
@@ -31,14 +28,14 @@ class ModuleTest(base.TestCase):
def test_module_list(self):
# Test module list
- cmd_output = json.loads(self.openstack('module list -f json'))
+ cmd_output = self.openstack('module list', parse_output=True)
for one_module in self.CLIENTS:
self.assertIn(one_module, cmd_output.keys())
for one_module in self.LIBS:
self.assertNotIn(one_module, cmd_output.keys())
# Test module list --all
- cmd_output = json.loads(self.openstack('module list --all -f json'))
+ cmd_output = self.openstack('module list --all', parse_output=True)
for one_module in self.CLIENTS + self.LIBS:
self.assertIn(one_module, cmd_output.keys())
@@ -56,7 +53,7 @@ class CommandTest(base.TestCase):
]
def test_command_list_no_option(self):
- cmd_output = json.loads(self.openstack('command list -f json'))
+ cmd_output = self.openstack('command list', parse_output=True)
group_names = [each.get('Command Group') for each in cmd_output]
for one_group in self.GROUPS:
self.assertIn(one_group, group_names)
@@ -70,9 +67,10 @@ class CommandTest(base.TestCase):
'compute.v2'
]
for each_input in input_groups:
- cmd_output = json.loads(self.openstack(
- 'command list --group %s -f json' % each_input
- ))
+ cmd_output = self.openstack(
+ 'command list --group %s' % each_input,
+ parse_output=True,
+ )
group_names = [each.get('Command Group') for each in cmd_output]
for each_name in group_names:
self.assertIn(each_input, each_name)
diff --git a/openstackclient/tests/functional/common/test_quota.py b/openstackclient/tests/functional/common/test_quota.py
index 5096fa06..6e48df1d 100644
--- a/openstackclient/tests/functional/common/test_quota.py
+++ b/openstackclient/tests/functional/common/test_quota.py
@@ -10,7 +10,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import json
import uuid
from tempest.lib import exceptions
@@ -36,9 +35,10 @@ class QuotaTests(base.TestCase):
def test_quota_list_details_compute(self):
expected_headers = ["Resource", "In Use", "Reserved", "Limit"]
- cmd_output = json.loads(self.openstack(
- 'quota list -f json --detail --compute'
- ))
+ cmd_output = self.openstack(
+ 'quota list --detail --compute',
+ parse_output=True,
+ )
self.assertIsNotNone(cmd_output)
resources = []
for row in cmd_output:
@@ -52,9 +52,10 @@ class QuotaTests(base.TestCase):
def test_quota_list_details_network(self):
expected_headers = ["Resource", "In Use", "Reserved", "Limit"]
- cmd_output = json.loads(self.openstack(
- 'quota list -f json --detail --network'
- ))
+ cmd_output = self.openstack(
+ 'quota list --detail --network',
+ parse_output=True,
+ )
self.assertIsNotNone(cmd_output)
resources = []
for row in cmd_output:
@@ -70,9 +71,10 @@ class QuotaTests(base.TestCase):
if not self.haz_network:
self.skipTest("No Network service present")
self.openstack('quota set --networks 40 ' + self.PROJECT_NAME)
- cmd_output = json.loads(self.openstack(
- 'quota list -f json --network'
- ))
+ cmd_output = self.openstack(
+ 'quota list --network',
+ parse_output=True,
+ )
self.assertIsNotNone(cmd_output)
self.assertEqual(
40,
@@ -81,9 +83,10 @@ class QuotaTests(base.TestCase):
def test_quota_list_compute_option(self):
self.openstack('quota set --instances 30 ' + self.PROJECT_NAME)
- cmd_output = json.loads(self.openstack(
- 'quota list -f json --compute'
- ))
+ cmd_output = self.openstack(
+ 'quota list --compute',
+ parse_output=True,
+ )
self.assertIsNotNone(cmd_output)
self.assertEqual(
30,
@@ -92,9 +95,10 @@ class QuotaTests(base.TestCase):
def test_quota_list_volume_option(self):
self.openstack('quota set --volumes 20 ' + self.PROJECT_NAME)
- cmd_output = json.loads(self.openstack(
- 'quota list -f json --volume'
- ))
+ cmd_output = self.openstack(
+ 'quota list --volume',
+ parse_output=True,
+ )
self.assertIsNotNone(cmd_output)
self.assertEqual(
20,
@@ -111,9 +115,11 @@ class QuotaTests(base.TestCase):
network_option +
self.PROJECT_NAME
)
- cmd_output = json.loads(self.openstack(
- 'quota show -f json ' + self.PROJECT_NAME
- ))
+ cmd_output = self.openstack(
+ 'quota show ' + self.PROJECT_NAME,
+ parse_output=True,
+ )
+ cmd_output = {x['Resource']: x['Limit'] for x in cmd_output}
self.assertIsNotNone(cmd_output)
self.assertEqual(
31,
@@ -130,12 +136,14 @@ class QuotaTests(base.TestCase):
)
# Check default quotas
- cmd_output = json.loads(self.openstack(
- 'quota show -f json --default'
- ))
+ cmd_output = self.openstack(
+ 'quota show --default',
+ parse_output=True,
+ )
self.assertIsNotNone(cmd_output)
# We don't necessarily know the default quotas, we're checking the
# returned attributes
+ cmd_output = {x['Resource']: x['Limit'] for x in cmd_output}
self.assertTrue(cmd_output["cores"] >= 0)
self.assertTrue(cmd_output["backups"] >= 0)
if self.haz_network:
@@ -146,10 +154,12 @@ class QuotaTests(base.TestCase):
'quota set --key-pairs 33 --snapshots 43 ' +
'--class default'
)
- cmd_output = json.loads(self.openstack(
- 'quota show -f json --class default'
- ))
+ cmd_output = self.openstack(
+ 'quota show --class default',
+ parse_output=True,
+ )
self.assertIsNotNone(cmd_output)
+ cmd_output = {x['Resource']: x['Limit'] for x in cmd_output}
self.assertEqual(
33,
cmd_output["key-pairs"],
@@ -160,34 +170,38 @@ class QuotaTests(base.TestCase):
)
# Check default quota class
- cmd_output = json.loads(self.openstack(
- 'quota show -f json --class'
- ))
+ cmd_output = self.openstack(
+ 'quota show --class',
+ parse_output=True,
+ )
self.assertIsNotNone(cmd_output)
# We don't necessarily know the default quotas, we're checking the
# returned attributes
+ cmd_output = {x['Resource']: x['Limit'] for x in cmd_output}
self.assertTrue(cmd_output["key-pairs"] >= 0)
self.assertTrue(cmd_output["snapshots"] >= 0)
def _restore_quota_limit(self, resource, limit, project):
self.openstack('quota set --%s %s %s' % (resource, limit, project))
- def test_quota_network_set_with_check_limit(self):
+ def test_quota_network_set_with_no_force(self):
if not self.haz_network:
self.skipTest('No Network service present')
if not self.is_extension_enabled('quota-check-limit'):
self.skipTest('No "quota-check-limit" extension present')
- cmd_output = json.loads(self.openstack(
- 'quota list -f json --network'
- ))
+ cmd_output = self.openstack(
+ 'quota list --network',
+ parse_output=True,
+ )
self.addCleanup(self._restore_quota_limit, 'network',
cmd_output[0]['Networks'], self.PROJECT_NAME)
self.openstack('quota set --networks 40 ' + self.PROJECT_NAME)
- cmd_output = json.loads(self.openstack(
- 'quota list -f json --network'
- ))
+ cmd_output = self.openstack(
+ 'quota list --network',
+ parse_output=True,
+ )
self.assertIsNotNone(cmd_output)
self.assertEqual(40, cmd_output[0]['Networks'])
@@ -197,10 +211,11 @@ class QuotaTests(base.TestCase):
(self.PROJECT_NAME, uuid.uuid4().hex))
self.assertRaises(exceptions.CommandFailed, self.openstack,
- 'quota set --networks 1 --check-limit ' +
+ 'quota set --networks 1 --no-force ' +
self.PROJECT_NAME)
def test_quota_network_set_with_force(self):
+ self.skipTest('story 2010110')
if not self.haz_network:
self.skipTest('No Network service present')
# NOTE(ralonsoh): the Neutron support for the flag "check-limit" was
@@ -213,16 +228,18 @@ class QuotaTests(base.TestCase):
if not self.is_extension_enabled('quota-check-limit'):
self.skipTest('No "quota-check-limit" extension present')
- cmd_output = json.loads(self.openstack(
- 'quota list -f json --network'
- ))
+ cmd_output = self.openstack(
+ 'quota list --network',
+ parse_output=True,
+ )
self.addCleanup(self._restore_quota_limit, 'network',
cmd_output[0]['Networks'], self.PROJECT_NAME)
self.openstack('quota set --networks 40 ' + self.PROJECT_NAME)
- cmd_output = json.loads(self.openstack(
- 'quota list -f json --network'
- ))
+ cmd_output = self.openstack(
+ 'quota list --network',
+ parse_output=True,
+ )
self.assertIsNotNone(cmd_output)
self.assertEqual(40, cmd_output[0]['Networks'])
@@ -232,8 +249,9 @@ class QuotaTests(base.TestCase):
(self.PROJECT_NAME, uuid.uuid4().hex))
self.openstack('quota set --networks 1 --force ' + self.PROJECT_NAME)
- cmd_output = json.loads(self.openstack(
- 'quota list -f json --network'
- ))
+ cmd_output = self.openstack(
+ 'quota list --network',
+ parse_output=True,
+ )
self.assertIsNotNone(cmd_output)
self.assertEqual(1, cmd_output[0]['Networks'])
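(Illustration only, not part of the patch.) The quota tests above also rebuild a dict from the parsed output, which suggests 'quota show' now returns rows with 'Resource' and 'Limit' columns rather than a flat mapping. A minimal sketch of that conversion, reusing names from the tests:

    rows = self.openstack('quota show --default', parse_output=True)
    # each row is a mapping with 'Resource' and 'Limit' keys
    limits = {row['Resource']: row['Limit'] for row in rows}
    self.assertTrue(limits['cores'] >= 0)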
diff --git a/openstackclient/tests/functional/common/test_versions.py b/openstackclient/tests/functional/common/test_versions.py
index adc74ebc..6575671a 100644
--- a/openstackclient/tests/functional/common/test_versions.py
+++ b/openstackclient/tests/functional/common/test_versions.py
@@ -10,8 +10,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import json
-
from openstackclient.tests.functional import base
@@ -21,9 +19,7 @@ class VersionsTests(base.TestCase):
def test_versions_show(self):
# TODO(mordred) Make this better. The trick is knowing what in the
# payload to test for.
- cmd_output = json.loads(self.openstack(
- 'versions show -f json'
- ))
+ cmd_output = self.openstack('versions show', parse_output=True)
self.assertIsNotNone(cmd_output)
self.assertIn(
"Region Name",
diff --git a/openstackclient/tests/functional/compute/v2/common.py b/openstackclient/tests/functional/compute/v2/common.py
index 851664c8..7eca4603 100644
--- a/openstackclient/tests/functional/compute/v2/common.py
+++ b/openstackclient/tests/functional/compute/v2/common.py
@@ -11,7 +11,6 @@
# under the License.
#
-import json
import time
import uuid
@@ -38,9 +37,7 @@ class ComputeTestCase(base.TestCase):
def get_flavor(cls):
# NOTE(rtheis): Get cirros256 or m1.tiny flavors since functional
# tests may create other flavors.
- flavors = json.loads(cls.openstack(
- "flavor list -f json "
- ))
+ flavors = cls.openstack("flavor list", parse_output=True)
server_flavor = None
for flavor in flavors:
if flavor['Name'] in ['m1.tiny', 'cirros256']:
@@ -53,9 +50,7 @@ class ComputeTestCase(base.TestCase):
# NOTE(rtheis): Get first Cirros image since functional tests may
# create other images. Image may be named '-uec' or
# '-disk'.
- images = json.loads(cls.openstack(
- "image list -f json "
- ))
+ images = cls.openstack("image list", parse_output=True)
server_image = None
for image in images:
if (image['Name'].startswith('cirros-') and
@@ -70,9 +65,10 @@ class ComputeTestCase(base.TestCase):
try:
# NOTE(rtheis): Get private network since functional tests may
# create other networks.
- cmd_output = json.loads(cls.openstack(
- 'network show private -f json'
- ))
+ cmd_output = cls.openstack(
+ 'network show private',
+ parse_output=True,
+ )
except exceptions.CommandFailed:
return ''
return '--nic net-id=' + cmd_output['id']
@@ -86,14 +82,15 @@ class ComputeTestCase(base.TestCase):
if not self.network_arg:
self.network_arg = self.get_network()
name = name or uuid.uuid4().hex
- cmd_output = json.loads(self.openstack(
- 'server create -f json ' +
+ cmd_output = self.openstack(
+ 'server create ' +
'--flavor ' + self.flavor_name + ' ' +
'--image ' + self.image_name + ' ' +
self.network_arg + ' ' +
'--wait ' +
- name
- ))
+ name,
+ parse_output=True,
+ )
self.assertIsNotNone(cmd_output["id"])
self.assertEqual(
name,
@@ -120,10 +117,11 @@ class ComputeTestCase(base.TestCase):
failures = ['ERROR']
total_sleep = 0
while total_sleep < wait:
- cmd_output = json.loads(self.openstack(
- 'server show -f json ' +
- name
- ))
+ cmd_output = self.openstack(
+ 'server show ' +
+ name,
+ parse_output=True,
+ )
status = cmd_output['status']
if status == expected_status:
print('Server {} now has status {}'.format(
@@ -135,10 +133,11 @@ class ComputeTestCase(base.TestCase):
time.sleep(interval)
total_sleep += interval
- cmd_output = json.loads(self.openstack(
- 'server show -f json ' +
- name
- ))
+ cmd_output = self.openstack(
+ 'server show ' +
+ name,
+ parse_output=True,
+ )
status = cmd_output['status']
self.assertEqual(status, expected_status)
# give it a little bit more time
diff --git a/openstackclient/tests/functional/compute/v2/test_aggregate.py b/openstackclient/tests/functional/compute/v2/test_aggregate.py
index 1de53099..80750faf 100644
--- a/openstackclient/tests/functional/compute/v2/test_aggregate.py
+++ b/openstackclient/tests/functional/compute/v2/test_aggregate.py
@@ -10,7 +10,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import json
import uuid
from openstackclient.tests.functional import base
@@ -27,12 +26,13 @@ class AggregateTests(base.TestCase):
'aggregate delete ' + name1,
fail_ok=True,
)
- cmd_output = json.loads(self.openstack(
- 'aggregate create -f json ' +
+ cmd_output = self.openstack(
+ 'aggregate create ' +
'--zone nova ' +
'--property a=b ' +
- name1
- ))
+ name1,
+ parse_output=True,
+ )
self.assertEqual(
name1,
cmd_output['name']
@@ -45,8 +45,10 @@ class AggregateTests(base.TestCase):
'a',
cmd_output['properties']
)
- cmd_output = json.loads(self.openstack(
- 'aggregate show -f json ' + name1))
+ cmd_output = self.openstack(
+ 'aggregate show ' + name1,
+ parse_output=True,
+ )
self.assertEqual(name1, cmd_output['name'])
name2 = uuid.uuid4().hex
@@ -55,11 +57,12 @@ class AggregateTests(base.TestCase):
'aggregate delete ' + name2,
fail_ok=True,
)
- cmd_output = json.loads(self.openstack(
- 'aggregate create -f json ' +
+ cmd_output = self.openstack(
+ 'aggregate create ' +
'--zone external ' +
- name2
- ))
+ name2,
+ parse_output=True,
+ )
self.assertEqual(
name2,
cmd_output['name']
@@ -68,8 +71,10 @@ class AggregateTests(base.TestCase):
'external',
cmd_output['availability_zone']
)
- cmd_output = json.loads(self.openstack(
- 'aggregate show -f json ' + name2))
+ cmd_output = self.openstack(
+ 'aggregate show ' + name2,
+ parse_output=True,
+ )
self.assertEqual(name2, cmd_output['name'])
# Test aggregate set
@@ -89,10 +94,11 @@ class AggregateTests(base.TestCase):
)
self.assertOutput('', raw_output)
- cmd_output = json.loads(self.openstack(
- 'aggregate show -f json ' +
- name3
- ))
+ cmd_output = self.openstack(
+ 'aggregate show ' +
+ name3,
+ parse_output=True,
+ )
self.assertEqual(
name3,
cmd_output['name']
@@ -111,9 +117,10 @@ class AggregateTests(base.TestCase):
)
# Test aggregate list
- cmd_output = json.loads(self.openstack(
- 'aggregate list -f json'
- ))
+ cmd_output = self.openstack(
+ 'aggregate list',
+ parse_output=True,
+ )
names = [x['Name'] for x in cmd_output]
self.assertIn(name3, names)
self.assertIn(name2, names)
@@ -122,9 +129,10 @@ class AggregateTests(base.TestCase):
self.assertIn('internal', zones)
# Test aggregate list --long
- cmd_output = json.loads(self.openstack(
- 'aggregate list --long -f json'
- ))
+ cmd_output = self.openstack(
+ 'aggregate list --long',
+ parse_output=True,
+ )
names = [x['Name'] for x in cmd_output]
self.assertIn(name3, names)
self.assertIn(name2, names)
@@ -143,10 +151,11 @@ class AggregateTests(base.TestCase):
)
self.assertOutput('', raw_output)
- cmd_output = json.loads(self.openstack(
- 'aggregate show -f json ' +
- name3
- ))
+ cmd_output = self.openstack(
+ 'aggregate show ' +
+ name3,
+ parse_output=True,
+ )
self.assertNotIn(
"c='d'",
cmd_output['properties']
@@ -163,9 +172,10 @@ class AggregateTests(base.TestCase):
def test_aggregate_add_and_remove_host(self):
"""Test aggregate add and remove host"""
# Get a host
- cmd_output = json.loads(self.openstack(
- 'host list -f json'
- ))
+ cmd_output = self.openstack(
+ 'host list',
+ parse_output=True,
+ )
host_name = cmd_output[0]['Host Name']
# NOTE(dtroyer): Cells v1 is not operable with aggregates. Hostnames
@@ -181,22 +191,24 @@ class AggregateTests(base.TestCase):
)
# Test add host
- cmd_output = json.loads(self.openstack(
- 'aggregate add host -f json ' +
+ cmd_output = self.openstack(
+ 'aggregate add host ' +
name + ' ' +
- host_name
- ))
+ host_name,
+ parse_output=True,
+ )
self.assertIn(
host_name,
cmd_output['hosts']
)
# Test remove host
- cmd_output = json.loads(self.openstack(
- 'aggregate remove host -f json ' +
+ cmd_output = self.openstack(
+ 'aggregate remove host ' +
name + ' ' +
- host_name
- ))
+ host_name,
+ parse_output=True,
+ )
self.assertNotIn(
host_name,
cmd_output['hosts']
diff --git a/openstackclient/tests/functional/compute/v2/test_flavor.py b/openstackclient/tests/functional/compute/v2/test_flavor.py
index 162d4287..98bf1ca5 100644
--- a/openstackclient/tests/functional/compute/v2/test_flavor.py
+++ b/openstackclient/tests/functional/compute/v2/test_flavor.py
@@ -10,7 +10,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import json
import uuid
from openstackclient.tests.functional import base
@@ -25,9 +24,10 @@ class FlavorTests(base.TestCase):
def setUpClass(cls):
super(FlavorTests, cls).setUpClass()
# Make a project
- cmd_output = json.loads(cls.openstack(
- "project create -f json --enable " + cls.PROJECT_NAME
- ))
+ cmd_output = cls.openstack(
+ "project create --enable " + cls.PROJECT_NAME,
+ parse_output=True,
+ )
cls.project_id = cmd_output["id"]
@classmethod
@@ -41,22 +41,24 @@ class FlavorTests(base.TestCase):
def test_flavor_delete(self):
"""Test create w/project, delete multiple"""
name1 = uuid.uuid4().hex
- cmd_output = json.loads(self.openstack(
- "flavor create -f json " +
+ cmd_output = self.openstack(
+ "flavor create " +
"--project " + self.PROJECT_NAME + " " +
"--private " +
- name1
- ))
+ name1,
+ parse_output=True,
+ )
self.assertIsNotNone(cmd_output["id"])
name2 = uuid.uuid4().hex
- cmd_output = json.loads(self.openstack(
- "flavor create -f json " +
+ cmd_output = self.openstack(
+ "flavor create " +
"--id qaz " +
"--project " + self.PROJECT_NAME + " " +
"--private " +
- name2
- ))
+ name2,
+ parse_output=True,
+ )
self.assertIsNotNone(cmd_output["id"])
self.assertEqual(
"qaz",
@@ -71,12 +73,13 @@ class FlavorTests(base.TestCase):
def test_flavor_list(self):
"""Test create defaults, list filters, delete"""
name1 = uuid.uuid4().hex
- cmd_output = json.loads(self.openstack(
- "flavor create -f json " +
+ cmd_output = self.openstack(
+ "flavor create " +
"--property a=b " +
"--property c=d " +
- name1
- ))
+ name1,
+ parse_output=True,
+ )
self.addCleanup(self.openstack, "flavor delete " + name1)
self.assertIsNotNone(cmd_output["id"])
self.assertEqual(
@@ -85,15 +88,16 @@ class FlavorTests(base.TestCase):
)
name2 = uuid.uuid4().hex
- cmd_output = json.loads(self.openstack(
- "flavor create -f json " +
+ cmd_output = self.openstack(
+ "flavor create " +
"--id qaz " +
"--ram 123 " +
"--private " +
"--property a=b2 " +
"--property b=d2 " +
- name2
- ))
+ name2,
+ parse_output=True,
+ )
self.addCleanup(self.openstack, "flavor delete " + name2)
self.assertIsNotNone(cmd_output["id"])
self.assertEqual(
@@ -121,18 +125,20 @@ class FlavorTests(base.TestCase):
)
# Test list
- cmd_output = json.loads(self.openstack(
- "flavor list -f json"
- ))
+ cmd_output = self.openstack(
+ "flavor list",
+ parse_output=True,
+ )
col_name = [x["Name"] for x in cmd_output]
self.assertIn(name1, col_name)
self.assertNotIn(name2, col_name)
# Test list --long
- cmd_output = json.loads(self.openstack(
- "flavor list -f json " +
- "--long"
- ))
+ cmd_output = self.openstack(
+ "flavor list " +
+ "--long",
+ parse_output=True,
+ )
# We have list of complex json objects
# Iterate through the list setting flags
found_expected = False
@@ -147,28 +153,31 @@ class FlavorTests(base.TestCase):
self.assertTrue(found_expected)
# Test list --public
- cmd_output = json.loads(self.openstack(
- "flavor list -f json " +
- "--public"
- ))
+ cmd_output = self.openstack(
+ "flavor list " +
+ "--public",
+ parse_output=True,
+ )
col_name = [x["Name"] for x in cmd_output]
self.assertIn(name1, col_name)
self.assertNotIn(name2, col_name)
# Test list --private
- cmd_output = json.loads(self.openstack(
- "flavor list -f json " +
- "--private"
- ))
+ cmd_output = self.openstack(
+ "flavor list " +
+ "--private",
+ parse_output=True,
+ )
col_name = [x["Name"] for x in cmd_output]
self.assertNotIn(name1, col_name)
self.assertIn(name2, col_name)
# Test list --all
- cmd_output = json.loads(self.openstack(
- "flavor list -f json " +
- "--all"
- ))
+ cmd_output = self.openstack(
+ "flavor list " +
+ "--all",
+ parse_output=True,
+ )
col_name = [x["Name"] for x in cmd_output]
self.assertIn(name1, col_name)
self.assertIn(name2, col_name)
@@ -176,16 +185,17 @@ class FlavorTests(base.TestCase):
def test_flavor_properties(self):
"""Test create defaults, list filters, delete"""
name1 = uuid.uuid4().hex
- cmd_output = json.loads(self.openstack(
- "flavor create -f json " +
+ cmd_output = self.openstack(
+ "flavor create " +
"--id qaz " +
"--ram 123 " +
"--disk 20 " +
"--private " +
"--property a=first " +
"--property b=second " +
- name1
- ))
+ name1,
+ parse_output=True,
+ )
self.addCleanup(self.openstack, "flavor delete " + name1)
self.assertIsNotNone(cmd_output["id"])
self.assertEqual(
@@ -220,10 +230,11 @@ class FlavorTests(base.TestCase):
)
self.assertEqual('', raw_output)
- cmd_output = json.loads(self.openstack(
- "flavor show -f json " +
- name1
- ))
+ cmd_output = self.openstack(
+ "flavor show " +
+ name1,
+ parse_output=True,
+ )
self.assertEqual(
"qaz",
cmd_output["id"],
@@ -245,9 +256,10 @@ class FlavorTests(base.TestCase):
)
self.assertEqual('', raw_output)
- cmd_output = json.loads(self.openstack(
- "flavor show -f json " +
- name1
- ))
+ cmd_output = self.openstack(
+ "flavor show " +
+ name1,
+ parse_output=True,
+ )
self.assertNotIn('b', cmd_output['properties'])
diff --git a/openstackclient/tests/functional/compute/v2/test_hypervisor.py b/openstackclient/tests/functional/compute/v2/test_hypervisor.py
new file mode 100644
index 00000000..9bc23280
--- /dev/null
+++ b/openstackclient/tests/functional/compute/v2/test_hypervisor.py
@@ -0,0 +1,52 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import json
+
+from openstackclient.tests.functional import base
+
+
+class HypervisorTests(base.TestCase):
+ """Functional tests for hypervisor."""
+
+ def test_hypervisor_list(self):
+ """Test create defaults, list filters, delete"""
+ # Test list
+ cmd_output = json.loads(self.openstack(
+ "hypervisor list -f json --os-compute-api-version 2.1"
+ ))
+ ids1 = [x["ID"] for x in cmd_output]
+ self.assertIsNotNone(cmd_output)
+
+ cmd_output = json.loads(self.openstack(
+ "hypervisor list -f json"
+ ))
+ ids2 = [x["ID"] for x in cmd_output]
+ self.assertIsNotNone(cmd_output)
+
+ # Show test - old microversion
+ for i in ids1:
+ cmd_output = json.loads(self.openstack(
+ "hypervisor show %s -f json "
+ " --os-compute-api-version 2.1"
+ % (i)
+ ))
+ self.assertIsNotNone(cmd_output)
+ # When we list hypervisors with an older microversion we get IDs as
+ # integers; verify that show can find resources regardless of ID format.
+ # Show test - latest microversion
+ for i in ids2:
+ cmd_output = json.loads(self.openstack(
+ "hypervisor show %s -f json"
+ % (i)
+ ))
+ self.assertIsNotNone(cmd_output)
diff --git a/openstackclient/tests/functional/compute/v2/test_keypair.py b/openstackclient/tests/functional/compute/v2/test_keypair.py
index 42f334a4..828d5dad 100644
--- a/openstackclient/tests/functional/compute/v2/test_keypair.py
+++ b/openstackclient/tests/functional/compute/v2/test_keypair.py
@@ -10,7 +10,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import json
import tempfile
from tempest.lib.common.utils import data_utils
@@ -109,9 +108,10 @@ class KeypairTests(KeypairBase):
2) Delete keypair
"""
with tempfile.NamedTemporaryFile(mode='w+') as f:
- cmd_output = json.loads(self.openstack(
- 'keypair create -f json --private-key %s tmpkey' % f.name,
- ))
+ cmd_output = self.openstack(
+ 'keypair create --private-key %s tmpkey' % f.name,
+ parse_output=True,
+ )
self.addCleanup(self.openstack, 'keypair delete tmpkey')
self.assertEqual('tmpkey', cmd_output.get('name'))
self.assertIsNotNone(cmd_output.get('user_id'))
diff --git a/openstackclient/tests/functional/compute/v2/test_server.py b/openstackclient/tests/functional/compute/v2/test_server.py
index cf4bcbc2..05945a02 100644
--- a/openstackclient/tests/functional/compute/v2/test_server.py
+++ b/openstackclient/tests/functional/compute/v2/test_server.py
@@ -39,9 +39,10 @@ class ServerTests(common.ComputeTestCase):
self.wait_for_status(name1, "ACTIVE")
self.wait_for_status(name2, "ACTIVE")
- cmd_output = json.loads(self.openstack(
- 'server list -f json'
- ))
+ cmd_output = self.openstack(
+ 'server list',
+ parse_output=True,
+ )
col_name = [x["Name"] for x in cmd_output]
self.assertIn(name1, col_name)
self.assertIn(name2, col_name)
@@ -50,17 +51,19 @@ class ServerTests(common.ComputeTestCase):
raw_output = self.openstack('server pause ' + name2)
self.assertEqual("", raw_output)
self.wait_for_status(name2, "PAUSED")
- cmd_output = json.loads(self.openstack(
- 'server list -f json ' +
- '--status ACTIVE'
- ))
+ cmd_output = self.openstack(
+ 'server list ' +
+ '--status ACTIVE',
+ parse_output=True,
+ )
col_name = [x["Name"] for x in cmd_output]
self.assertIn(name1, col_name)
self.assertNotIn(name2, col_name)
- cmd_output = json.loads(self.openstack(
- 'server list -f json ' +
- '--status PAUSED'
- ))
+ cmd_output = self.openstack(
+ 'server list ' +
+ '--status PAUSED',
+ parse_output=True,
+ )
col_name = [x["Name"] for x in cmd_output]
self.assertNotIn(name1, col_name)
self.assertIn(name2, col_name)
@@ -76,16 +79,18 @@ class ServerTests(common.ComputeTestCase):
self.wait_for_status(name2, "ACTIVE")
# Test list --marker with ID
- cmd_output = json.loads(self.openstack(
- 'server list -f json --marker ' + id2
- ))
+ cmd_output = self.openstack(
+ 'server list --marker ' + id2,
+ parse_output=True,
+ )
col_name = [x["Name"] for x in cmd_output]
self.assertIn(name1, col_name)
# Test list --marker with Name
- cmd_output = json.loads(self.openstack(
- 'server list -f json --marker ' + name2
- ))
+ cmd_output = self.openstack(
+ 'server list --marker ' + name2,
+ parse_output=True,
+ )
col_name = [x["Name"] for x in cmd_output]
self.assertIn(name1, col_name)
@@ -93,19 +98,21 @@ class ServerTests(common.ComputeTestCase):
self.openstack('server delete --wait ' + name2)
# Test list --deleted --marker with ID
- cmd_output = json.loads(self.openstack(
- 'server list -f json --deleted --marker ' + id2
- ))
+ cmd_output = self.openstack(
+ 'server list --deleted --marker ' + id2,
+ parse_output=True,
+ )
col_name = [x["Name"] for x in cmd_output]
self.assertIn(name1, col_name)
# Test list --deleted --marker with Name
try:
- cmd_output = json.loads(self.openstack(
- 'server list -f json --deleted --marker ' + name2
- ))
+ cmd_output = self.openstack(
+ 'server list --deleted --marker ' + name2,
+ parse_output=True,
+ )
except exceptions.CommandFailed as e:
- self.assertIn('marker [%s] not found (HTTP 400)' % (name2),
+ self.assertIn('marker [%s] not found' % (name2),
e.stderr.decode('utf-8'))
def test_server_list_with_changes_before(self):
@@ -124,11 +131,12 @@ class ServerTests(common.ComputeTestCase):
cmd_output = self.server_create()
server_name3 = cmd_output['name']
- cmd_output = json.loads(self.openstack(
+ cmd_output = self.openstack(
'--os-compute-api-version 2.66 ' +
- 'server list -f json '
- '--changes-before ' + updated_at2
- ))
+ 'server list '
+ '--changes-before ' + updated_at2,
+ parse_output=True,
+ )
col_updated = [server["Name"] for server in cmd_output]
self.assertIn(server_name1, col_updated)
@@ -149,10 +157,11 @@ class ServerTests(common.ComputeTestCase):
cmd_output = self.server_create()
server_name3 = cmd_output['name']
- cmd_output = json.loads(self.openstack(
- 'server list -f json '
- '--changes-since ' + updated_at2
- ))
+ cmd_output = self.openstack(
+ 'server list '
+ '--changes-since ' + updated_at2,
+ parse_output=True,
+ )
col_updated = [server["Name"] for server in cmd_output]
self.assertNotIn(server_name1, col_updated)
@@ -174,12 +183,13 @@ class ServerTests(common.ComputeTestCase):
server_name3 = cmd_output['name']
updated_at3 = cmd_output['updated']
- cmd_output = json.loads(self.openstack(
+ cmd_output = self.openstack(
'--os-compute-api-version 2.66 ' +
- 'server list -f json ' +
+ 'server list ' +
'--changes-since ' + updated_at2 +
- ' --changes-before ' + updated_at3
- ))
+ ' --changes-before ' + updated_at3,
+ parse_output=True,
+ )
col_updated = [server["Name"] for server in cmd_output]
self.assertNotIn(server_name1, col_updated)
@@ -193,10 +203,11 @@ class ServerTests(common.ComputeTestCase):
# self.wait_for_status(name, "ACTIVE")
# Have a look at some other fields
- flavor = json.loads(self.openstack(
- 'flavor show -f json ' +
- self.flavor_name
- ))
+ flavor = self.openstack(
+ 'flavor show ' +
+ self.flavor_name,
+ parse_output=True,
+ )
self.assertEqual(
self.flavor_name,
flavor['name'],
@@ -205,10 +216,11 @@ class ServerTests(common.ComputeTestCase):
'%s (%s)' % (flavor['name'], flavor['id']),
cmd_output["flavor"],
)
- image = json.loads(self.openstack(
- 'image show -f json ' +
- self.image_name
- ))
+ image = self.openstack(
+ 'image show ' +
+ self.image_name,
+ parse_output=True,
+ )
self.assertEqual(
self.image_name,
image['name'],
@@ -226,10 +238,11 @@ class ServerTests(common.ComputeTestCase):
)
self.assertOutput('', raw_output)
- cmd_output = json.loads(self.openstack(
- 'server show -f json ' +
- name
- ))
+ cmd_output = self.openstack(
+ 'server show ' +
+ name,
+ parse_output=True,
+ )
# Really, shouldn't this be a list?
self.assertEqual(
{'a': 'b', 'c': 'd'},
@@ -241,10 +254,11 @@ class ServerTests(common.ComputeTestCase):
'--property a ' +
name
)
- cmd_output = json.loads(self.openstack(
- 'server show -f json ' +
- name
- ))
+ cmd_output = self.openstack(
+ 'server show ' +
+ name,
+ parse_output=True,
+ )
self.assertEqual(
{'c': 'd'},
cmd_output['properties'],
@@ -258,10 +272,11 @@ class ServerTests(common.ComputeTestCase):
name
)
self.assertOutput("", raw_output)
- cmd_output = json.loads(self.openstack(
- 'server show -f json ' +
- new_name
- ))
+ cmd_output = self.openstack(
+ 'server show ' +
+ new_name,
+ parse_output=True,
+ )
self.assertEqual(
new_name,
cmd_output["name"],
@@ -274,6 +289,33 @@ class ServerTests(common.ComputeTestCase):
)
self.assertOutput("", raw_output)
+ def test_server_show(self):
+ """Test server show"""
+ cmd_output = self.server_create()
+ name = cmd_output['name']
+
+ # Simple show
+ cmd_output = json.loads(self.openstack(
+ f'server show -f json {name}'
+ ))
+ self.assertEqual(
+ name,
+ cmd_output["name"],
+ )
+
+ # Show diagnostics
+ cmd_output = json.loads(self.openstack(
+ f'server show -f json {name} --diagnostics'
+ ))
+ self.assertIn('driver', cmd_output)
+
+ # Show topology
+ cmd_output = json.loads(self.openstack(
+ f'server show -f json {name} --topology '
+ f'--os-compute-api-version 2.78'
+ ))
+ self.assertIn('topology', cmd_output)
+
def test_server_actions(self):
"""Test server action pairs
@@ -360,10 +402,11 @@ class ServerTests(common.ComputeTestCase):
self.wait_for_status(name, "ACTIVE")
# attach ip
- cmd_output = json.loads(self.openstack(
- 'floating ip create -f json ' +
- 'public'
- ))
+ cmd_output = self.openstack(
+ 'floating ip create ' +
+ 'public',
+ parse_output=True,
+ )
# Look for Neutron value first, then nova-net
floating_ip = cmd_output.get(
@@ -392,10 +435,11 @@ class ServerTests(common.ComputeTestCase):
# racy we shouldn't have to wait too long, a minute seems reasonable
wait_time = 0
while wait_time < 60:
- cmd_output = json.loads(self.openstack(
- 'server show -f json ' +
- name
- ))
+ cmd_output = self.openstack(
+ 'server show ' +
+ name,
+ parse_output=True,
+ )
if floating_ip not in _chain_addresses(cmd_output['addresses']):
# Hang out for a bit and try again
print('retrying floating IP check')
@@ -422,10 +466,11 @@ class ServerTests(common.ComputeTestCase):
# racy we shouldn't have to wait too long, a minute seems reasonable
wait_time = 0
while wait_time < 60:
- cmd_output = json.loads(self.openstack(
- 'server show -f json ' +
- name
- ))
+ cmd_output = self.openstack(
+ 'server show ' +
+ name,
+ parse_output=True,
+ )
if floating_ip in _chain_addresses(cmd_output['addresses']):
# Hang out for a bit and try again
print('retrying floating IP check')
@@ -434,10 +479,11 @@ class ServerTests(common.ComputeTestCase):
else:
break
- cmd_output = json.loads(self.openstack(
- 'server show -f json ' +
- name
- ))
+ cmd_output = self.openstack(
+ 'server show ' +
+ name,
+ parse_output=True,
+ )
self.assertNotIn(
floating_ip,
_chain_addresses(cmd_output['addresses']),
@@ -459,10 +505,11 @@ class ServerTests(common.ComputeTestCase):
volume_wait_for = volume_common.BaseVolumeTests.wait_for_status
# get image size
- cmd_output = json.loads(self.openstack(
- 'image show -f json ' +
- self.image_name
- ))
+ cmd_output = self.openstack(
+ 'image show ' +
+ self.image_name,
+ parse_output=True,
+ )
try:
image_size = cmd_output['min_disk']
if image_size < 1:
@@ -472,12 +519,13 @@ class ServerTests(common.ComputeTestCase):
# create volume from image
volume_name = uuid.uuid4().hex
- cmd_output = json.loads(self.openstack(
- 'volume create -f json ' +
+ cmd_output = self.openstack(
+ 'volume create ' +
'--image ' + self.image_name + ' ' +
'--size ' + str(image_size) + ' ' +
- volume_name
- ))
+ volume_name,
+ parse_output=True,
+ )
self.assertIsNotNone(cmd_output["id"])
self.addCleanup(self.openstack, 'volume delete ' + volume_name)
self.assertEqual(
@@ -488,11 +536,12 @@ class ServerTests(common.ComputeTestCase):
# create empty volume
empty_volume_name = uuid.uuid4().hex
- cmd_output = json.loads(self.openstack(
- 'volume create -f json ' +
+ cmd_output = self.openstack(
+ 'volume create ' +
'--size ' + str(image_size) + ' ' +
- empty_volume_name
- ))
+ empty_volume_name,
+ parse_output=True,
+ )
self.assertIsNotNone(cmd_output["id"])
self.addCleanup(self.openstack, 'volume delete ' + empty_volume_name)
self.assertEqual(
@@ -503,15 +552,16 @@ class ServerTests(common.ComputeTestCase):
# create server
server_name = uuid.uuid4().hex
- server = json.loads(self.openstack(
- 'server create -f json ' +
+ server = self.openstack(
+ 'server create ' +
'--flavor ' + self.flavor_name + ' ' +
'--volume ' + volume_name + ' ' +
'--block-device-mapping vdb=' + empty_volume_name + ' ' +
self.network_arg + ' ' +
'--wait ' +
- server_name
- ))
+ server_name,
+ parse_output=True,
+ )
self.assertIsNotNone(server["id"])
self.addCleanup(self.openstack, 'server delete --wait ' + server_name)
self.assertEqual(
@@ -525,19 +575,21 @@ class ServerTests(common.ComputeTestCase):
server['image'],
)
# check server list too
- servers = json.loads(self.openstack(
- 'server list -f json'
- ))
+ servers = self.openstack(
+ 'server list',
+ parse_output=True,
+ )
self.assertEqual(
v2_server.IMAGE_STRING_FOR_BFV,
servers[0]['Image']
)
# check volumes
- cmd_output = json.loads(self.openstack(
- 'volume show -f json ' +
- volume_name
- ))
+ cmd_output = self.openstack(
+ 'volume show ' +
+ volume_name,
+ parse_output=True,
+ )
attachments = cmd_output['attachments']
self.assertEqual(
1,
@@ -556,10 +608,11 @@ class ServerTests(common.ComputeTestCase):
# --block-device-mapping was ignored if --volume
# present on the command line. Now we should see the
# attachment.
- cmd_output = json.loads(self.openstack(
- 'volume show -f json ' +
- empty_volume_name
- ))
+ cmd_output = self.openstack(
+ 'volume show ' +
+ empty_volume_name,
+ parse_output=True,
+ )
attachments = cmd_output['attachments']
self.assertEqual(
1,
@@ -581,11 +634,12 @@ class ServerTests(common.ComputeTestCase):
# create source empty volume
volume_name = uuid.uuid4().hex
- cmd_output = json.loads(self.openstack(
- 'volume create -f json ' +
+ cmd_output = self.openstack(
+ 'volume create ' +
'--size 1 ' +
- volume_name
- ))
+ volume_name,
+ parse_output=True,
+ )
volume_id = cmd_output["id"]
self.assertIsNotNone(volume_id)
self.addCleanup(self.openstack, 'volume delete ' + volume_name)
@@ -603,15 +657,16 @@ class ServerTests(common.ComputeTestCase):
# create server
server_name = uuid.uuid4().hex
- server = json.loads(self.openstack(
- 'server create -f json ' +
+ server = self.openstack(
+ 'server create ' +
'--flavor ' + self.flavor_name + ' ' +
'--image ' + self.image_name + ' ' +
bdm_arg + ' ' +
self.network_arg + ' ' +
'--wait ' +
- server_name
- ))
+ server_name,
+ parse_output=True,
+ )
self.assertIsNotNone(server["id"])
self.addCleanup(self.openstack, 'server delete --wait ' + server_name)
self.assertEqual(
@@ -621,18 +676,20 @@ class ServerTests(common.ComputeTestCase):
# check server volumes_attached, format is
# {"volumes_attached": "id='2518bc76-bf0b-476e-ad6b-571973745bb5'",}
- cmd_output = json.loads(self.openstack(
- 'server show -f json ' +
- server_name
- ))
+ cmd_output = self.openstack(
+ 'server show ' +
+ server_name,
+ parse_output=True,
+ )
volumes_attached = cmd_output['volumes_attached']
self.assertIsNotNone(volumes_attached)
# check volumes
- cmd_output = json.loads(self.openstack(
- 'volume show -f json ' +
- volume_name
- ))
+ cmd_output = self.openstack(
+ 'volume show ' +
+ volume_name,
+ parse_output=True,
+ )
attachments = cmd_output['attachments']
self.assertEqual(
1,
@@ -665,11 +722,12 @@ class ServerTests(common.ComputeTestCase):
# create source empty volume
empty_volume_name = uuid.uuid4().hex
- cmd_output = json.loads(self.openstack(
- 'volume create -f json ' +
+ cmd_output = self.openstack(
+ 'volume create ' +
'--size 1 ' +
- empty_volume_name
- ))
+ empty_volume_name,
+ parse_output=True,
+ )
self.assertIsNotNone(cmd_output["id"])
self.addCleanup(self.openstack, 'volume delete ' + empty_volume_name)
self.assertEqual(empty_volume_name, cmd_output['name'])
@@ -677,11 +735,12 @@ class ServerTests(common.ComputeTestCase):
# create snapshot of source empty volume
empty_snapshot_name = uuid.uuid4().hex
- cmd_output = json.loads(self.openstack(
- 'volume snapshot create -f json ' +
+ cmd_output = self.openstack(
+ 'volume snapshot create ' +
'--volume ' + empty_volume_name + ' ' +
- empty_snapshot_name
- ))
+ empty_snapshot_name,
+ parse_output=True,
+ )
empty_snapshot_id = cmd_output["id"]
self.assertIsNotNone(empty_snapshot_id)
# Deleting volume snapshot take time, so we need to wait until the
@@ -712,15 +771,16 @@ class ServerTests(common.ComputeTestCase):
# create server with bdm snapshot
server_name = uuid.uuid4().hex
- server = json.loads(self.openstack(
- 'server create -f json ' +
+ server = self.openstack(
+ 'server create ' +
'--flavor ' + self.flavor_name + ' ' +
'--image ' + self.image_name + ' ' +
bdm_arg + ' ' +
self.network_arg + ' ' +
'--wait ' +
- server_name
- ))
+ server_name,
+ parse_output=True,
+ )
self.assertIsNotNone(server["id"])
self.assertEqual(
server_name,
@@ -730,19 +790,21 @@ class ServerTests(common.ComputeTestCase):
# check server volumes_attached, format is
# {"volumes_attached": "id='2518bc76-bf0b-476e-ad6b-571973745bb5'",}
- cmd_output = json.loads(self.openstack(
- 'server show -f json ' +
- server_name
- ))
+ cmd_output = self.openstack(
+ 'server show ' +
+ server_name,
+ parse_output=True,
+ )
volumes_attached = cmd_output['volumes_attached']
self.assertIsNotNone(volumes_attached)
attached_volume_id = volumes_attached[0]["id"]
# check the volume that attached on server
- cmd_output = json.loads(self.openstack(
- 'volume show -f json ' +
- attached_volume_id
- ))
+ cmd_output = self.openstack(
+ 'volume show ' +
+ attached_volume_id,
+ parse_output=True,
+ )
attachments = cmd_output['attachments']
self.assertEqual(
1,
@@ -760,9 +822,10 @@ class ServerTests(common.ComputeTestCase):
# delete server, then check the attached volume had been deleted,
# <delete-on-terminate>=true
self.openstack('server delete --wait ' + server_name)
- cmd_output = json.loads(self.openstack(
- 'volume list -f json'
- ))
+ cmd_output = self.openstack(
+ 'volume list',
+ parse_output=True,
+ )
target_volume = [each_volume
for each_volume in cmd_output
if each_volume['ID'] == attached_volume_id]
@@ -801,10 +864,11 @@ class ServerTests(common.ComputeTestCase):
)
else:
# get image ID
- cmd_output = json.loads(self.openstack(
- 'image show -f json ' +
- self.image_name
- ))
+ cmd_output = self.openstack(
+ 'image show ' +
+ self.image_name,
+ parse_output=True,
+ )
image_id = cmd_output['id']
# This means create a 1GB volume from the specified image, attach
@@ -824,15 +888,16 @@ class ServerTests(common.ComputeTestCase):
# as expected where nova creates a volume from the image and attaches
# that volume to the server.
server_name = uuid.uuid4().hex
- server = json.loads(self.openstack(
- 'server create -f json ' +
+ server = self.openstack(
+ 'server create ' +
'--flavor ' + self.flavor_name + ' ' +
'--image ' + self.image_name + ' ' +
bdm_arg + ' ' +
self.network_arg + ' ' +
'--wait ' +
- server_name
- ))
+ server_name,
+ parse_output=True,
+ )
self.assertIsNotNone(server["id"])
self.assertEqual(
server_name,
@@ -842,19 +907,21 @@ class ServerTests(common.ComputeTestCase):
# check server volumes_attached, format is
# {"volumes_attached": "id='2518bc76-bf0b-476e-ad6b-571973745bb5'",}
- cmd_output = json.loads(self.openstack(
- 'server show -f json ' +
- server_name
- ))
+ cmd_output = self.openstack(
+ 'server show ' +
+ server_name,
+ parse_output=True,
+ )
volumes_attached = cmd_output['volumes_attached']
self.assertIsNotNone(volumes_attached)
attached_volume_id = volumes_attached[0]["id"]
# check the volume that attached on server
- cmd_output = json.loads(self.openstack(
- 'volume show -f json ' +
- attached_volume_id
- ))
+ cmd_output = self.openstack(
+ 'volume show ' +
+ attached_volume_id,
+ parse_output=True,
+ )
attachments = cmd_output['attachments']
self.assertEqual(
1,
@@ -879,9 +946,10 @@ class ServerTests(common.ComputeTestCase):
# delete server, then check the attached volume has been deleted
self.openstack('server delete --wait ' + server_name)
- cmd_output = json.loads(self.openstack(
- 'volume list -f json'
- ))
+ cmd_output = self.openstack(
+ 'volume list',
+ parse_output=True,
+ )
target_volume = [each_volume
for each_volume in cmd_output
if each_volume['ID'] == attached_volume_id]
@@ -906,15 +974,16 @@ class ServerTests(common.ComputeTestCase):
# using the provided image, attach it as the root disk for the server
# and not delete the volume when the server is deleted.
server_name = uuid.uuid4().hex
- server = json.loads(self.openstack(
- 'server create -f json ' +
+ server = self.openstack(
+ 'server create ' +
'--flavor ' + self.flavor_name + ' ' +
'--image ' + self.image_name + ' ' +
'--boot-from-volume 1 ' + # create a 1GB volume from the image
self.network_arg + ' ' +
'--wait ' +
- server_name
- ))
+ server_name,
+ parse_output=True,
+ )
self.assertIsNotNone(server["id"])
self.assertEqual(
server_name,
@@ -924,10 +993,11 @@ class ServerTests(common.ComputeTestCase):
# check server volumes_attached, format is
# {"volumes_attached": "id='2518bc76-bf0b-476e-ad6b-571973745bb5'",}
- cmd_output = json.loads(self.openstack(
- 'server show -f json ' +
- server_name
- ))
+ cmd_output = self.openstack(
+ 'server show ' +
+ server_name,
+ parse_output=True,
+ )
volumes_attached = cmd_output['volumes_attached']
self.assertIsNotNone(volumes_attached)
attached_volume_id = volumes_attached[0]["id"]
@@ -941,10 +1011,11 @@ class ServerTests(common.ComputeTestCase):
self.assertEqual(v2_server.IMAGE_STRING_FOR_BFV, cmd_output['image'])
# check the volume that attached on server
- cmd_output = json.loads(self.openstack(
- 'volume show -f json ' +
- volumes_attached[0]["id"]
- ))
+ cmd_output = self.openstack(
+ 'volume show ' +
+ volumes_attached[0]["id"],
+ parse_output=True,
+ )
# The volume size should be what we specified on the command line.
self.assertEqual(1, int(cmd_output['size']))
attachments = cmd_output['attachments']
@@ -971,32 +1042,35 @@ class ServerTests(common.ComputeTestCase):
# delete server, then check the attached volume was not deleted
self.openstack('server delete --wait ' + server_name)
- cmd_output = json.loads(self.openstack(
- 'volume show -f json ' +
- attached_volume_id
- ))
+ cmd_output = self.openstack(
+ 'volume show ' +
+ attached_volume_id,
+ parse_output=True,
+ )
# check the volume is in 'available' status
self.assertEqual('available', cmd_output['status'])
def test_server_create_with_none_network(self):
"""Test server create with none network option."""
server_name = uuid.uuid4().hex
- server = json.loads(self.openstack(
+ server = self.openstack(
# auto/none enable in nova micro version (v2.37+)
'--os-compute-api-version 2.37 ' +
- 'server create -f json ' +
+ 'server create ' +
'--flavor ' + self.flavor_name + ' ' +
'--image ' + self.image_name + ' ' +
'--nic none ' +
- server_name
- ))
+ server_name,
+ parse_output=True,
+ )
self.assertIsNotNone(server["id"])
self.addCleanup(self.openstack, 'server delete --wait ' + server_name)
self.assertEqual(server_name, server['name'])
self.wait_for_status(server_name, "ACTIVE")
- server = json.loads(self.openstack(
- 'server show -f json ' + server_name
- ))
+ server = self.openstack(
+ 'server show ' + server_name,
+ parse_output=True,
+ )
self.assertEqual({}, server['addresses'])
def test_server_create_with_security_group(self):
@@ -1011,27 +1085,30 @@ class ServerTests(common.ComputeTestCase):
self.skipTest("No Network service present")
# Create two security group, use name and ID to create server
sg_name1 = uuid.uuid4().hex
- security_group1 = json.loads(self.openstack(
- 'security group create -f json ' + sg_name1
- ))
+ security_group1 = self.openstack(
+ 'security group create ' + sg_name1,
+ parse_output=True,
+ )
self.addCleanup(self.openstack, 'security group delete ' + sg_name1)
sg_name2 = uuid.uuid4().hex
- security_group2 = json.loads(self.openstack(
- 'security group create -f json ' + sg_name2
- ))
+ security_group2 = self.openstack(
+ 'security group create ' + sg_name2,
+ parse_output=True,
+ )
self.addCleanup(self.openstack, 'security group delete ' + sg_name2)
server_name = uuid.uuid4().hex
- server = json.loads(self.openstack(
- 'server create -f json ' +
+ server = self.openstack(
+ 'server create ' +
'--flavor ' + self.flavor_name + ' ' +
'--image ' + self.image_name + ' ' +
# Security group id is integer in nova-network, convert to string
'--security-group ' + str(security_group1['id']) + ' ' +
'--security-group ' + security_group2['name'] + ' ' +
self.network_arg + ' ' +
- server_name
- ))
+ server_name,
+ parse_output=True,
+ )
self.addCleanup(self.openstack, 'server delete --wait ' + server_name)
self.assertIsNotNone(server['id'])
@@ -1042,9 +1119,10 @@ class ServerTests(common.ComputeTestCase):
self.assertIn(str(security_group1['id']), sec_grp)
self.assertIn(str(security_group2['id']), sec_grp)
self.wait_for_status(server_name, 'ACTIVE')
- server = json.loads(self.openstack(
- 'server show -f json ' + server_name
- ))
+ server = self.openstack(
+ 'server show ' + server_name,
+ parse_output=True,
+ )
# check if security group exists in list
sec_grp = ""
for sec in server['security_groups']:
@@ -1059,7 +1137,7 @@ class ServerTests(common.ComputeTestCase):
self.openstack(
# auto/none enable in nova micro version (v2.37+)
'--os-compute-api-version 2.37 ' +
- 'server create -f json ' +
+ 'server create ' +
'--flavor ' + self.flavor_name + ' ' +
'--image ' + self.image_name + ' ' +
server_name
@@ -1074,14 +1152,15 @@ class ServerTests(common.ComputeTestCase):
def test_server_add_remove_network(self):
name = uuid.uuid4().hex
- cmd_output = json.loads(self.openstack(
- 'server create -f json ' +
+ cmd_output = self.openstack(
+ 'server create ' +
'--network private ' +
'--flavor ' + self.flavor_name + ' ' +
'--image ' + self.image_name + ' ' +
'--wait ' +
- name
- ))
+ name,
+ parse_output=True,
+ )
self.assertIsNotNone(cmd_output['id'])
self.assertEqual(name, cmd_output['name'])
@@ -1093,9 +1172,10 @@ class ServerTests(common.ComputeTestCase):
wait_time = 0
while wait_time < 60:
- cmd_output = json.loads(self.openstack(
- 'server show -f json ' + name
- ))
+ cmd_output = self.openstack(
+ 'server show ' + name,
+ parse_output=True,
+ )
if 'public' not in cmd_output['addresses']:
# Hang out for a bit and try again
print('retrying add network check')
@@ -1111,9 +1191,10 @@ class ServerTests(common.ComputeTestCase):
wait_time = 0
while wait_time < 60:
- cmd_output = json.loads(self.openstack(
- 'server show -f json ' + name
- ))
+ cmd_output = self.openstack(
+ 'server show ' + name,
+ parse_output=True,
+ )
if 'public' in cmd_output['addresses']:
# Hang out for a bit and try again
print('retrying remove network check')
@@ -1127,14 +1208,15 @@ class ServerTests(common.ComputeTestCase):
def test_server_add_remove_port(self):
name = uuid.uuid4().hex
- cmd_output = json.loads(self.openstack(
- 'server create -f json ' +
+ cmd_output = self.openstack(
+ 'server create ' +
'--network private ' +
'--flavor ' + self.flavor_name + ' ' +
'--image ' + self.image_name + ' ' +
'--wait ' +
- name
- ))
+ name,
+ parse_output=True,
+ )
self.assertIsNotNone(cmd_output['id'])
self.assertEqual(name, cmd_output['name'])
@@ -1143,15 +1225,17 @@ class ServerTests(common.ComputeTestCase):
# create port, record one of its ip address
port_name = uuid.uuid4().hex
- cmd_output = json.loads(self.openstack(
- 'port list -f json'
- ))
+ cmd_output = self.openstack(
+ 'port list',
+ parse_output=True,
+ )
self.assertNotIn(port_name, cmd_output)
- cmd_output = json.loads(self.openstack(
- 'port create -f json ' +
- '--network private ' + port_name
- ))
+ cmd_output = self.openstack(
+ 'port create ' +
+ '--network private ' + port_name,
+ parse_output=True,
+ )
self.assertIsNotNone(cmd_output['id'])
ip_address = cmd_output['fixed_ips'][0]['ip_address']
self.addCleanup(self.openstack, 'port delete ' + port_name)
@@ -1161,9 +1245,10 @@ class ServerTests(common.ComputeTestCase):
wait_time = 0
while wait_time < 60:
- cmd_output = json.loads(self.openstack(
- 'server show -f json ' + name
- ))
+ cmd_output = self.openstack(
+ 'server show ' + name,
+ parse_output=True,
+ )
if ip_address not in cmd_output['addresses']['private']:
# Hang out for a bit and try again
print('retrying add port check')
@@ -1179,9 +1264,10 @@ class ServerTests(common.ComputeTestCase):
wait_time = 0
while wait_time < 60:
- cmd_output = json.loads(self.openstack(
- 'server show -f json ' + name
- ))
+ cmd_output = self.openstack(
+ 'server show ' + name,
+ parse_output=True,
+ )
if ip_address in cmd_output['addresses']['private']:
# Hang out for a bit and try again
print('retrying add port check')
@@ -1192,30 +1278,88 @@ class ServerTests(common.ComputeTestCase):
addresses = cmd_output['addresses']['private']
self.assertNotIn(ip_address, addresses)
- def test_server_add_remove_volume(self):
- volume_wait_for = volume_common.BaseVolumeTests.wait_for_status
-
+ def test_server_add_fixed_ip(self):
name = uuid.uuid4().hex
- cmd_output = json.loads(self.openstack(
- 'server create -f json ' +
+ cmd_output = self.openstack(
+ 'server create ' +
'--network private ' +
'--flavor ' + self.flavor_name + ' ' +
'--image ' + self.image_name + ' ' +
'--wait ' +
- name
- ))
+ name,
+ parse_output=True,
+ )
self.assertIsNotNone(cmd_output['id'])
self.assertEqual(name, cmd_output['name'])
self.addCleanup(self.openstack, 'server delete --wait ' + name)
+
+ # create a port and record its ip address to use in a later call,
+ # then delete it - this is to figure out what should be a free ip
+ # in the subnet
+ port_name = uuid.uuid4().hex
+
+ cmd_output = self.openstack(
+ 'port list',
+ parse_output=True,
+ )
+ self.assertNotIn(port_name, cmd_output)
+
+ cmd_output = self.openstack(
+ 'port create ' +
+ '--network private ' + port_name,
+ parse_output=True,
+ )
+ self.assertIsNotNone(cmd_output['id'])
+ ip_address = cmd_output['fixed_ips'][0]['ip_address']
+ self.openstack('port delete ' + port_name)
+
+ # add fixed ip to server, assert the ip address appears
+ self.openstack('server add fixed ip --fixed-ip-address ' + ip_address +
+ ' ' + name + ' private')
+
+ wait_time = 0
+ while wait_time < 60:
+ cmd_output = self.openstack(
+ 'server show ' + name,
+ parse_output=True,
+ )
+ if ip_address not in cmd_output['addresses']['private']:
+ # Hang out for a bit and try again
+ print('retrying add fixed ip check')
+ wait_time += 10
+ time.sleep(10)
+ else:
+ break
+ addresses = cmd_output['addresses']['private']
+ self.assertIn(ip_address, addresses)
+
+ def test_server_add_remove_volume(self):
+ volume_wait_for = volume_common.BaseVolumeTests.wait_for_status
+
+ server_name = uuid.uuid4().hex
+ cmd_output = self.openstack(
+ 'server create ' +
+ '--network private ' +
+ '--flavor ' + self.flavor_name + ' ' +
+ '--image ' + self.image_name + ' ' +
+ '--wait ' +
+ server_name,
+ parse_output=True,
+ )
+
+ self.assertIsNotNone(cmd_output['id'])
+ self.assertEqual(server_name, cmd_output['name'])
+ self.addCleanup(self.openstack, 'server delete --wait ' + server_name)
server_id = cmd_output['id']
volume_name = uuid.uuid4().hex
- cmd_output = json.loads(self.openstack(
- 'volume create -f json ' +
+ cmd_output = self.openstack(
+ 'volume create ' +
'--size 1 ' +
- volume_name
- ))
+ volume_name,
+ parse_output=True,
+ )
self.assertIsNotNone(cmd_output['id'])
self.assertEqual(volume_name, cmd_output['name'])
@@ -1223,30 +1367,54 @@ class ServerTests(common.ComputeTestCase):
self.addCleanup(self.openstack, 'volume delete ' + volume_name)
volume_id = cmd_output['id']
- cmd_output = json.loads(self.openstack(
- 'server add volume -f json ' +
- name + ' ' +
+ cmd_output = self.openstack(
+ 'server add volume ' +
+ server_name + ' ' +
volume_name + ' ' +
- '--tag bar'
- ))
+ '--tag bar',
+ parse_output=True,
+ )
- self.assertIsNotNone(cmd_output['ID'])
self.assertEqual(server_id, cmd_output['Server ID'])
self.assertEqual(volume_id, cmd_output['Volume ID'])
- volume_attachment_id = cmd_output['ID']
- cmd_output = json.loads(self.openstack(
- 'server volume list -f json ' +
- name
- ))
+ cmd_output = self.openstack(
+ 'server volume list ' +
+ server_name,
+ parse_output=True,
+ )
- self.assertEqual(volume_attachment_id, cmd_output[0]['ID'])
self.assertEqual(server_id, cmd_output[0]['Server ID'])
self.assertEqual(volume_id, cmd_output[0]['Volume ID'])
volume_wait_for('volume', volume_name, 'in-use')
- self.openstack('server remove volume ' + name + ' ' + volume_name)
+
+ cmd_output = self.openstack(
+ 'server event list ' +
+ server_name,
+ parse_output=True,
+ )
+ self.assertEqual(2, len(cmd_output))
+ self.assertIn('attach_volume', {x['Action'] for x in cmd_output})
+
+ self.openstack(
+ 'server remove volume ' + server_name + ' ' + volume_name
+ )
volume_wait_for('volume', volume_name, 'available')
- raw_output = self.openstack('server volume list ' + name)
+ cmd_output = self.openstack(
+ 'server event list ' +
+ server_name,
+ parse_output=True,
+ )
+ self.assertEqual(3, len(cmd_output))
+ self.assertIn('detach_volume', {x['Action'] for x in cmd_output})
+
+ raw_output = self.openstack('server volume list ' + server_name)
self.assertEqual('\n', raw_output)
+
+ def test_server_migration_list(self):
+ # Verify that the command does not raise an exception when we list
+ # migrations, including when we specify a query.
+ self.openstack('server migration list')
+ self.openstack('server migration list --limit 1')
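All of the hunks in these functional test files follow the same pattern: drop the explicit '-f json' plus json.loads() wrapping and let the shared test helper parse the command output when parse_output=True is passed. The helper itself lives in the functional test base class and is not shown in this section; the following is only a rough sketch of the assumed behaviour (the name, signature, and flag handling are illustrative, not taken from this diff):

# Rough sketch of the assumed parse_output behaviour; the real helper is
# defined in the functional test base class and may differ in details.
import json
import shlex
import subprocess


def openstack(cmd, parse_output=False):
    """Run an 'openstack' CLI command, optionally parsing JSON output."""
    if parse_output:
        # Ask the CLI for machine-readable output instead of a table.
        cmd += ' -f json'
    result = subprocess.run(
        ['openstack'] + shlex.split(cmd),
        check=True,
        capture_output=True,
        text=True,
    )
    # With parse_output=True the caller gets Python dicts/lists back, so the
    # json.loads(...) wrapping removed throughout this diff is no longer
    # needed at the call sites.
    return json.loads(result.stdout) if parse_output else result.stdout

With something like that in place, a call such as self.openstack('server show ' + name, parse_output=True) returns a dict, which is exactly what the updated assertions index into.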
diff --git a/openstackclient/tests/functional/compute/v2/test_server_event.py b/openstackclient/tests/functional/compute/v2/test_server_event.py
index b4725fe6..48147507 100644
--- a/openstackclient/tests/functional/compute/v2/test_server_event.py
+++ b/openstackclient/tests/functional/compute/v2/test_server_event.py
@@ -13,8 +13,6 @@
# under the License.
#
-import json
-
from openstackclient.tests.functional.compute.v2 import common
@@ -33,9 +31,10 @@ class ServerEventTests(common.ComputeTestCase):
def test_server_event_list_and_show(self):
"""Test list, show server event"""
# Test 'server event list' for creating
- cmd_output = json.loads(self.openstack(
- 'server event list -f json ' + self.server_name
- ))
+ cmd_output = self.openstack(
+ 'server event list ' + self.server_name,
+ parse_output=True,
+ )
request_id = None
for each_event in cmd_output:
self.assertNotIn('Message', each_event)
@@ -47,9 +46,10 @@ class ServerEventTests(common.ComputeTestCase):
break
self.assertIsNotNone(request_id)
# Test 'server event show' for creating
- cmd_output = json.loads(self.openstack(
- 'server event show -f json ' + self.server_name + ' ' + request_id
- ))
+ cmd_output = self.openstack(
+ 'server event show ' + self.server_name + ' ' + request_id,
+ parse_output=True,
+ )
self.assertEqual(self.server_id, cmd_output.get('instance_uuid'))
self.assertEqual(request_id, cmd_output.get('request_id'))
self.assertEqual('create', cmd_output.get('action'))
@@ -59,9 +59,10 @@ class ServerEventTests(common.ComputeTestCase):
# Reboot server, trigger reboot event
self.openstack('server reboot --wait ' + self.server_name)
# Test 'server event list --long' for rebooting
- cmd_output = json.loads(self.openstack(
- 'server event list --long -f json ' + self.server_name
- ))
+ cmd_output = self.openstack(
+ 'server event list --long ' + self.server_name,
+ parse_output=True,
+ )
request_id = None
for each_event in cmd_output:
self.assertIn('Message', each_event)
@@ -73,9 +74,10 @@ class ServerEventTests(common.ComputeTestCase):
break
self.assertIsNotNone(request_id)
# Test 'server event show' for rebooting
- cmd_output = json.loads(self.openstack(
- 'server event show -f json ' + self.server_name + ' ' + request_id
- ))
+ cmd_output = self.openstack(
+ 'server event show ' + self.server_name + ' ' + request_id,
+ parse_output=True,
+ )
self.assertEqual(self.server_id, cmd_output.get('instance_uuid'))
self.assertEqual(request_id, cmd_output.get('request_id'))
@@ -93,10 +95,11 @@ class ServerEventTests(common.ComputeTestCase):
self.openstack('server delete --wait ' + server_id)
# And verify we can get the event list after it's deleted
# Test 'server event list' for deleting
- cmd_output = json.loads(self.openstack(
+ cmd_output = self.openstack(
'--os-compute-api-version 2.21 '
- 'server event list -f json ' + server_id
- ))
+ 'server event list ' + server_id,
+ parse_output=True,
+ )
request_id = None
for each_event in cmd_output:
self.assertNotIn('Message', each_event)
@@ -108,10 +111,11 @@ class ServerEventTests(common.ComputeTestCase):
break
self.assertIsNotNone(request_id)
# Test 'server event show' for deleting
- cmd_output = json.loads(self.openstack(
+ cmd_output = self.openstack(
'--os-compute-api-version 2.21 '
- 'server event show -f json ' + server_id + ' ' + request_id
- ))
+ 'server event show ' + server_id + ' ' + request_id,
+ parse_output=True,
+ )
self.assertEqual(server_id, cmd_output.get('instance_uuid'))
self.assertEqual(request_id, cmd_output.get('request_id'))
self.assertEqual('delete', cmd_output.get('action'))
diff --git a/openstackclient/tests/functional/compute/v2/test_server_group.py b/openstackclient/tests/functional/compute/v2/test_server_group.py
index 3dff3dcd..a599951c 100644
--- a/openstackclient/tests/functional/compute/v2/test_server_group.py
+++ b/openstackclient/tests/functional/compute/v2/test_server_group.py
@@ -10,7 +10,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import json
import uuid
from openstackclient.tests.functional import base
@@ -23,32 +22,34 @@ class ServerGroupTests(base.TestCase):
"""Test create, delete multiple"""
name1 = uuid.uuid4().hex
name2 = uuid.uuid4().hex
- cmd_output = json.loads(self.openstack(
- 'server group create -f json ' +
+ cmd_output = self.openstack(
+ 'server group create ' +
'--policy affinity ' +
- name1
- ))
+ name1,
+ parse_output=True,
+ )
self.assertEqual(
name1,
cmd_output['name']
)
self.assertEqual(
- ['affinity'],
- cmd_output['policies']
+ 'affinity',
+ cmd_output['policy']
)
- cmd_output = json.loads(self.openstack(
- 'server group create -f json ' +
+ cmd_output = self.openstack(
+ 'server group create ' +
'--policy anti-affinity ' +
- name2
- ))
+ name2,
+ parse_output=True,
+ )
self.assertEqual(
name2,
cmd_output['name']
)
self.assertEqual(
- ['anti-affinity'],
- cmd_output['policies']
+ 'anti-affinity',
+ cmd_output['policy']
)
del_output = self.openstack(
@@ -60,47 +61,55 @@ class ServerGroupTests(base.TestCase):
name1 = uuid.uuid4().hex
name2 = uuid.uuid4().hex
- # test server gorup show
- cmd_output = json.loads(self.openstack(
- 'server group create -f json ' +
+ # test server group show
+ cmd_output = self.openstack(
+ 'server group create ' +
'--policy affinity ' +
- name1
- ))
+ name1,
+ parse_output=True,
+ )
self.addCleanup(self.openstack, 'server group delete ' + name1)
- cmd_output = json.loads(self.openstack(
- 'server group show -f json ' + name1))
+ cmd_output = self.openstack(
+ 'server group show ' + name1,
+ parse_output=True,
+ )
self.assertEqual(
name1,
cmd_output['name']
)
self.assertEqual(
- ['affinity'],
- cmd_output['policies']
+ 'affinity',
+ cmd_output['policy']
)
- cmd_output = json.loads(self.openstack(
- 'server group create -f json ' +
+ cmd_output = self.openstack(
+ 'server group create ' +
'--policy anti-affinity ' +
- name2
- ))
+ name2,
+ parse_output=True,
+ )
self.addCleanup(self.openstack, 'server group delete ' + name2)
- cmd_output = json.loads(self.openstack(
- 'server group show -f json ' + name2))
+ cmd_output = self.openstack(
+ 'server group show ' + name2,
+ parse_output=True,
+ )
self.assertEqual(
name2,
cmd_output['name']
)
self.assertEqual(
- ['anti-affinity'],
- cmd_output['policies']
+ 'anti-affinity',
+ cmd_output['policy']
)
# test server group list
- cmd_output = json.loads(self.openstack(
- 'server group list -f json'))
+ cmd_output = self.openstack(
+ 'server group list',
+ parse_output=True,
+ )
names = [x["Name"] for x in cmd_output]
self.assertIn(name1, names)
self.assertIn(name2, names)
- policies = [x["Policies"] for x in cmd_output]
- self.assertIn(['affinity'], policies)
- self.assertIn(['anti-affinity'], policies)
+ policies = [x["Policy"] for x in cmd_output]
+ self.assertIn('affinity', policies)
+ self.assertIn('anti-affinity', policies)
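Note that the server group assertions change shape as well as parsing style: the tests now expect a single policy string per group rather than a one-element policies list, matching the newer compute API server group representation (microversion 2.64 replaced the list-valued 'policies' field with a single 'policy' field). Illustratively, the parsed 'server group list' rows asserted above look roughly like this (example values, not output captured from this change):

# Shape of a parsed 'server group list' row, illustrative only.
old_style_row = {'Name': 'group-a', 'Policies': ['anti-affinity']}
new_style_row = {'Name': 'group-a', 'Policy': 'anti-affinity'}

assert new_style_row['Policy'] in ('affinity', 'anti-affinity')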
diff --git a/openstackclient/tests/functional/identity/v3/test_project.py b/openstackclient/tests/functional/identity/v3/test_project.py
index 27cf4481..b3d31aa7 100644
--- a/openstackclient/tests/functional/identity/v3/test_project.py
+++ b/openstackclient/tests/functional/identity/v3/test_project.py
@@ -10,8 +10,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import json
-
from tempest.lib.common.utils import data_utils
from openstackclient.tests.functional.identity.v3 import common
@@ -114,12 +112,14 @@ class ProjectTests(common.IdentityTests):
self.assert_show_fields(items, self.PROJECT_FIELDS)
def test_project_show_with_parents_children(self):
- json_output = json.loads(self.openstack(
+ output = self.openstack(
'project show '
- '--parents --children -f json '
+ '--parents --children '
'--domain %(domain)s '
'%(name)s' % {'domain': self.domain_name,
- 'name': self.project_name}))
+ 'name': self.project_name},
+ parse_output=True,
+ )
for attr_name in (self.PROJECT_FIELDS + ['parents', 'subtree']):
- self.assertIn(attr_name, json_output)
- self.assertEqual(self.project_name, json_output.get('name'))
+ self.assertIn(attr_name, output)
+ self.assertEqual(self.project_name, output.get('name'))
diff --git a/openstackclient/tests/functional/image/v1/test_image.py b/openstackclient/tests/functional/image/v1/test_image.py
index b9774ab5..2b4d8f41 100644
--- a/openstackclient/tests/functional/image/v1/test_image.py
+++ b/openstackclient/tests/functional/image/v1/test_image.py
@@ -10,7 +10,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import json
import uuid
import fixtures
@@ -22,41 +21,37 @@ class ImageTests(base.BaseImageTests):
"""Functional tests for Image commands"""
def setUp(self):
- super(ImageTests, self).setUp()
+ super().setUp()
+
if not self.haz_v1_api:
self.skipTest('No Image v1 API present')
- self.name = uuid.uuid4().hex
- json_output = json.loads(self.openstack(
- '--os-image-api-version 1 '
- 'image create -f json ' +
- self.name
- ))
- self.image_id = json_output["id"]
- self.assertOutput(self.name, json_output['name'])
-
ver_fixture = fixtures.EnvironmentVariable(
'OS_IMAGE_API_VERSION', '1'
)
self.useFixture(ver_fixture)
+ self.name = uuid.uuid4().hex
+ output = self.openstack(
+ 'image create ' + self.name,
+ parse_output=True,
+ )
+ self.image_id = output["id"]
+ self.assertOutput(self.name, output['name'])
+
def tearDown(self):
try:
- self.openstack(
- '--os-image-api-version 1 '
- 'image delete ' +
- self.image_id
- )
+ self.openstack('image delete ' + self.image_id)
finally:
- super(ImageTests, self).tearDown()
+ super().tearDown()
def test_image_list(self):
- json_output = json.loads(self.openstack(
- 'image list -f json '
- ))
+ output = self.openstack(
+ 'image list', parse_output=True,
+ )
self.assertIn(
self.name,
- [img['Name'] for img in json_output]
+ [img['Name'] for img in output]
)
def test_image_attributes(self):
@@ -71,24 +66,24 @@ class ImageTests(base.BaseImageTests):
'--public ' +
self.name
)
- json_output = json.loads(self.openstack(
- 'image show -f json ' +
- self.name
- ))
+ output = self.openstack(
+ 'image show ' + self.name,
+ parse_output=True,
+ )
self.assertEqual(
4,
- json_output["min_disk"],
+ output["min_disk"],
)
self.assertEqual(
5,
- json_output["min_ram"],
+ output["min_ram"],
)
self.assertEqual(
'qcow2',
- json_output['disk_format'],
+ output['disk_format'],
)
self.assertTrue(
- json_output["is_public"],
+ output["is_public"],
)
# Test properties
@@ -99,11 +94,11 @@ class ImageTests(base.BaseImageTests):
'--public ' +
self.name
)
- json_output = json.loads(self.openstack(
- 'image show -f json ' +
- self.name
- ))
+ output = self.openstack(
+ 'image show ' + self.name,
+ parse_output=True,
+ )
self.assertEqual(
{'a': 'b', 'c': 'd'},
- json_output["properties"],
+ output["properties"],
)
diff --git a/openstackclient/tests/functional/image/v2/test_image.py b/openstackclient/tests/functional/image/v2/test_image.py
index 0a3a7360..3535bd7e 100644
--- a/openstackclient/tests/functional/image/v2/test_image.py
+++ b/openstackclient/tests/functional/image/v2/test_image.py
@@ -10,7 +10,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import json
import uuid
import fixtures
@@ -24,67 +23,70 @@ class ImageTests(base.BaseImageTests):
def setUp(self):
super(ImageTests, self).setUp()
- self.name = uuid.uuid4().hex
- self.image_tag = 'my_tag'
- json_output = json.loads(self.openstack(
- '--os-image-api-version 2 '
- 'image create -f json --tag {tag} {name}'.format(
- tag=self.image_tag, name=self.name)
- ))
- self.image_id = json_output["id"]
- self.assertOutput(self.name, json_output['name'])
-
ver_fixture = fixtures.EnvironmentVariable(
'OS_IMAGE_API_VERSION', '2'
)
self.useFixture(ver_fixture)
+ self.name = uuid.uuid4().hex
+ self.image_tag = 'my_tag'
+ self.image_tag1 = 'random'
+ output = self.openstack(
+ 'image create --tag {tag} {name}'.format(
+ tag=self.image_tag, name=self.name),
+ parse_output=True,
+ )
+ self.image_id = output["id"]
+ self.assertOutput(self.name, output['name'])
+
def tearDown(self):
try:
- self.openstack(
- '--os-image-api-version 2 '
- 'image delete ' +
- self.image_id
- )
+ self.openstack('image delete ' + self.image_id)
finally:
- super(ImageTests, self).tearDown()
+ super().tearDown()
def test_image_list(self):
- json_output = json.loads(self.openstack(
- 'image list -f json '
- ))
+ output = self.openstack('image list', parse_output=True)
self.assertIn(
self.name,
- [img['Name'] for img in json_output]
+ [img['Name'] for img in output]
)
def test_image_list_with_name_filter(self):
- json_output = json.loads(self.openstack(
- 'image list --name ' + self.name + ' -f json'
- ))
+ output = self.openstack(
+ 'image list --name ' + self.name,
+ parse_output=True,
+ )
self.assertIn(
self.name,
- [img['Name'] for img in json_output]
+ [img['Name'] for img in output]
)
def test_image_list_with_status_filter(self):
- json_output = json.loads(self.openstack(
- 'image list ' + ' --status active -f json'
- ))
+ output = self.openstack(
+ 'image list --status active',
+ parse_output=True,
+ )
self.assertIn(
'active',
- [img['Status'] for img in json_output]
+ [img['Status'] for img in output]
)
def test_image_list_with_tag_filter(self):
- json_output = json.loads(self.openstack(
- 'image list --tag ' + self.image_tag + ' --long -f json'
- ))
- for taglist in [img['Tags'] for img in json_output]:
+ output = self.openstack(
+ 'image list --tag ' + self.image_tag + ' --tag ' +
+ self.image_tag1 + ' --long',
+ parse_output=True,
+ )
+ for taglist in [img['Tags'] for img in output]:
self.assertIn(
self.image_tag,
taglist
)
+ self.assertIn(
+ self.image_tag1,
+ taglist
+ )
def test_image_attributes(self):
"""Test set, unset, show on attributes, tags and properties"""
@@ -97,21 +99,21 @@ class ImageTests(base.BaseImageTests):
'--public ' +
self.name
)
- json_output = json.loads(self.openstack(
- 'image show -f json ' +
- self.name
- ))
+ output = self.openstack(
+ 'image show ' + self.name,
+ parse_output=True,
+ )
self.assertEqual(
4,
- json_output["min_disk"],
+ output["min_disk"],
)
self.assertEqual(
5,
- json_output["min_ram"],
+ output["min_ram"],
)
self.assertEqual(
'public',
- json_output["visibility"],
+ output["visibility"],
)
# Test properties
@@ -123,12 +125,12 @@ class ImageTests(base.BaseImageTests):
'--public ' +
self.name
)
- json_output = json.loads(self.openstack(
- 'image show -f json ' +
- self.name
- ))
- self.assertIn("a", json_output["properties"])
- self.assertIn("c", json_output["properties"])
+ output = self.openstack(
+ 'image show ' + self.name,
+ parse_output=True,
+ )
+ self.assertIn("a", output["properties"])
+ self.assertIn("c", output["properties"])
self.openstack(
'image unset ' +
@@ -137,30 +139,30 @@ class ImageTests(base.BaseImageTests):
'--property hw_rng_model ' +
self.name
)
- json_output = json.loads(self.openstack(
- 'image show -f json ' +
- self.name
- ))
- self.assertNotIn("a", json_output["properties"])
- self.assertNotIn("c", json_output["properties"])
+ output = self.openstack(
+ 'image show ' + self.name,
+ parse_output=True,
+ )
+ self.assertNotIn("a", output["properties"])
+ self.assertNotIn("c", output["properties"])
# Test tags
self.assertNotIn(
'01',
- json_output["tags"]
+ output["tags"]
)
self.openstack(
'image set ' +
'--tag 01 ' +
self.name
)
- json_output = json.loads(self.openstack(
- 'image show -f json ' +
- self.name
- ))
+ output = self.openstack(
+ 'image show ' + self.name,
+ parse_output=True,
+ )
self.assertIn(
'01',
- json_output["tags"]
+ output["tags"]
)
self.openstack(
@@ -168,38 +170,38 @@ class ImageTests(base.BaseImageTests):
'--tag 01 ' +
self.name
)
- json_output = json.loads(self.openstack(
- 'image show -f json ' +
- self.name
- ))
+ output = self.openstack(
+ 'image show ' + self.name,
+ parse_output=True,
+ )
self.assertNotIn(
'01',
- json_output["tags"]
+ output["tags"]
)
def test_image_set_rename(self):
name = uuid.uuid4().hex
- json_output = json.loads(self.openstack(
- 'image create -f json ' +
- name
- ))
- image_id = json_output["id"]
+ output = self.openstack(
+ 'image create ' + name,
+ parse_output=True,
+ )
+ image_id = output["id"]
self.assertEqual(
name,
- json_output["name"],
+ output["name"],
)
self.openstack(
'image set ' +
'--name ' + name + 'xx ' +
image_id
)
- json_output = json.loads(self.openstack(
- 'image show -f json ' +
- name + 'xx'
- ))
+ output = self.openstack(
+ 'image show ' + name + 'xx',
+ parse_output=True,
+ )
self.assertEqual(
name + 'xx',
- json_output["name"],
+ output["name"],
)
# TODO(dtroyer): This test is incomplete and doesn't properly test
@@ -207,19 +209,21 @@ class ImageTests(base.BaseImageTests):
# properly added.
def test_image_members(self):
"""Test member add, remove, accept"""
- json_output = json.loads(self.openstack(
- 'token issue -f json'
- ))
- my_project_id = json_output['project_id']
+ output = self.openstack(
+ 'token issue',
+ parse_output=True,
+ )
+ my_project_id = output['project_id']
- json_output = json.loads(self.openstack(
+ output = self.openstack(
'image show -f json ' +
- self.name
- ))
+ self.name,
+ parse_output=True,
+ )
# NOTE(dtroyer): Until OSC supports --shared flags in create and set
# we can not properly test membership. Sometimes the
# images are shared and sometimes they are not.
- if json_output["visibility"] == 'shared':
+ if output["visibility"] == 'shared':
self.openstack(
'image add project ' +
self.name + ' ' +
@@ -237,13 +241,14 @@ class ImageTests(base.BaseImageTests):
'--accept ' +
self.name
)
- json_output = json.loads(self.openstack(
+ output = self.openstack(
'image list -f json ' +
- '--shared'
- ))
+ '--shared',
+ parse_output=True,
+ )
self.assertIn(
self.name,
- [img['Name'] for img in json_output]
+ [img['Name'] for img in output]
)
self.openstack(
@@ -251,13 +256,14 @@ class ImageTests(base.BaseImageTests):
'--reject ' +
self.name
)
- json_output = json.loads(self.openstack(
+ output = self.openstack(
'image list -f json ' +
- '--shared'
- ))
+ '--shared',
+ parse_output=True,
+ )
# self.assertNotIn(
# self.name,
- # [img['Name'] for img in json_output]
+ # [img['Name'] for img in output]
# )
self.openstack(
diff --git a/openstackclient/tests/functional/network/v2/common.py b/openstackclient/tests/functional/network/v2/common.py
index 2287f329..a9c5b830 100644
--- a/openstackclient/tests/functional/network/v2/common.py
+++ b/openstackclient/tests/functional/network/v2/common.py
@@ -10,7 +10,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import json
import uuid
from openstackclient.tests.functional import base
@@ -32,7 +31,10 @@ class NetworkTagTests(NetworkTests):
def test_tag_operation(self):
# Get project IDs
- cmd_output = json.loads(self.openstack('token issue -f json '))
+ cmd_output = self.openstack(
+ 'token issue ',
+ parse_output=True,
+ )
auth_project_id = cmd_output['project_id']
# Network create with no options
@@ -63,17 +65,20 @@ class NetworkTagTests(NetworkTests):
self._set_resource_and_tag_check('set', name2, '--no-tag', [])
def _list_tag_check(self, project_id, expected):
- cmd_output = json.loads(self.openstack(
- '{} list --long --project {} -f json'.format(self.base_command,
- project_id)))
+ cmd_output = self.openstack(
+ '{} list --long --project {}'.format(self.base_command,
+ project_id),
+ parse_output=True,
+ )
for name, tags in expected:
net = [n for n in cmd_output if n['Name'] == name][0]
self.assertEqual(set(tags), set(net['Tags']))
def _create_resource_for_tag_test(self, name, args):
- return json.loads(self.openstack(
- '{} create -f json {} {}'.format(self.base_command, args, name)
- ))
+ return self.openstack(
+ '{} create {} {}'.format(self.base_command, args, name),
+ parse_output=True,
+ )
def _create_resource_and_tag_check(self, args, expected):
name = uuid.uuid4().hex
@@ -89,7 +94,8 @@ class NetworkTagTests(NetworkTests):
'{} {} {} {}'.format(self.base_command, command, args, name)
)
self.assertFalse(cmd_output)
- cmd_output = json.loads(self.openstack(
- '{} show -f json {}'.format(self.base_command, name)
- ))
+ cmd_output = self.openstack(
+ '{} show {}'.format(self.base_command, name),
+ parse_output=True,
+ )
self.assertEqual(set(expected), set(cmd_output['tags']))
diff --git a/openstackclient/tests/functional/network/v2/test_address_group.py b/openstackclient/tests/functional/network/v2/test_address_group.py
index 52c628a3..17ab2362 100644
--- a/openstackclient/tests/functional/network/v2/test_address_group.py
+++ b/openstackclient/tests/functional/network/v2/test_address_group.py
@@ -10,7 +10,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import json
import uuid
from openstackclient.tests.functional.network.v2 import common
@@ -30,20 +29,22 @@ class AddressGroupTests(common.NetworkTests):
def test_address_group_create_and_delete(self):
"""Test create, delete multiple"""
name1 = uuid.uuid4().hex
- cmd_output = json.loads(self.openstack(
- 'address group create -f json ' +
- name1
- ))
+ cmd_output = self.openstack(
+ 'address group create ' +
+ name1,
+ parse_output=True,
+ )
self.assertEqual(
name1,
cmd_output['name'],
)
name2 = uuid.uuid4().hex
- cmd_output = json.loads(self.openstack(
- 'address group create -f json ' +
- name2
- ))
+ cmd_output = self.openstack(
+ 'address group create ' +
+ name2,
+ parse_output=True,
+ )
self.assertEqual(
name2,
cmd_output['name'],
@@ -57,10 +58,10 @@ class AddressGroupTests(common.NetworkTests):
def test_address_group_list(self):
"""Test create, list filters, delete"""
# Get project IDs
- cmd_output = json.loads(self.openstack('token issue -f json '))
+ cmd_output = self.openstack('token issue ', parse_output=True)
auth_project_id = cmd_output['project_id']
- cmd_output = json.loads(self.openstack('project list -f json '))
+ cmd_output = self.openstack('project list ', parse_output=True)
admin_project_id = None
demo_project_id = None
for p in cmd_output:
@@ -79,10 +80,11 @@ class AddressGroupTests(common.NetworkTests):
self.assertEqual(admin_project_id, auth_project_id)
name1 = uuid.uuid4().hex
- cmd_output = json.loads(self.openstack(
- 'address group create -f json ' +
- name1
- ))
+ cmd_output = self.openstack(
+ 'address group create ' +
+ name1,
+ parse_output=True,
+ )
self.addCleanup(self.openstack, 'address group delete ' + name1)
self.assertEqual(
admin_project_id,
@@ -90,11 +92,12 @@ class AddressGroupTests(common.NetworkTests):
)
name2 = uuid.uuid4().hex
- cmd_output = json.loads(self.openstack(
- 'address group create -f json ' +
+ cmd_output = self.openstack(
+ 'address group create ' +
'--project ' + demo_project_id +
- ' ' + name2
- ))
+ ' ' + name2,
+ parse_output=True,
+ )
self.addCleanup(self.openstack, 'address group delete ' + name2)
self.assertEqual(
demo_project_id,
@@ -102,27 +105,30 @@ class AddressGroupTests(common.NetworkTests):
)
# Test list
- cmd_output = json.loads(self.openstack(
- 'address group list -f json ',
- ))
+ cmd_output = self.openstack(
+ 'address group list ',
+ parse_output=True,
+ )
names = [x["Name"] for x in cmd_output]
self.assertIn(name1, names)
self.assertIn(name2, names)
# Test list --project
- cmd_output = json.loads(self.openstack(
- 'address group list -f json ' +
- '--project ' + demo_project_id
- ))
+ cmd_output = self.openstack(
+ 'address group list ' +
+ '--project ' + demo_project_id,
+ parse_output=True,
+ )
names = [x["Name"] for x in cmd_output]
self.assertNotIn(name1, names)
self.assertIn(name2, names)
# Test list --name
- cmd_output = json.loads(self.openstack(
- 'address group list -f json ' +
- '--name ' + name1
- ))
+ cmd_output = self.openstack(
+ 'address group list ' +
+ '--name ' + name1,
+ parse_output=True,
+ )
names = [x["Name"] for x in cmd_output]
self.assertIn(name1, names)
self.assertNotIn(name2, names)
@@ -131,12 +137,13 @@ class AddressGroupTests(common.NetworkTests):
"""Tests create options, set, unset, and show"""
name = uuid.uuid4().hex
newname = name + "_"
- cmd_output = json.loads(self.openstack(
- 'address group create -f json ' +
+ cmd_output = self.openstack(
+ 'address group create ' +
'--description aaaa ' +
'--address 10.0.0.1 --address 2001::/16 ' +
- name
- ))
+ name,
+ parse_output=True,
+ )
self.addCleanup(self.openstack, 'address group delete ' + newname)
self.assertEqual(name, cmd_output['name'])
self.assertEqual('aaaa', cmd_output['description'])
@@ -153,10 +160,11 @@ class AddressGroupTests(common.NetworkTests):
self.assertOutput('', raw_output)
# Show the updated address group
- cmd_output = json.loads(self.openstack(
- 'address group show -f json ' +
+ cmd_output = self.openstack(
+ 'address group show ' +
newname,
- ))
+ parse_output=True,
+ )
self.assertEqual(newname, cmd_output['name'])
self.assertEqual('bbbb', cmd_output['description'])
self.assertEqual(4, len(cmd_output['addresses']))
@@ -170,8 +178,9 @@ class AddressGroupTests(common.NetworkTests):
)
self.assertEqual('', raw_output)
- cmd_output = json.loads(self.openstack(
- 'address group show -f json ' +
+ cmd_output = self.openstack(
+ 'address group show ' +
newname,
- ))
+ parse_output=True,
+ )
self.assertEqual(0, len(cmd_output['addresses']))
diff --git a/openstackclient/tests/functional/network/v2/test_address_scope.py b/openstackclient/tests/functional/network/v2/test_address_scope.py
index 8a99ec5e..8ebb9522 100644
--- a/openstackclient/tests/functional/network/v2/test_address_scope.py
+++ b/openstackclient/tests/functional/network/v2/test_address_scope.py
@@ -10,7 +10,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import json
import uuid
from openstackclient.tests.functional.network.v2 import common
@@ -33,10 +32,11 @@ class AddressScopeTests(common.NetworkTests):
def test_address_scope_delete(self):
"""Test create, delete multiple"""
name1 = uuid.uuid4().hex
- cmd_output = json.loads(self.openstack(
- 'address scope create -f json ' +
- name1
- ))
+ cmd_output = self.openstack(
+ 'address scope create ' +
+ name1,
+ parse_output=True,
+ )
self.assertEqual(
name1,
cmd_output['name'],
@@ -45,10 +45,11 @@ class AddressScopeTests(common.NetworkTests):
self.assertFalse(cmd_output['shared'])
name2 = uuid.uuid4().hex
- cmd_output = json.loads(self.openstack(
- 'address scope create -f json ' +
- name2
- ))
+ cmd_output = self.openstack(
+ 'address scope create ' +
+ name2,
+ parse_output=True,
+ )
self.assertEqual(
name2,
cmd_output['name'],
@@ -62,12 +63,13 @@ class AddressScopeTests(common.NetworkTests):
def test_address_scope_list(self):
"""Test create defaults, list filters, delete"""
name1 = uuid.uuid4().hex
- cmd_output = json.loads(self.openstack(
- 'address scope create -f json ' +
+ cmd_output = self.openstack(
+ 'address scope create ' +
'--ip-version 4 ' +
'--share ' +
- name1
- ))
+ name1,
+ parse_output=True,
+ )
self.addCleanup(self.openstack, 'address scope delete ' + name1)
self.assertEqual(
name1,
@@ -80,12 +82,13 @@ class AddressScopeTests(common.NetworkTests):
self.assertTrue(cmd_output['shared'])
name2 = uuid.uuid4().hex
- cmd_output = json.loads(self.openstack(
- 'address scope create -f json ' +
+ cmd_output = self.openstack(
+ 'address scope create ' +
'--ip-version 6 ' +
'--no-share ' +
- name2
- ))
+ name2,
+ parse_output=True,
+ )
self.addCleanup(self.openstack, 'address scope delete ' + name2)
self.assertEqual(
name2,
@@ -98,25 +101,28 @@ class AddressScopeTests(common.NetworkTests):
self.assertFalse(cmd_output['shared'])
# Test list
- cmd_output = json.loads(self.openstack(
- 'address scope list -f json ',
- ))
+ cmd_output = self.openstack(
+ 'address scope list ',
+ parse_output=True,
+ )
col_data = [x["IP Version"] for x in cmd_output]
self.assertIn(4, col_data)
self.assertIn(6, col_data)
# Test list --share
- cmd_output = json.loads(self.openstack(
- 'address scope list -f json --share',
- ))
+ cmd_output = self.openstack(
+ 'address scope list --share',
+ parse_output=True,
+ )
col_data = [x["Shared"] for x in cmd_output]
self.assertIn(True, col_data)
self.assertNotIn(False, col_data)
# Test list --no-share
- cmd_output = json.loads(self.openstack(
- 'address scope list -f json --no-share',
- ))
+ cmd_output = self.openstack(
+ 'address scope list --no-share',
+ parse_output=True,
+ )
col_data = [x["Shared"] for x in cmd_output]
self.assertIn(False, col_data)
self.assertNotIn(True, col_data)
@@ -125,12 +131,13 @@ class AddressScopeTests(common.NetworkTests):
"""Tests create options, set, show, delete"""
name = uuid.uuid4().hex
newname = name + "_"
- cmd_output = json.loads(self.openstack(
- 'address scope create -f json ' +
+ cmd_output = self.openstack(
+ 'address scope create ' +
'--ip-version 4 ' +
'--no-share ' +
- name
- ))
+ name,
+ parse_output=True,
+ )
self.addCleanup(self.openstack, 'address scope delete ' + newname)
self.assertEqual(
name,
@@ -150,10 +157,11 @@ class AddressScopeTests(common.NetworkTests):
)
self.assertOutput('', raw_output)
- cmd_output = json.loads(self.openstack(
- 'address scope show -f json ' +
+ cmd_output = self.openstack(
+ 'address scope show ' +
newname,
- ))
+ parse_output=True,
+ )
self.assertEqual(
newname,
cmd_output['name'],
diff --git a/openstackclient/tests/functional/network/v2/test_floating_ip.py b/openstackclient/tests/functional/network/v2/test_floating_ip.py
index 9d109f87..871cab2d 100644
--- a/openstackclient/tests/functional/network/v2/test_floating_ip.py
+++ b/openstackclient/tests/functional/network/v2/test_floating_ip.py
@@ -10,7 +10,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import json
import random
import uuid
@@ -29,18 +28,20 @@ class FloatingIpTests(common.NetworkTests):
cls.PRIVATE_NETWORK_NAME = uuid.uuid4().hex
# Create a network for the floating ip
- json_output = json.loads(cls.openstack(
- 'network create -f json ' +
+ json_output = cls.openstack(
+ 'network create ' +
'--external ' +
- cls.EXTERNAL_NETWORK_NAME
- ))
+ cls.EXTERNAL_NETWORK_NAME,
+ parse_output=True,
+ )
cls.external_network_id = json_output["id"]
# Create a private network for the port
- json_output = json.loads(cls.openstack(
- 'network create -f json ' +
- cls.PRIVATE_NETWORK_NAME
- ))
+ json_output = cls.openstack(
+ 'network create ' +
+ cls.PRIVATE_NETWORK_NAME,
+ parse_output=True,
+ )
cls.private_network_id = json_output["id"]
@classmethod
@@ -81,12 +82,13 @@ class FloatingIpTests(common.NetworkTests):
)) + ".0/26"
try:
# Create a subnet for the network
- json_output = json.loads(self.openstack(
- 'subnet create -f json ' +
+ json_output = self.openstack(
+ 'subnet create ' +
'--network ' + network_name + ' ' +
'--subnet-range ' + subnet + ' ' +
- subnet_name
- ))
+ subnet_name,
+ parse_output=True,
+ )
self.assertIsNotNone(json_output["id"])
subnet_id = json_output["id"]
except Exception:
@@ -109,11 +111,12 @@ class FloatingIpTests(common.NetworkTests):
)
self.addCleanup(self.openstack, 'subnet delete ' + ext_subnet_id)
- json_output = json.loads(self.openstack(
- 'floating ip create -f json ' +
+ json_output = self.openstack(
+ 'floating ip create ' +
'--description aaaa ' +
- self.EXTERNAL_NETWORK_NAME
- ))
+ self.EXTERNAL_NETWORK_NAME,
+ parse_output=True,
+ )
self.assertIsNotNone(json_output["id"])
ip1 = json_output["id"]
self.assertEqual(
@@ -121,11 +124,12 @@ class FloatingIpTests(common.NetworkTests):
json_output["description"],
)
- json_output = json.loads(self.openstack(
- 'floating ip create -f json ' +
+ json_output = self.openstack(
+ 'floating ip create ' +
'--description bbbb ' +
- self.EXTERNAL_NETWORK_NAME
- ))
+ self.EXTERNAL_NETWORK_NAME,
+ parse_output=True,
+ )
self.assertIsNotNone(json_output["id"])
ip2 = json_output["id"]
self.assertEqual(
@@ -149,11 +153,12 @@ class FloatingIpTests(common.NetworkTests):
)
self.addCleanup(self.openstack, 'subnet delete ' + ext_subnet_id)
- json_output = json.loads(self.openstack(
- 'floating ip create -f json ' +
+ json_output = self.openstack(
+ 'floating ip create ' +
'--description aaaa ' +
- self.EXTERNAL_NETWORK_NAME
- ))
+ self.EXTERNAL_NETWORK_NAME,
+ parse_output=True,
+ )
self.assertIsNotNone(json_output["id"])
ip1 = json_output["id"]
self.addCleanup(self.openstack, 'floating ip delete ' + ip1)
@@ -164,11 +169,12 @@ class FloatingIpTests(common.NetworkTests):
self.assertIsNotNone(json_output["floating_network_id"])
fip1 = json_output["floating_ip_address"]
- json_output = json.loads(self.openstack(
- 'floating ip create -f json ' +
+ json_output = self.openstack(
+ 'floating ip create ' +
'--description bbbb ' +
- self.EXTERNAL_NETWORK_NAME
- ))
+ self.EXTERNAL_NETWORK_NAME,
+ parse_output=True,
+ )
self.assertIsNotNone(json_output["id"])
ip2 = json_output["id"]
self.addCleanup(self.openstack, 'floating ip delete ' + ip2)
@@ -180,9 +186,10 @@ class FloatingIpTests(common.NetworkTests):
fip2 = json_output["floating_ip_address"]
# Test list
- json_output = json.loads(self.openstack(
- 'floating ip list -f json'
- ))
+ json_output = self.openstack(
+ 'floating ip list',
+ parse_output=True,
+ )
fip_map = {
item.get('ID'):
item.get('Floating IP Address') for item in json_output
@@ -194,10 +201,11 @@ class FloatingIpTests(common.NetworkTests):
self.assertIn(fip2, fip_map.values())
# Test list --long
- json_output = json.loads(self.openstack(
- 'floating ip list -f json ' +
- '--long'
- ))
+ json_output = self.openstack(
+ 'floating ip list ' +
+ '--long',
+ parse_output=True,
+ )
fip_map = {
item.get('ID'):
item.get('Floating IP Address') for item in json_output
@@ -214,10 +222,11 @@ class FloatingIpTests(common.NetworkTests):
# TODO(dtroyer): add more filter tests
- json_output = json.loads(self.openstack(
- 'floating ip show -f json ' +
- ip1
- ))
+ json_output = self.openstack(
+ 'floating ip show ' +
+ ip1,
+ parse_output=True,
+ )
self.assertIsNotNone(json_output["id"])
self.assertEqual(
ip1,
@@ -251,11 +260,12 @@ class FloatingIpTests(common.NetworkTests):
self.ROUTER = uuid.uuid4().hex
self.PORT_NAME = uuid.uuid4().hex
- json_output = json.loads(self.openstack(
- 'floating ip create -f json ' +
+ json_output = self.openstack(
+ 'floating ip create ' +
'--description aaaa ' +
- self.EXTERNAL_NETWORK_NAME
- ))
+ self.EXTERNAL_NETWORK_NAME,
+ parse_output=True,
+ )
self.assertIsNotNone(json_output["id"])
ip1 = json_output["id"]
self.addCleanup(self.openstack, 'floating ip delete ' + ip1)
@@ -264,19 +274,21 @@ class FloatingIpTests(common.NetworkTests):
json_output["description"],
)
- json_output = json.loads(self.openstack(
- 'port create -f json ' +
+ json_output = self.openstack(
+ 'port create ' +
'--network ' + self.PRIVATE_NETWORK_NAME + ' ' +
'--fixed-ip subnet=' + priv_subnet_id + ' ' +
- self.PORT_NAME
- ))
+ self.PORT_NAME,
+ parse_output=True,
+ )
self.assertIsNotNone(json_output["id"])
port_id = json_output["id"]
- json_output = json.loads(self.openstack(
- 'router create -f json ' +
- self.ROUTER
- ))
+ json_output = self.openstack(
+ 'router create ' +
+ self.ROUTER,
+ parse_output=True,
+ )
self.assertIsNotNone(json_output["id"])
self.addCleanup(self.openstack, 'router delete ' + self.ROUTER)
@@ -310,10 +322,11 @@ class FloatingIpTests(common.NetworkTests):
'floating ip unset --port ' + ip1,
)
- json_output = json.loads(self.openstack(
- 'floating ip show -f json ' +
- ip1
- ))
+ json_output = self.openstack(
+ 'floating ip show ' +
+ ip1,
+ parse_output=True,
+ )
self.assertEqual(
port_id,
diff --git a/openstackclient/tests/functional/network/v2/test_ip_availability.py b/openstackclient/tests/functional/network/v2/test_ip_availability.py
index 86a53c0c..6697ed36 100644
--- a/openstackclient/tests/functional/network/v2/test_ip_availability.py
+++ b/openstackclient/tests/functional/network/v2/test_ip_availability.py
@@ -10,7 +10,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import json
import uuid
from openstackclient.tests.functional.network.v2 import common
@@ -31,12 +30,13 @@ class IPAvailabilityTests(common.NetworkTests):
'network create ' +
cls.NETWORK_NAME
)
- cmd_output = json.loads(cls.openstack(
- 'subnet create -f json ' +
+ cmd_output = cls.openstack(
+ 'subnet create ' +
'--network ' + cls.NETWORK_NAME + ' ' +
'--subnet-range 10.10.10.0/24 ' +
- cls.NAME
- ))
+ cls.NAME,
+ parse_output=True,
+ )
cls.assertOutput(cls.NAME, cmd_output['name'])
@classmethod
@@ -64,15 +64,17 @@ class IPAvailabilityTests(common.NetworkTests):
def test_ip_availability_list(self):
"""Test ip availability list"""
- cmd_output = json.loads(self.openstack(
- 'ip availability list -f json'))
+ cmd_output = self.openstack(
+ 'ip availability list',
+ parse_output=True,)
names = [x['Network Name'] for x in cmd_output]
self.assertIn(self.NETWORK_NAME, names)
def test_ip_availability_show(self):
"""Test ip availability show"""
- cmd_output = json.loads(self.openstack(
- 'ip availability show -f json ' + self.NETWORK_NAME))
+ cmd_output = self.openstack(
+ 'ip availability show ' + self.NETWORK_NAME,
+ parse_output=True,)
self.assertEqual(
self.NETWORK_NAME,
cmd_output['network_name'],
diff --git a/openstackclient/tests/functional/network/v2/test_l3_conntrack_helper.py b/openstackclient/tests/functional/network/v2/test_l3_conntrack_helper.py
index bbb9a7cd..2563bcf9 100644
--- a/openstackclient/tests/functional/network/v2/test_l3_conntrack_helper.py
+++ b/openstackclient/tests/functional/network/v2/test_l3_conntrack_helper.py
@@ -11,8 +11,6 @@
# under the License.
#
-
-import json
import uuid
from openstackclient.tests.functional.network.v2 import common
@@ -32,9 +30,10 @@ class L3ConntrackHelperTests(common.NetworkTests):
def _create_router(self):
router_name = uuid.uuid4().hex
- json_output = json.loads(self.openstack(
- 'router create -f json ' + router_name
- ))
+ json_output = self.openstack(
+ 'router create ' + router_name,
+ parse_output=True,
+ )
self.assertIsNotNone(json_output['id'])
router_id = json_output['id']
self.addCleanup(self.openstack, 'router delete ' + router_id)
@@ -43,13 +42,17 @@ class L3ConntrackHelperTests(common.NetworkTests):
def _create_helpers(self, router_id, helpers):
created_helpers = []
for helper in helpers:
- output = json.loads(self.openstack(
+ output = self.openstack(
'network l3 conntrack helper create %(router)s '
- '--helper %(helper)s --protocol %(protocol)s --port %(port)s '
- '-f json' % {'router': router_id,
- 'helper': helper['helper'],
- 'protocol': helper['protocol'],
- 'port': helper['port']}))
+ '--helper %(helper)s --protocol %(protocol)s '
+ '--port %(port)s ' % {
+ 'router': router_id,
+ 'helper': helper['helper'],
+ 'protocol': helper['protocol'],
+ 'port': helper['port'],
+ },
+ parse_output=True,
+ )
self.assertEqual(helper['helper'], output['helper'])
self.assertEqual(helper['protocol'], output['protocol'])
self.assertEqual(helper['port'], output['port'])
@@ -105,9 +108,10 @@ class L3ConntrackHelperTests(common.NetworkTests):
]
router_id = self._create_router()
self._create_helpers(router_id, helpers)
- output = json.loads(self.openstack(
- 'network l3 conntrack helper list %s -f json ' % router_id
- ))
+ output = self.openstack(
+ 'network l3 conntrack helper list %s ' % router_id,
+ parse_output=True,
+ )
for ct in output:
self.assertEqual(router_id, ct.pop('Router ID'))
ct.pop("ID")
@@ -120,10 +124,14 @@ class L3ConntrackHelperTests(common.NetworkTests):
'port': 69}
router_id = self._create_router()
created_helper = self._create_helpers(router_id, [helper])[0]
- output = json.loads(self.openstack(
+ output = self.openstack(
'network l3 conntrack helper show %(router_id)s %(ct_id)s '
'-f json' % {
- 'router_id': router_id, 'ct_id': created_helper['id']}))
+ 'router_id': router_id,
+ 'ct_id': created_helper['id'],
+ },
+ parse_output=True,
+ )
self.assertEqual(helper['helper'], output['helper'])
self.assertEqual(helper['protocol'], output['protocol'])
self.assertEqual(helper['port'], output['port'])
@@ -136,10 +144,14 @@ class L3ConntrackHelperTests(common.NetworkTests):
'port': helper['port'] + 1})
self.assertOutput('', raw_output)
- output = json.loads(self.openstack(
+ output = self.openstack(
'network l3 conntrack helper show %(router_id)s %(ct_id)s '
'-f json' % {
- 'router_id': router_id, 'ct_id': created_helper['id']}))
+ 'router_id': router_id,
+ 'ct_id': created_helper['id'],
+ },
+ parse_output=True,
+ )
self.assertEqual(helper['port'] + 1, output['port'])
self.assertEqual(helper['helper'], output['helper'])
self.assertEqual(helper['protocol'], output['protocol'])
diff --git a/openstackclient/tests/functional/network/v2/test_local_ip.py b/openstackclient/tests/functional/network/v2/test_local_ip.py
index dd278e38..b5672b6d 100644
--- a/openstackclient/tests/functional/network/v2/test_local_ip.py
+++ b/openstackclient/tests/functional/network/v2/test_local_ip.py
@@ -11,9 +11,7 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
-#
-import json
import uuid
from openstackclient.tests.functional.network.v2 import common
@@ -33,20 +31,22 @@ class LocalIPTests(common.NetworkTests):
def test_local_ip_create_and_delete(self):
"""Test create, delete multiple"""
name1 = uuid.uuid4().hex
- cmd_output = json.loads(self.openstack(
- 'local ip create -f json ' +
- name1
- ))
+ cmd_output = self.openstack(
+ 'local ip create ' +
+ name1,
+ parse_output=True,
+ )
self.assertEqual(
name1,
cmd_output['name'],
)
name2 = uuid.uuid4().hex
- cmd_output = json.loads(self.openstack(
- 'local ip create -f json ' +
- name2
- ))
+ cmd_output = self.openstack(
+ 'local ip create ' +
+ name2,
+ parse_output=True,
+ )
self.assertEqual(
name2,
cmd_output['name'],
@@ -60,10 +60,10 @@ class LocalIPTests(common.NetworkTests):
def test_local_ip_list(self):
"""Test create, list filters, delete"""
# Get project IDs
- cmd_output = json.loads(self.openstack('token issue -f json '))
+ cmd_output = self.openstack('token issue ', parse_output=True)
auth_project_id = cmd_output['project_id']
- cmd_output = json.loads(self.openstack('project list -f json '))
+ cmd_output = self.openstack('project list ', parse_output=True)
admin_project_id = None
demo_project_id = None
for p in cmd_output:
@@ -82,10 +82,11 @@ class LocalIPTests(common.NetworkTests):
self.assertEqual(admin_project_id, auth_project_id)
name1 = uuid.uuid4().hex
- cmd_output = json.loads(self.openstack(
- 'local ip create -f json ' +
- name1
- ))
+ cmd_output = self.openstack(
+ 'local ip create ' +
+ name1,
+ parse_output=True,
+ )
self.addCleanup(self.openstack, 'local ip delete ' + name1)
self.assertEqual(
admin_project_id,
@@ -93,11 +94,12 @@ class LocalIPTests(common.NetworkTests):
)
name2 = uuid.uuid4().hex
- cmd_output = json.loads(self.openstack(
- 'local ip create -f json ' +
+ cmd_output = self.openstack(
+ 'local ip create ' +
'--project ' + demo_project_id +
- ' ' + name2
- ))
+ ' ' + name2,
+ parse_output=True,
+ )
self.addCleanup(self.openstack, 'local ip delete ' + name2)
self.assertEqual(
demo_project_id,
@@ -105,27 +107,30 @@ class LocalIPTests(common.NetworkTests):
)
# Test list
- cmd_output = json.loads(self.openstack(
- 'local ip list -f json ',
- ))
+ cmd_output = self.openstack(
+ 'local ip list ',
+ parse_output=True,
+ )
names = [x["Name"] for x in cmd_output]
self.assertIn(name1, names)
self.assertIn(name2, names)
# Test list --project
- cmd_output = json.loads(self.openstack(
- 'local ip list -f json ' +
- '--project ' + demo_project_id
- ))
+ cmd_output = self.openstack(
+ 'local ip list ' +
+ '--project ' + demo_project_id,
+ parse_output=True,
+ )
names = [x["Name"] for x in cmd_output]
self.assertNotIn(name1, names)
self.assertIn(name2, names)
# Test list --name
- cmd_output = json.loads(self.openstack(
- 'local ip list -f json ' +
- '--name ' + name1
- ))
+ cmd_output = self.openstack(
+ 'local ip list ' +
+ '--name ' + name1,
+ parse_output=True,
+ )
names = [x["Name"] for x in cmd_output]
self.assertIn(name1, names)
self.assertNotIn(name2, names)
@@ -134,11 +139,12 @@ class LocalIPTests(common.NetworkTests):
"""Tests create options, set, and show"""
name = uuid.uuid4().hex
newname = name + "_"
- cmd_output = json.loads(self.openstack(
- 'local ip create -f json ' +
+ cmd_output = self.openstack(
+ 'local ip create ' +
'--description aaaa ' +
- name
- ))
+ name,
+ parse_output=True,
+ )
self.addCleanup(self.openstack, 'local ip delete ' + newname)
self.assertEqual(name, cmd_output['name'])
self.assertEqual('aaaa', cmd_output['description'])
@@ -153,9 +159,10 @@ class LocalIPTests(common.NetworkTests):
self.assertOutput('', raw_output)
# Show the updated local ip
- cmd_output = json.loads(self.openstack(
- 'local ip show -f json ' +
+ cmd_output = self.openstack(
+ 'local ip show ' +
newname,
- ))
+ parse_output=True,
+ )
self.assertEqual(newname, cmd_output['name'])
self.assertEqual('bbbb', cmd_output['description'])
diff --git a/openstackclient/tests/functional/network/v2/test_network.py b/openstackclient/tests/functional/network/v2/test_network.py
index f68b3143..20be2d1a 100644
--- a/openstackclient/tests/functional/network/v2/test_network.py
+++ b/openstackclient/tests/functional/network/v2/test_network.py
@@ -10,7 +10,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import json
import uuid
from openstackclient.tests.functional.network.v2 import common
@@ -34,11 +33,12 @@ class NetworkTests(common.NetworkTagTests):
# Network create with minimum options
name1 = uuid.uuid4().hex
- cmd_output = json.loads(self.openstack(
- 'network create -f json ' +
+ cmd_output = self.openstack(
+ 'network create ' +
'--subnet 1.2.3.4/28 ' +
- name1
- ))
+ name1,
+ parse_output=True,
+ )
self.addCleanup(self.openstack, 'network delete ' + name1)
self.assertIsNotNone(cmd_output["id"])
@@ -53,12 +53,13 @@ class NetworkTests(common.NetworkTagTests):
# Network create with more options
name2 = uuid.uuid4().hex
- cmd_output = json.loads(self.openstack(
- 'network create -f json ' +
+ cmd_output = self.openstack(
+ 'network create ' +
'--subnet 1.2.4.4/28 ' +
'--share ' +
- name2
- ))
+ name2,
+ parse_output=True,
+ )
self.addCleanup(self.openstack, 'network delete ' + name2)
self.assertIsNotNone(cmd_output["id"])
@@ -80,10 +81,10 @@ class NetworkTests(common.NetworkTagTests):
self.skipTest("No Network service present")
# Get project IDs
- cmd_output = json.loads(self.openstack('token issue -f json '))
+ cmd_output = self.openstack('token issue ', parse_output=True)
auth_project_id = cmd_output['project_id']
- cmd_output = json.loads(self.openstack('project list -f json '))
+ cmd_output = self.openstack('project list ', parse_output=True)
admin_project_id = None
demo_project_id = None
for p in cmd_output:
@@ -103,10 +104,11 @@ class NetworkTests(common.NetworkTagTests):
# Network create with no options
name1 = uuid.uuid4().hex
- cmd_output = json.loads(self.openstack(
- 'network create -f json ' +
- name1
- ))
+ cmd_output = self.openstack(
+ 'network create ' +
+ name1,
+ parse_output=True,
+ )
self.addCleanup(self.openstack, 'network delete ' + name1)
self.assertIsNotNone(cmd_output["id"])
@@ -133,11 +135,12 @@ class NetworkTests(common.NetworkTagTests):
# Network create with options
name2 = uuid.uuid4().hex
- cmd_output = json.loads(self.openstack(
- 'network create -f json ' +
+ cmd_output = self.openstack(
+ 'network create ' +
'--project demo ' +
- name2
- ))
+ name2,
+ parse_output=True,
+ )
self.addCleanup(self.openstack, 'network delete ' + name2)
self.assertIsNotNone(cmd_output["id"])
self.assertEqual(
@@ -155,11 +158,12 @@ class NetworkTests(common.NetworkTagTests):
self.skipTest("Skip Nova-net test")
name1 = uuid.uuid4().hex
- cmd_output = json.loads(self.openstack(
- 'network create -f json ' +
+ cmd_output = self.openstack(
+ 'network create ' +
'--subnet 9.8.7.6/28 ' +
- name1
- ))
+ name1,
+ parse_output=True,
+ )
self.assertIsNotNone(cmd_output["id"])
self.assertEqual(
name1,
@@ -167,11 +171,12 @@ class NetworkTests(common.NetworkTagTests):
)
name2 = uuid.uuid4().hex
- cmd_output = json.loads(self.openstack(
- 'network create -f json ' +
+ cmd_output = self.openstack(
+ 'network create ' +
'--subnet 8.7.6.5/28 ' +
- name2
- ))
+ name2,
+ parse_output=True,
+ )
self.assertIsNotNone(cmd_output["id"])
self.assertEqual(
name2,
@@ -184,11 +189,12 @@ class NetworkTests(common.NetworkTagTests):
self.skipTest("No Network service present")
name1 = uuid.uuid4().hex
- cmd_output = json.loads(self.openstack(
- 'network create -f json ' +
+ cmd_output = self.openstack(
+ 'network create ' +
'--description aaaa ' +
- name1
- ))
+ name1,
+ parse_output=True,
+ )
self.assertIsNotNone(cmd_output["id"])
self.assertEqual(
'aaaa',
@@ -196,11 +202,12 @@ class NetworkTests(common.NetworkTagTests):
)
name2 = uuid.uuid4().hex
- cmd_output = json.loads(self.openstack(
- 'network create -f json ' +
+ cmd_output = self.openstack(
+ 'network create ' +
'--description bbbb ' +
- name2
- ))
+ name2,
+ parse_output=True,
+ )
self.assertIsNotNone(cmd_output["id"])
self.assertEqual(
'bbbb',
@@ -217,11 +224,12 @@ class NetworkTests(common.NetworkTagTests):
network_options = '--description aaaa --no-default '
else:
network_options = '--subnet 3.4.5.6/28 '
- cmd_output = json.loads(self.openstack(
- 'network create -f json ' +
+ cmd_output = self.openstack(
+ 'network create ' +
network_options +
- name1
- ))
+ name1,
+ parse_output=True,
+ )
self.addCleanup(self.openstack, 'network delete %s' % name1)
self.assertIsNotNone(cmd_output["id"])
if self.haz_network:
@@ -254,10 +262,11 @@ class NetworkTests(common.NetworkTagTests):
network_options = '--description bbbb --disable '
else:
network_options = '--subnet 4.5.6.7/28 '
- cmd_output = json.loads(self.openstack(
- 'network create -f json --share %s%s' %
- (network_options, name2)
- ))
+ cmd_output = self.openstack(
+ 'network create --share %s%s' %
+ (network_options, name2),
+ parse_output=True,
+ )
self.addCleanup(self.openstack, 'network delete ' + name2)
self.assertIsNotNone(cmd_output["id"])
if self.haz_network:
@@ -280,54 +289,60 @@ class NetworkTests(common.NetworkTagTests):
self.assertTrue(cmd_output["share_address"])
# Test list
- cmd_output = json.loads(self.openstack(
- "network list -f json "
- ))
+ cmd_output = self.openstack(
+ "network list ",
+ parse_output=True,
+ )
col_name = [x["Name"] for x in cmd_output]
self.assertIn(name1, col_name)
self.assertIn(name2, col_name)
# Test list --long
if self.haz_network:
- cmd_output = json.loads(self.openstack(
- "network list -f json --long"
- ))
+ cmd_output = self.openstack(
+ "network list --long",
+ parse_output=True,
+ )
col_name = [x["Name"] for x in cmd_output]
self.assertIn(name1, col_name)
self.assertIn(name2, col_name)
# Test list --long --enable
if self.haz_network:
- cmd_output = json.loads(self.openstack(
- "network list -f json --enable --long"
- ))
+ cmd_output = self.openstack(
+ "network list --enable --long",
+ parse_output=True,
+ )
col_name = [x["Name"] for x in cmd_output]
self.assertIn(name1, col_name)
self.assertNotIn(name2, col_name)
# Test list --long --disable
if self.haz_network:
- cmd_output = json.loads(self.openstack(
- "network list -f json --disable --long"
- ))
+ cmd_output = self.openstack(
+ "network list --disable --long",
+ parse_output=True,
+ )
col_name = [x["Name"] for x in cmd_output]
self.assertNotIn(name1, col_name)
self.assertIn(name2, col_name)
# Test list --share
if self.haz_network:
- cmd_output = json.loads(self.openstack(
- "network list -f json --share "
- ))
+ cmd_output = self.openstack(
+ "network list --share ",
+ parse_output=True,
+ )
col_name = [x["Name"] for x in cmd_output]
self.assertNotIn(name1, col_name)
self.assertIn(name2, col_name)
# Test list --no-share
if self.haz_network:
- cmd_output = json.loads(self.openstack(
- "network list -f json --no-share "
- ))
+ cmd_output = self.openstack(
+ "network list --no-share ",
+ parse_output=True,
+ )
col_name = [x["Name"] for x in cmd_output]
self.assertIn(name1, col_name)
self.assertNotIn(name2, col_name)
@@ -339,9 +354,10 @@ class NetworkTests(common.NetworkTagTests):
self.skipTest("No dhcp_agent_scheduler extension present")
name1 = uuid.uuid4().hex
- cmd_output1 = json.loads(self.openstack(
- 'network create -f json --description aaaa %s' % name1
- ))
+ cmd_output1 = self.openstack(
+ 'network create --description aaaa %s' % name1,
+ parse_output=True,
+ )
self.addCleanup(self.openstack, 'network delete %s' % name1)
@@ -349,9 +365,10 @@ class NetworkTests(common.NetworkTagTests):
network_id = cmd_output1['id']
# Get DHCP Agent ID
- cmd_output2 = json.loads(self.openstack(
- 'network agent list -f json --agent-type dhcp'
- ))
+ cmd_output2 = self.openstack(
+ 'network agent list --agent-type dhcp',
+ parse_output=True,
+ )
agent_id = cmd_output2[0]['ID']
# Add Agent to Network
@@ -360,9 +377,10 @@ class NetworkTests(common.NetworkTagTests):
)
# Test network list --agent
- cmd_output3 = json.loads(self.openstack(
- 'network list -f json --agent %s' % agent_id
- ))
+ cmd_output3 = self.openstack(
+ 'network list --agent %s' % agent_id,
+ parse_output=True,
+ )
# Cleanup
# Remove Agent from Network
@@ -383,16 +401,17 @@ class NetworkTests(common.NetworkTagTests):
self.skipTest("No Network service present")
name = uuid.uuid4().hex
- cmd_output = json.loads(self.openstack(
- 'network create -f json '
+ cmd_output = self.openstack(
+ 'network create '
'--description aaaa '
'--enable '
'--no-share '
'--internal '
'--no-default '
'--enable-port-security %s' %
- name
- ))
+ name,
+ parse_output=True,
+ )
self.addCleanup(self.openstack, 'network delete %s' % name)
self.assertIsNotNone(cmd_output["id"])
self.assertEqual(
@@ -425,9 +444,10 @@ class NetworkTests(common.NetworkTagTests):
)
self.assertOutput('', raw_output)
- cmd_output = json.loads(self.openstack(
- 'network show -f json ' + name
- ))
+ cmd_output = self.openstack(
+ 'network show ' + name,
+ parse_output=True,
+ )
self.assertEqual(
'cccc',
diff --git a/openstackclient/tests/functional/network/v2/test_network_agent.py b/openstackclient/tests/functional/network/v2/test_network_agent.py
index e5580945..d3e6353e 100644
--- a/openstackclient/tests/functional/network/v2/test_network_agent.py
+++ b/openstackclient/tests/functional/network/v2/test_network_agent.py
@@ -10,7 +10,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import json
import uuid
from openstackclient.tests.functional.network.v2 import common
@@ -33,17 +32,19 @@ class NetworkAgentTests(common.NetworkTests):
"""
# agent list
- agent_list = json.loads(self.openstack(
- 'network agent list -f json'
- ))
+ agent_list = self.openstack(
+ 'network agent list',
+ parse_output=True,
+ )
self.assertIsNotNone(agent_list[0])
agent_ids = list([row["ID"] for row in agent_list])
# agent show
- cmd_output = json.loads(self.openstack(
- 'network agent show -f json %s' % agent_ids[0]
- ))
+ cmd_output = self.openstack(
+ 'network agent show %s' % agent_ids[0],
+ parse_output=True,
+ )
self.assertEqual(
agent_ids[0],
cmd_output['id'],
@@ -60,9 +61,10 @@ class NetworkAgentTests(common.NetworkTests):
)
self.assertOutput('', raw_output)
- cmd_output = json.loads(self.openstack(
- 'network agent show -f json %s' % agent_ids[0]
- ))
+ cmd_output = self.openstack(
+ 'network agent show %s' % agent_ids[0],
+ parse_output=True,
+ )
self.assertEqual(
False,
cmd_output['admin_state_up'],
@@ -73,9 +75,10 @@ class NetworkAgentTests(common.NetworkTests):
)
self.assertOutput('', raw_output)
- cmd_output = json.loads(self.openstack(
- 'network agent show -f json %s' % agent_ids[0]
- ))
+ cmd_output = self.openstack(
+ 'network agent show %s' % agent_ids[0],
+ parse_output=True,
+ )
self.assertEqual(
True,
cmd_output['admin_state_up'],
@@ -98,9 +101,10 @@ class NetworkAgentListTests(common.NetworkTests):
self.skipTest("No dhcp_agent_scheduler extension present")
name1 = uuid.uuid4().hex
- cmd_output1 = json.loads(self.openstack(
- 'network create -f json --description aaaa %s' % name1
- ))
+ cmd_output1 = self.openstack(
+ 'network create --description aaaa %s' % name1,
+ parse_output=True,
+ )
self.addCleanup(self.openstack, 'network delete %s' % name1)
@@ -108,9 +112,10 @@ class NetworkAgentListTests(common.NetworkTests):
network_id = cmd_output1['id']
# Get DHCP Agent ID
- cmd_output2 = json.loads(self.openstack(
- 'network agent list -f json --agent-type dhcp'
- ))
+ cmd_output2 = self.openstack(
+ 'network agent list --agent-type dhcp',
+ parse_output=True,
+ )
agent_id = cmd_output2[0]['ID']
# Add Agent to Network
@@ -120,9 +125,10 @@ class NetworkAgentListTests(common.NetworkTests):
)
# Test network agent list --network
- cmd_output3 = json.loads(self.openstack(
- 'network agent list -f json --network %s' % network_id
- ))
+ cmd_output3 = self.openstack(
+ 'network agent list --network %s' % network_id,
+ parse_output=True,
+ )
# Cleanup
# Remove Agent from Network
@@ -144,15 +150,17 @@ class NetworkAgentListTests(common.NetworkTests):
self.skipTest("No l3_agent_scheduler extension present")
name = uuid.uuid4().hex
- cmd_output = json.loads(self.openstack(
- 'router create -f json %s' % name))
+ cmd_output = self.openstack(
+ 'router create %s' % name,
+ parse_output=True,)
self.addCleanup(self.openstack, 'router delete %s' % name)
# Get router ID
router_id = cmd_output['id']
# Get l3 agent id
- cmd_output = json.loads(self.openstack(
- 'network agent list -f json --agent-type l3'))
+ cmd_output = self.openstack(
+ 'network agent list --agent-type l3',
+ parse_output=True,)
# Check at least one L3 agent is included in the response.
self.assertTrue(cmd_output)
@@ -163,8 +171,9 @@ class NetworkAgentListTests(common.NetworkTests):
'network agent add router --l3 %s %s' % (agent_id, router_id))
# Test router list --agent
- cmd_output = json.loads(self.openstack(
- 'network agent list -f json --router %s' % router_id))
+ cmd_output = self.openstack(
+ 'network agent list --router %s' % router_id,
+ parse_output=True,)
agent_ids = [x['ID'] for x in cmd_output]
self.assertIn(agent_id, agent_ids)
@@ -172,7 +181,8 @@ class NetworkAgentListTests(common.NetworkTests):
# Remove router from agent
self.openstack(
'network agent remove router --l3 %s %s' % (agent_id, router_id))
- cmd_output = json.loads(self.openstack(
- 'network agent list -f json --router %s' % router_id))
+ cmd_output = self.openstack(
+ 'network agent list --router %s' % router_id,
+ parse_output=True,)
agent_ids = [x['ID'] for x in cmd_output]
self.assertNotIn(agent_id, agent_ids)
diff --git a/openstackclient/tests/functional/network/v2/test_network_flavor.py b/openstackclient/tests/functional/network/v2/test_network_flavor.py
index cf68a096..2ac0daef 100644
--- a/openstackclient/tests/functional/network/v2/test_network_flavor.py
+++ b/openstackclient/tests/functional/network/v2/test_network_flavor.py
@@ -10,8 +10,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-
-import json
import uuid
from openstackclient.tests.functional.network.v2 import common
@@ -30,17 +28,19 @@ class NetworkFlavorTests(common.NetworkTests):
"""Test add and remove network flavor to/from profile"""
# Create Flavor
name1 = uuid.uuid4().hex
- cmd_output1 = json.loads(self.openstack(
- 'network flavor create -f json --description testdescription '
+ cmd_output1 = self.openstack(
+ 'network flavor create --description testdescription '
'--enable --service-type L3_ROUTER_NAT ' + name1,
- ))
+ parse_output=True,
+ )
flavor_id = cmd_output1.get('id')
# Create Service Flavor
- cmd_output2 = json.loads(self.openstack(
- 'network flavor profile create -f json --description '
- 'fakedescription --enable --metainfo Extrainfo'
- ))
+ cmd_output2 = self.openstack(
+ 'network flavor profile create --description '
+ 'fakedescription --enable --metainfo Extrainfo',
+ parse_output=True,
+ )
service_profile_id = cmd_output2.get('id')
self.addCleanup(self.openstack, 'network flavor delete %s' %
@@ -53,9 +53,10 @@ class NetworkFlavorTests(common.NetworkTests):
flavor_id + ' ' + service_profile_id
)
- cmd_output4 = json.loads(self.openstack(
- 'network flavor show -f json ' + flavor_id
- ))
+ cmd_output4 = self.openstack(
+ 'network flavor show ' + flavor_id,
+ parse_output=True,
+ )
service_profile_ids1 = cmd_output4.get('service_profile_ids')
# Assert
@@ -68,9 +69,10 @@ class NetworkFlavorTests(common.NetworkTests):
flavor_id + ' ' + service_profile_id
)
- cmd_output6 = json.loads(self.openstack(
- 'network flavor show -f json ' + flavor_id
- ))
+ cmd_output6 = self.openstack(
+ 'network flavor show ' + flavor_id,
+ parse_output=True,
+ )
service_profile_ids2 = cmd_output6.get('service_profile_ids')
# Assert
@@ -79,10 +81,11 @@ class NetworkFlavorTests(common.NetworkTests):
def test_network_flavor_delete(self):
"""Test create, delete multiple"""
name1 = uuid.uuid4().hex
- cmd_output = json.loads(self.openstack(
- 'network flavor create -f json --description testdescription '
+ cmd_output = self.openstack(
+ 'network flavor create --description testdescription '
'--enable --service-type L3_ROUTER_NAT ' + name1,
- ))
+ parse_output=True,
+ )
self.assertEqual(
name1,
cmd_output['name'],
@@ -94,10 +97,11 @@ class NetworkFlavorTests(common.NetworkTests):
)
name2 = uuid.uuid4().hex
- cmd_output = json.loads(self.openstack(
- 'network flavor create -f json --description testdescription1 '
+ cmd_output = self.openstack(
+ 'network flavor create --description testdescription1 '
'--disable --service-type L3_ROUTER_NAT ' + name2,
- ))
+ parse_output=True,
+ )
self.assertEqual(
name2,
cmd_output['name'],
@@ -114,10 +118,11 @@ class NetworkFlavorTests(common.NetworkTests):
def test_network_flavor_list(self):
"""Test create defaults, list filters, delete"""
name1 = uuid.uuid4().hex
- cmd_output = json.loads(self.openstack(
- 'network flavor create -f json --description testdescription '
+ cmd_output = self.openstack(
+ 'network flavor create --description testdescription '
'--enable --service-type L3_ROUTER_NAT ' + name1,
- ))
+ parse_output=True,
+ )
self.addCleanup(self.openstack, "network flavor delete " + name1)
self.assertEqual(
name1,
@@ -133,10 +138,11 @@ class NetworkFlavorTests(common.NetworkTests):
)
name2 = uuid.uuid4().hex
- cmd_output = json.loads(self.openstack(
- 'network flavor create -f json --description testdescription1 '
+ cmd_output = self.openstack(
+ 'network flavor create --description testdescription1 '
'--disable --service-type L3_ROUTER_NAT ' + name2,
- ))
+ parse_output=True,
+ )
self.assertEqual(
name2,
cmd_output['name'],
@@ -152,8 +158,9 @@ class NetworkFlavorTests(common.NetworkTests):
self.addCleanup(self.openstack, "network flavor delete " + name2)
# Test list
- cmd_output = json.loads(self.openstack(
- 'network flavor list -f json ',))
+ cmd_output = self.openstack(
+ 'network flavor list ',
+ parse_output=True,)
self.assertIsNotNone(cmd_output)
name_list = [item.get('Name') for item in cmd_output]
@@ -164,10 +171,11 @@ class NetworkFlavorTests(common.NetworkTests):
"""Tests create options, set, show, delete"""
name = uuid.uuid4().hex
newname = name + "_"
- cmd_output = json.loads(self.openstack(
- 'network flavor create -f json --description testdescription '
+ cmd_output = self.openstack(
+ 'network flavor create --description testdescription '
'--disable --service-type L3_ROUTER_NAT ' + name,
- ))
+ parse_output=True,
+ )
self.addCleanup(self.openstack, "network flavor delete " + newname)
self.assertEqual(
name,
@@ -187,8 +195,9 @@ class NetworkFlavorTests(common.NetworkTests):
)
self.assertOutput('', raw_output)
- cmd_output = json.loads(self.openstack(
- 'network flavor show -f json ' + newname,))
+ cmd_output = self.openstack(
+ 'network flavor show ' + newname,
+ parse_output=True,)
self.assertEqual(
newname,
cmd_output['name'],
@@ -205,13 +214,15 @@ class NetworkFlavorTests(common.NetworkTests):
def test_network_flavor_show(self):
"""Test show network flavor"""
name = uuid.uuid4().hex
- cmd_output = json.loads(self.openstack(
- 'network flavor create -f json --description testdescription '
+ cmd_output = self.openstack(
+ 'network flavor create --description testdescription '
'--disable --service-type L3_ROUTER_NAT ' + name,
- ))
+ parse_output=True,
+ )
self.addCleanup(self.openstack, "network flavor delete " + name)
- cmd_output = json.loads(self.openstack(
- 'network flavor show -f json ' + name,))
+ cmd_output = self.openstack(
+ 'network flavor show ' + name,
+ parse_output=True,)
self.assertEqual(
name,
cmd_output['name'],
diff --git a/openstackclient/tests/functional/network/v2/test_network_flavor_profile.py b/openstackclient/tests/functional/network/v2/test_network_flavor_profile.py
index 5b5ec926..60fd949b 100644
--- a/openstackclient/tests/functional/network/v2/test_network_flavor_profile.py
+++ b/openstackclient/tests/functional/network/v2/test_network_flavor_profile.py
@@ -10,8 +10,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import json
-
from openstackclient.tests.functional.network.v2 import common
@@ -28,11 +26,12 @@ class NetworkFlavorProfileTests(common.NetworkTests):
self.skipTest("No Network service present")
def test_network_flavor_profile_create(self):
- json_output = json.loads(self.openstack(
- 'network flavor profile create -f json ' +
+ json_output = self.openstack(
+ 'network flavor profile create ' +
'--description ' + self.DESCRIPTION + ' ' +
- '--enable --metainfo ' + self.METAINFO
- ))
+ '--enable --metainfo ' + self.METAINFO,
+ parse_output=True,
+ )
ID = json_output.get('id')
self.assertIsNotNone(ID)
self.assertTrue(json_output.get('enabled'))
@@ -50,12 +49,13 @@ class NetworkFlavorProfileTests(common.NetworkTests):
self.assertOutput('', raw_output)
def test_network_flavor_profile_list(self):
- json_output = json.loads(self.openstack(
- 'network flavor profile create -f json ' +
+ json_output = self.openstack(
+ 'network flavor profile create ' +
'--description ' + self.DESCRIPTION + ' ' +
'--enable ' +
- '--metainfo ' + self.METAINFO
- ))
+ '--metainfo ' + self.METAINFO,
+ parse_output=True,
+ )
ID1 = json_output.get('id')
self.assertIsNotNone(ID1)
self.assertTrue(json_output.get('enabled'))
@@ -68,12 +68,13 @@ class NetworkFlavorProfileTests(common.NetworkTests):
json_output.get('meta_info'),
)
- json_output = json.loads(self.openstack(
- 'network flavor profile create -f json ' +
+ json_output = self.openstack(
+ 'network flavor profile create ' +
'--description ' + self.DESCRIPTION + ' ' +
'--disable ' +
- '--metainfo ' + self.METAINFO
- ))
+ '--metainfo ' + self.METAINFO,
+ parse_output=True,
+ )
ID2 = json_output.get('id')
self.assertIsNotNone(ID2)
self.assertFalse(json_output.get('enabled'))
@@ -87,9 +88,10 @@ class NetworkFlavorProfileTests(common.NetworkTests):
)
# Test list
- json_output = json.loads(self.openstack(
- 'network flavor profile list -f json'
- ))
+ json_output = self.openstack(
+ 'network flavor profile list',
+ parse_output=True,
+ )
self.assertIsNotNone(json_output)
id_list = [item.get('ID') for item in json_output]
@@ -103,12 +105,13 @@ class NetworkFlavorProfileTests(common.NetworkTests):
self.assertOutput('', raw_output)
def test_network_flavor_profile_set(self):
- json_output_1 = json.loads(self.openstack(
- 'network flavor profile create -f json ' +
+ json_output_1 = self.openstack(
+ 'network flavor profile create ' +
'--description ' + self.DESCRIPTION + ' ' +
'--enable ' +
- '--metainfo ' + self.METAINFO
- ))
+ '--metainfo ' + self.METAINFO,
+ parse_output=True,
+ )
ID = json_output_1.get('id')
self.assertIsNotNone(ID)
self.assertTrue(json_output_1.get('enabled'))
@@ -123,9 +126,10 @@ class NetworkFlavorProfileTests(common.NetworkTests):
self.openstack('network flavor profile set --disable ' + ID)
- json_output = json.loads(self.openstack(
- 'network flavor profile show -f json ' + ID
- ))
+ json_output = self.openstack(
+ 'network flavor profile show ' + ID,
+ parse_output=True,
+ )
self.assertFalse(json_output.get('enabled'))
self.assertEqual(
'fakedescription',
@@ -141,17 +145,19 @@ class NetworkFlavorProfileTests(common.NetworkTests):
self.assertOutput('', raw_output)
def test_network_flavor_profile_show(self):
- json_output_1 = json.loads(self.openstack(
- 'network flavor profile create -f json ' +
+ json_output_1 = self.openstack(
+ 'network flavor profile create ' +
'--description ' + self.DESCRIPTION + ' ' +
'--enable ' +
- '--metainfo ' + self.METAINFO
- ))
+ '--metainfo ' + self.METAINFO,
+ parse_output=True,
+ )
ID = json_output_1.get('id')
self.assertIsNotNone(ID)
- json_output = json.loads(self.openstack(
- 'network flavor profile show -f json ' + ID
- ))
+ json_output = self.openstack(
+ 'network flavor profile show ' + ID,
+ parse_output=True,
+ )
self.assertEqual(
ID,
json_output["id"],
diff --git a/openstackclient/tests/functional/network/v2/test_network_meter.py b/openstackclient/tests/functional/network/v2/test_network_meter.py
index 0a8b89ca..ea9d289f 100644
--- a/openstackclient/tests/functional/network/v2/test_network_meter.py
+++ b/openstackclient/tests/functional/network/v2/test_network_meter.py
@@ -13,7 +13,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import json
import uuid
from openstackclient.tests.functional.network.v2 import common
@@ -38,11 +37,12 @@ class TestMeter(common.NetworkTests):
name1 = uuid.uuid4().hex
name2 = uuid.uuid4().hex
description = 'fakedescription'
- json_output = json.loads(self.openstack(
- 'network meter create -f json ' +
+ json_output = self.openstack(
+ 'network meter create ' +
' --description ' + description + ' ' +
- name1
- ))
+ name1,
+ parse_output=True,
+ )
self.assertEqual(
name1,
json_output.get('name'),
@@ -54,11 +54,12 @@ class TestMeter(common.NetworkTests):
json_output.get('description'),
)
- json_output_2 = json.loads(self.openstack(
- 'network meter create -f json ' +
+ json_output_2 = self.openstack(
+ 'network meter create ' +
'--description ' + description + ' ' +
- name2
- ))
+ name2,
+ parse_output=True,
+ )
self.assertEqual(
name2,
json_output_2.get('name'),
@@ -78,12 +79,13 @@ class TestMeter(common.NetworkTests):
def test_meter_list(self):
"""Test create, list filters, delete"""
name1 = uuid.uuid4().hex
- json_output = json.loads(self.openstack(
- 'network meter create -f json ' +
+ json_output = self.openstack(
+ 'network meter create ' +
'--description Test1 ' +
'--share ' +
- name1
- ))
+ name1,
+ parse_output=True,
+ )
self.addCleanup(
self.openstack,
'network meter delete ' + name1
@@ -96,12 +98,13 @@ class TestMeter(common.NetworkTests):
self.assertTrue(json_output.get('shared'))
name2 = uuid.uuid4().hex
- json_output_2 = json.loads(self.openstack(
- 'network meter create -f json ' +
+ json_output_2 = self.openstack(
+ 'network meter create ' +
'--description Test2 ' +
'--no-share ' +
- name2
- ))
+ name2,
+ parse_output=True,
+ )
self.addCleanup(self.openstack, 'network meter delete ' + name2)
self.assertEqual(
@@ -112,7 +115,7 @@ class TestMeter(common.NetworkTests):
json_output_2.get('shared'),
)
- raw_output = json.loads(self.openstack('network meter list -f json'))
+ raw_output = self.openstack('network meter list', parse_output=True)
name_list = [item.get('Name') for item in raw_output]
self.assertIn(name1, name_list)
self.assertIn(name2, name_list)
@@ -121,18 +124,20 @@ class TestMeter(common.NetworkTests):
"""Test create, show, delete"""
name1 = uuid.uuid4().hex
description = 'fakedescription'
- json_output = json.loads(self.openstack(
- 'network meter create -f json ' +
+ json_output = self.openstack(
+ 'network meter create ' +
' --description ' + description + ' ' +
- name1
- ))
+ name1,
+ parse_output=True,
+ )
meter_id = json_output.get('id')
self.addCleanup(self.openstack, 'network meter delete ' + name1)
# Test show with ID
- json_output = json.loads(self.openstack(
- 'network meter show -f json ' + meter_id
- ))
+ json_output = self.openstack(
+ 'network meter show ' + meter_id,
+ parse_output=True,
+ )
self.assertFalse(json_output.get('shared'))
self.assertEqual(
'fakedescription',
@@ -144,9 +149,10 @@ class TestMeter(common.NetworkTests):
)
# Test show with name
- json_output = json.loads(self.openstack(
- 'network meter show -f json ' + name1
- ))
+ json_output = self.openstack(
+ 'network meter show ' + name1,
+ parse_output=True,
+ )
self.assertEqual(
meter_id,
json_output.get('id'),
diff --git a/openstackclient/tests/functional/network/v2/test_network_meter_rule.py b/openstackclient/tests/functional/network/v2/test_network_meter_rule.py
index 31bc0845..ae1bb904 100644
--- a/openstackclient/tests/functional/network/v2/test_network_meter_rule.py
+++ b/openstackclient/tests/functional/network/v2/test_network_meter_rule.py
@@ -13,7 +13,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import json
import uuid
from openstackclient.tests.functional.network.v2 import common
@@ -31,10 +30,11 @@ class TestMeterRule(common.NetworkTests):
if cls.haz_network:
cls.METER_NAME = uuid.uuid4().hex
- json_output = json.loads(cls.openstack(
- 'network meter create -f json ' +
- cls.METER_NAME
- ))
+ json_output = cls.openstack(
+ 'network meter create ' +
+ cls.METER_NAME,
+ parse_output=True,
+ )
cls.METER_ID = json_output.get('id')
@classmethod
@@ -57,11 +57,12 @@ class TestMeterRule(common.NetworkTests):
def test_meter_rule_delete(self):
"""test create, delete"""
- json_output = json.loads(self.openstack(
- 'network meter rule create -f json ' +
+ json_output = self.openstack(
+ 'network meter rule create ' +
'--remote-ip-prefix 10.0.0.0/8 ' +
- self.METER_ID
- ))
+ self.METER_ID,
+ parse_output=True,
+ )
rule_id = json_output.get('id')
re_ip = json_output.get('remote_ip_prefix')
@@ -77,11 +78,12 @@ class TestMeterRule(common.NetworkTests):
def test_meter_rule_list(self):
"""Test create, list, delete"""
- json_output = json.loads(self.openstack(
- 'network meter rule create -f json ' +
+ json_output = self.openstack(
+ 'network meter rule create ' +
'--remote-ip-prefix 10.0.0.0/8 ' +
- self.METER_ID
- ))
+ self.METER_ID,
+ parse_output=True,
+ )
rule_id_1 = json_output.get('id')
self.addCleanup(
self.openstack,
@@ -92,11 +94,12 @@ class TestMeterRule(common.NetworkTests):
json_output.get('remote_ip_prefix')
)
- json_output_1 = json.loads(self.openstack(
- 'network meter rule create -f json ' +
+ json_output_1 = self.openstack(
+ 'network meter rule create ' +
'--remote-ip-prefix 11.0.0.0/8 ' +
- self.METER_ID
- ))
+ self.METER_ID,
+ parse_output=True,
+ )
rule_id_2 = json_output_1.get('id')
self.addCleanup(
self.openstack,
@@ -107,9 +110,10 @@ class TestMeterRule(common.NetworkTests):
json_output_1.get('remote_ip_prefix')
)
- json_output = json.loads(self.openstack(
- 'network meter rule list -f json'
- ))
+ json_output = self.openstack(
+ 'network meter rule list',
+ parse_output=True,
+ )
rule_id_list = [item.get('ID') for item in json_output]
ip_prefix_list = [item.get('Remote IP Prefix') for item in json_output]
self.assertIn(rule_id_1, rule_id_list)
@@ -119,12 +123,13 @@ class TestMeterRule(common.NetworkTests):
def test_meter_rule_show(self):
"""Test create, show, delete"""
- json_output = json.loads(self.openstack(
- 'network meter rule create -f json ' +
+ json_output = self.openstack(
+ 'network meter rule create ' +
'--remote-ip-prefix 10.0.0.0/8 ' +
'--egress ' +
- self.METER_ID
- ))
+ self.METER_ID,
+ parse_output=True,
+ )
rule_id = json_output.get('id')
self.assertEqual(
@@ -132,9 +137,10 @@ class TestMeterRule(common.NetworkTests):
json_output.get('direction')
)
- json_output = json.loads(self.openstack(
- 'network meter rule show -f json ' + rule_id
- ))
+ json_output = self.openstack(
+ 'network meter rule show ' + rule_id,
+ parse_output=True,
+ )
self.assertEqual(
'10.0.0.0/8',
json_output.get('remote_ip_prefix')
diff --git a/openstackclient/tests/functional/network/v2/test_network_ndp_proxy.py b/openstackclient/tests/functional/network/v2/test_network_ndp_proxy.py
new file mode 100644
index 00000000..588b1f56
--- /dev/null
+++ b/openstackclient/tests/functional/network/v2/test_network_ndp_proxy.py
@@ -0,0 +1,217 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from openstackclient.tests.functional.network.v2 import common
+
+
+class L3NDPProxyTests(common.NetworkTests):
+
+ def setUp(self):
+ super().setUp()
+ # Nothing in this class works with Nova Network
+ if not self.haz_network:
+ self.skipTest("No Network service present")
+ if not self.is_extension_enabled('l3-ndp-proxy'):
+ self.skipTest("No l3-ndp-proxy extension present")
+
+ self.ROT_NAME = self.getUniqueString()
+ self.EXT_NET_NAME = self.getUniqueString()
+ self.EXT_SUB_NAME = self.getUniqueString()
+ self.INT_NET_NAME = self.getUniqueString()
+ self.INT_SUB_NAME = self.getUniqueString()
+ self.INT_PORT_NAME = self.getUniqueString()
+ self.ADDR_SCOPE_NAME = self.getUniqueString()
+ self.SUBNET_P_NAME = self.getUniqueString()
+ self.created_ndp_proxies = []
+
+ json_output = self.openstack(
+ 'address scope create --ip-version 6 '
+ '%(address_s_name)s' % {'address_s_name': self.ADDR_SCOPE_NAME},
+ parse_output=True,
+ )
+ self.assertIsNotNone(json_output['id'])
+ self.ADDRESS_SCOPE_ID = json_output['id']
+ json_output = self.openstack(
+ 'subnet pool create %(subnet_p_name)s '
+ '--address-scope %(address_scope)s '
+ '--pool-prefix 2001:db8::/96 --default-prefix-length 112' % {
+ 'subnet_p_name': self.SUBNET_P_NAME,
+ 'address_scope': self.ADDRESS_SCOPE_ID,
+ },
+ parse_output=True,
+ )
+ self.assertIsNotNone(json_output['id'])
+ self.SUBNET_POOL_ID = json_output['id']
+ json_output = self.openstack(
+ 'network create --external ' + self.EXT_NET_NAME,
+ parse_output=True,
+ )
+ self.assertIsNotNone(json_output['id'])
+ self.EXT_NET_ID = json_output['id']
+ json_output = self.openstack(
+ 'subnet create --ip-version 6 --subnet-pool '
+ '%(subnet_pool)s --network %(net_id)s %(sub_name)s' % {
+ 'subnet_pool': self.SUBNET_POOL_ID,
+ 'net_id': self.EXT_NET_ID,
+ 'sub_name': self.EXT_SUB_NAME,
+ },
+ parse_output=True,
+ )
+ self.assertIsNotNone(json_output['id'])
+ self.EXT_SUB_ID = json_output['id']
+ json_output = self.openstack(
+ 'router create ' + self.ROT_NAME,
+ parse_output=True,
+ )
+ self.assertIsNotNone(json_output['id'])
+ self.ROT_ID = json_output['id']
+ output = self.openstack(
+ 'router set %(router_id)s --external-gateway %(net_id)s' % {
+ 'router_id': self.ROT_ID,
+ 'net_id': self.EXT_NET_ID})
+ self.assertEqual('', output)
+ output = self.openstack('router set --enable-ndp-proxy ' + self.ROT_ID)
+ self.assertEqual('', output)
+ json_output = self.openstack(
+ 'router show -c enable_ndp_proxy ' + self.ROT_ID,
+ parse_output=True,
+ )
+ self.assertTrue(json_output['enable_ndp_proxy'])
+ json_output = self.openstack(
+ 'network create ' + self.INT_NET_NAME,
+ parse_output=True,
+ )
+ self.assertIsNotNone(json_output['id'])
+ self.INT_NET_ID = json_output['id']
+ json_output = self.openstack(
+ 'subnet create --ip-version 6 --subnet-pool '
+ '%(subnet_pool)s --network %(net_id)s %(sub_name)s' % {
+ 'subnet_pool': self.SUBNET_POOL_ID,
+ 'net_id': self.INT_NET_ID,
+ 'sub_name': self.INT_SUB_NAME,
+ },
+ parse_output=True,
+ )
+ self.assertIsNotNone(json_output['id'])
+ self.INT_SUB_ID = json_output['id']
+ json_output = self.openstack(
+ 'port create --network %(net_id)s '
+ '%(port_name)s' % {
+ 'net_id': self.INT_NET_ID,
+ 'port_name': self.INT_PORT_NAME,
+ },
+ parse_output=True,
+ )
+ self.assertIsNotNone(json_output['id'])
+ self.INT_PORT_ID = json_output['id']
+ self.INT_PORT_ADDRESS = json_output['fixed_ips'][0]['ip_address']
+ output = self.openstack(
+ 'router add subnet ' + self.ROT_ID + ' ' + self.INT_SUB_ID)
+ self.assertEqual('', output)
+
+ def tearDown(self):
+ for ndp_proxy in self.created_ndp_proxies:
+ output = self.openstack(
+ 'router ndp proxy delete ' + ndp_proxy['id'])
+ self.assertEqual('', output)
+ output = self.openstack('port delete ' + self.INT_PORT_ID)
+ self.assertEqual('', output)
+ output = self.openstack(
+ 'router set --disable-ndp-proxy ' + self.ROT_ID)
+ self.assertEqual('', output)
+ output = self.openstack(
+ 'router remove subnet ' + self.ROT_ID + ' ' + self.INT_SUB_ID)
+ self.assertEqual('', output)
+ output = self.openstack('subnet delete ' + self.INT_SUB_ID)
+ self.assertEqual('', output)
+ output = self.openstack('network delete ' + self.INT_NET_ID)
+ self.assertEqual('', output)
+ output = self.openstack(
+ 'router unset ' + self.ROT_ID + ' ' + '--external-gateway')
+ self.assertEqual('', output)
+ output = self.openstack('router delete ' + self.ROT_ID)
+ self.assertEqual('', output)
+ output = self.openstack('subnet delete ' + self.EXT_SUB_ID)
+ self.assertEqual('', output)
+ output = self.openstack('network delete ' + self.EXT_NET_ID)
+ self.assertEqual('', output)
+ output = self.openstack('subnet pool delete ' + self.SUBNET_POOL_ID)
+ self.assertEqual('', output)
+ output = self.openstack('address scope delete ' +
+ self.ADDRESS_SCOPE_ID)
+ self.assertEqual('', output)
+ super().tearDown()
+
+ def _create_ndp_proxies(self, ndp_proxies):
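+ # Create each requested NDP proxy, check the attributes echoed back
+ # by the server, and record it so tearDown() can delete it again.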
+ for ndp_proxy in ndp_proxies:
+ output = self.openstack(
+ 'router ndp proxy create %(router)s --name %(name)s '
+ '--port %(port)s --ip-address %(address)s' % {
+ 'router': ndp_proxy['router_id'],
+ 'name': ndp_proxy['name'],
+ 'port': ndp_proxy['port_id'],
+ 'address': ndp_proxy['address'],
+ },
+ parse_output=True,
+ )
+ self.assertEqual(ndp_proxy['router_id'], output['router_id'])
+ self.assertEqual(ndp_proxy['port_id'], output['port_id'])
+ self.assertEqual(ndp_proxy['address'], output['ip_address'])
+ self.created_ndp_proxies.append(output)
+
+ def test_create_ndp_proxy(self):
+ ndp_proxies = [
+ {
+ 'name': self.getUniqueString(),
+ 'router_id': self.ROT_ID,
+ 'port_id': self.INT_PORT_ID,
+ 'address': self.INT_PORT_ADDRESS
+ }
+ ]
+ self._create_ndp_proxies(ndp_proxies)
+
+ def test_ndp_proxy_list(self):
+ ndp_proxies = {
+ 'name': self.getUniqueString(),
+ 'router_id': self.ROT_ID,
+ 'port_id': self.INT_PORT_ID,
+ 'address': self.INT_PORT_ADDRESS}
+ self._create_ndp_proxies([ndp_proxies])
+ ndp_proxy = self.openstack(
+ 'router ndp proxy list',
+ parse_output=True,)[0]
+ self.assertEqual(ndp_proxies['name'], ndp_proxy['Name'])
+ self.assertEqual(ndp_proxies['router_id'], ndp_proxy['Router ID'])
+ self.assertEqual(ndp_proxies['address'], ndp_proxy['IP Address'])
+
+ def test_ndp_proxy_set_and_show(self):
+ ndp_proxies = {
+ 'name': self.getUniqueString(),
+ 'router_id': self.ROT_ID,
+ 'port_id': self.INT_PORT_ID,
+ 'address': self.INT_PORT_ADDRESS}
+ description = 'balala'
+ self._create_ndp_proxies([ndp_proxies])
+ ndp_proxy_id = self.created_ndp_proxies[0]['id']
+ output = self.openstack(
+ 'router ndp proxy set --description %s %s' % (
+ description, ndp_proxy_id))
+ self.assertEqual('', output)
+ json_output = self.openstack(
+ 'router ndp proxy show ' + ndp_proxy_id,
+ parse_output=True,
+ )
+ self.assertEqual(ndp_proxies['name'], json_output['name'])
+ self.assertEqual(ndp_proxies['router_id'], json_output['router_id'])
+ self.assertEqual(ndp_proxies['port_id'], json_output['port_id'])
+ self.assertEqual(ndp_proxies['address'], json_output['ip_address'])
+ self.assertEqual(description, json_output['description'])
diff --git a/openstackclient/tests/functional/network/v2/test_network_qos_policy.py b/openstackclient/tests/functional/network/v2/test_network_qos_policy.py
index 02e64028..b603cf1f 100644
--- a/openstackclient/tests/functional/network/v2/test_network_qos_policy.py
+++ b/openstackclient/tests/functional/network/v2/test_network_qos_policy.py
@@ -13,7 +13,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import json
import uuid
from openstackclient.tests.functional.network.v2 import common
@@ -31,7 +30,7 @@ class NetworkQosPolicyTests(common.NetworkTests):
def test_qos_rule_create_delete(self):
# This is to check the output of qos policy delete
policy_name = uuid.uuid4().hex
- self.openstack('network qos policy create -f json ' + policy_name)
+ self.openstack('network qos policy create ' + policy_name)
raw_output = self.openstack(
'network qos policy delete ' +
policy_name
@@ -40,25 +39,28 @@ class NetworkQosPolicyTests(common.NetworkTests):
def test_qos_policy_list(self):
policy_name = uuid.uuid4().hex
- json_output = json.loads(self.openstack(
- 'network qos policy create -f json ' +
- policy_name
- ))
+ json_output = self.openstack(
+ 'network qos policy create ' +
+ policy_name,
+ parse_output=True,
+ )
self.addCleanup(self.openstack,
'network qos policy delete ' + policy_name)
self.assertEqual(policy_name, json_output['name'])
- json_output = json.loads(self.openstack(
- 'network qos policy list -f json'
- ))
+ json_output = self.openstack(
+ 'network qos policy list',
+ parse_output=True,
+ )
self.assertIn(policy_name, [p['Name'] for p in json_output])
def test_qos_policy_set(self):
policy_name = uuid.uuid4().hex
- json_output = json.loads(self.openstack(
- 'network qos policy create -f json ' +
- policy_name
- ))
+ json_output = self.openstack(
+ 'network qos policy create ' +
+ policy_name,
+ parse_output=True,
+ )
self.addCleanup(self.openstack,
'network qos policy delete ' + policy_name)
self.assertEqual(policy_name, json_output['name'])
@@ -69,10 +71,11 @@ class NetworkQosPolicyTests(common.NetworkTests):
policy_name
)
- json_output = json.loads(self.openstack(
- 'network qos policy show -f json ' +
- policy_name
- ))
+ json_output = self.openstack(
+ 'network qos policy show ' +
+ policy_name,
+ parse_output=True,
+ )
self.assertTrue(json_output['shared'])
self.openstack(
@@ -81,9 +84,10 @@ class NetworkQosPolicyTests(common.NetworkTests):
'--no-default ' +
policy_name
)
- json_output = json.loads(self.openstack(
- 'network qos policy show -f json ' +
- policy_name
- ))
+ json_output = self.openstack(
+ 'network qos policy show ' +
+ policy_name,
+ parse_output=True,
+ )
self.assertFalse(json_output['shared'])
self.assertFalse(json_output['is_default'])
diff --git a/openstackclient/tests/functional/network/v2/test_network_qos_rule.py b/openstackclient/tests/functional/network/v2/test_network_qos_rule.py
index 98e588e8..0fe1854b 100644
--- a/openstackclient/tests/functional/network/v2/test_network_qos_rule.py
+++ b/openstackclient/tests/functional/network/v2/test_network_qos_rule.py
@@ -13,7 +13,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import json
import uuid
from openstackclient.tests.functional.network.v2 import common
@@ -35,13 +34,14 @@ class NetworkQosRuleTestsMinimumBandwidth(common.NetworkTests):
)
self.addCleanup(self.openstack,
'network qos policy delete %s' % self.QOS_POLICY_NAME)
- cmd_output = json.loads(self.openstack(
- 'network qos rule create -f json '
+ cmd_output = self.openstack(
+ 'network qos rule create '
'--type minimum-bandwidth '
'--min-kbps 2800 '
'--egress %s' %
- self.QOS_POLICY_NAME
- ))
+ self.QOS_POLICY_NAME,
+ parse_output=True,
+ )
self.RULE_ID = cmd_output['id']
self.addCleanup(self.openstack,
'network qos rule delete %s %s' %
@@ -51,40 +51,120 @@ class NetworkQosRuleTestsMinimumBandwidth(common.NetworkTests):
def test_qos_rule_create_delete(self):
# This is to check the output of qos rule delete
policy_name = uuid.uuid4().hex
- self.openstack('network qos policy create -f json %s' % policy_name)
+ self.openstack('network qos policy create %s' % policy_name)
self.addCleanup(self.openstack,
'network qos policy delete %s' % policy_name)
- rule = json.loads(self.openstack(
- 'network qos rule create -f json '
+ rule = self.openstack(
+ 'network qos rule create '
'--type minimum-bandwidth '
'--min-kbps 2800 '
- '--egress %s' % policy_name
- ))
+ '--egress %s' % policy_name,
+ parse_output=True,
+ )
raw_output = self.openstack(
'network qos rule delete %s %s' %
(policy_name, rule['id']))
self.assertEqual('', raw_output)
def test_qos_rule_list(self):
- cmd_output = json.loads(self.openstack(
- 'network qos rule list -f json %s' % self.QOS_POLICY_NAME))
+ cmd_output = self.openstack(
+ 'network qos rule list %s' % self.QOS_POLICY_NAME,
+ parse_output=True,)
self.assertIn(self.RULE_ID, [rule['ID'] for rule in cmd_output])
def test_qos_rule_show(self):
- cmd_output = json.loads(self.openstack(
- 'network qos rule show -f json %s %s' %
- (self.QOS_POLICY_NAME, self.RULE_ID)))
+ cmd_output = self.openstack(
+ 'network qos rule show %s %s' %
+ (self.QOS_POLICY_NAME, self.RULE_ID),
+ parse_output=True,
+ )
self.assertEqual(self.RULE_ID, cmd_output['id'])
def test_qos_rule_set(self):
self.openstack('network qos rule set --min-kbps 7500 %s %s' %
(self.QOS_POLICY_NAME, self.RULE_ID))
- cmd_output = json.loads(self.openstack(
- 'network qos rule show -f json %s %s' %
- (self.QOS_POLICY_NAME, self.RULE_ID)))
+ cmd_output = self.openstack(
+ 'network qos rule show %s %s' %
+ (self.QOS_POLICY_NAME, self.RULE_ID),
+ parse_output=True,
+ )
self.assertEqual(7500, cmd_output['min_kbps'])
+class NetworkQosRuleTestsMinimumPacketRate(common.NetworkTests):
+ """Functional tests for QoS minimum packet rate rule"""
+
+ def setUp(self):
+ super(NetworkQosRuleTestsMinimumPacketRate, self).setUp()
+ # Nothing in this class works with Nova Network
+ if not self.haz_network:
+ self.skipTest("No Network service present")
+
+ self.QOS_POLICY_NAME = 'qos_policy_%s' % uuid.uuid4().hex
+
+ self.openstack(
+ 'network qos policy create %s' % self.QOS_POLICY_NAME
+ )
+ self.addCleanup(self.openstack,
+ 'network qos policy delete %s' % self.QOS_POLICY_NAME)
+ cmd_output = self.openstack(
+ 'network qos rule create '
+ '--type minimum-packet-rate '
+ '--min-kpps 2800 '
+ '--egress %s' %
+ self.QOS_POLICY_NAME,
+ parse_output=True,
+ )
+ self.RULE_ID = cmd_output['id']
+ self.addCleanup(self.openstack,
+ 'network qos rule delete %s %s' %
+ (self.QOS_POLICY_NAME, self.RULE_ID))
+ self.assertTrue(self.RULE_ID)
+
+ def test_qos_rule_create_delete(self):
+ # This is to check the output of qos rule delete
+ policy_name = uuid.uuid4().hex
+ self.openstack('network qos policy create %s' % policy_name)
+ self.addCleanup(self.openstack,
+ 'network qos policy delete %s' % policy_name)
+ rule = self.openstack(
+ 'network qos rule create '
+ '--type minimum-packet-rate '
+ '--min-kpps 2800 '
+ '--egress %s' % policy_name,
+ parse_output=True,
+ )
+ raw_output = self.openstack(
+ 'network qos rule delete %s %s' %
+ (policy_name, rule['id']))
+ self.assertEqual('', raw_output)
+
+ def test_qos_rule_list(self):
+ cmd_output = self.openstack(
+ 'network qos rule list %s' % self.QOS_POLICY_NAME,
+ parse_output=True,)
+ self.assertIn(self.RULE_ID, [rule['ID'] for rule in cmd_output])
+
+ def test_qos_rule_show(self):
+ cmd_output = self.openstack(
+ 'network qos rule show %s %s' %
+ (self.QOS_POLICY_NAME, self.RULE_ID),
+ parse_output=True,
+ )
+ self.assertEqual(self.RULE_ID, cmd_output['id'])
+
+ def test_qos_rule_set(self):
+ self.openstack('network qos rule set --min-kpps 7500 --any %s %s' %
+ (self.QOS_POLICY_NAME, self.RULE_ID))
+ cmd_output = self.openstack(
+ 'network qos rule show %s %s' %
+ (self.QOS_POLICY_NAME, self.RULE_ID),
+ parse_output=True,
+ )
+ self.assertEqual(7500, cmd_output['min_kpps'])
+ self.assertEqual('any', cmd_output['direction'])
+
+
class NetworkQosRuleTestsDSCPMarking(common.NetworkTests):
"""Functional tests for QoS DSCP marking rule"""
@@ -100,12 +180,13 @@ class NetworkQosRuleTestsDSCPMarking(common.NetworkTests):
)
self.addCleanup(self.openstack,
'network qos policy delete %s' % self.QOS_POLICY_NAME)
- cmd_output = json.loads(self.openstack(
- 'network qos rule create -f json '
+ cmd_output = self.openstack(
+ 'network qos rule create '
'--type dscp-marking '
'--dscp-mark 8 %s' %
- self.QOS_POLICY_NAME
- ))
+ self.QOS_POLICY_NAME,
+ parse_output=True,
+ )
self.RULE_ID = cmd_output['id']
self.addCleanup(self.openstack,
'network qos rule delete %s %s' %
@@ -115,36 +196,42 @@ class NetworkQosRuleTestsDSCPMarking(common.NetworkTests):
def test_qos_rule_create_delete(self):
# This is to check the output of qos rule delete
policy_name = uuid.uuid4().hex
- self.openstack('network qos policy create -f json %s' % policy_name)
+ self.openstack('network qos policy create %s' % policy_name)
self.addCleanup(self.openstack,
'network qos policy delete %s' % policy_name)
- rule = json.loads(self.openstack(
- 'network qos rule create -f json '
+ rule = self.openstack(
+ 'network qos rule create '
'--type dscp-marking '
- '--dscp-mark 8 %s' % policy_name
- ))
+ '--dscp-mark 8 %s' % policy_name,
+ parse_output=True,
+ )
raw_output = self.openstack(
'network qos rule delete %s %s' %
(policy_name, rule['id']))
self.assertEqual('', raw_output)
def test_qos_rule_list(self):
- cmd_output = json.loads(self.openstack(
- 'network qos rule list -f json %s' % self.QOS_POLICY_NAME))
+ cmd_output = self.openstack(
+ 'network qos rule list %s' % self.QOS_POLICY_NAME,
+ parse_output=True,)
self.assertIn(self.RULE_ID, [rule['ID'] for rule in cmd_output])
def test_qos_rule_show(self):
- cmd_output = json.loads(self.openstack(
- 'network qos rule show -f json %s %s' %
- (self.QOS_POLICY_NAME, self.RULE_ID)))
+ cmd_output = self.openstack(
+ 'network qos rule show %s %s' %
+ (self.QOS_POLICY_NAME, self.RULE_ID),
+ parse_output=True,
+ )
self.assertEqual(self.RULE_ID, cmd_output['id'])
def test_qos_rule_set(self):
self.openstack('network qos rule set --dscp-mark 32 %s %s' %
(self.QOS_POLICY_NAME, self.RULE_ID))
- cmd_output = json.loads(self.openstack(
- 'network qos rule show -f json %s %s' %
- (self.QOS_POLICY_NAME, self.RULE_ID)))
+ cmd_output = self.openstack(
+ 'network qos rule show %s %s' %
+ (self.QOS_POLICY_NAME, self.RULE_ID),
+ parse_output=True,
+ )
self.assertEqual(32, cmd_output['dscp_mark'])
@@ -163,13 +250,14 @@ class NetworkQosRuleTestsBandwidthLimit(common.NetworkTests):
)
self.addCleanup(self.openstack,
'network qos policy delete %s' % self.QOS_POLICY_NAME)
- cmd_output = json.loads(self.openstack(
- 'network qos rule create -f json '
+ cmd_output = self.openstack(
+ 'network qos rule create '
'--type bandwidth-limit '
'--max-kbps 10000 '
'--egress %s' %
- self.QOS_POLICY_NAME
- ))
+ self.QOS_POLICY_NAME,
+ parse_output=True,
+ )
self.RULE_ID = cmd_output['id']
self.addCleanup(self.openstack,
'network qos rule delete %s %s' %
@@ -179,31 +267,35 @@ class NetworkQosRuleTestsBandwidthLimit(common.NetworkTests):
def test_qos_rule_create_delete(self):
# This is to check the output of qos rule delete
policy_name = uuid.uuid4().hex
- self.openstack('network qos policy create -f json %s' % policy_name)
+ self.openstack('network qos policy create %s' % policy_name)
self.addCleanup(self.openstack,
'network qos policy delete %s' % policy_name)
- rule = json.loads(self.openstack(
- 'network qos rule create -f json '
+ rule = self.openstack(
+ 'network qos rule create '
'--type bandwidth-limit '
'--max-kbps 10000 '
'--max-burst-kbits 1400 '
- '--egress %s' % policy_name
- ))
+ '--egress %s' % policy_name,
+ parse_output=True,
+ )
raw_output = self.openstack(
'network qos rule delete %s %s' %
(policy_name, rule['id']))
self.assertEqual('', raw_output)
def test_qos_rule_list(self):
- cmd_output = json.loads(self.openstack(
- 'network qos rule list -f json %s' %
- self.QOS_POLICY_NAME))
+ cmd_output = self.openstack(
+ 'network qos rule list %s' %
+ self.QOS_POLICY_NAME,
+ parse_output=True,)
self.assertIn(self.RULE_ID, [rule['ID'] for rule in cmd_output])
def test_qos_rule_show(self):
- cmd_output = json.loads(self.openstack(
- 'network qos rule show -f json %s %s' %
- (self.QOS_POLICY_NAME, self.RULE_ID)))
+ cmd_output = self.openstack(
+ 'network qos rule show %s %s' %
+ (self.QOS_POLICY_NAME, self.RULE_ID),
+ parse_output=True,
+ )
self.assertEqual(self.RULE_ID, cmd_output['id'])
def test_qos_rule_set(self):
@@ -211,9 +303,11 @@ class NetworkQosRuleTestsBandwidthLimit(common.NetworkTests):
'--max-burst-kbits 1800 '
'--ingress %s %s' %
(self.QOS_POLICY_NAME, self.RULE_ID))
- cmd_output = json.loads(self.openstack(
- 'network qos rule show -f json %s %s' %
- (self.QOS_POLICY_NAME, self.RULE_ID)))
+ cmd_output = self.openstack(
+ 'network qos rule show %s %s' %
+ (self.QOS_POLICY_NAME, self.RULE_ID),
+ parse_output=True,
+ )
self.assertEqual(15000, cmd_output['max_kbps'])
self.assertEqual(1800, cmd_output['max_burst_kbps'])
self.assertEqual('ingress', cmd_output['direction'])
diff --git a/openstackclient/tests/functional/network/v2/test_network_qos_rule_type.py b/openstackclient/tests/functional/network/v2/test_network_qos_rule_type.py
index 56cd8920..4ead65cc 100644
--- a/openstackclient/tests/functional/network/v2/test_network_qos_rule_type.py
+++ b/openstackclient/tests/functional/network/v2/test_network_qos_rule_type.py
@@ -13,8 +13,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import json
-
from openstackclient.tests.functional.network.v2 import common
@@ -23,6 +21,13 @@ class NetworkQosRuleTypeTests(common.NetworkTests):
AVAILABLE_RULE_TYPES = ['dscp_marking',
'bandwidth_limit']
+ # NOTE(ralonsoh): this list was updated in Yoga (February 2022)
+ ALL_AVAILABLE_RULE_TYPES = ['dscp_marking',
+ 'bandwidth_limit',
+ 'minimum_bandwidth',
+ 'packet_rate_limit',
+ 'minimum_packet_rate',
+ ]
def setUp(self):
super(NetworkQosRuleTypeTests, self).setUp()
@@ -31,14 +36,40 @@ class NetworkQosRuleTypeTests(common.NetworkTests):
self.skipTest("No Network service present")
def test_qos_rule_type_list(self):
- cmd_output = json.loads(self.openstack(
- 'network qos rule type list -f json'))
+ cmd_output = self.openstack(
+ 'network qos rule type list',
+ parse_output=True,
+ )
+ for rule_type in self.AVAILABLE_RULE_TYPES:
+ self.assertIn(rule_type, [x['Type'] for x in cmd_output])
+
+ def test_qos_rule_type_list_all_supported(self):
+ if not self.is_extension_enabled('qos-rule-type-filter'):
+ self.skipTest('No "qos-rule-type-filter" extension present')
+
+ cmd_output = self.openstack(
+ 'network qos rule type list --all-supported',
+ parse_output=True,
+ )
for rule_type in self.AVAILABLE_RULE_TYPES:
self.assertIn(rule_type, [x['Type'] for x in cmd_output])
+ def test_qos_rule_type_list_all_rules(self):
+ if not self.is_extension_enabled('qos-rule-type-filter'):
+ self.skipTest('No "qos-rule-type-filter" extension present')
+
+ cmd_output = self.openstack(
+ 'network qos rule type list --all-rules',
+ parse_output=True,
+ )
+ for rule_type in self.ALL_AVAILABLE_RULE_TYPES:
+ self.assertIn(rule_type, [x['Type'] for x in cmd_output])
+
def test_qos_rule_type_details(self):
for rule_type in self.AVAILABLE_RULE_TYPES:
- cmd_output = json.loads(self.openstack(
- 'network qos rule type show %s -f json' % rule_type))
+ cmd_output = self.openstack(
+ 'network qos rule type show %s' % rule_type,
+ parse_output=True,
+ )
self.assertEqual(rule_type, cmd_output['rule_type_name'])
self.assertIn("drivers", cmd_output.keys())
diff --git a/openstackclient/tests/functional/network/v2/test_network_rbac.py b/openstackclient/tests/functional/network/v2/test_network_rbac.py
index 3bbe4f27..cb66759a 100644
--- a/openstackclient/tests/functional/network/v2/test_network_rbac.py
+++ b/openstackclient/tests/functional/network/v2/test_network_rbac.py
@@ -10,7 +10,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import json
import uuid
from openstackclient.tests.functional.network.v2 import common
@@ -32,41 +31,47 @@ class NetworkRBACTests(common.NetworkTests):
self.NET_NAME = uuid.uuid4().hex
self.PROJECT_NAME = uuid.uuid4().hex
- cmd_output = json.loads(self.openstack(
- 'network create -f json ' + self.NET_NAME
- ))
+ cmd_output = self.openstack(
+ 'network create ' + self.NET_NAME,
+ parse_output=True,
+ )
self.addCleanup(self.openstack,
'network delete ' + cmd_output['id'])
self.OBJECT_ID = cmd_output['id']
- cmd_output = json.loads(self.openstack(
- 'network rbac create -f json ' +
+ cmd_output = self.openstack(
+ 'network rbac create ' +
self.OBJECT_ID +
' --action access_as_shared' +
' --target-project admin' +
- ' --type network'
- ))
+ ' --type network',
+ parse_output=True,
+ )
self.addCleanup(self.openstack,
'network rbac delete ' + cmd_output['id'])
self.ID = cmd_output['id']
self.assertEqual(self.OBJECT_ID, cmd_output['object_id'])
def test_network_rbac_list(self):
- cmd_output = json.loads(self.openstack('network rbac list -f json'))
+ cmd_output = self.openstack('network rbac list', parse_output=True)
self.assertIn(self.ID, [rbac['ID'] for rbac in cmd_output])
def test_network_rbac_show(self):
- cmd_output = json.loads(self.openstack(
- 'network rbac show -f json ' + self.ID))
+ cmd_output = self.openstack(
+ 'network rbac show ' + self.ID,
+ parse_output=True,)
self.assertEqual(self.ID, cmd_output['id'])
def test_network_rbac_set(self):
- project_id = json.loads(self.openstack(
- 'project create -f json ' + self.PROJECT_NAME))['id']
+ project_id = self.openstack(
+ 'project create ' + self.PROJECT_NAME,
+ parse_output=True,)['id']
self.openstack('network rbac set ' + self.ID +
' --target-project ' + self.PROJECT_NAME)
- cmd_output_rbac = json.loads(self.openstack(
- 'network rbac show -f json ' + self.ID))
+ cmd_output_rbac = self.openstack(
+ 'network rbac show ' + self.ID,
+ parse_output=True,
+ )
self.assertEqual(project_id, cmd_output_rbac['target_project_id'])
raw_output_project = self.openstack(
'project delete ' + self.PROJECT_NAME)
diff --git a/openstackclient/tests/functional/network/v2/test_network_segment.py b/openstackclient/tests/functional/network/v2/test_network_segment.py
index 6ffb11cf..111c4dc3 100644
--- a/openstackclient/tests/functional/network/v2/test_network_segment.py
+++ b/openstackclient/tests/functional/network/v2/test_network_segment.py
@@ -10,7 +10,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import json
import uuid
from openstackclient.tests.functional.network.v2 import common
@@ -27,10 +26,9 @@ class NetworkSegmentTests(common.NetworkTests):
cls.PHYSICAL_NETWORK_NAME = uuid.uuid4().hex
# Create a network for the all subnet tests
- cmd_output = json.loads(cls.openstack(
- 'network create -f json ' +
- cls.NETWORK_NAME
- ))
+ cmd_output = cls.openstack(
+ 'network create ' + cls.NETWORK_NAME, parse_output=True,
+ )
# Get network_id for assertEqual
cls.NETWORK_ID = cmd_output["id"]
@@ -54,13 +52,14 @@ class NetworkSegmentTests(common.NetworkTests):
def test_network_segment_create_delete(self):
name = uuid.uuid4().hex
- json_output = json.loads(self.openstack(
- ' network segment create -f json ' +
+ json_output = self.openstack(
+ ' network segment create ' +
'--network ' + self.NETWORK_ID + ' ' +
'--network-type geneve ' +
'--segment 2055 ' +
- name
- ))
+ name,
+ parse_output=True,
+ )
self.assertEqual(
name,
json_output["name"],
@@ -73,13 +72,14 @@ class NetworkSegmentTests(common.NetworkTests):
def test_network_segment_list(self):
name = uuid.uuid4().hex
- json_output = json.loads(self.openstack(
- ' network segment create -f json ' +
+ json_output = self.openstack(
+ ' network segment create ' +
'--network ' + self.NETWORK_ID + ' ' +
'--network-type geneve ' +
'--segment 2055 ' +
- name
- ))
+ name,
+ parse_output=True,
+ )
network_segment_id = json_output.get('id')
network_segment_name = json_output.get('name')
self.addCleanup(
@@ -91,9 +91,10 @@ class NetworkSegmentTests(common.NetworkTests):
json_output["name"],
)
- json_output = json.loads(self.openstack(
- 'network segment list -f json'
- ))
+ json_output = self.openstack(
+ 'network segment list',
+ parse_output=True,
+ )
item_map = {
item.get('ID'): item.get('Name') for item in json_output
}
@@ -102,21 +103,23 @@ class NetworkSegmentTests(common.NetworkTests):
def test_network_segment_set_show(self):
name = uuid.uuid4().hex
- json_output = json.loads(self.openstack(
- ' network segment create -f json ' +
+ json_output = self.openstack(
+ ' network segment create ' +
'--network ' + self.NETWORK_ID + ' ' +
'--network-type geneve ' +
'--segment 2055 ' +
- name
- ))
+ name,
+ parse_output=True,
+ )
self.addCleanup(
self.openstack,
'network segment delete ' + name
)
- extension_output = json.loads(self.openstack(
- "extension list -f json "
- ))
+ extension_output = self.openstack(
+ "extension list ",
+ parse_output=True,
+ )
ext_alias = [x["Alias"] for x in extension_output]
if "standard-attr-segment" in ext_alias:
self.assertEqual(
@@ -136,10 +139,11 @@ class NetworkSegmentTests(common.NetworkTests):
)
self.assertOutput('', cmd_output)
- json_output = json.loads(self.openstack(
- 'network segment show -f json ' +
- name
- ))
+ json_output = self.openstack(
+ 'network segment show ' +
+ name,
+ parse_output=True,
+ )
self.assertEqual(
new_description,
json_output["description"],
diff --git a/openstackclient/tests/functional/network/v2/test_network_segment_range.py b/openstackclient/tests/functional/network/v2/test_network_segment_range.py
index 37c87dd5..5cdf5812 100644
--- a/openstackclient/tests/functional/network/v2/test_network_segment_range.py
+++ b/openstackclient/tests/functional/network/v2/test_network_segment_range.py
@@ -14,7 +14,6 @@
# under the License.
#
-import json
import uuid
from openstackclient.tests.functional.network.v2 import common
@@ -34,18 +33,20 @@ class NetworkSegmentRangeTests(common.NetworkTests):
def test_network_segment_range_create_delete(self):
# Make a project
- project_id = json.loads(self.openstack(
- 'project create -f json ' + self.PROJECT_NAME))['id']
+ project_id = self.openstack(
+ 'project create ' + self.PROJECT_NAME,
+ parse_output=True,)['id']
name = uuid.uuid4().hex
- json_output = json.loads(self.openstack(
- ' network segment range create -f json ' +
+ json_output = self.openstack(
+ ' network segment range create ' +
'--private ' +
"--project " + self.PROJECT_NAME + " " +
'--network-type vxlan ' +
'--minimum 2005 ' +
'--maximum 2009 ' +
- name
- ))
+ name,
+ parse_output=True,
+ )
self.assertEqual(
name,
json_output["name"],
@@ -65,14 +66,15 @@ class NetworkSegmentRangeTests(common.NetworkTests):
def test_network_segment_range_list(self):
name = uuid.uuid4().hex
- json_output = json.loads(self.openstack(
- ' network segment range create -f json ' +
+ json_output = self.openstack(
+ ' network segment range create ' +
'--shared ' +
'--network-type geneve ' +
'--minimum 2013 ' +
'--maximum 2017 ' +
- name
- ))
+ name,
+ parse_output=True,
+ )
network_segment_range_id = json_output.get('id')
network_segment_range_name = json_output.get('name')
self.addCleanup(
@@ -84,9 +86,10 @@ class NetworkSegmentRangeTests(common.NetworkTests):
json_output["name"],
)
- json_output = json.loads(self.openstack(
- 'network segment range list -f json'
- ))
+ json_output = self.openstack(
+ 'network segment range list',
+ parse_output=True,
+ )
item_map = {
item.get('ID'): item.get('Name') for item in json_output
}
@@ -94,18 +97,20 @@ class NetworkSegmentRangeTests(common.NetworkTests):
self.assertIn(network_segment_range_name, item_map.values())
def test_network_segment_range_set_show(self):
- project_id = json.loads(self.openstack(
- 'project create -f json ' + self.PROJECT_NAME))['id']
+ project_id = self.openstack(
+ 'project create ' + self.PROJECT_NAME,
+ parse_output=True,)['id']
name = uuid.uuid4().hex
- json_output = json.loads(self.openstack(
- ' network segment range create -f json ' +
+ json_output = self.openstack(
+ ' network segment range create ' +
'--private ' +
"--project " + self.PROJECT_NAME + " " +
'--network-type geneve ' +
'--minimum 2021 ' +
'--maximum 2025 ' +
- name
- ))
+ name,
+ parse_output=True,
+ )
self.addCleanup(
self.openstack,
'network segment range delete ' + name
@@ -127,10 +132,11 @@ class NetworkSegmentRangeTests(common.NetworkTests):
)
self.assertOutput('', cmd_output)
- json_output = json.loads(self.openstack(
- 'network segment range show -f json ' +
- name
- ))
+ json_output = self.openstack(
+ 'network segment range show ' +
+ name,
+ parse_output=True,
+ )
self.assertEqual(
new_minimum,
json_output["minimum"],
diff --git a/openstackclient/tests/functional/network/v2/test_network_service_provider.py b/openstackclient/tests/functional/network/v2/test_network_service_provider.py
index c571a756..9d513564 100644
--- a/openstackclient/tests/functional/network/v2/test_network_service_provider.py
+++ b/openstackclient/tests/functional/network/v2/test_network_service_provider.py
@@ -13,8 +13,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import json
-
from openstackclient.tests.functional.network.v2 import common
@@ -29,13 +27,15 @@ class TestNetworkServiceProvider(common.NetworkTests):
# NOTE(slaweq):
# these tests should work only when the "standard" Neutron L3 agent is
# used, as e.g. the OVN L3 plugin doesn't support that.
- l3_agent_list = json.loads(self.openstack(
- 'network agent list -f json --agent-type l3 -c ID'
- ))
+ l3_agent_list = self.openstack(
+ 'network agent list --agent-type l3 -c ID',
+ parse_output=True,
+ )
if not l3_agent_list:
self.skipTest("No Neutron L3 Agents present")
def test_network_service_provider_list(self):
- cmd_output = json.loads(self.openstack(
- 'network service provider list -f json'))
+ cmd_output = self.openstack(
+ 'network service provider list',
+ parse_output=True,)
self.assertIn('L3_ROUTER_NAT', [x['Service Type'] for x in cmd_output])
diff --git a/openstackclient/tests/functional/network/v2/test_network_trunk.py b/openstackclient/tests/functional/network/v2/test_network_trunk.py
new file mode 100644
index 00000000..bbb77a0d
--- /dev/null
+++ b/openstackclient/tests/functional/network/v2/test_network_trunk.py
@@ -0,0 +1,149 @@
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import json
+import uuid
+
+from openstackclient.tests.functional.network.v2 import common
+
+
+class NetworkTrunkTests(common.NetworkTests):
+ """Functional tests for Network Trunks"""
+
+ def setUp(self):
+ super().setUp()
+ # Nothing in this class works with Nova Network
+ if not self.haz_network:
+ self.skipTest("No Network service present")
+
+ network_name = uuid.uuid4().hex
+ subnet_name = uuid.uuid4().hex
+ self.parent_port_name = uuid.uuid4().hex
+ self.sub_port_name = uuid.uuid4().hex
+
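+ # Create a network, subnet, parent port and sub port used by the trunk tests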
+ self.openstack('network create %s' % network_name)
+ self.addCleanup(self.openstack, 'network delete %s' % network_name)
+
+ self.openstack(
+ 'subnet create %s '
+ '--network %s --subnet-range 10.0.0.0/24' % (
+ subnet_name, network_name))
+ self.openstack('port create %s --network %s' %
+ (self.parent_port_name, network_name))
+ self.addCleanup(self.openstack, 'port delete %s' %
+ self.parent_port_name)
+ json_out = self.openstack('port create %s --network %s -f json' %
+ (self.sub_port_name, network_name))
+ self.sub_port_id = json.loads(json_out)['id']
+ self.addCleanup(self.openstack, 'port delete %s' % self.sub_port_name)
+
+ def test_network_trunk_create_delete(self):
+ trunk_name = uuid.uuid4().hex
+ self.openstack('network trunk create %s --parent-port %s -f json ' %
+ (trunk_name, self.parent_port_name))
+ raw_output = self.openstack(
+ 'network trunk delete ' +
+ trunk_name
+ )
+ self.assertEqual('', raw_output)
+
+ def test_network_trunk_list(self):
+ trunk_name = uuid.uuid4().hex
+ json_output = json.loads(self.openstack(
+ 'network trunk create %s --parent-port %s -f json ' %
+ (trunk_name, self.parent_port_name)))
+ self.addCleanup(self.openstack,
+ 'network trunk delete ' + trunk_name)
+ self.assertEqual(trunk_name, json_output['name'])
+
+ json_output = json.loads(self.openstack(
+ 'network trunk list -f json'
+ ))
+ self.assertIn(trunk_name, [tr['Name'] for tr in json_output])
+
+ def test_network_trunk_set_unset(self):
+ trunk_name = uuid.uuid4().hex
+ json_output = json.loads(self.openstack(
+ 'network trunk create %s --parent-port %s -f json ' %
+ (trunk_name, self.parent_port_name)))
+ self.addCleanup(self.openstack,
+ 'network trunk delete ' + trunk_name)
+ self.assertEqual(trunk_name, json_output['name'])
+
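+ # Enable the trunk and verify the admin state via 'network trunk show'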
+ self.openstack(
+ 'network trunk set '
+ '--enable ' +
+ trunk_name
+ )
+
+ json_output = json.loads(self.openstack(
+ 'network trunk show -f json ' +
+ trunk_name
+ ))
+ self.assertTrue(json_output['is_admin_state_up'])
+
+ # Add subport to trunk
+ self.openstack(
+ 'network trunk set ' +
+ '--subport port=%s,segmentation-type=vlan,segmentation-id=42 ' %
+ (self.sub_port_name) +
+ trunk_name
+ )
+ json_output = json.loads(self.openstack(
+ 'network trunk show -f json ' +
+ trunk_name
+ ))
+ self.assertEqual(
+ [{
+ 'port_id': self.sub_port_id,
+ 'segmentation_id': 42,
+ 'segmentation_type': 'vlan'
+ }],
+ json_output['sub_ports'])
+
+ # Remove subport from trunk
+ self.openstack(
+ 'network trunk unset ' +
+ trunk_name +
+ ' --subport ' +
+ self.sub_port_name
+ )
+ json_output = json.loads(self.openstack(
+ 'network trunk show -f json ' +
+ trunk_name
+ ))
+ self.assertEqual(
+ [],
+ json_output['sub_ports'])
+
+ def test_network_trunk_list_subports(self):
+ trunk_name = uuid.uuid4().hex
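+ # Create a trunk with one sub port attached at creation time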
+ json_output = json.loads(self.openstack(
+ 'network trunk create %s --parent-port %s '
+ '--subport port=%s,segmentation-type=vlan,segmentation-id=42 '
+ '-f json ' %
+ (trunk_name, self.parent_port_name, self.sub_port_name)))
+ self.addCleanup(self.openstack,
+ 'network trunk delete ' + trunk_name)
+ self.assertEqual(trunk_name, json_output['name'])
+
+ json_output = json.loads(self.openstack(
+ 'network subport list --trunk %s -f json' % trunk_name))
+ self.assertEqual(
+ [{
+ 'Port': self.sub_port_id,
+ 'Segmentation ID': 42,
+ 'Segmentation Type': 'vlan'
+ }],
+ json_output)
diff --git a/openstackclient/tests/functional/network/v2/test_port.py b/openstackclient/tests/functional/network/v2/test_port.py
index a20d2043..f5bc9c4a 100644
--- a/openstackclient/tests/functional/network/v2/test_port.py
+++ b/openstackclient/tests/functional/network/v2/test_port.py
@@ -10,7 +10,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import json
import uuid
from openstackclient.tests.functional.network.v2 import common
@@ -55,19 +54,21 @@ class PortTests(common.NetworkTagTests):
def test_port_delete(self):
"""Test create, delete multiple"""
- json_output = json.loads(self.openstack(
- 'port create -f json --network %s %s' %
- (self.NETWORK_NAME, self.NAME)
- ))
+ json_output = self.openstack(
+ 'port create --network %s %s' %
+ (self.NETWORK_NAME, self.NAME),
+ parse_output=True,
+ )
id1 = json_output.get('id')
self.assertIsNotNone(id1)
self.assertIsNotNone(json_output.get('mac_address'))
self.assertEqual(self.NAME, json_output.get('name'))
- json_output = json.loads(self.openstack(
- 'port create -f json --network %s %sx' %
- (self.NETWORK_NAME, self.NAME)
- ))
+ json_output = self.openstack(
+ 'port create --network %s %sx' %
+ (self.NETWORK_NAME, self.NAME),
+ parse_output=True,
+ )
id2 = json_output.get('id')
self.assertIsNotNone(id2)
self.assertIsNotNone(json_output.get('mac_address'))
@@ -79,10 +80,11 @@ class PortTests(common.NetworkTagTests):
def test_port_list(self):
"""Test create defaults, list, delete"""
- json_output = json.loads(self.openstack(
- 'port create -f json --network %s %s' %
- (self.NETWORK_NAME, self.NAME)
- ))
+ json_output = self.openstack(
+ 'port create --network %s %s' %
+ (self.NETWORK_NAME, self.NAME),
+ parse_output=True,
+ )
id1 = json_output.get('id')
self.assertIsNotNone(id1)
mac1 = json_output.get('mac_address')
@@ -90,10 +92,11 @@ class PortTests(common.NetworkTagTests):
self.addCleanup(self.openstack, 'port delete %s' % id1)
self.assertEqual(self.NAME, json_output.get('name'))
- json_output = json.loads(self.openstack(
- 'port create -f json --network %s %sx' %
- (self.NETWORK_NAME, self.NAME)
- ))
+ json_output = self.openstack(
+ 'port create --network %s %sx' %
+ (self.NETWORK_NAME, self.NAME),
+ parse_output=True,
+ )
id2 = json_output.get('id')
self.assertIsNotNone(id2)
mac2 = json_output.get('mac_address')
@@ -102,9 +105,10 @@ class PortTests(common.NetworkTagTests):
self.assertEqual(self.NAME + 'x', json_output.get('name'))
# Test list
- json_output = json.loads(self.openstack(
- 'port list -f json'
- ))
+ json_output = self.openstack(
+ 'port list',
+ parse_output=True,
+ )
item_map = {item.get('ID'): item.get('MAC Address') for item in
json_output}
self.assertIn(id1, item_map.keys())
@@ -113,17 +117,19 @@ class PortTests(common.NetworkTagTests):
self.assertIn(mac2, item_map.values())
# Test list --long
- json_output = json.loads(self.openstack(
- 'port list --long -f json'
- ))
+ json_output = self.openstack(
+ 'port list --long',
+ parse_output=True,
+ )
id_list = [item.get('ID') for item in json_output]
self.assertIn(id1, id_list)
self.assertIn(id2, id_list)
# Test list --mac-address
- json_output = json.loads(self.openstack(
- 'port list -f json --mac-address %s' % mac2
- ))
+ json_output = self.openstack(
+ 'port list --mac-address %s' % mac2,
+ parse_output=True,
+ )
item_map = {item.get('ID'): item.get('MAC Address') for item in
json_output}
self.assertNotIn(id1, item_map.keys())
@@ -132,9 +138,10 @@ class PortTests(common.NetworkTagTests):
self.assertIn(mac2, item_map.values())
# Test list with unknown fields
- json_output = json.loads(self.openstack(
- 'port list -f json -c ID -c Name -c device_id'
- ))
+ json_output = self.openstack(
+ 'port list -c ID -c Name -c device_id',
+ parse_output=True,
+ )
id_list = [p['ID'] for p in json_output]
self.assertIn(id1, id_list)
self.assertIn(id2, id_list)
@@ -144,13 +151,14 @@ class PortTests(common.NetworkTagTests):
def test_port_set(self):
"""Test create, set, show, delete"""
name = uuid.uuid4().hex
- json_output = json.loads(self.openstack(
- 'port create -f json '
+ json_output = self.openstack(
+ 'port create '
'--network %s '
'--description xyzpdq '
'--disable %s' %
- (self.NETWORK_NAME, name)
- ))
+ (self.NETWORK_NAME, name),
+ parse_output=True,
+ )
id1 = json_output.get('id')
self.addCleanup(self.openstack, 'port delete %s' % id1)
self.assertEqual(name, json_output.get('name'))
@@ -163,9 +171,10 @@ class PortTests(common.NetworkTagTests):
)
self.assertOutput('', raw_output)
- json_output = json.loads(self.openstack(
- 'port show -f json %s' % name
- ))
+ json_output = self.openstack(
+ 'port show %s' % name,
+ parse_output=True,
+ )
sg_id = json_output.get('security_group_ids')[0]
self.assertEqual(name, json_output.get('name'))
@@ -177,17 +186,19 @@ class PortTests(common.NetworkTagTests):
'port unset --security-group %s %s' % (sg_id, id1))
self.assertOutput('', raw_output)
- json_output = json.loads(self.openstack(
- 'port show -f json %s' % name
- ))
+ json_output = self.openstack(
+ 'port show %s' % name,
+ parse_output=True,
+ )
self.assertEqual([], json_output.get('security_group_ids'))
def test_port_admin_set(self):
"""Test create, set (as admin), show, delete"""
- json_output = json.loads(self.openstack(
- 'port create -f json '
- '--network %s %s' % (self.NETWORK_NAME, self.NAME)
- ))
+ json_output = self.openstack(
+ 'port create '
+ '--network %s %s' % (self.NETWORK_NAME, self.NAME),
+ parse_output=True,
+ )
id_ = json_output.get('id')
self.addCleanup(self.openstack, 'port delete %s' % id_)
@@ -196,36 +207,40 @@ class PortTests(common.NetworkTagTests):
'port set --mac-address 11:22:33:44:55:66 %s' %
self.NAME)
self.assertOutput('', raw_output)
- json_output = json.loads(self.openstack(
- 'port show -f json %s' % self.NAME
- ))
+ json_output = self.openstack(
+ 'port show %s' % self.NAME,
+ parse_output=True,
+ )
self.assertEqual(json_output.get('mac_address'), '11:22:33:44:55:66')
def test_port_set_sg(self):
"""Test create, set, show, delete"""
sg_name1 = uuid.uuid4().hex
- json_output = json.loads(self.openstack(
- 'security group create -f json %s' %
- sg_name1
- ))
+ json_output = self.openstack(
+ 'security group create %s' %
+ sg_name1,
+ parse_output=True,
+ )
sg_id1 = json_output.get('id')
self.addCleanup(self.openstack, 'security group delete %s' % sg_id1)
sg_name2 = uuid.uuid4().hex
- json_output = json.loads(self.openstack(
- 'security group create -f json %s' %
- sg_name2
- ))
+ json_output = self.openstack(
+ 'security group create %s' %
+ sg_name2,
+ parse_output=True,
+ )
sg_id2 = json_output.get('id')
self.addCleanup(self.openstack, 'security group delete %s' % sg_id2)
name = uuid.uuid4().hex
- json_output = json.loads(self.openstack(
- 'port create -f json '
+ json_output = self.openstack(
+ 'port create '
'--network %s '
'--security-group %s %s' %
- (self.NETWORK_NAME, sg_name1, name)
- ))
+ (self.NETWORK_NAME, sg_name1, name),
+ parse_output=True,
+ )
id1 = json_output.get('id')
self.addCleanup(self.openstack, 'port delete %s' % id1)
self.assertEqual(name, json_output.get('name'))
@@ -238,9 +253,10 @@ class PortTests(common.NetworkTagTests):
)
self.assertOutput('', raw_output)
- json_output = json.loads(self.openstack(
- 'port show -f json %s' % name
- ))
+ json_output = self.openstack(
+ 'port show %s' % name,
+ parse_output=True,
+ )
self.assertEqual(name, json_output.get('name'))
# NOTE(amotoki): The order of the field is not predictable,
self.assertIsInstance(json_output.get('security_group_ids'), list)
@@ -251,16 +267,18 @@ class PortTests(common.NetworkTagTests):
'port unset --security-group %s %s' % (sg_id1, id1))
self.assertOutput('', raw_output)
- json_output = json.loads(self.openstack(
- 'port show -f json %s' % name
- ))
+ json_output = self.openstack(
+ 'port show %s' % name,
+ parse_output=True,
+ )
self.assertEqual(
[sg_id2],
json_output.get('security_group_ids')
)
def _create_resource_for_tag_test(self, name, args):
- return json.loads(self.openstack(
- '{} create -f json --network {} {} {}'
- .format(self.base_command, self.NETWORK_NAME, args, name)
- ))
+ return self.openstack(
+ '{} create --network {} {} {}'
+ .format(self.base_command, self.NETWORK_NAME, args, name),
+ parse_output=True,
+ )
diff --git a/openstackclient/tests/functional/network/v2/test_router.py b/openstackclient/tests/functional/network/v2/test_router.py
index 2464b681..07a5a633 100644
--- a/openstackclient/tests/functional/network/v2/test_router.py
+++ b/openstackclient/tests/functional/network/v2/test_router.py
@@ -10,7 +10,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import json
import uuid
from openstackclient.tests.functional.network.v2 import common
@@ -31,18 +30,20 @@ class RouterTests(common.NetworkTagTests):
"""Test create options, delete multiple"""
name1 = uuid.uuid4().hex
name2 = uuid.uuid4().hex
- cmd_output = json.loads(self.openstack(
- 'router create -f json ' +
- name1
- ))
+ cmd_output = self.openstack(
+ 'router create ' +
+ name1,
+ parse_output=True,
+ )
self.assertEqual(
name1,
cmd_output["name"],
)
- cmd_output = json.loads(self.openstack(
- 'router create -f json ' +
- name2
- ))
+ cmd_output = self.openstack(
+ 'router create ' +
+ name2,
+ parse_output=True,
+ )
self.assertEqual(
name2,
cmd_output["name"],
@@ -55,10 +56,10 @@ class RouterTests(common.NetworkTagTests):
def test_router_list(self):
"""Test create, list filter"""
# Get project IDs
- cmd_output = json.loads(self.openstack('token issue -f json '))
+ cmd_output = self.openstack('token issue', parse_output=True)
auth_project_id = cmd_output['project_id']
- cmd_output = json.loads(self.openstack('project list -f json '))
+ cmd_output = self.openstack('project list', parse_output=True)
admin_project_id = None
demo_project_id = None
for p in cmd_output:
@@ -78,11 +79,12 @@ class RouterTests(common.NetworkTagTests):
name1 = uuid.uuid4().hex
name2 = uuid.uuid4().hex
- cmd_output = json.loads(self.openstack(
- 'router create -f json ' +
+ cmd_output = self.openstack(
+ 'router create ' +
'--disable ' +
- name1
- ))
+ name1,
+ parse_output=True,
+ )
self.addCleanup(self.openstack, 'router delete ' + name1)
self.assertEqual(
@@ -97,11 +99,12 @@ class RouterTests(common.NetworkTagTests):
admin_project_id,
cmd_output["project_id"],
)
- cmd_output = json.loads(self.openstack(
- 'router create -f json ' +
+ cmd_output = self.openstack(
+ 'router create ' +
'--project ' + demo_project_id +
- ' ' + name2
- ))
+ ' ' + name2,
+ parse_output=True,
+ )
self.addCleanup(self.openstack, 'router delete ' + name2)
self.assertEqual(
@@ -118,37 +121,41 @@ class RouterTests(common.NetworkTagTests):
)
# Test list --project
- cmd_output = json.loads(self.openstack(
- 'router list -f json ' +
- '--project ' + demo_project_id
- ))
+ cmd_output = self.openstack(
+ 'router list ' +
+ '--project ' + demo_project_id,
+ parse_output=True,
+ )
names = [x["Name"] for x in cmd_output]
self.assertNotIn(name1, names)
self.assertIn(name2, names)
# Test list --disable
- cmd_output = json.loads(self.openstack(
- 'router list -f json ' +
- '--disable '
- ))
+ cmd_output = self.openstack(
+ 'router list ' +
+ '--disable ',
+ parse_output=True,
+ )
names = [x["Name"] for x in cmd_output]
self.assertIn(name1, names)
self.assertNotIn(name2, names)
# Test list --name
- cmd_output = json.loads(self.openstack(
- 'router list -f json ' +
- '--name ' + name1
- ))
+ cmd_output = self.openstack(
+ 'router list ' +
+ '--name ' + name1,
+ parse_output=True,
+ )
names = [x["Name"] for x in cmd_output]
self.assertIn(name1, names)
self.assertNotIn(name2, names)
# Test list --long
- cmd_output = json.loads(self.openstack(
- 'router list -f json ' +
- '--long '
- ))
+ cmd_output = self.openstack(
+ 'router list ' +
+ '--long ',
+ parse_output=True,
+ )
names = [x["Name"] for x in cmd_output]
self.assertIn(name1, names)
self.assertIn(name2, names)
@@ -160,15 +167,17 @@ class RouterTests(common.NetworkTagTests):
self.skipTest("No l3_agent_scheduler extension present")
name = uuid.uuid4().hex
- cmd_output = json.loads(self.openstack(
- 'router create -f json ' + name))
+ cmd_output = self.openstack(
+ 'router create ' + name,
+ parse_output=True,)
self.addCleanup(self.openstack, 'router delete ' + name)
# Get router ID
router_id = cmd_output['id']
# Get l3 agent id
- cmd_output = json.loads(self.openstack(
- 'network agent list -f json --agent-type l3'))
+ cmd_output = self.openstack(
+ 'network agent list --agent-type l3',
+ parse_output=True,)
# Check at least one L3 agent is included in the response.
self.assertTrue(cmd_output)
@@ -178,16 +187,18 @@ class RouterTests(common.NetworkTagTests):
self.openstack(
'network agent add router --l3 ' + agent_id + ' ' + router_id)
- cmd_output = json.loads(self.openstack(
- 'router list -f json --agent ' + agent_id))
+ cmd_output = self.openstack(
+ 'router list --agent ' + agent_id,
+ parse_output=True,)
router_ids = [x['ID'] for x in cmd_output]
self.assertIn(router_id, router_ids)
# Remove router from agent
self.openstack(
'network agent remove router --l3 ' + agent_id + ' ' + router_id)
- cmd_output = json.loads(self.openstack(
- 'router list -f json --agent ' + agent_id))
+ cmd_output = self.openstack(
+ 'router list --agent ' + agent_id,
+ parse_output=True,)
router_ids = [x['ID'] for x in cmd_output]
self.assertNotIn(router_id, router_ids)
@@ -196,11 +207,12 @@ class RouterTests(common.NetworkTagTests):
name = uuid.uuid4().hex
new_name = name + "_"
- cmd_output = json.loads(self.openstack(
- 'router create -f json ' +
+ cmd_output = self.openstack(
+ 'router create ' +
'--description aaaa ' +
- name
- ))
+ name,
+ parse_output=True,
+ )
self.addCleanup(self.openstack, 'router delete ' + new_name)
self.assertEqual(
name,
@@ -221,10 +233,11 @@ class RouterTests(common.NetworkTagTests):
)
self.assertOutput('', cmd_output)
- cmd_output = json.loads(self.openstack(
- 'router show -f json ' +
- new_name
- ))
+ cmd_output = self.openstack(
+ 'router show ' +
+ new_name,
+ parse_output=True,
+ )
self.assertEqual(
new_name,
cmd_output["name"],
@@ -247,10 +260,11 @@ class RouterTests(common.NetworkTagTests):
'--external-gateway ' +
new_name
)
- cmd_output = json.loads(self.openstack(
- 'router show -f json ' +
- new_name
- ))
+ cmd_output = self.openstack(
+ 'router show ' +
+ new_name,
+ parse_output=True,
+ )
self.assertIsNone(cmd_output["external_gateway_info"])
def _test_set_router_distributed(self, router_name):
@@ -265,10 +279,11 @@ class RouterTests(common.NetworkTagTests):
)
self.assertOutput('', cmd_output)
- cmd_output = json.loads(self.openstack(
- 'router show -f json ' +
- router_name
- ))
+ cmd_output = self.openstack(
+ 'router show ' +
+ router_name,
+ parse_output=True,
+ )
self.assertTrue(cmd_output["distributed"])
self.assertIsNotNone(cmd_output["external_gateway_info"])
@@ -292,25 +307,28 @@ class RouterTests(common.NetworkTagTests):
self.addCleanup(self.openstack, 'router remove subnet %s %s' % (
router_name, subnet_name))
- out1 = json.loads(self.openstack(
- 'router add route -f json %s '
+ out1 = self.openstack(
+ 'router add route %s '
'--route destination=10.0.10.0/24,gateway=10.0.0.10' %
- router_name)),
+ router_name,
+ parse_output=True,),
self.assertEqual(1, len(out1[0]['routes']))
self.addCleanup(
self.openstack, 'router set %s --no-route' % router_name)
- out2 = json.loads(self.openstack(
- 'router add route -f json %s '
+ out2 = self.openstack(
+ 'router add route %s '
'--route destination=10.0.10.0/24,gateway=10.0.0.10 '
'--route destination=10.0.11.0/24,gateway=10.0.0.11' %
- router_name)),
+ router_name,
+ parse_output=True,),
self.assertEqual(2, len(out2[0]['routes']))
- out3 = json.loads(self.openstack(
- 'router remove route -f json %s '
+ out3 = self.openstack(
+ 'router remove route %s '
'--route destination=10.0.11.0/24,gateway=10.0.0.11 '
'--route destination=10.0.12.0/24,gateway=10.0.0.12' %
- router_name)),
+ router_name,
+ parse_output=True,),
self.assertEqual(1, len(out3[0]['routes']))
diff --git a/openstackclient/tests/functional/network/v2/test_security_group.py b/openstackclient/tests/functional/network/v2/test_security_group.py
index d46f8db7..c9d929f5 100644
--- a/openstackclient/tests/functional/network/v2/test_security_group.py
+++ b/openstackclient/tests/functional/network/v2/test_security_group.py
@@ -10,7 +10,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import json
import uuid
from openstackclient.tests.functional.network.v2 import common
@@ -27,16 +26,17 @@ class SecurityGroupTests(common.NetworkTests):
self.NAME = uuid.uuid4().hex
self.OTHER_NAME = uuid.uuid4().hex
- cmd_output = json.loads(self.openstack(
- 'security group create -f json ' +
- self.NAME
- ))
+ cmd_output = self.openstack(
+ 'security group create ' +
+ self.NAME,
+ parse_output=True,
+ )
self.addCleanup(self.openstack,
'security group delete ' + cmd_output['id'])
self.assertEqual(self.NAME, cmd_output['name'])
def test_security_group_list(self):
- cmd_output = json.loads(self.openstack('security group list -f json'))
+ cmd_output = self.openstack('security group list', parse_output=True)
self.assertIn(self.NAME, [sg['Name'] for sg in cmd_output])
def test_security_group_set(self):
@@ -47,13 +47,17 @@ class SecurityGroupTests(common.NetworkTests):
)
self.assertEqual('', raw_output)
- cmd_output = json.loads(self.openstack(
- 'security group show -f json ' + other_name))
+ cmd_output = self.openstack(
+ 'security group show ' + other_name,
+ parse_output=True,
+ )
self.assertEqual('NSA', cmd_output['description'])
self.assertFalse(cmd_output['stateful'])
def test_security_group_show(self):
- cmd_output = json.loads(self.openstack(
- 'security group show -f json ' + self.NAME))
+ cmd_output = self.openstack(
+ 'security group show ' + self.NAME,
+ parse_output=True,
+ )
self.assertEqual(self.NAME, cmd_output['name'])
self.assertTrue(cmd_output['stateful'])
diff --git a/openstackclient/tests/functional/network/v2/test_security_group_rule.py b/openstackclient/tests/functional/network/v2/test_security_group_rule.py
index fe78bf47..d64fb420 100644
--- a/openstackclient/tests/functional/network/v2/test_security_group_rule.py
+++ b/openstackclient/tests/functional/network/v2/test_security_group_rule.py
@@ -10,7 +10,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import json
import uuid
from openstackclient.tests.functional.network.v2 import common
@@ -28,32 +27,38 @@ class SecurityGroupRuleTests(common.NetworkTests):
self.SECURITY_GROUP_NAME = uuid.uuid4().hex
# Create the security group to hold the rule
- cmd_output = json.loads(self.openstack(
- 'security group create -f json ' +
- self.SECURITY_GROUP_NAME
- ))
+ cmd_output = self.openstack(
+ 'security group create ' +
+ self.SECURITY_GROUP_NAME,
+ parse_output=True,
+ )
self.addCleanup(self.openstack,
'security group delete ' + self.SECURITY_GROUP_NAME)
self.assertEqual(self.SECURITY_GROUP_NAME, cmd_output['name'])
# Create the security group rule.
- cmd_output = json.loads(self.openstack(
- 'security group rule create -f json ' +
+ cmd_output = self.openstack(
+ 'security group rule create ' +
self.SECURITY_GROUP_NAME + ' ' +
'--protocol tcp --dst-port 80:80 ' +
- '--ingress --ethertype IPv4 '
- ))
+ '--ingress --ethertype IPv4 ',
+ parse_output=True,
+ )
self.addCleanup(self.openstack,
'security group rule delete ' + cmd_output['id'])
self.SECURITY_GROUP_RULE_ID = cmd_output['id']
def test_security_group_rule_list(self):
- cmd_output = json.loads(self.openstack(
- 'security group rule list -f json ' + self.SECURITY_GROUP_NAME))
+ cmd_output = self.openstack(
+ 'security group rule list ' + self.SECURITY_GROUP_NAME,
+ parse_output=True,
+ )
self.assertIn(self.SECURITY_GROUP_RULE_ID,
[rule['ID'] for rule in cmd_output])
def test_security_group_rule_show(self):
- cmd_output = json.loads(self.openstack(
- 'security group rule show -f json ' + self.SECURITY_GROUP_RULE_ID))
+ cmd_output = self.openstack(
+ 'security group rule show ' + self.SECURITY_GROUP_RULE_ID,
+ parse_output=True,
+ )
self.assertEqual(self.SECURITY_GROUP_RULE_ID, cmd_output['id'])
diff --git a/openstackclient/tests/functional/network/v2/test_subnet.py b/openstackclient/tests/functional/network/v2/test_subnet.py
index 38030e01..041ec9f0 100644
--- a/openstackclient/tests/functional/network/v2/test_subnet.py
+++ b/openstackclient/tests/functional/network/v2/test_subnet.py
@@ -10,7 +10,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import json
import random
import uuid
@@ -29,10 +28,11 @@ class SubnetTests(common.NetworkTagTests):
cls.NETWORK_NAME = uuid.uuid4().hex
# Create a network for all the subnet tests
- cmd_output = json.loads(cls.openstack(
- 'network create -f json ' +
- cls.NETWORK_NAME
- ))
+ cmd_output = cls.openstack(
+ 'network create ' +
+ cls.NETWORK_NAME,
+ parse_output=True,
+ )
# Get network_id for assertEqual
cls.NETWORK_ID = cmd_output["id"]
@@ -57,7 +57,7 @@ class SubnetTests(common.NetworkTagTests):
def test_subnet_create_and_delete(self):
"""Test create, delete multiple"""
name1 = uuid.uuid4().hex
- cmd = ('subnet create -f json --network ' +
+ cmd = ('subnet create --network ' +
self.NETWORK_NAME +
' --subnet-range')
cmd_output = self._subnet_create(cmd, name1)
@@ -70,7 +70,7 @@ class SubnetTests(common.NetworkTagTests):
cmd_output["network_id"],
)
name2 = uuid.uuid4().hex
- cmd = ('subnet create -f json --network ' +
+ cmd = ('subnet create --network ' +
self.NETWORK_NAME +
' --subnet-range')
cmd_output = self._subnet_create(cmd, name2)
@@ -91,7 +91,7 @@ class SubnetTests(common.NetworkTagTests):
"""Test create, list filter"""
name1 = uuid.uuid4().hex
name2 = uuid.uuid4().hex
- cmd = ('subnet create -f json ' +
+ cmd = ('subnet create ' +
'--network ' + self.NETWORK_NAME +
' --dhcp --subnet-range')
cmd_output = self._subnet_create(cmd, name1)
@@ -114,7 +114,7 @@ class SubnetTests(common.NetworkTagTests):
cmd_output["ip_version"],
)
- cmd = ('subnet create -f json ' +
+ cmd = ('subnet create ' +
'--network ' + self.NETWORK_NAME +
' --ip-version 6 --no-dhcp ' +
'--subnet-range')
@@ -139,46 +139,51 @@ class SubnetTests(common.NetworkTagTests):
)
# Test list --long
- cmd_output = json.loads(self.openstack(
- 'subnet list -f json ' +
- '--long '
- ))
+ cmd_output = self.openstack(
+ 'subnet list ' +
+ '--long ',
+ parse_output=True,
+ )
names = [x["Name"] for x in cmd_output]
self.assertIn(name1, names)
self.assertIn(name2, names)
# Test list --name
- cmd_output = json.loads(self.openstack(
- 'subnet list -f json ' +
- '--name ' + name1
- ))
+ cmd_output = self.openstack(
+ 'subnet list ' +
+ '--name ' + name1,
+ parse_output=True,
+ )
names = [x["Name"] for x in cmd_output]
self.assertIn(name1, names)
self.assertNotIn(name2, names)
# Test list --ip-version
- cmd_output = json.loads(self.openstack(
- 'subnet list -f json ' +
- '--ip-version 6'
- ))
+ cmd_output = self.openstack(
+ 'subnet list ' +
+ '--ip-version 6',
+ parse_output=True,
+ )
names = [x["Name"] for x in cmd_output]
self.assertNotIn(name1, names)
self.assertIn(name2, names)
# Test list --network
- cmd_output = json.loads(self.openstack(
- 'subnet list -f json ' +
- '--network ' + self.NETWORK_ID
- ))
+ cmd_output = self.openstack(
+ 'subnet list ' +
+ '--network ' + self.NETWORK_ID,
+ parse_output=True,
+ )
names = [x["Name"] for x in cmd_output]
self.assertIn(name1, names)
self.assertIn(name2, names)
# Test list --no-dhcp
- cmd_output = json.loads(self.openstack(
- 'subnet list -f json ' +
- '--no-dhcp '
- ))
+ cmd_output = self.openstack(
+ 'subnet list ' +
+ '--no-dhcp ',
+ parse_output=True,
+ )
names = [x["Name"] for x in cmd_output]
self.assertNotIn(name1, names)
self.assertIn(name2, names)
@@ -188,7 +193,7 @@ class SubnetTests(common.NetworkTagTests):
name = uuid.uuid4().hex
new_name = name + "_"
- cmd = ('subnet create -f json ' +
+ cmd = ('subnet create ' +
'--network ' + self.NETWORK_NAME +
' --description aaaa --subnet-range')
cmd_output = self._subnet_create(cmd, name)
@@ -215,10 +220,11 @@ class SubnetTests(common.NetworkTagTests):
)
self.assertOutput('', cmd_output)
- cmd_output = json.loads(self.openstack(
- 'subnet show -f json ' +
- new_name
- ))
+ cmd_output = self.openstack(
+ 'subnet show ' +
+ new_name,
+ parse_output=True,
+ )
self.assertEqual(
new_name,
cmd_output["name"],
@@ -248,10 +254,11 @@ class SubnetTests(common.NetworkTagTests):
)
self.assertOutput('', cmd_output)
- cmd_output = json.loads(self.openstack(
- 'subnet show -f json ' +
- new_name
- ))
+ cmd_output = self.openstack(
+ 'subnet show ' +
+ new_name,
+ parse_output=True,
+ )
self.assertEqual(
[],
cmd_output["service_types"],
@@ -274,10 +281,11 @@ class SubnetTests(common.NetworkTagTests):
(hex(random.randint(0, 65535))[2:] for _ in range(7))
)) + ":0/112"
try:
- cmd_output = json.loads(self.openstack(
+ cmd_output = self.openstack(
cmd + ' ' + subnet + ' ' +
- name
- ))
+ name,
+ parse_output=True,
+ )
except Exception:
if (i == 3):
# raise the exception on the last attempt
@@ -289,7 +297,7 @@ class SubnetTests(common.NetworkTagTests):
return cmd_output
def _create_resource_for_tag_test(self, name, args):
- cmd = ('subnet create -f json --network ' +
+ cmd = ('subnet create --network ' +
self.NETWORK_NAME + ' ' + args +
' --subnet-range')
return self._subnet_create(cmd, name)
diff --git a/openstackclient/tests/functional/network/v2/test_subnet_pool.py b/openstackclient/tests/functional/network/v2/test_subnet_pool.py
index dbcf01e2..8dc5e7a1 100644
--- a/openstackclient/tests/functional/network/v2/test_subnet_pool.py
+++ b/openstackclient/tests/functional/network/v2/test_subnet_pool.py
@@ -10,7 +10,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import json
import random
import uuid
@@ -61,10 +60,10 @@ class SubnetPoolTests(common.NetworkTagTests):
def test_subnet_pool_list(self):
"""Test create, list filter"""
- cmd_output = json.loads(self.openstack('token issue -f json'))
+ cmd_output = self.openstack('token issue', parse_output=True)
auth_project_id = cmd_output['project_id']
- cmd_output = json.loads(self.openstack('project list -f json'))
+ cmd_output = self.openstack('project list', parse_output=True)
admin_project_id = None
demo_project_id = None
for p in cmd_output:
@@ -131,37 +130,41 @@ class SubnetPoolTests(common.NetworkTagTests):
)
# Test list --project
- cmd_output = json.loads(self.openstack(
- 'subnet pool list -f json ' +
- '--project ' + demo_project_id
- ))
+ cmd_output = self.openstack(
+ 'subnet pool list ' +
+ '--project ' + demo_project_id,
+ parse_output=True,
+ )
names = [x["Name"] for x in cmd_output]
self.assertIn(name1, names)
self.assertNotIn(name2, names)
# Test list --share
- cmd_output = json.loads(self.openstack(
- 'subnet pool list -f json ' +
- '--share'
- ))
+ cmd_output = self.openstack(
+ 'subnet pool list ' +
+ '--share',
+ parse_output=True,
+ )
names = [x["Name"] for x in cmd_output]
self.assertNotIn(name1, names)
self.assertIn(name2, names)
# Test list --name
- cmd_output = json.loads(self.openstack(
- 'subnet pool list -f json ' +
- '--name ' + name1
- ))
+ cmd_output = self.openstack(
+ 'subnet pool list ' +
+ '--name ' + name1,
+ parse_output=True,
+ )
names = [x["Name"] for x in cmd_output]
self.assertIn(name1, names)
self.assertNotIn(name2, names)
# Test list --long
- cmd_output = json.loads(self.openstack(
- 'subnet pool list -f json ' +
- '--long '
- ))
+ cmd_output = self.openstack(
+ 'subnet pool list ' +
+ '--long ',
+ parse_output=True,
+ )
names = [x["Name"] for x in cmd_output]
self.assertIn(name1, names)
self.assertIn(name2, names)
@@ -227,10 +230,11 @@ class SubnetPoolTests(common.NetworkTagTests):
)
self.assertOutput('', cmd_output)
- cmd_output = json.loads(self.openstack(
- 'subnet pool show -f json ' +
- new_name
- ))
+ cmd_output = self.openstack(
+ 'subnet pool show ' +
+ new_name,
+ parse_output=True,
+ )
self.assertEqual(
new_name,
cmd_output["name"],
@@ -307,12 +311,13 @@ class SubnetPoolTests(common.NetworkTagTests):
)) + ":0:0/96"
try:
- cmd_output = json.loads(self.openstack(
- 'subnet pool create -f json ' +
+ cmd_output = self.openstack(
+ 'subnet pool create ' +
cmd + ' ' +
'--pool-prefix ' + pool_prefix + ' ' +
- name
- ))
+ name,
+ parse_output=True,
+ )
except Exception:
if (i == 3):
# Raise the exception on the last attempt
diff --git a/openstackclient/tests/functional/volume/base.py b/openstackclient/tests/functional/volume/base.py
index 53032606..041d8d07 100644
--- a/openstackclient/tests/functional/volume/base.py
+++ b/openstackclient/tests/functional/volume/base.py
@@ -10,7 +10,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import json
import time
from openstackclient.tests.functional import base
@@ -27,10 +26,12 @@ class BaseVolumeTests(base.TestCase):
failures = ['error']
total_sleep = 0
while total_sleep < wait:
- output = json.loads(cls.openstack(
- check_type + ' show -f json ' + check_name))
+ output = cls.openstack(
+ check_type + ' show ' + check_name,
+ parse_output=True,
+ )
current_status = output['status']
- if (current_status == desired_status):
+ if current_status == desired_status:
print('{} {} now has status {}'
.format(check_type, check_name, current_status))
return
@@ -51,7 +52,7 @@ class BaseVolumeTests(base.TestCase):
total_sleep = 0
name_field = name_field or 'Name'
while total_sleep < wait:
- result = json.loads(cls.openstack(check_type + ' list -f json'))
+ result = cls.openstack(check_type + ' list', parse_output=True)
names = [x[name_field] for x in result]
if check_name not in names:
print('{} {} is now deleted'.format(check_type, check_name))
diff --git a/openstackclient/tests/functional/volume/v1/test_qos.py b/openstackclient/tests/functional/volume/v1/test_qos.py
index d8277dfc..c449938e 100644
--- a/openstackclient/tests/functional/volume/v1/test_qos.py
+++ b/openstackclient/tests/functional/volume/v1/test_qos.py
@@ -10,7 +10,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import json
import uuid
from openstackclient.tests.functional.volume.v1 import common
@@ -22,29 +21,32 @@ class QosTests(common.BaseVolumeTests):
def test_volume_qos_create_list(self):
"""Test create, list, delete multiple"""
name1 = uuid.uuid4().hex
- cmd_output = json.loads(self.openstack(
- 'volume qos create -f json ' +
- name1
- ))
+ cmd_output = self.openstack(
+ 'volume qos create ' +
+ name1,
+ parse_output=True,
+ )
self.assertEqual(
name1,
cmd_output['name']
)
name2 = uuid.uuid4().hex
- cmd_output = json.loads(self.openstack(
- 'volume qos create -f json ' +
- name2
- ))
+ cmd_output = self.openstack(
+ 'volume qos create ' +
+ name2,
+ parse_output=True,
+ )
self.assertEqual(
name2,
cmd_output['name']
)
# Test list
- cmd_output = json.loads(self.openstack(
- 'volume qos list -f json'
- ))
+ cmd_output = self.openstack(
+ 'volume qos list',
+ parse_output=True,
+ )
names = [x["Name"] for x in cmd_output]
self.assertIn(name1, names)
self.assertIn(name2, names)
@@ -57,12 +59,13 @@ class QosTests(common.BaseVolumeTests):
"""Tests create volume qos, set, unset, show, delete"""
name = uuid.uuid4().hex
- cmd_output = json.loads(self.openstack(
- 'volume qos create -f json ' +
+ cmd_output = self.openstack(
+ 'volume qos create ' +
'--consumer front-end '
'--property Alpha=a ' +
- name
- ))
+ name,
+ parse_output=True,
+ )
self.addCleanup(self.openstack, 'volume qos delete ' + name)
self.assertEqual(
name,
@@ -84,10 +87,11 @@ class QosTests(common.BaseVolumeTests):
self.assertOutput('', raw_output)
# Test volume qos show
- cmd_output = json.loads(self.openstack(
- 'volume qos show -f json ' +
- name
- ))
+ cmd_output = self.openstack(
+ 'volume qos show ' +
+ name,
+ parse_output=True,
+ )
self.assertEqual(
name,
cmd_output['name']
@@ -105,10 +109,11 @@ class QosTests(common.BaseVolumeTests):
)
self.assertOutput('', raw_output)
- cmd_output = json.loads(self.openstack(
- 'volume qos show -f json ' +
- name
- ))
+ cmd_output = self.openstack(
+ 'volume qos show ' +
+ name,
+ parse_output=True,
+ )
self.assertEqual(
name,
cmd_output['name']
diff --git a/openstackclient/tests/functional/volume/v1/test_service.py b/openstackclient/tests/functional/volume/v1/test_service.py
index fee73f18..7de2de55 100644
--- a/openstackclient/tests/functional/volume/v1/test_service.py
+++ b/openstackclient/tests/functional/volume/v1/test_service.py
@@ -10,8 +10,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import json
-
from openstackclient.tests.functional.volume.v1 import common
@@ -19,18 +17,18 @@ class VolumeServiceTests(common.BaseVolumeTests):
"""Functional tests for volume service."""
def test_volume_service_list(self):
- cmd_output = json.loads(self.openstack(
- 'volume service list -f json'))
+ cmd_output = self.openstack('volume service list', parse_output=True)
# Get the nonredundant services and hosts
services = list(set([x['Binary'] for x in cmd_output]))
# Test volume service list --service
- cmd_output = json.loads(self.openstack(
- 'volume service list -f json ' +
+ cmd_output = self.openstack(
+ 'volume service list ' +
'--service ' +
- services[0]
- ))
+ services[0],
+ parse_output=True,
+ )
for x in cmd_output:
self.assertEqual(
services[0],
@@ -43,9 +41,10 @@ class VolumeServiceTests(common.BaseVolumeTests):
def test_volume_service_set(self):
# Get a service and host
- cmd_output = json.loads(self.openstack(
- 'volume service list -f json'
- ))
+ cmd_output = self.openstack(
+ 'volume service list',
+ parse_output=True,
+ )
service_1 = cmd_output[0]['Binary']
host_1 = cmd_output[0]['Host']
@@ -57,9 +56,10 @@ class VolumeServiceTests(common.BaseVolumeTests):
)
self.assertOutput('', raw_output)
- cmd_output = json.loads(self.openstack(
- 'volume service list -f json --long'
- ))
+ cmd_output = self.openstack(
+ 'volume service list --long',
+ parse_output=True,
+ )
self.assertEqual(
'enabled',
cmd_output[0]['Status']
@@ -77,9 +77,10 @@ class VolumeServiceTests(common.BaseVolumeTests):
)
self.assertOutput('', raw_output)
- cmd_output = json.loads(self.openstack(
- 'volume service list -f json --long'
- ))
+ cmd_output = self.openstack(
+ 'volume service list --long',
+ parse_output=True,
+ )
self.assertEqual(
'disabled',
cmd_output[0]['Status']
diff --git a/openstackclient/tests/functional/volume/v1/test_snapshot.py b/openstackclient/tests/functional/volume/v1/test_snapshot.py
index 5a76a2e9..c8c956d1 100644
--- a/openstackclient/tests/functional/volume/v1/test_snapshot.py
+++ b/openstackclient/tests/functional/volume/v1/test_snapshot.py
@@ -10,7 +10,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import json
import uuid
from openstackclient.tests.functional.volume.v1 import common
@@ -25,11 +24,12 @@ class VolumeSnapshotTests(common.BaseVolumeTests):
def setUpClass(cls):
super(VolumeSnapshotTests, cls).setUpClass()
# create a volume for all tests to create snapshots from
- cmd_output = json.loads(cls.openstack(
- 'volume create -f json ' +
+ cmd_output = cls.openstack(
+ 'volume create ' +
'--size 1 ' +
- cls.VOLLY
- ))
+ cls.VOLLY,
+ parse_output=True,
+ )
cls.wait_for_status('volume', cls.VOLLY, 'available')
cls.VOLUME_ID = cmd_output['id']
@@ -45,22 +45,24 @@ class VolumeSnapshotTests(common.BaseVolumeTests):
def test_volume_snapshot_delete(self):
"""Test create, delete multiple"""
name1 = uuid.uuid4().hex
- cmd_output = json.loads(self.openstack(
- 'volume snapshot create -f json ' +
+ cmd_output = self.openstack(
+ 'volume snapshot create ' +
name1 +
- ' --volume ' + self.VOLLY
- ))
+ ' --volume ' + self.VOLLY,
+ parse_output=True,
+ )
self.assertEqual(
name1,
cmd_output["display_name"],
)
name2 = uuid.uuid4().hex
- cmd_output = json.loads(self.openstack(
- 'volume snapshot create -f json ' +
+ cmd_output = self.openstack(
+ 'volume snapshot create ' +
name2 +
- ' --volume ' + self.VOLLY
- ))
+ ' --volume ' + self.VOLLY,
+ parse_output=True,
+ )
self.assertEqual(
name2,
cmd_output["display_name"],
@@ -78,11 +80,12 @@ class VolumeSnapshotTests(common.BaseVolumeTests):
def test_volume_snapshot_list(self):
"""Test create, list filter"""
name1 = uuid.uuid4().hex
- cmd_output = json.loads(self.openstack(
- 'volume snapshot create -f json ' +
+ cmd_output = self.openstack(
+ 'volume snapshot create ' +
name1 +
- ' --volume ' + self.VOLLY
- ))
+ ' --volume ' + self.VOLLY,
+ parse_output=True,
+ )
self.addCleanup(self.wait_for_delete, 'volume snapshot', name1)
self.addCleanup(self.openstack, 'volume snapshot delete ' + name1)
self.assertEqual(
@@ -100,11 +103,12 @@ class VolumeSnapshotTests(common.BaseVolumeTests):
self.wait_for_status('volume snapshot', name1, 'available')
name2 = uuid.uuid4().hex
- cmd_output = json.loads(self.openstack(
- 'volume snapshot create -f json ' +
+ cmd_output = self.openstack(
+ 'volume snapshot create ' +
name2 +
- ' --volume ' + self.VOLLY
- ))
+ ' --volume ' + self.VOLLY,
+ parse_output=True,
+ )
self.addCleanup(self.wait_for_delete, 'volume snapshot', name2)
self.addCleanup(self.openstack, 'volume snapshot delete ' + name2)
self.assertEqual(
@@ -122,29 +126,32 @@ class VolumeSnapshotTests(common.BaseVolumeTests):
self.wait_for_status('volume snapshot', name2, 'available')
# Test list --long, --status
- cmd_output = json.loads(self.openstack(
- 'volume snapshot list -f json ' +
+ cmd_output = self.openstack(
+ 'volume snapshot list ' +
'--long ' +
- '--status error'
- ))
+ '--status error',
+ parse_output=True,
+ )
names = [x["Name"] for x in cmd_output]
self.assertNotIn(name1, names)
self.assertNotIn(name2, names)
# Test list --volume
- cmd_output = json.loads(self.openstack(
- 'volume snapshot list -f json ' +
- '--volume ' + self.VOLLY
- ))
+ cmd_output = self.openstack(
+ 'volume snapshot list ' +
+ '--volume ' + self.VOLLY,
+ parse_output=True,
+ )
names = [x["Name"] for x in cmd_output]
self.assertIn(name1, names)
self.assertIn(name2, names)
# Test list --name
- cmd_output = json.loads(self.openstack(
- 'volume snapshot list -f json ' +
- '--name ' + name1
- ))
+ cmd_output = self.openstack(
+ 'volume snapshot list ' +
+ '--name ' + name1,
+ parse_output=True,
+ )
names = [x["Name"] for x in cmd_output]
self.assertIn(name1, names)
self.assertNotIn(name2, names)
@@ -153,12 +160,13 @@ class VolumeSnapshotTests(common.BaseVolumeTests):
"""Test create, set, unset, show, delete volume snapshot"""
name = uuid.uuid4().hex
new_name = name + "_"
- cmd_output = json.loads(self.openstack(
- 'volume snapshot create -f json ' +
+ cmd_output = self.openstack(
+ 'volume snapshot create ' +
'--volume ' + self.VOLLY +
' --description aaaa ' +
- name
- ))
+ name,
+ parse_output=True,
+ )
self.addCleanup(self.wait_for_delete, 'volume snapshot', new_name)
self.addCleanup(self.openstack, 'volume snapshot delete ' + new_name)
self.assertEqual(
@@ -187,10 +195,11 @@ class VolumeSnapshotTests(common.BaseVolumeTests):
self.assertOutput('', raw_output)
# Show snapshot set result
- cmd_output = json.loads(self.openstack(
- 'volume snapshot show -f json ' +
- new_name
- ))
+ cmd_output = self.openstack(
+ 'volume snapshot show ' +
+ new_name,
+ parse_output=True,
+ )
self.assertEqual(
new_name,
cmd_output["display_name"],
@@ -216,10 +225,11 @@ class VolumeSnapshotTests(common.BaseVolumeTests):
)
self.assertOutput('', raw_output)
- cmd_output = json.loads(self.openstack(
- 'volume snapshot show -f json ' +
- new_name
- ))
+ cmd_output = self.openstack(
+ 'volume snapshot show ' +
+ new_name,
+ parse_output=True,
+ )
self.assertEqual(
{'Beta': 'b'},
cmd_output["properties"],
@@ -232,8 +242,9 @@ class VolumeSnapshotTests(common.BaseVolumeTests):
new_name,
)
self.assertOutput('', raw_output)
- cmd_output = json.loads(self.openstack(
- 'volume snapshot show -f json ' +
- new_name
- ))
+ cmd_output = self.openstack(
+ 'volume snapshot show ' +
+ new_name,
+ parse_output=True,
+ )
self.assertEqual({}, cmd_output["properties"])
diff --git a/openstackclient/tests/functional/volume/v1/test_transfer_request.py b/openstackclient/tests/functional/volume/v1/test_transfer_request.py
index 0399e6cc..0ee73d8a 100644
--- a/openstackclient/tests/functional/volume/v1/test_transfer_request.py
+++ b/openstackclient/tests/functional/volume/v1/test_transfer_request.py
@@ -10,7 +10,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import json
import uuid
from openstackclient.tests.functional.volume.v1 import common
@@ -25,8 +24,10 @@ class TransferRequestTests(common.BaseVolumeTests):
@classmethod
def setUpClass(cls):
super(TransferRequestTests, cls).setUpClass()
- cmd_output = json.loads(cls.openstack(
- 'volume create -f json --size 1 ' + cls.VOLUME_NAME))
+ cmd_output = cls.openstack(
+ 'volume create --size 1 ' + cls.VOLUME_NAME,
+ parse_output=True,
+ )
cls.assertOutput(cls.VOLUME_NAME, cmd_output['name'])
cls.wait_for_status("volume", cls.VOLUME_NAME, "available")
@@ -45,26 +46,31 @@ class TransferRequestTests(common.BaseVolumeTests):
name = uuid.uuid4().hex
# create a volume
- cmd_output = json.loads(self.openstack(
- 'volume create -f json --size 1 ' + volume_name))
+ cmd_output = self.openstack(
+ 'volume create --size 1 ' + volume_name,
+ parse_output=True,
+ )
self.assertEqual(volume_name, cmd_output['name'])
# create volume transfer request for the volume
# and get the auth_key of the new transfer request
- cmd_output = json.loads(self.openstack(
- 'volume transfer request create -f json ' +
+ cmd_output = self.openstack(
+ 'volume transfer request create ' +
volume_name +
- ' --name ' + name))
+ ' --name ' + name,
+ parse_output=True,
+ )
auth_key = cmd_output['auth_key']
self.assertTrue(auth_key)
# accept the volume transfer request
- json_output = json.loads(self.openstack(
- 'volume transfer request accept -f json ' +
+ output = self.openstack(
+ 'volume transfer request accept ' +
name + ' ' +
- '--auth-key ' + auth_key
- ))
- self.assertEqual(name, json_output.get('name'))
+ '--auth-key ' + auth_key,
+ parse_output=True,
+ )
+ self.assertEqual(name, output.get('name'))
# the volume transfer will be removed by default after it is accepted,
# so we just need to delete the volume here
@@ -74,11 +80,12 @@ class TransferRequestTests(common.BaseVolumeTests):
def test_volume_transfer_request_list_show(self):
name = uuid.uuid4().hex
- cmd_output = json.loads(self.openstack(
- 'volume transfer request create -f json ' +
+ cmd_output = self.openstack(
+ 'volume transfer request create ' +
' --name ' + name + ' ' +
- self.VOLUME_NAME
- ))
+ self.VOLUME_NAME,
+ parse_output=True,
+ )
self.addCleanup(
self.openstack,
'volume transfer request delete ' + name
@@ -87,13 +94,15 @@ class TransferRequestTests(common.BaseVolumeTests):
auth_key = cmd_output['auth_key']
self.assertTrue(auth_key)
- cmd_output = json.loads(self.openstack(
- 'volume transfer request list -f json'
- ))
+ cmd_output = self.openstack(
+ 'volume transfer request list',
+ parse_output=True,
+ )
self.assertIn(name, [req['Name'] for req in cmd_output])
- cmd_output = json.loads(self.openstack(
- 'volume transfer request show -f json ' +
- name
- ))
+ cmd_output = self.openstack(
+ 'volume transfer request show ' +
+ name,
+ parse_output=True,
+ )
self.assertEqual(name, cmd_output['name'])
diff --git a/openstackclient/tests/functional/volume/v1/test_volume.py b/openstackclient/tests/functional/volume/v1/test_volume.py
index 013bc6a4..727ee73b 100644
--- a/openstackclient/tests/functional/volume/v1/test_volume.py
+++ b/openstackclient/tests/functional/volume/v1/test_volume.py
@@ -10,7 +10,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import json
import uuid
from openstackclient.tests.functional.volume.v1 import common
@@ -22,22 +21,24 @@ class VolumeTests(common.BaseVolumeTests):
def test_volume_create_and_delete(self):
"""Test create, delete multiple"""
name1 = uuid.uuid4().hex
- cmd_output = json.loads(self.openstack(
- 'volume create -f json ' +
+ cmd_output = self.openstack(
+ 'volume create ' +
'--size 1 ' +
- name1
- ))
+ name1,
+ parse_output=True,
+ )
self.assertEqual(
1,
cmd_output["size"],
)
name2 = uuid.uuid4().hex
- cmd_output = json.loads(self.openstack(
- 'volume create -f json ' +
+ cmd_output = self.openstack(
+ 'volume create ' +
'--size 2 ' +
- name2
- ))
+ name2,
+ parse_output=True,
+ )
self.assertEqual(
2,
cmd_output["size"],
@@ -51,11 +52,12 @@ class VolumeTests(common.BaseVolumeTests):
def test_volume_list(self):
"""Test create, list filter"""
name1 = uuid.uuid4().hex
- cmd_output = json.loads(self.openstack(
- 'volume create -f json ' +
+ cmd_output = self.openstack(
+ 'volume create ' +
'--size 1 ' +
- name1
- ))
+ name1,
+ parse_output=True,
+ )
self.addCleanup(self.openstack, 'volume delete ' + name1)
self.assertEqual(
1,
@@ -64,11 +66,12 @@ class VolumeTests(common.BaseVolumeTests):
self.wait_for_status("volume", name1, "available")
name2 = uuid.uuid4().hex
- cmd_output = json.loads(self.openstack(
- 'volume create -f json ' +
+ cmd_output = self.openstack(
+ 'volume create ' +
'--size 2 ' +
- name2
- ))
+ name2,
+ parse_output=True,
+ )
self.addCleanup(self.openstack, 'volume delete ' + name2)
self.assertEqual(
2,
@@ -77,25 +80,28 @@ class VolumeTests(common.BaseVolumeTests):
self.wait_for_status("volume", name2, "available")
# Test list
- cmd_output = json.loads(self.openstack(
- 'volume list -f json '
- ))
+ cmd_output = self.openstack(
+ 'volume list ',
+ parse_output=True,
+ )
names = [x["Name"] for x in cmd_output]
self.assertIn(name1, names)
self.assertIn(name2, names)
# Test list --long
- cmd_output = json.loads(self.openstack(
- 'volume list -f json --long'
- ))
+ cmd_output = self.openstack(
+ 'volume list --long',
+ parse_output=True,
+ )
bootable = [x["Bootable"] for x in cmd_output]
self.assertIn('false', bootable)
# Test list --name
- cmd_output = json.loads(self.openstack(
- 'volume list -f json ' +
- '--name ' + name1
- ))
+ cmd_output = self.openstack(
+ 'volume list ' +
+ '--name ' + name1,
+ parse_output=True,
+ )
names = [x["Name"] for x in cmd_output]
self.assertIn(name1, names)
self.assertNotIn(name2, names)
@@ -103,13 +109,14 @@ class VolumeTests(common.BaseVolumeTests):
def test_volume_set_and_unset(self):
"""Tests create volume, set, unset, show, delete"""
name = uuid.uuid4().hex
- cmd_output = json.loads(self.openstack(
- 'volume create -f json ' +
+ cmd_output = self.openstack(
+ 'volume create ' +
'--size 1 ' +
'--description aaaa ' +
'--property Alpha=a ' +
- name
- ))
+ name,
+ parse_output=True,
+ )
self.assertEqual(
name,
cmd_output["name"],
@@ -148,10 +155,11 @@ class VolumeTests(common.BaseVolumeTests):
)
self.assertOutput('', raw_output)
- cmd_output = json.loads(self.openstack(
- 'volume show -f json ' +
- new_name
- ))
+ cmd_output = self.openstack(
+ 'volume show ' +
+ new_name,
+ parse_output=True,
+ )
self.assertEqual(
new_name,
cmd_output["name"],
@@ -181,10 +189,11 @@ class VolumeTests(common.BaseVolumeTests):
)
self.assertOutput('', raw_output)
- cmd_output = json.loads(self.openstack(
- 'volume show -f json ' +
- new_name
- ))
+ cmd_output = self.openstack(
+ 'volume show ' +
+ new_name,
+ parse_output=True,
+ )
self.assertEqual(
{'Gamma': 'c'},
cmd_output["properties"],
@@ -193,42 +202,46 @@ class VolumeTests(common.BaseVolumeTests):
def test_volume_create_and_list_and_show_backward_compatibility(self):
"""Test backward compatibility of create, list, show"""
name1 = uuid.uuid4().hex
- json_output = json.loads(self.openstack(
- 'volume create -f json ' +
+ output = self.openstack(
+ 'volume create ' +
'-c display_name -c id ' +
'--size 1 ' +
- name1
- ))
- self.assertIn('display_name', json_output)
- self.assertEqual(name1, json_output['display_name'])
- self.assertIn('id', json_output)
- volume_id = json_output['id']
+ name1,
+ parse_output=True,
+ )
+ self.assertIn('display_name', output)
+ self.assertEqual(name1, output['display_name'])
+ self.assertIn('id', output)
+ volume_id = output['id']
self.assertIsNotNone(volume_id)
- self.assertNotIn('name', json_output)
+ self.assertNotIn('name', output)
self.addCleanup(self.openstack, 'volume delete ' + volume_id)
self.wait_for_status("volume", name1, "available")
- json_output = json.loads(self.openstack(
- 'volume list -f json ' +
- '-c "Display Name"'
- ))
- for each_volume in json_output:
+ output = self.openstack(
+ 'volume list ' +
+ '-c "Display Name"',
+ parse_output=True,
+ )
+ for each_volume in output:
self.assertIn('Display Name', each_volume)
- json_output = json.loads(self.openstack(
- 'volume list -f json ' +
- '-c "Name"'
- ))
- for each_volume in json_output:
+ output = self.openstack(
+ 'volume list ' +
+ '-c "Name"',
+ parse_output=True,
+ )
+ for each_volume in output:
self.assertIn('Name', each_volume)
- json_output = json.loads(self.openstack(
- 'volume show -f json ' +
+ output = self.openstack(
+ 'volume show ' +
'-c display_name -c id ' +
- name1
- ))
- self.assertIn('display_name', json_output)
- self.assertEqual(name1, json_output['display_name'])
- self.assertIn('id', json_output)
- self.assertNotIn('name', json_output)
+ name1,
+ parse_output=True,
+ )
+ self.assertIn('display_name', output)
+ self.assertEqual(name1, output['display_name'])
+ self.assertIn('id', output)
+ self.assertNotIn('name', output)
diff --git a/openstackclient/tests/functional/volume/v1/test_volume_type.py b/openstackclient/tests/functional/volume/v1/test_volume_type.py
index fb8dabdb..037d45f0 100644
--- a/openstackclient/tests/functional/volume/v1/test_volume_type.py
+++ b/openstackclient/tests/functional/volume/v1/test_volume_type.py
@@ -10,7 +10,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import json
import time
import uuid
@@ -22,10 +21,11 @@ class VolumeTypeTests(common.BaseVolumeTests):
def test_volume_type_create_list(self):
name = uuid.uuid4().hex
- cmd_output = json.loads(self.openstack(
- 'volume type create -f json --private ' +
+ cmd_output = self.openstack(
+ 'volume type create --private ' +
name,
- ))
+ parse_output=True,
+ )
self.addCleanup(
self.openstack,
'volume type delete ' +
@@ -33,26 +33,29 @@ class VolumeTypeTests(common.BaseVolumeTests):
)
self.assertEqual(name, cmd_output['name'])
- cmd_output = json.loads(self.openstack(
- 'volume type show -f json %s' % name
- ))
+ cmd_output = self.openstack(
+ 'volume type show %s' % name,
+ parse_output=True,
+ )
self.assertEqual(self.NAME, cmd_output['name'])
- cmd_output = json.loads(self.openstack('volume type list -f json'))
+ cmd_output = self.openstack('volume type list', parse_output=True)
self.assertIn(self.NAME, [t['Name'] for t in cmd_output])
- cmd_output = json.loads(self.openstack(
- 'volume type list -f json --default'
- ))
+ cmd_output = self.openstack(
+ 'volume type list --default',
+ parse_output=True,
+ )
self.assertEqual(1, len(cmd_output))
self.assertEqual('lvmdriver-1', cmd_output[0]['Name'])
def test_volume_type_set_unset_properties(self):
name = uuid.uuid4().hex
- cmd_output = json.loads(self.openstack(
- 'volume type create -f json --private ' +
+ cmd_output = self.openstack(
+ 'volume type create --private ' +
name,
- ))
+ parse_output=True,
+ )
self.addCleanup(
self.openstack,
'volume type delete ' + name
@@ -63,26 +66,29 @@ class VolumeTypeTests(common.BaseVolumeTests):
'volume type set --property a=b --property c=d %s' % name
)
self.assertEqual("", raw_output)
- cmd_output = json.loads(self.openstack(
- 'volume type show -f json %s' % name
- ))
+ cmd_output = self.openstack(
+ 'volume type show %s' % name,
+ parse_output=True,
+ )
self.assertEqual({'a': 'b', 'c': 'd'}, cmd_output['properties'])
raw_output = self.openstack(
'volume type unset --property a %s' % name
)
self.assertEqual("", raw_output)
- cmd_output = json.loads(self.openstack(
- 'volume type show -f json %s' % name
- ))
+ cmd_output = self.openstack(
+ 'volume type show %s' % name,
+ parse_output=True,
+ )
self.assertEqual({'c': 'd'}, cmd_output['properties'])
def test_volume_type_set_unset_multiple_properties(self):
name = uuid.uuid4().hex
- cmd_output = json.loads(self.openstack(
- 'volume type create -f json --private ' +
+ cmd_output = self.openstack(
+ 'volume type create --private ' +
name,
- ))
+ parse_output=True,
+ )
self.addCleanup(
self.openstack,
'volume type delete ' + name
@@ -93,18 +99,20 @@ class VolumeTypeTests(common.BaseVolumeTests):
'volume type set --property a=b --property c=d %s' % name
)
self.assertEqual("", raw_output)
- cmd_output = json.loads(self.openstack(
- 'volume type show -f json %s' % name
- ))
+ cmd_output = self.openstack(
+ 'volume type show %s' % name,
+ parse_output=True,
+ )
self.assertEqual({'a': 'b', 'c': 'd'}, cmd_output['properties'])
raw_output = self.openstack(
'volume type unset --property a --property c %s' % name
)
self.assertEqual("", raw_output)
- cmd_output = json.loads(self.openstack(
- 'volume type show -f json %s' % name
- ))
+ cmd_output = self.openstack(
+ 'volume type show %s' % name,
+ parse_output=True,
+ )
self.assertEqual({}, cmd_output['properties'])
def test_multi_delete(self):
@@ -118,20 +126,22 @@ class VolumeTypeTests(common.BaseVolumeTests):
raw_output = self.openstack(cmd)
self.assertOutput('', raw_output)
- # NOTE: Add some basic funtional tests with the old format to
+ # NOTE: Add some basic functional tests with the old format to
# make sure the command works properly, need to change
# these to new test format when beef up all tests for
- # volume tye commands.
+ # volume type commands.
def test_encryption_type(self):
encryption_type = uuid.uuid4().hex
# test create new encryption type
- cmd_output = json.loads(self.openstack(
- 'volume type create -f json '
+ cmd_output = self.openstack(
+ 'volume type create '
'--encryption-provider LuksEncryptor '
'--encryption-cipher aes-xts-plain64 '
'--encryption-key-size 128 '
'--encryption-control-location front-end ' +
-            encryption_type))
+            encryption_type,
+            parse_output=True,
+        )
expected = {'provider': 'LuksEncryptor',
'cipher': 'aes-xts-plain64',
'key_size': 128,
@@ -139,8 +147,10 @@ class VolumeTypeTests(common.BaseVolumeTests):
for attr, value in expected.items():
self.assertEqual(value, cmd_output['encryption'][attr])
# test show encryption type
- cmd_output = json.loads(self.openstack(
- 'volume type show -f json --encryption-type ' + encryption_type))
+ cmd_output = self.openstack(
+ 'volume type show --encryption-type ' + encryption_type,
+ parse_output=True,
+ )
expected = {'provider': 'LuksEncryptor',
'cipher': 'aes-xts-plain64',
'key_size': 128,
@@ -148,8 +158,10 @@ class VolumeTypeTests(common.BaseVolumeTests):
for attr, value in expected.items():
self.assertEqual(value, cmd_output['encryption'][attr])
# test list encryption type
- cmd_output = json.loads(self.openstack(
- 'volume type list -f json --encryption-type'))
+ cmd_output = self.openstack(
+ 'volume type list --encryption-type',
+ parse_output=True,
+ )
encryption_output = [t['Encryption'] for t in cmd_output
if t['Name'] == encryption_type][0]
expected = {'provider': 'LuksEncryptor',
@@ -169,19 +181,20 @@ class VolumeTypeTests(common.BaseVolumeTests):
self.assertEqual('', raw_output)
name = uuid.uuid4().hex
- cmd_output = json.loads(self.openstack(
- 'volume type create -f json --private ' +
- name,
- ))
+ cmd_output = self.openstack(
+ 'volume type create --private ' + name,
+ parse_output=True,
+ )
self.addCleanup(
self.openstack,
'volume type delete ' + name,
)
self.assertEqual(name, cmd_output['name'])
- cmd_output = json.loads(self.openstack(
- 'volume type show -f json --encryption-type ' + name
- ))
+ cmd_output = self.openstack(
+ 'volume type show --encryption-type ' + name,
+ parse_output=True,
+ )
expected = {'provider': 'LuksEncryptor',
'cipher': 'aes-xts-plain64',
'key_size': 128,
@@ -193,9 +206,10 @@ class VolumeTypeTests(common.BaseVolumeTests):
'volume type unset --encryption-type ' + name
)
self.assertEqual('', raw_output)
- cmd_output = json.loads(self.openstack(
- 'volume type show -f json --encryption-type ' + name
- ))
+ cmd_output = self.openstack(
+ 'volume type show --encryption-type ' + name,
+ parse_output=True,
+ )
self.assertEqual({}, cmd_output['encryption'])
# test delete encryption type
raw_output = self.openstack('volume type delete ' + encryption_type)
diff --git a/openstackclient/tests/functional/volume/v2/test_qos.py b/openstackclient/tests/functional/volume/v2/test_qos.py
index f9f6e099..0a540573 100644
--- a/openstackclient/tests/functional/volume/v2/test_qos.py
+++ b/openstackclient/tests/functional/volume/v2/test_qos.py
@@ -10,7 +10,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import json
import uuid
from openstackclient.tests.functional.volume.v2 import common
@@ -22,29 +21,32 @@ class QosTests(common.BaseVolumeTests):
def test_volume_qos_create_delete_list(self):
"""Test create, list, delete multiple"""
name1 = uuid.uuid4().hex
- cmd_output = json.loads(self.openstack(
- 'volume qos create -f json ' +
- name1
- ))
+ cmd_output = self.openstack(
+ 'volume qos create ' +
+ name1,
+ parse_output=True,
+ )
self.assertEqual(
name1,
cmd_output['name']
)
name2 = uuid.uuid4().hex
- cmd_output = json.loads(self.openstack(
- 'volume qos create -f json ' +
- name2
- ))
+ cmd_output = self.openstack(
+ 'volume qos create ' +
+ name2,
+ parse_output=True,
+ )
self.assertEqual(
name2,
cmd_output['name']
)
# Test list
- cmd_output = json.loads(self.openstack(
- 'volume qos list -f json'
- ))
+ cmd_output = self.openstack(
+ 'volume qos list',
+ parse_output=True,
+ )
names = [x["Name"] for x in cmd_output]
self.assertIn(name1, names)
self.assertIn(name2, names)
@@ -57,12 +59,13 @@ class QosTests(common.BaseVolumeTests):
"""Tests create volume qos, set, unset, show, delete"""
name = uuid.uuid4().hex
- cmd_output = json.loads(self.openstack(
- 'volume qos create -f json ' +
+ cmd_output = self.openstack(
+ 'volume qos create ' +
'--consumer front-end '
'--property Alpha=a ' +
- name
- ))
+ name,
+ parse_output=True,
+ )
self.addCleanup(self.openstack, 'volume qos delete ' + name)
self.assertEqual(
name,
@@ -88,10 +91,11 @@ class QosTests(common.BaseVolumeTests):
self.assertOutput('', raw_output)
# Test volume qos show
- cmd_output = json.loads(self.openstack(
- 'volume qos show -f json ' +
- name
- ))
+ cmd_output = self.openstack(
+ 'volume qos show ' +
+ name,
+ parse_output=True,
+ )
self.assertEqual(
name,
cmd_output['name']
@@ -109,10 +113,11 @@ class QosTests(common.BaseVolumeTests):
)
self.assertOutput('', raw_output)
- cmd_output = json.loads(self.openstack(
- 'volume qos show -f json ' +
- name
- ))
+ cmd_output = self.openstack(
+ 'volume qos show ' +
+ name,
+ parse_output=True,
+ )
self.assertEqual(
name,
cmd_output['name']
@@ -125,10 +130,11 @@ class QosTests(common.BaseVolumeTests):
def test_volume_qos_asso_disasso(self):
"""Tests associate and disassociate qos with volume type"""
vol_type1 = uuid.uuid4().hex
- cmd_output = json.loads(self.openstack(
- 'volume type create -f json ' +
- vol_type1
- ))
+ cmd_output = self.openstack(
+ 'volume type create ' +
+ vol_type1,
+ parse_output=True,
+ )
self.assertEqual(
vol_type1,
cmd_output['name']
@@ -136,10 +142,11 @@ class QosTests(common.BaseVolumeTests):
self.addCleanup(self.openstack, 'volume type delete ' + vol_type1)
vol_type2 = uuid.uuid4().hex
- cmd_output = json.loads(self.openstack(
- 'volume type create -f json ' +
- vol_type2
- ))
+ cmd_output = self.openstack(
+ 'volume type create ' +
+ vol_type2,
+ parse_output=True,
+ )
self.assertEqual(
vol_type2,
cmd_output['name']
@@ -147,10 +154,11 @@ class QosTests(common.BaseVolumeTests):
self.addCleanup(self.openstack, 'volume type delete ' + vol_type2)
name = uuid.uuid4().hex
- cmd_output = json.loads(self.openstack(
- 'volume qos create -f json ' +
- name
- ))
+ cmd_output = self.openstack(
+ 'volume qos create ' +
+ name,
+ parse_output=True,
+ )
self.assertEqual(
name,
cmd_output['name']
@@ -169,10 +177,11 @@ class QosTests(common.BaseVolumeTests):
)
self.assertOutput('', raw_output)
- cmd_output = json.loads(self.openstack(
- 'volume qos show -f json ' +
- name
- ))
+ cmd_output = self.openstack(
+ 'volume qos show ' +
+ name,
+ parse_output=True,
+ )
types = cmd_output["associations"]
self.assertIn(vol_type1, types)
self.assertIn(vol_type2, types)
@@ -184,10 +193,11 @@ class QosTests(common.BaseVolumeTests):
' ' + name
)
self.assertOutput('', raw_output)
- cmd_output = json.loads(self.openstack(
- 'volume qos show -f json ' +
- name
- ))
+ cmd_output = self.openstack(
+ 'volume qos show ' +
+ name,
+ parse_output=True,
+ )
types = cmd_output["associations"]
self.assertNotIn(vol_type1, types)
self.assertIn(vol_type2, types)
@@ -198,10 +208,11 @@ class QosTests(common.BaseVolumeTests):
name + ' ' + vol_type1
)
self.assertOutput('', raw_output)
- cmd_output = json.loads(self.openstack(
- 'volume qos show -f json ' +
- name
- ))
+ cmd_output = self.openstack(
+ 'volume qos show ' +
+ name,
+ parse_output=True,
+ )
types = cmd_output["associations"]
self.assertIn(vol_type1, types)
self.assertIn(vol_type2, types)
@@ -211,8 +222,9 @@ class QosTests(common.BaseVolumeTests):
'--all ' + name
)
self.assertOutput('', raw_output)
- cmd_output = json.loads(self.openstack(
- 'volume qos show -f json ' +
- name
- ))
+ cmd_output = self.openstack(
+ 'volume qos show ' +
+ name,
+ parse_output=True,
+ )
self.assertNotIn("associations", cmd_output.keys())
diff --git a/openstackclient/tests/functional/volume/v2/test_service.py b/openstackclient/tests/functional/volume/v2/test_service.py
index 7ec43fe8..5794f81f 100644
--- a/openstackclient/tests/functional/volume/v2/test_service.py
+++ b/openstackclient/tests/functional/volume/v2/test_service.py
@@ -10,8 +10,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import json
-
from openstackclient.tests.functional.volume.v2 import common
@@ -19,19 +17,19 @@ class VolumeServiceTests(common.BaseVolumeTests):
"""Functional tests for volume service."""
def test_volume_service_list(self):
- cmd_output = json.loads(self.openstack(
- 'volume service list -f json'))
+ cmd_output = self.openstack('volume service list', parse_output=True)
# Get the nonredundant services and hosts
services = list(set([x['Binary'] for x in cmd_output]))
hosts = list(set([x['Host'] for x in cmd_output]))
# Test volume service list --service
- cmd_output = json.loads(self.openstack(
- 'volume service list -f json ' +
+ cmd_output = self.openstack(
+ 'volume service list ' +
'--service ' +
- services[0]
- ))
+ services[0],
+ parse_output=True,
+ )
for x in cmd_output:
self.assertEqual(
services[0],
@@ -39,11 +37,12 @@ class VolumeServiceTests(common.BaseVolumeTests):
)
# Test volume service list --host
- cmd_output = json.loads(self.openstack(
- 'volume service list -f json ' +
+ cmd_output = self.openstack(
+ 'volume service list ' +
'--host ' +
- hosts[0]
- ))
+ hosts[0],
+ parse_output=True,
+ )
for x in cmd_output:
self.assertIn(
hosts[0],
@@ -53,9 +52,10 @@ class VolumeServiceTests(common.BaseVolumeTests):
def test_volume_service_set(self):
# Get a service and host
- cmd_output = json.loads(self.openstack(
- 'volume service list -f json'
- ))
+ cmd_output = self.openstack(
+ 'volume service list',
+ parse_output=True,
+ )
service_1 = cmd_output[0]['Binary']
host_1 = cmd_output[0]['Host']
@@ -67,9 +67,10 @@ class VolumeServiceTests(common.BaseVolumeTests):
)
self.assertOutput('', raw_output)
- cmd_output = json.loads(self.openstack(
- 'volume service list -f json --long'
- ))
+ cmd_output = self.openstack(
+ 'volume service list --long',
+ parse_output=True,
+ )
self.assertEqual(
'enabled',
cmd_output[0]['Status']
@@ -89,9 +90,10 @@ class VolumeServiceTests(common.BaseVolumeTests):
)
self.assertOutput('', raw_output)
- cmd_output = json.loads(self.openstack(
- 'volume service list -f json --long'
- ))
+ cmd_output = self.openstack(
+ 'volume service list --long',
+ parse_output=True,
+ )
self.assertEqual(
'disabled',
cmd_output[0]['Status']
diff --git a/openstackclient/tests/functional/volume/v2/test_transfer_request.py b/openstackclient/tests/functional/volume/v2/test_transfer_request.py
index 00d0865c..ac71cba2 100644
--- a/openstackclient/tests/functional/volume/v2/test_transfer_request.py
+++ b/openstackclient/tests/functional/volume/v2/test_transfer_request.py
@@ -10,7 +10,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import json
import uuid
from openstackclient.tests.functional.volume.v2 import common
@@ -26,11 +25,12 @@ class TransferRequestTests(common.BaseVolumeTests):
xfer_name = uuid.uuid4().hex
# create a volume
- cmd_output = json.loads(self.openstack(
- 'volume create -f json ' +
+ cmd_output = self.openstack(
+ 'volume create ' +
'--size 1 ' +
- volume_name
- ))
+ volume_name,
+ parse_output=True,
+ )
self.assertEqual(volume_name, cmd_output['name'])
self.addCleanup(
self.openstack,
@@ -42,12 +42,13 @@ class TransferRequestTests(common.BaseVolumeTests):
# create volume transfer request for the volume
# and get the auth_key of the new transfer request
- cmd_output = json.loads(self.openstack(
+ cmd_output = self.openstack(
'--os-volume-api-version ' + self.API_VERSION + ' ' +
- 'volume transfer request create -f json ' +
+ 'volume transfer request create ' +
' --name ' + xfer_name + ' ' +
- volume_name
- ))
+ volume_name,
+ parse_output=True,
+ )
self.assertEqual(xfer_name, cmd_output['name'])
xfer_id = cmd_output['id']
auth_key = cmd_output['auth_key']
@@ -55,12 +56,13 @@ class TransferRequestTests(common.BaseVolumeTests):
self.wait_for_status("volume", volume_name, "awaiting-transfer")
# accept the volume transfer request
- cmd_output = json.loads(self.openstack(
+ cmd_output = self.openstack(
'--os-volume-api-version ' + self.API_VERSION + ' ' +
- 'volume transfer request accept -f json ' +
+ 'volume transfer request accept ' +
'--auth-key ' + auth_key + ' ' +
- xfer_id
- ))
+ xfer_id,
+ parse_output=True,
+ )
self.assertEqual(xfer_name, cmd_output['name'])
self.wait_for_status("volume", volume_name, "available")
@@ -69,11 +71,12 @@ class TransferRequestTests(common.BaseVolumeTests):
xfer_name = uuid.uuid4().hex
# create a volume
- cmd_output = json.loads(self.openstack(
- 'volume create -f json ' +
+ cmd_output = self.openstack(
+ 'volume create ' +
'--size 1 ' +
- volume_name
- ))
+ volume_name,
+ parse_output=True,
+ )
self.assertEqual(volume_name, cmd_output['name'])
self.addCleanup(
self.openstack,
@@ -83,29 +86,32 @@ class TransferRequestTests(common.BaseVolumeTests):
)
self.wait_for_status("volume", volume_name, "available")
- cmd_output = json.loads(self.openstack(
+ cmd_output = self.openstack(
'--os-volume-api-version ' + self.API_VERSION + ' ' +
- 'volume transfer request create -f json ' +
+ 'volume transfer request create ' +
' --name ' + xfer_name + ' ' +
- volume_name
- ))
+ volume_name,
+ parse_output=True,
+ )
self.assertEqual(xfer_name, cmd_output['name'])
xfer_id = cmd_output['id']
auth_key = cmd_output['auth_key']
self.assertTrue(auth_key)
self.wait_for_status("volume", volume_name, "awaiting-transfer")
- cmd_output = json.loads(self.openstack(
+ cmd_output = self.openstack(
'--os-volume-api-version ' + self.API_VERSION + ' ' +
- 'volume transfer request list -f json'
- ))
+ 'volume transfer request list',
+ parse_output=True,
+ )
self.assertIn(xfer_name, [req['Name'] for req in cmd_output])
- cmd_output = json.loads(self.openstack(
+ cmd_output = self.openstack(
'--os-volume-api-version ' + self.API_VERSION + ' ' +
- 'volume transfer request show -f json ' +
- xfer_id
- ))
+ 'volume transfer request show ' +
+ xfer_id,
+ parse_output=True,
+ )
self.assertEqual(xfer_name, cmd_output['name'])
# NOTE(dtroyer): We need to delete the transfer request to allow the
diff --git a/openstackclient/tests/functional/volume/v2/test_volume.py b/openstackclient/tests/functional/volume/v2/test_volume.py
index 19fd5895..832dabe6 100644
--- a/openstackclient/tests/functional/volume/v2/test_volume.py
+++ b/openstackclient/tests/functional/volume/v2/test_volume.py
@@ -10,7 +10,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import json
import uuid
from openstackclient.tests.functional.volume.v2 import common
@@ -22,22 +21,24 @@ class VolumeTests(common.BaseVolumeTests):
def test_volume_delete(self):
"""Test create, delete multiple"""
name1 = uuid.uuid4().hex
- cmd_output = json.loads(self.openstack(
- 'volume create -f json ' +
+ cmd_output = self.openstack(
+ 'volume create ' +
'--size 1 ' +
- name1
- ))
+ name1,
+ parse_output=True,
+ )
self.assertEqual(
1,
cmd_output["size"],
)
name2 = uuid.uuid4().hex
- cmd_output = json.loads(self.openstack(
- 'volume create -f json ' +
+ cmd_output = self.openstack(
+ 'volume create ' +
'--size 2 ' +
- name2
- ))
+ name2,
+ parse_output=True,
+ )
self.assertEqual(
2,
cmd_output["size"],
@@ -51,11 +52,12 @@ class VolumeTests(common.BaseVolumeTests):
def test_volume_list(self):
"""Test create, list filter"""
name1 = uuid.uuid4().hex
- cmd_output = json.loads(self.openstack(
- 'volume create -f json ' +
+ cmd_output = self.openstack(
+ 'volume create ' +
'--size 1 ' +
- name1
- ))
+ name1,
+ parse_output=True,
+ )
self.addCleanup(self.openstack, 'volume delete ' + name1)
self.assertEqual(
1,
@@ -64,11 +66,12 @@ class VolumeTests(common.BaseVolumeTests):
self.wait_for_status("volume", name1, "available")
name2 = uuid.uuid4().hex
- cmd_output = json.loads(self.openstack(
- 'volume create -f json ' +
+ cmd_output = self.openstack(
+ 'volume create ' +
'--size 2 ' +
- name2
- ))
+ name2,
+ parse_output=True,
+ )
self.addCleanup(self.openstack, 'volume delete ' + name2)
self.assertEqual(
2,
@@ -83,19 +86,21 @@ class VolumeTests(common.BaseVolumeTests):
self.assertOutput('', raw_output)
# Test list --long
- cmd_output = json.loads(self.openstack(
- 'volume list -f json ' +
- '--long'
- ))
+ cmd_output = self.openstack(
+ 'volume list ' +
+ '--long',
+ parse_output=True,
+ )
names = [x["Name"] for x in cmd_output]
self.assertIn(name1, names)
self.assertIn(name2, names)
# Test list --status
- cmd_output = json.loads(self.openstack(
- 'volume list -f json ' +
- '--status error'
- ))
+ cmd_output = self.openstack(
+ 'volume list ' +
+ '--status error',
+ parse_output=True,
+ )
names = [x["Name"] for x in cmd_output]
self.assertNotIn(name1, names)
self.assertIn(name2, names)
@@ -107,13 +112,14 @@ class VolumeTests(common.BaseVolumeTests):
"""Tests create volume, set, unset, show, delete"""
name = uuid.uuid4().hex
new_name = name + "_"
- cmd_output = json.loads(self.openstack(
- 'volume create -f json ' +
+ cmd_output = self.openstack(
+ 'volume create ' +
'--size 1 ' +
'--description aaaa ' +
'--property Alpha=a ' +
- name
- ))
+ name,
+ parse_output=True,
+ )
self.addCleanup(self.openstack, 'volume delete ' + new_name)
self.assertEqual(
name,
@@ -153,10 +159,11 @@ class VolumeTests(common.BaseVolumeTests):
)
self.assertOutput('', raw_output)
- cmd_output = json.loads(self.openstack(
- 'volume show -f json ' +
- new_name
- ))
+ cmd_output = self.openstack(
+ 'volume show ' +
+ new_name,
+ parse_output=True,
+ )
self.assertEqual(
new_name,
cmd_output["name"],
@@ -191,10 +198,11 @@ class VolumeTests(common.BaseVolumeTests):
)
self.assertOutput('', raw_output)
- cmd_output = json.loads(self.openstack(
- 'volume show -f json ' +
- new_name
- ))
+ cmd_output = self.openstack(
+ 'volume show ' +
+ new_name,
+ parse_output=True,
+ )
self.assertEqual(
{'Gamma': 'c'},
cmd_output["properties"],
@@ -210,30 +218,33 @@ class VolumeTests(common.BaseVolumeTests):
volume_name = uuid.uuid4().hex
snapshot_name = uuid.uuid4().hex
# Make a snapshot
- cmd_output = json.loads(self.openstack(
- 'volume create -f json ' +
+ cmd_output = self.openstack(
+ 'volume create ' +
'--size 1 ' +
- volume_name
- ))
+ volume_name,
+ parse_output=True,
+ )
self.wait_for_status("volume", volume_name, "available")
self.assertEqual(
volume_name,
cmd_output["name"],
)
- cmd_output = json.loads(self.openstack(
- 'volume snapshot create -f json ' +
+ cmd_output = self.openstack(
+ 'volume snapshot create ' +
snapshot_name +
- ' --volume ' + volume_name
- ))
+ ' --volume ' + volume_name,
+ parse_output=True,
+ )
self.wait_for_status("volume snapshot", snapshot_name, "available")
name = uuid.uuid4().hex
# Create volume from snapshot
- cmd_output = json.loads(self.openstack(
- 'volume create -f json ' +
+ cmd_output = self.openstack(
+ 'volume create ' +
'--snapshot ' + snapshot_name +
- ' ' + name
- ))
+ ' ' + name,
+ parse_output=True,
+ )
self.addCleanup(self.openstack, 'volume delete ' + name)
self.addCleanup(self.openstack, 'volume delete ' + volume_name)
self.assertEqual(
@@ -253,11 +264,12 @@ class VolumeTests(common.BaseVolumeTests):
def test_volume_list_backward_compatibility(self):
"""Test backward compatibility of list command"""
name1 = uuid.uuid4().hex
- cmd_output = json.loads(self.openstack(
- 'volume create -f json ' +
+ cmd_output = self.openstack(
+ 'volume create ' +
'--size 1 ' +
- name1
- ))
+ name1,
+ parse_output=True,
+ )
self.addCleanup(self.openstack, 'volume delete ' + name1)
self.assertEqual(
1,
@@ -266,17 +278,19 @@ class VolumeTests(common.BaseVolumeTests):
self.wait_for_status("volume", name1, "available")
# Test list -c "Display Name"
- cmd_output = json.loads(self.openstack(
- 'volume list -f json ' +
- '-c "Display Name"'
- ))
+ cmd_output = self.openstack(
+ 'volume list ' +
+ '-c "Display Name"',
+ parse_output=True,
+ )
for each_volume in cmd_output:
self.assertIn('Display Name', each_volume)
# Test list -c "Name"
- cmd_output = json.loads(self.openstack(
- 'volume list -f json ' +
- '-c "Name"'
- ))
+ cmd_output = self.openstack(
+ 'volume list ' +
+ '-c "Name"',
+ parse_output=True,
+ )
for each_volume in cmd_output:
self.assertIn('Name', each_volume)
diff --git a/openstackclient/tests/functional/volume/v2/test_volume_backup.py b/openstackclient/tests/functional/volume/v2/test_volume_backup.py
index 6868bd40..07bd2d16 100644
--- a/openstackclient/tests/functional/volume/v2/test_volume_backup.py
+++ b/openstackclient/tests/functional/volume/v2/test_volume_backup.py
@@ -10,7 +10,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import json
import uuid
from openstackclient.tests.functional.volume.v2 import common
@@ -22,7 +21,7 @@ class VolumeBackupTests(common.BaseVolumeTests):
def setUp(self):
super(VolumeBackupTests, self).setUp()
self.backup_enabled = False
- serv_list = json.loads(self.openstack('volume service list -f json'))
+ serv_list = self.openstack('volume service list', parse_output=True)
for service in serv_list:
if service['Binary'] == 'cinder-backup':
if service['Status'] == 'enabled':
@@ -34,24 +33,28 @@ class VolumeBackupTests(common.BaseVolumeTests):
self.skipTest('Backup service is not enabled')
vol_id = uuid.uuid4().hex
# create a volume
- json.loads(self.openstack(
- 'volume create -f json ' +
+ self.openstack(
+ 'volume create ' +
'--size 1 ' +
- vol_id
- ))
+ vol_id,
+ parse_output=True,
+ )
self.wait_for_status("volume", vol_id, "available")
# create a backup
- backup = json.loads(self.openstack(
- 'volume backup create -f json ' +
- vol_id
- ))
+ backup = self.openstack(
+ 'volume backup create ' +
+ vol_id,
+ parse_output=True,
+ )
self.wait_for_status("volume backup", backup['id'], "available")
# restore the backup
- backup_restored = json.loads(self.openstack(
- 'volume backup restore -f json %s %s'
- % (backup['id'], vol_id)))
+ backup_restored = self.openstack(
+ 'volume backup restore %s %s'
+ % (backup['id'], vol_id),
+ parse_output=True,
+ )
self.assertEqual(backup_restored['backup_id'], backup['id'])
self.wait_for_status("volume backup", backup['id'], "available")
self.wait_for_status("volume", backup_restored['volume_id'],
diff --git a/openstackclient/tests/functional/volume/v2/test_volume_snapshot.py b/openstackclient/tests/functional/volume/v2/test_volume_snapshot.py
index 4977a73e..12fdad2c 100644
--- a/openstackclient/tests/functional/volume/v2/test_volume_snapshot.py
+++ b/openstackclient/tests/functional/volume/v2/test_volume_snapshot.py
@@ -10,7 +10,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import json
import uuid
from openstackclient.tests.functional.volume.v2 import common
@@ -25,11 +24,12 @@ class VolumeSnapshotTests(common.BaseVolumeTests):
def setUpClass(cls):
super(VolumeSnapshotTests, cls).setUpClass()
# create a volume for all tests to create snapshot
- cmd_output = json.loads(cls.openstack(
- 'volume create -f json ' +
+ cmd_output = cls.openstack(
+ 'volume create ' +
'--size 1 ' +
- cls.VOLLY
- ))
+ cls.VOLLY,
+ parse_output=True,
+ )
cls.wait_for_status('volume', cls.VOLLY, 'available')
cls.VOLUME_ID = cmd_output['id']
@@ -46,22 +46,24 @@ class VolumeSnapshotTests(common.BaseVolumeTests):
def test_volume_snapshot_delete(self):
"""Test create, delete multiple"""
name1 = uuid.uuid4().hex
- cmd_output = json.loads(self.openstack(
- 'volume snapshot create -f json ' +
+ cmd_output = self.openstack(
+ 'volume snapshot create ' +
name1 +
- ' --volume ' + self.VOLLY
- ))
+ ' --volume ' + self.VOLLY,
+ parse_output=True,
+ )
self.assertEqual(
name1,
cmd_output["name"],
)
name2 = uuid.uuid4().hex
- cmd_output = json.loads(self.openstack(
- 'volume snapshot create -f json ' +
+ cmd_output = self.openstack(
+ 'volume snapshot create ' +
name2 +
- ' --volume ' + self.VOLLY
- ))
+ ' --volume ' + self.VOLLY,
+ parse_output=True,
+ )
self.assertEqual(
name2,
cmd_output["name"],
@@ -79,11 +81,12 @@ class VolumeSnapshotTests(common.BaseVolumeTests):
def test_volume_snapshot_list(self):
"""Test create, list filter"""
name1 = uuid.uuid4().hex
- cmd_output = json.loads(self.openstack(
- 'volume snapshot create -f json ' +
+ cmd_output = self.openstack(
+ 'volume snapshot create ' +
name1 +
- ' --volume ' + self.VOLLY
- ))
+ ' --volume ' + self.VOLLY,
+ parse_output=True,
+ )
self.addCleanup(self.wait_for_delete, 'volume snapshot', name1)
self.addCleanup(self.openstack, 'volume snapshot delete ' + name1)
self.assertEqual(
@@ -101,11 +104,12 @@ class VolumeSnapshotTests(common.BaseVolumeTests):
self.wait_for_status('volume snapshot', name1, 'available')
name2 = uuid.uuid4().hex
- cmd_output = json.loads(self.openstack(
- 'volume snapshot create -f json ' +
+ cmd_output = self.openstack(
+ 'volume snapshot create ' +
name2 +
- ' --volume ' + self.VOLLY
- ))
+ ' --volume ' + self.VOLLY,
+ parse_output=True,
+ )
self.addCleanup(self.wait_for_delete, 'volume snapshot', name2)
self.addCleanup(self.openstack, 'volume snapshot delete ' + name2)
self.assertEqual(
@@ -130,11 +134,12 @@ class VolumeSnapshotTests(common.BaseVolumeTests):
self.assertOutput('', raw_output)
# Test list --long, --status
- cmd_output = json.loads(self.openstack(
- 'volume snapshot list -f json ' +
+ cmd_output = self.openstack(
+ 'volume snapshot list ' +
'--long ' +
- '--status error_deleting'
- ))
+ '--status error_deleting',
+ parse_output=True,
+ )
names = [x["Name"] for x in cmd_output]
self.assertNotIn(name1, names)
self.assertIn(name2, names)
@@ -147,29 +152,32 @@ class VolumeSnapshotTests(common.BaseVolumeTests):
self.assertOutput('', raw_output)
# Test list --long, --status
- cmd_output = json.loads(self.openstack(
- 'volume snapshot list -f json ' +
+ cmd_output = self.openstack(
+ 'volume snapshot list ' +
'--long ' +
- '--status error'
- ))
+ '--status error',
+ parse_output=True,
+ )
names = [x["Name"] for x in cmd_output]
self.assertNotIn(name1, names)
self.assertIn(name2, names)
# Test list --volume
- cmd_output = json.loads(self.openstack(
- 'volume snapshot list -f json ' +
- '--volume ' + self.VOLLY
- ))
+ cmd_output = self.openstack(
+ 'volume snapshot list ' +
+ '--volume ' + self.VOLLY,
+ parse_output=True,
+ )
names = [x["Name"] for x in cmd_output]
self.assertIn(name1, names)
self.assertIn(name2, names)
# Test list --name
- cmd_output = json.loads(self.openstack(
- 'volume snapshot list -f json ' +
- '--name ' + name1
- ))
+ cmd_output = self.openstack(
+ 'volume snapshot list ' +
+ '--name ' + name1,
+ parse_output=True,
+ )
names = [x["Name"] for x in cmd_output]
self.assertIn(name1, names)
self.assertNotIn(name2, names)
@@ -178,13 +186,14 @@ class VolumeSnapshotTests(common.BaseVolumeTests):
"""Test create, set, unset, show, delete volume snapshot"""
name = uuid.uuid4().hex
new_name = name + "_"
- cmd_output = json.loads(self.openstack(
- 'volume snapshot create -f json ' +
+ cmd_output = self.openstack(
+ 'volume snapshot create ' +
'--volume ' + self.VOLLY +
' --description aaaa ' +
'--property Alpha=a ' +
- name
- ))
+ name,
+ parse_output=True,
+ )
self.addCleanup(self.wait_for_delete, 'volume snapshot', new_name)
self.addCleanup(self.openstack, 'volume snapshot delete ' + new_name)
self.assertEqual(
@@ -217,10 +226,11 @@ class VolumeSnapshotTests(common.BaseVolumeTests):
self.assertOutput('', raw_output)
# Show snapshot set result
- cmd_output = json.loads(self.openstack(
- 'volume snapshot show -f json ' +
- new_name
- ))
+ cmd_output = self.openstack(
+ 'volume snapshot show ' +
+ new_name,
+ parse_output=True,
+ )
self.assertEqual(
new_name,
cmd_output["name"],
@@ -246,10 +256,11 @@ class VolumeSnapshotTests(common.BaseVolumeTests):
)
self.assertOutput('', raw_output)
- cmd_output = json.loads(self.openstack(
- 'volume snapshot show -f json ' +
- new_name
- ))
+ cmd_output = self.openstack(
+ 'volume snapshot show ' +
+ new_name,
+ parse_output=True,
+ )
self.assertEqual(
{'Beta': 'b'},
cmd_output["properties"],
@@ -262,10 +273,11 @@ class VolumeSnapshotTests(common.BaseVolumeTests):
new_name,
)
self.assertOutput('', raw_output)
- cmd_output = json.loads(self.openstack(
- 'volume snapshot show -f json ' +
- new_name
- ))
+ cmd_output = self.openstack(
+ 'volume snapshot show ' +
+ new_name,
+ parse_output=True,
+ )
self.assertNotIn(
{'Beta': 'b'},
cmd_output["properties"],
diff --git a/openstackclient/tests/functional/volume/v2/test_volume_type.py b/openstackclient/tests/functional/volume/v2/test_volume_type.py
index 3f1a6ea8..5cad9297 100644
--- a/openstackclient/tests/functional/volume/v2/test_volume_type.py
+++ b/openstackclient/tests/functional/volume/v2/test_volume_type.py
@@ -10,7 +10,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import json
import time
import uuid
@@ -22,36 +21,40 @@ class VolumeTypeTests(common.BaseVolumeTests):
def test_volume_type_create_list(self):
name = uuid.uuid4().hex
- cmd_output = json.loads(self.openstack(
- 'volume type create -f json --private ' +
+ cmd_output = self.openstack(
+ 'volume type create --private ' +
name,
- ))
+ parse_output=True,
+ )
self.addCleanup(
self.openstack,
'volume type delete ' + name,
)
self.assertEqual(name, cmd_output['name'])
- cmd_output = json.loads(self.openstack(
- 'volume type show -f json %s' % name
- ))
+ cmd_output = self.openstack(
+ 'volume type show %s' % name,
+ parse_output=True,
+ )
self.assertEqual(name, cmd_output['name'])
- cmd_output = json.loads(self.openstack('volume type list -f json'))
+ cmd_output = self.openstack('volume type list', parse_output=True)
self.assertIn(name, [t['Name'] for t in cmd_output])
- cmd_output = json.loads(self.openstack(
- 'volume type list -f json --default'
- ))
+ cmd_output = self.openstack(
+ 'volume type list --default',
+ parse_output=True,
+ )
self.assertEqual(1, len(cmd_output))
self.assertEqual('lvmdriver-1', cmd_output[0]['Name'])
def test_volume_type_set_unset_properties(self):
name = uuid.uuid4().hex
- cmd_output = json.loads(self.openstack(
- 'volume type create -f json --private ' +
+ cmd_output = self.openstack(
+ 'volume type create --private ' +
name,
- ))
+ parse_output=True,
+ )
self.addCleanup(
self.openstack,
'volume type delete ' + name
@@ -62,26 +65,29 @@ class VolumeTypeTests(common.BaseVolumeTests):
'volume type set --property a=b --property c=d %s' % name
)
self.assertEqual("", raw_output)
- cmd_output = json.loads(self.openstack(
- 'volume type show -f json %s' % name
- ))
+ cmd_output = self.openstack(
+ 'volume type show %s' % name,
+ parse_output=True,
+ )
self.assertEqual({'a': 'b', 'c': 'd'}, cmd_output['properties'])
raw_output = self.openstack(
'volume type unset --property a %s' % name
)
self.assertEqual("", raw_output)
- cmd_output = json.loads(self.openstack(
- 'volume type show -f json %s' % name
- ))
+ cmd_output = self.openstack(
+ 'volume type show %s' % name,
+ parse_output=True,
+ )
self.assertEqual({'c': 'd'}, cmd_output['properties'])
def test_volume_type_set_unset_multiple_properties(self):
name = uuid.uuid4().hex
- cmd_output = json.loads(self.openstack(
- 'volume type create -f json --private ' +
+ cmd_output = self.openstack(
+ 'volume type create --private ' +
name,
- ))
+ parse_output=True,
+ )
self.addCleanup(
self.openstack,
'volume type delete ' + name
@@ -92,26 +98,29 @@ class VolumeTypeTests(common.BaseVolumeTests):
'volume type set --property a=b --property c=d %s' % name
)
self.assertEqual("", raw_output)
- cmd_output = json.loads(self.openstack(
- 'volume type show -f json %s' % name
- ))
+ cmd_output = self.openstack(
+ 'volume type show %s' % name,
+ parse_output=True,
+ )
self.assertEqual({'a': 'b', 'c': 'd'}, cmd_output['properties'])
raw_output = self.openstack(
'volume type unset --property a --property c %s' % name
)
self.assertEqual("", raw_output)
- cmd_output = json.loads(self.openstack(
- 'volume type show -f json %s' % name
- ))
+ cmd_output = self.openstack(
+ 'volume type show %s' % name,
+ parse_output=True,
+ )
self.assertEqual({}, cmd_output['properties'])
def test_volume_type_set_unset_project(self):
name = uuid.uuid4().hex
- cmd_output = json.loads(self.openstack(
- 'volume type create -f json --private ' +
+ cmd_output = self.openstack(
+ 'volume type create --private ' +
name,
- ))
+ parse_output=True,
+ )
self.addCleanup(
self.openstack,
'volume type delete ' + name
@@ -139,21 +148,23 @@ class VolumeTypeTests(common.BaseVolumeTests):
raw_output = self.openstack(cmd)
self.assertOutput('', raw_output)
- # NOTE: Add some basic funtional tests with the old format to
+ # NOTE: Add some basic functional tests with the old format to
# make sure the command works properly, need to change
# these to new test format when beef up all tests for
- # volume tye commands.
+ # volume type commands.
def test_encryption_type(self):
name = uuid.uuid4().hex
encryption_type = uuid.uuid4().hex
# test create new encryption type
- cmd_output = json.loads(self.openstack(
- 'volume type create -f json '
+ cmd_output = self.openstack(
+ 'volume type create '
'--encryption-provider LuksEncryptor '
'--encryption-cipher aes-xts-plain64 '
'--encryption-key-size 128 '
'--encryption-control-location front-end ' +
- encryption_type))
+ encryption_type,
+ parse_output=True,
+ )
expected = {'provider': 'LuksEncryptor',
'cipher': 'aes-xts-plain64',
'key_size': 128,
@@ -161,8 +172,10 @@ class VolumeTypeTests(common.BaseVolumeTests):
for attr, value in expected.items():
self.assertEqual(value, cmd_output['encryption'][attr])
# test show encryption type
- cmd_output = json.loads(self.openstack(
- 'volume type show -f json --encryption-type ' + encryption_type))
+ cmd_output = self.openstack(
+ 'volume type show --encryption-type ' + encryption_type,
+ parse_output=True,
+ )
expected = {'provider': 'LuksEncryptor',
'cipher': 'aes-xts-plain64',
'key_size': 128,
@@ -170,8 +183,10 @@ class VolumeTypeTests(common.BaseVolumeTests):
for attr, value in expected.items():
self.assertEqual(value, cmd_output['encryption'][attr])
# test list encryption type
- cmd_output = json.loads(self.openstack(
- 'volume type list -f json --encryption-type'))
+ cmd_output = self.openstack(
+ 'volume type list --encryption-type',
+ parse_output=True,
+ )
encryption_output = [t['Encryption'] for t in cmd_output
if t['Name'] == encryption_type][0]
expected = {'provider': 'LuksEncryptor',
@@ -187,8 +202,10 @@ class VolumeTypeTests(common.BaseVolumeTests):
'--encryption-control-location back-end ' +
encryption_type)
self.assertEqual('', raw_output)
- cmd_output = json.loads(self.openstack(
- 'volume type show -f json --encryption-type ' + encryption_type))
+ cmd_output = self.openstack(
+ 'volume type show --encryption-type ' + encryption_type,
+ parse_output=True,
+ )
expected = {'provider': 'LuksEncryptor',
'cipher': 'aes-xts-plain64',
'key_size': 256,
@@ -196,10 +213,11 @@ class VolumeTypeTests(common.BaseVolumeTests):
for attr, value in expected.items():
self.assertEqual(value, cmd_output['encryption'][attr])
# test set new encryption type
- cmd_output = json.loads(self.openstack(
- 'volume type create -f json --private ' +
+ cmd_output = self.openstack(
+ 'volume type create --private ' +
name,
- ))
+ parse_output=True,
+ )
self.addCleanup(
self.openstack,
'volume type delete ' + name,
@@ -215,9 +233,10 @@ class VolumeTypeTests(common.BaseVolumeTests):
name)
self.assertEqual('', raw_output)
- cmd_output = json.loads(self.openstack(
- 'volume type show -f json --encryption-type ' + name
- ))
+ cmd_output = self.openstack(
+ 'volume type show --encryption-type ' + name,
+ parse_output=True,
+ )
expected = {'provider': 'LuksEncryptor',
'cipher': 'aes-xts-plain64',
'key_size': 128,
@@ -229,9 +248,10 @@ class VolumeTypeTests(common.BaseVolumeTests):
'volume type unset --encryption-type ' + name
)
self.assertEqual('', raw_output)
- cmd_output = json.loads(self.openstack(
- 'volume type show -f json --encryption-type ' + name
- ))
+ cmd_output = self.openstack(
+ 'volume type show --encryption-type ' + name,
+ parse_output=True,
+ )
self.assertEqual({}, cmd_output['encryption'])
# test delete encryption type
raw_output = self.openstack('volume type delete ' + encryption_type)
diff --git a/openstackclient/tests/functional/volume/v3/test_qos.py b/openstackclient/tests/functional/volume/v3/test_qos.py
index fdfa6827..51578e14 100644
--- a/openstackclient/tests/functional/volume/v3/test_qos.py
+++ b/openstackclient/tests/functional/volume/v3/test_qos.py
@@ -10,7 +10,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import json
import uuid
from openstackclient.tests.functional.volume.v3 import common
@@ -22,29 +21,32 @@ class QosTests(common.BaseVolumeTests):
def test_volume_qos_create_delete_list(self):
"""Test create, list, delete multiple"""
name1 = uuid.uuid4().hex
- cmd_output = json.loads(self.openstack(
- 'volume qos create -f json ' +
- name1
- ))
+ cmd_output = self.openstack(
+ 'volume qos create ' +
+ name1,
+ parse_output=True,
+ )
self.assertEqual(
name1,
cmd_output['name']
)
name2 = uuid.uuid4().hex
- cmd_output = json.loads(self.openstack(
- 'volume qos create -f json ' +
- name2
- ))
+ cmd_output = self.openstack(
+ 'volume qos create ' +
+ name2,
+ parse_output=True,
+ )
self.assertEqual(
name2,
cmd_output['name']
)
# Test list
- cmd_output = json.loads(self.openstack(
- 'volume qos list -f json'
- ))
+ cmd_output = self.openstack(
+ 'volume qos list',
+ parse_output=True,
+ )
names = [x["Name"] for x in cmd_output]
self.assertIn(name1, names)
self.assertIn(name2, names)
@@ -57,12 +59,13 @@ class QosTests(common.BaseVolumeTests):
"""Tests create volume qos, set, unset, show, delete"""
name = uuid.uuid4().hex
- cmd_output = json.loads(self.openstack(
- 'volume qos create -f json ' +
+ cmd_output = self.openstack(
+ 'volume qos create ' +
'--consumer front-end '
'--property Alpha=a ' +
- name
- ))
+ name,
+ parse_output=True,
+ )
self.addCleanup(self.openstack, 'volume qos delete ' + name)
self.assertEqual(
name,
@@ -88,10 +91,11 @@ class QosTests(common.BaseVolumeTests):
self.assertOutput('', raw_output)
# Test volume qos show
- cmd_output = json.loads(self.openstack(
- 'volume qos show -f json ' +
- name
- ))
+ cmd_output = self.openstack(
+ 'volume qos show ' +
+ name,
+ parse_output=True,
+ )
self.assertEqual(
name,
cmd_output['name']
@@ -109,10 +113,11 @@ class QosTests(common.BaseVolumeTests):
)
self.assertOutput('', raw_output)
- cmd_output = json.loads(self.openstack(
- 'volume qos show -f json ' +
- name
- ))
+ cmd_output = self.openstack(
+ 'volume qos show ' +
+ name,
+ parse_output=True,
+ )
self.assertEqual(
name,
cmd_output['name']
@@ -125,10 +130,11 @@ class QosTests(common.BaseVolumeTests):
def test_volume_qos_asso_disasso(self):
"""Tests associate and disassociate qos with volume type"""
vol_type1 = uuid.uuid4().hex
- cmd_output = json.loads(self.openstack(
- 'volume type create -f json ' +
- vol_type1
- ))
+ cmd_output = self.openstack(
+ 'volume type create ' +
+ vol_type1,
+ parse_output=True,
+ )
self.assertEqual(
vol_type1,
cmd_output['name']
@@ -136,10 +142,11 @@ class QosTests(common.BaseVolumeTests):
self.addCleanup(self.openstack, 'volume type delete ' + vol_type1)
vol_type2 = uuid.uuid4().hex
- cmd_output = json.loads(self.openstack(
- 'volume type create -f json ' +
- vol_type2
- ))
+ cmd_output = self.openstack(
+ 'volume type create ' +
+ vol_type2,
+ parse_output=True,
+ )
self.assertEqual(
vol_type2,
cmd_output['name']
@@ -147,10 +154,11 @@ class QosTests(common.BaseVolumeTests):
self.addCleanup(self.openstack, 'volume type delete ' + vol_type2)
name = uuid.uuid4().hex
- cmd_output = json.loads(self.openstack(
- 'volume qos create -f json ' +
- name
- ))
+ cmd_output = self.openstack(
+ 'volume qos create ' +
+ name,
+ parse_output=True,
+ )
self.assertEqual(
name,
cmd_output['name']
@@ -169,10 +177,11 @@ class QosTests(common.BaseVolumeTests):
)
self.assertOutput('', raw_output)
- cmd_output = json.loads(self.openstack(
- 'volume qos show -f json ' +
- name
- ))
+ cmd_output = self.openstack(
+ 'volume qos show ' +
+ name,
+ parse_output=True,
+ )
types = cmd_output["associations"]
self.assertIn(vol_type1, types)
self.assertIn(vol_type2, types)
@@ -184,10 +193,11 @@ class QosTests(common.BaseVolumeTests):
' ' + name
)
self.assertOutput('', raw_output)
- cmd_output = json.loads(self.openstack(
- 'volume qos show -f json ' +
- name
- ))
+ cmd_output = self.openstack(
+ 'volume qos show ' +
+ name,
+ parse_output=True,
+ )
types = cmd_output["associations"]
self.assertNotIn(vol_type1, types)
self.assertIn(vol_type2, types)
@@ -198,10 +208,11 @@ class QosTests(common.BaseVolumeTests):
name + ' ' + vol_type1
)
self.assertOutput('', raw_output)
- cmd_output = json.loads(self.openstack(
- 'volume qos show -f json ' +
- name
- ))
+ cmd_output = self.openstack(
+ 'volume qos show ' +
+ name,
+ parse_output=True,
+ )
types = cmd_output["associations"]
self.assertIn(vol_type1, types)
self.assertIn(vol_type2, types)
@@ -211,8 +222,9 @@ class QosTests(common.BaseVolumeTests):
'--all ' + name
)
self.assertOutput('', raw_output)
- cmd_output = json.loads(self.openstack(
- 'volume qos show -f json ' +
- name
- ))
+ cmd_output = self.openstack(
+ 'volume qos show ' +
+ name,
+ parse_output=True,
+ )
self.assertNotIn("associations", cmd_output.keys())
diff --git a/openstackclient/tests/functional/volume/v3/test_transfer_request.py b/openstackclient/tests/functional/volume/v3/test_transfer_request.py
index 1bbfedc9..449fa08e 100644
--- a/openstackclient/tests/functional/volume/v3/test_transfer_request.py
+++ b/openstackclient/tests/functional/volume/v3/test_transfer_request.py
@@ -10,7 +10,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import json
import uuid
from openstackclient.tests.functional.volume.v3 import common
@@ -26,11 +25,12 @@ class TransferRequestTests(common.BaseVolumeTests):
xfer_name = uuid.uuid4().hex
# create a volume
- cmd_output = json.loads(self.openstack(
- 'volume create -f json ' +
+ cmd_output = self.openstack(
+ 'volume create ' +
'--size 1 ' +
- volume_name
- ))
+ volume_name,
+ parse_output=True,
+ )
self.assertEqual(volume_name, cmd_output['name'])
self.addCleanup(
self.openstack,
@@ -42,12 +42,12 @@ class TransferRequestTests(common.BaseVolumeTests):
# create volume transfer request for the volume
# and get the auth_key of the new transfer request
- cmd_output = json.loads(self.openstack(
+ cmd_output = self.openstack(
'--os-volume-api-version ' + self.API_VERSION + ' ' +
- 'volume transfer request create -f json ' +
- ' --name ' + xfer_name + ' ' +
- volume_name
- ))
+ 'volume transfer request create ' +
+ ' --name ' + xfer_name + ' ' + volume_name,
+ parse_output=True,
+ )
self.assertEqual(xfer_name, cmd_output['name'])
xfer_id = cmd_output['id']
auth_key = cmd_output['auth_key']
@@ -55,12 +55,12 @@ class TransferRequestTests(common.BaseVolumeTests):
self.wait_for_status("volume", volume_name, "awaiting-transfer")
# accept the volume transfer request
- cmd_output = json.loads(self.openstack(
+ cmd_output = self.openstack(
'--os-volume-api-version ' + self.API_VERSION + ' ' +
- 'volume transfer request accept -f json ' +
- '--auth-key ' + auth_key + ' ' +
- xfer_id
- ))
+ 'volume transfer request accept ' +
+ '--auth-key ' + auth_key + ' ' + xfer_id,
+ parse_output=True,
+ )
self.assertEqual(xfer_name, cmd_output['name'])
self.wait_for_status("volume", volume_name, "available")
@@ -69,11 +69,11 @@ class TransferRequestTests(common.BaseVolumeTests):
xfer_name = uuid.uuid4().hex
# create a volume
- cmd_output = json.loads(self.openstack(
- 'volume create -f json ' +
- '--size 1 ' +
- volume_name
- ))
+ cmd_output = self.openstack(
+ 'volume create ' +
+ '--size 1 ' + volume_name,
+ parse_output=True,
+ )
self.assertEqual(volume_name, cmd_output['name'])
self.addCleanup(
self.openstack,
@@ -83,29 +83,31 @@ class TransferRequestTests(common.BaseVolumeTests):
)
self.wait_for_status("volume", volume_name, "available")
- cmd_output = json.loads(self.openstack(
+ cmd_output = self.openstack(
'--os-volume-api-version ' + self.API_VERSION + ' ' +
- 'volume transfer request create -f json ' +
- ' --name ' + xfer_name + ' ' +
- volume_name
- ))
+ 'volume transfer request create ' +
+ ' --name ' + xfer_name + ' ' + volume_name,
+ parse_output=True,
+ )
self.assertEqual(xfer_name, cmd_output['name'])
xfer_id = cmd_output['id']
auth_key = cmd_output['auth_key']
self.assertTrue(auth_key)
self.wait_for_status("volume", volume_name, "awaiting-transfer")
- cmd_output = json.loads(self.openstack(
+ cmd_output = self.openstack(
'--os-volume-api-version ' + self.API_VERSION + ' ' +
- 'volume transfer request list -f json'
- ))
+ 'volume transfer request list',
+ parse_output=True,
+ )
self.assertIn(xfer_name, [req['Name'] for req in cmd_output])
- cmd_output = json.loads(self.openstack(
+ cmd_output = self.openstack(
'--os-volume-api-version ' + self.API_VERSION + ' ' +
- 'volume transfer request show -f json ' +
- xfer_id
- ))
+ 'volume transfer request show ' +
+ xfer_id,
+ parse_output=True,
+ )
self.assertEqual(xfer_name, cmd_output['name'])
# NOTE(dtroyer): We need to delete the transfer request to allow the
diff --git a/openstackclient/tests/functional/volume/v3/test_volume.py b/openstackclient/tests/functional/volume/v3/test_volume.py
index c1b45e2f..8a394e75 100644
--- a/openstackclient/tests/functional/volume/v3/test_volume.py
+++ b/openstackclient/tests/functional/volume/v3/test_volume.py
@@ -10,7 +10,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import json
import uuid
from openstackclient.tests.functional.volume.v3 import common
@@ -22,22 +21,20 @@ class VolumeTests(common.BaseVolumeTests):
def test_volume_delete(self):
"""Test create, delete multiple"""
name1 = uuid.uuid4().hex
- cmd_output = json.loads(self.openstack(
- 'volume create -f json ' +
- '--size 1 ' +
- name1
- ))
+ cmd_output = self.openstack(
+ 'volume create --size 1 ' + name1,
+ parse_output=True,
+ )
self.assertEqual(
1,
cmd_output["size"],
)
name2 = uuid.uuid4().hex
- cmd_output = json.loads(self.openstack(
- 'volume create -f json ' +
- '--size 2 ' +
- name2
- ))
+ cmd_output = self.openstack(
+ 'volume create --size 2 ' + name2,
+ parse_output=True,
+ )
self.assertEqual(
2,
cmd_output["size"],
@@ -51,11 +48,10 @@ class VolumeTests(common.BaseVolumeTests):
def test_volume_list(self):
"""Test create, list filter"""
name1 = uuid.uuid4().hex
- cmd_output = json.loads(self.openstack(
- 'volume create -f json ' +
- '--size 1 ' +
- name1
- ))
+ cmd_output = self.openstack(
+ 'volume create --size 1 ' + name1,
+ parse_output=True,
+ )
self.addCleanup(self.openstack, 'volume delete ' + name1)
self.assertEqual(
1,
@@ -64,11 +60,10 @@ class VolumeTests(common.BaseVolumeTests):
self.wait_for_status("volume", name1, "available")
name2 = uuid.uuid4().hex
- cmd_output = json.loads(self.openstack(
- 'volume create -f json ' +
- '--size 2 ' +
- name2
- ))
+ cmd_output = self.openstack(
+ 'volume create --size 2 ' + name2,
+ parse_output=True,
+ )
self.addCleanup(self.openstack, 'volume delete ' + name2)
self.assertEqual(
2,
@@ -83,19 +78,19 @@ class VolumeTests(common.BaseVolumeTests):
self.assertOutput('', raw_output)
# Test list --long
- cmd_output = json.loads(self.openstack(
- 'volume list -f json ' +
- '--long'
- ))
+ cmd_output = self.openstack(
+ 'volume list --long',
+ parse_output=True,
+ )
names = [x["Name"] for x in cmd_output]
self.assertIn(name1, names)
self.assertIn(name2, names)
# Test list --status
- cmd_output = json.loads(self.openstack(
- 'volume list -f json ' +
- '--status error'
- ))
+ cmd_output = self.openstack(
+ 'volume list --status error',
+ parse_output=True,
+ )
names = [x["Name"] for x in cmd_output]
self.assertNotIn(name1, names)
self.assertIn(name2, names)
@@ -107,13 +102,14 @@ class VolumeTests(common.BaseVolumeTests):
"""Tests create volume, set, unset, show, delete"""
name = uuid.uuid4().hex
new_name = name + "_"
- cmd_output = json.loads(self.openstack(
- 'volume create -f json ' +
+ cmd_output = self.openstack(
+ 'volume create ' +
'--size 1 ' +
'--description aaaa ' +
'--property Alpha=a ' +
- name
- ))
+ name,
+ parse_output=True,
+ )
self.addCleanup(self.openstack, 'volume delete ' + new_name)
self.assertEqual(
name,
@@ -154,10 +150,10 @@ class VolumeTests(common.BaseVolumeTests):
self.assertOutput('', raw_output)
self.wait_for_status("volume", new_name, "available")
- cmd_output = json.loads(self.openstack(
- 'volume show -f json ' +
- new_name
- ))
+ cmd_output = self.openstack(
+ 'volume show ' + new_name,
+ parse_output=True,
+ )
self.assertEqual(
new_name,
cmd_output["name"],
@@ -192,10 +188,10 @@ class VolumeTests(common.BaseVolumeTests):
)
self.assertOutput('', raw_output)
- cmd_output = json.loads(self.openstack(
- 'volume show -f json ' +
- new_name
- ))
+ cmd_output = self.openstack(
+ 'volume show ' + new_name,
+ parse_output=True,
+ )
self.assertEqual(
{'Gamma': 'c'},
cmd_output["properties"],
@@ -211,30 +207,31 @@ class VolumeTests(common.BaseVolumeTests):
volume_name = uuid.uuid4().hex
snapshot_name = uuid.uuid4().hex
# Make a snapshot
- cmd_output = json.loads(self.openstack(
- 'volume create -f json ' +
- '--size 1 ' +
- volume_name
- ))
+ cmd_output = self.openstack(
+ 'volume create --size 1 ' + volume_name,
+ parse_output=True,
+ )
self.wait_for_status("volume", volume_name, "available")
self.assertEqual(
volume_name,
cmd_output["name"],
)
- cmd_output = json.loads(self.openstack(
- 'volume snapshot create -f json ' +
+ cmd_output = self.openstack(
+ 'volume snapshot create ' +
snapshot_name +
- ' --volume ' + volume_name
- ))
+ ' --volume ' + volume_name,
+ parse_output=True,
+ )
self.wait_for_status("volume snapshot", snapshot_name, "available")
name = uuid.uuid4().hex
# Create volume from snapshot
- cmd_output = json.loads(self.openstack(
- 'volume create -f json ' +
+ cmd_output = self.openstack(
+ 'volume create ' +
'--snapshot ' + snapshot_name +
- ' ' + name
- ))
+ ' ' + name,
+ parse_output=True,
+ )
self.addCleanup(self.openstack, 'volume delete ' + name)
self.addCleanup(self.openstack, 'volume delete ' + volume_name)
self.assertEqual(
@@ -254,11 +251,10 @@ class VolumeTests(common.BaseVolumeTests):
def test_volume_list_backward_compatibility(self):
"""Test backward compatibility of list command"""
name1 = uuid.uuid4().hex
- cmd_output = json.loads(self.openstack(
- 'volume create -f json ' +
- '--size 1 ' +
- name1
- ))
+ cmd_output = self.openstack(
+ 'volume create --size 1 ' + name1,
+ parse_output=True,
+ )
self.addCleanup(self.openstack, 'volume delete ' + name1)
self.assertEqual(
1,
@@ -267,17 +263,17 @@ class VolumeTests(common.BaseVolumeTests):
self.wait_for_status("volume", name1, "available")
# Test list -c "Display Name"
- cmd_output = json.loads(self.openstack(
- 'volume list -f json ' +
- '-c "Display Name"'
- ))
+ cmd_output = self.openstack(
+ 'volume list -c "Display Name"',
+ parse_output=True,
+ )
for each_volume in cmd_output:
self.assertIn('Display Name', each_volume)
# Test list -c "Name"
- cmd_output = json.loads(self.openstack(
- 'volume list -f json ' +
- '-c "Name"'
- ))
+ cmd_output = self.openstack(
+ 'volume list -c "Name"',
+ parse_output=True,
+ )
for each_volume in cmd_output:
self.assertIn('Name', each_volume)
diff --git a/openstackclient/tests/functional/volume/v3/test_volume_snapshot.py b/openstackclient/tests/functional/volume/v3/test_volume_snapshot.py
index edfdafb6..7b2d88d0 100644
--- a/openstackclient/tests/functional/volume/v3/test_volume_snapshot.py
+++ b/openstackclient/tests/functional/volume/v3/test_volume_snapshot.py
@@ -10,7 +10,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import json
import uuid
from openstackclient.tests.functional.volume.v3 import common
@@ -25,11 +24,12 @@ class VolumeSnapshotTests(common.BaseVolumeTests):
def setUpClass(cls):
super(VolumeSnapshotTests, cls).setUpClass()
# create a volume for all tests to create snapshot
- cmd_output = json.loads(cls.openstack(
- 'volume create -f json ' +
+ cmd_output = cls.openstack(
+ 'volume create ' +
'--size 1 ' +
- cls.VOLLY
- ))
+ cls.VOLLY,
+ parse_output=True,
+ )
cls.wait_for_status('volume', cls.VOLLY, 'available')
cls.VOLUME_ID = cmd_output['id']
@@ -46,22 +46,24 @@ class VolumeSnapshotTests(common.BaseVolumeTests):
def test_volume_snapshot_delete(self):
"""Test create, delete multiple"""
name1 = uuid.uuid4().hex
- cmd_output = json.loads(self.openstack(
- 'volume snapshot create -f json ' +
+ cmd_output = self.openstack(
+ 'volume snapshot create ' +
name1 +
- ' --volume ' + self.VOLLY
- ))
+ ' --volume ' + self.VOLLY,
+ parse_output=True,
+ )
self.assertEqual(
name1,
cmd_output["name"],
)
name2 = uuid.uuid4().hex
- cmd_output = json.loads(self.openstack(
- 'volume snapshot create -f json ' +
+ cmd_output = self.openstack(
+ 'volume snapshot create ' +
name2 +
- ' --volume ' + self.VOLLY
- ))
+ ' --volume ' + self.VOLLY,
+ parse_output=True,
+ )
self.assertEqual(
name2,
cmd_output["name"],
@@ -79,11 +81,12 @@ class VolumeSnapshotTests(common.BaseVolumeTests):
def test_volume_snapshot_list(self):
"""Test create, list filter"""
name1 = uuid.uuid4().hex
- cmd_output = json.loads(self.openstack(
- 'volume snapshot create -f json ' +
+ cmd_output = self.openstack(
+ 'volume snapshot create ' +
name1 +
- ' --volume ' + self.VOLLY
- ))
+ ' --volume ' + self.VOLLY,
+ parse_output=True,
+ )
self.addCleanup(self.wait_for_delete, 'volume snapshot', name1)
self.addCleanup(self.openstack, 'volume snapshot delete ' + name1)
self.assertEqual(
@@ -101,11 +104,12 @@ class VolumeSnapshotTests(common.BaseVolumeTests):
self.wait_for_status('volume snapshot', name1, 'available')
name2 = uuid.uuid4().hex
- cmd_output = json.loads(self.openstack(
- 'volume snapshot create -f json ' +
+ cmd_output = self.openstack(
+ 'volume snapshot create ' +
name2 +
- ' --volume ' + self.VOLLY
- ))
+ ' --volume ' + self.VOLLY,
+ parse_output=True,
+ )
self.addCleanup(self.wait_for_delete, 'volume snapshot', name2)
self.addCleanup(self.openstack, 'volume snapshot delete ' + name2)
self.assertEqual(
@@ -129,29 +133,32 @@ class VolumeSnapshotTests(common.BaseVolumeTests):
self.assertOutput('', raw_output)
# Test list --long, --status
- cmd_output = json.loads(self.openstack(
- 'volume snapshot list -f json ' +
+ cmd_output = self.openstack(
+ 'volume snapshot list ' +
'--long ' +
- '--status error'
- ))
+ '--status error',
+ parse_output=True,
+ )
names = [x["Name"] for x in cmd_output]
self.assertNotIn(name1, names)
self.assertIn(name2, names)
# Test list --volume
- cmd_output = json.loads(self.openstack(
- 'volume snapshot list -f json ' +
- '--volume ' + self.VOLLY
- ))
+ cmd_output = self.openstack(
+ 'volume snapshot list ' +
+ '--volume ' + self.VOLLY,
+ parse_output=True,
+ )
names = [x["Name"] for x in cmd_output]
self.assertIn(name1, names)
self.assertIn(name2, names)
# Test list --name
- cmd_output = json.loads(self.openstack(
- 'volume snapshot list -f json ' +
- '--name ' + name1
- ))
+ cmd_output = self.openstack(
+ 'volume snapshot list ' +
+ '--name ' + name1,
+ parse_output=True,
+ )
names = [x["Name"] for x in cmd_output]
self.assertIn(name1, names)
self.assertNotIn(name2, names)
@@ -160,13 +167,14 @@ class VolumeSnapshotTests(common.BaseVolumeTests):
"""Test create, set, unset, show, delete volume snapshot"""
name = uuid.uuid4().hex
new_name = name + "_"
- cmd_output = json.loads(self.openstack(
- 'volume snapshot create -f json ' +
+ cmd_output = self.openstack(
+ 'volume snapshot create ' +
'--volume ' + self.VOLLY +
' --description aaaa ' +
'--property Alpha=a ' +
- name
- ))
+ name,
+ parse_output=True,
+ )
self.addCleanup(self.wait_for_delete, 'volume snapshot', new_name)
self.addCleanup(self.openstack, 'volume snapshot delete ' + new_name)
self.assertEqual(
@@ -199,10 +207,11 @@ class VolumeSnapshotTests(common.BaseVolumeTests):
self.assertOutput('', raw_output)
# Show snapshot set result
- cmd_output = json.loads(self.openstack(
- 'volume snapshot show -f json ' +
- new_name
- ))
+ cmd_output = self.openstack(
+ 'volume snapshot show ' +
+ new_name,
+ parse_output=True,
+ )
self.assertEqual(
new_name,
cmd_output["name"],
@@ -228,10 +237,11 @@ class VolumeSnapshotTests(common.BaseVolumeTests):
)
self.assertOutput('', raw_output)
- cmd_output = json.loads(self.openstack(
- 'volume snapshot show -f json ' +
- new_name
- ))
+ cmd_output = self.openstack(
+ 'volume snapshot show ' +
+ new_name,
+ parse_output=True,
+ )
self.assertEqual(
{'Beta': 'b'},
cmd_output["properties"],
@@ -244,10 +254,11 @@ class VolumeSnapshotTests(common.BaseVolumeTests):
new_name,
)
self.assertOutput('', raw_output)
- cmd_output = json.loads(self.openstack(
- 'volume snapshot show -f json ' +
- new_name
- ))
+ cmd_output = self.openstack(
+ 'volume snapshot show ' +
+ new_name,
+ parse_output=True,
+ )
self.assertNotIn(
{'Beta': 'b'},
cmd_output["properties"],
diff --git a/openstackclient/tests/functional/volume/v3/test_volume_type.py b/openstackclient/tests/functional/volume/v3/test_volume_type.py
index 79d40969..18e46c52 100644
--- a/openstackclient/tests/functional/volume/v3/test_volume_type.py
+++ b/openstackclient/tests/functional/volume/v3/test_volume_type.py
@@ -10,7 +10,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import json
import time
import uuid
@@ -22,36 +21,40 @@ class VolumeTypeTests(common.BaseVolumeTests):
def test_volume_type_create_list(self):
name = uuid.uuid4().hex
- cmd_output = json.loads(self.openstack(
- 'volume type create -f json --private ' +
+ cmd_output = self.openstack(
+ 'volume type create --private ' +
name,
- ))
+ parse_output=True,
+ )
self.addCleanup(
self.openstack,
'volume type delete ' + name,
)
self.assertEqual(name, cmd_output['name'])
- cmd_output = json.loads(self.openstack(
- 'volume type show -f json %s' % name
- ))
+ cmd_output = self.openstack(
+ 'volume type show %s' % name,
+ parse_output=True,
+ )
self.assertEqual(name, cmd_output['name'])
- cmd_output = json.loads(self.openstack('volume type list -f json'))
+ cmd_output = self.openstack('volume type list', parse_output=True)
self.assertIn(name, [t['Name'] for t in cmd_output])
- cmd_output = json.loads(self.openstack(
- 'volume type list -f json --default'
- ))
+ cmd_output = self.openstack(
+ 'volume type list --default',
+ parse_output=True,
+ )
self.assertEqual(1, len(cmd_output))
self.assertEqual('lvmdriver-1', cmd_output[0]['Name'])
def test_volume_type_set_unset_properties(self):
name = uuid.uuid4().hex
- cmd_output = json.loads(self.openstack(
- 'volume type create -f json --private ' +
+ cmd_output = self.openstack(
+ 'volume type create --private ' +
name,
- ))
+ parse_output=True,
+ )
self.addCleanup(
self.openstack,
'volume type delete ' + name
@@ -62,26 +65,29 @@ class VolumeTypeTests(common.BaseVolumeTests):
'volume type set --property a=b --property c=d %s' % name
)
self.assertEqual("", raw_output)
- cmd_output = json.loads(self.openstack(
- 'volume type show -f json %s' % name
- ))
+ cmd_output = self.openstack(
+ 'volume type show %s' % name,
+ parse_output=True,
+ )
self.assertEqual({'a': 'b', 'c': 'd'}, cmd_output['properties'])
raw_output = self.openstack(
'volume type unset --property a %s' % name
)
self.assertEqual("", raw_output)
- cmd_output = json.loads(self.openstack(
- 'volume type show -f json %s' % name
- ))
+ cmd_output = self.openstack(
+ 'volume type show %s' % name,
+ parse_output=True,
+ )
self.assertEqual({'c': 'd'}, cmd_output['properties'])
def test_volume_type_set_unset_multiple_properties(self):
name = uuid.uuid4().hex
- cmd_output = json.loads(self.openstack(
- 'volume type create -f json --private ' +
+ cmd_output = self.openstack(
+ 'volume type create --private ' +
name,
- ))
+ parse_output=True,
+ )
self.addCleanup(
self.openstack,
'volume type delete ' + name
@@ -92,26 +98,29 @@ class VolumeTypeTests(common.BaseVolumeTests):
'volume type set --property a=b --property c=d %s' % name
)
self.assertEqual("", raw_output)
- cmd_output = json.loads(self.openstack(
- 'volume type show -f json %s' % name
- ))
+ cmd_output = self.openstack(
+ 'volume type show %s' % name,
+ parse_output=True,
+ )
self.assertEqual({'a': 'b', 'c': 'd'}, cmd_output['properties'])
raw_output = self.openstack(
'volume type unset --property a --property c %s' % name
)
self.assertEqual("", raw_output)
- cmd_output = json.loads(self.openstack(
- 'volume type show -f json %s' % name
- ))
+ cmd_output = self.openstack(
+ 'volume type show %s' % name,
+ parse_output=True,
+ )
self.assertEqual({}, cmd_output['properties'])
def test_volume_type_set_unset_project(self):
name = uuid.uuid4().hex
- cmd_output = json.loads(self.openstack(
- 'volume type create -f json --private ' +
+ cmd_output = self.openstack(
+ 'volume type create --private ' +
name,
- ))
+ parse_output=True,
+ )
self.addCleanup(
self.openstack,
'volume type delete ' + name
@@ -139,21 +148,23 @@ class VolumeTypeTests(common.BaseVolumeTests):
raw_output = self.openstack(cmd)
self.assertOutput('', raw_output)
- # NOTE: Add some basic funtional tests with the old format to
+ # NOTE: Add some basic functional tests with the old format to
# make sure the command works properly, need to change
# these to new test format when beef up all tests for
- # volume tye commands.
+ # volume type commands.
def test_encryption_type(self):
name = uuid.uuid4().hex
encryption_type = uuid.uuid4().hex
# test create new encryption type
- cmd_output = json.loads(self.openstack(
- 'volume type create -f json '
+ cmd_output = self.openstack(
+ 'volume type create '
'--encryption-provider LuksEncryptor '
'--encryption-cipher aes-xts-plain64 '
'--encryption-key-size 128 '
'--encryption-control-location front-end ' +
- encryption_type))
+ encryption_type,
+ parse_output=True,
+ )
expected = {'provider': 'LuksEncryptor',
'cipher': 'aes-xts-plain64',
'key_size': 128,
@@ -161,8 +172,10 @@ class VolumeTypeTests(common.BaseVolumeTests):
for attr, value in expected.items():
self.assertEqual(value, cmd_output['encryption'][attr])
# test show encryption type
- cmd_output = json.loads(self.openstack(
- 'volume type show -f json --encryption-type ' + encryption_type))
+ cmd_output = self.openstack(
+ 'volume type show --encryption-type ' + encryption_type,
+ parse_output=True,
+ )
expected = {'provider': 'LuksEncryptor',
'cipher': 'aes-xts-plain64',
'key_size': 128,
@@ -170,8 +183,10 @@ class VolumeTypeTests(common.BaseVolumeTests):
for attr, value in expected.items():
self.assertEqual(value, cmd_output['encryption'][attr])
# test list encryption type
- cmd_output = json.loads(self.openstack(
- 'volume type list -f json --encryption-type'))
+ cmd_output = self.openstack(
+ 'volume type list --encryption-type',
+ parse_output=True,
+ )
encryption_output = [t['Encryption'] for t in cmd_output
if t['Name'] == encryption_type][0]
expected = {'provider': 'LuksEncryptor',
@@ -187,8 +202,10 @@ class VolumeTypeTests(common.BaseVolumeTests):
'--encryption-control-location back-end ' +
encryption_type)
self.assertEqual('', raw_output)
- cmd_output = json.loads(self.openstack(
- 'volume type show -f json --encryption-type ' + encryption_type))
+ cmd_output = self.openstack(
+ 'volume type show --encryption-type ' + encryption_type,
+ parse_output=True,
+ )
expected = {'provider': 'LuksEncryptor',
'cipher': 'aes-xts-plain64',
'key_size': 256,
@@ -196,10 +213,11 @@ class VolumeTypeTests(common.BaseVolumeTests):
for attr, value in expected.items():
self.assertEqual(value, cmd_output['encryption'][attr])
# test set new encryption type
- cmd_output = json.loads(self.openstack(
- 'volume type create -f json --private ' +
+ cmd_output = self.openstack(
+ 'volume type create --private ' +
name,
- ))
+ parse_output=True,
+ )
self.addCleanup(
self.openstack,
'volume type delete ' + name,
@@ -215,9 +233,10 @@ class VolumeTypeTests(common.BaseVolumeTests):
name)
self.assertEqual('', raw_output)
- cmd_output = json.loads(self.openstack(
- 'volume type show -f json --encryption-type ' + name
- ))
+ cmd_output = self.openstack(
+ 'volume type show --encryption-type ' + name,
+ parse_output=True,
+ )
expected = {'provider': 'LuksEncryptor',
'cipher': 'aes-xts-plain64',
'key_size': 128,
@@ -229,9 +248,10 @@ class VolumeTypeTests(common.BaseVolumeTests):
'volume type unset --encryption-type ' + name
)
self.assertEqual('', raw_output)
- cmd_output = json.loads(self.openstack(
- 'volume type show -f json --encryption-type ' + name
- ))
+ cmd_output = self.openstack(
+ 'volume type show --encryption-type ' + name,
+ parse_output=True,
+ )
self.assertEqual({}, cmd_output['encryption'])
# test delete encryption type
raw_output = self.openstack('volume type delete ' + encryption_type)
diff --git a/openstackclient/tests/unit/common/test_availability_zone.py b/openstackclient/tests/unit/common/test_availability_zone.py
index e5348ec3..096038ca 100644
--- a/openstackclient/tests/unit/common/test_availability_zone.py
+++ b/openstackclient/tests/unit/common/test_availability_zone.py
@@ -78,7 +78,7 @@ def _build_network_az_datalist(network_az, long_datalist=False):
class TestAvailabilityZone(utils.TestCommand):
def setUp(self):
- super(TestAvailabilityZone, self).setUp()
+ super().setUp()
compute_client = compute_fakes.FakeComputev2Client(
endpoint=fakes.AUTH_URL,
@@ -113,8 +113,7 @@ class TestAvailabilityZoneList(TestAvailabilityZone):
compute_azs = \
compute_fakes.FakeAvailabilityZone.create_availability_zones()
- volume_azs = \
- volume_fakes.FakeAvailabilityZone.create_availability_zones(count=1)
+ volume_azs = volume_fakes.create_availability_zones(count=1)
network_azs = network_fakes.create_availability_zones()
short_columnslist = ('Zone Name', 'Zone Status')
@@ -128,7 +127,7 @@ class TestAvailabilityZoneList(TestAvailabilityZone):
)
def setUp(self):
- super(TestAvailabilityZoneList, self).setUp()
+ super().setUp()
self.compute_azs_mock.list.return_value = self.compute_azs
self.volume_azs_mock.list.return_value = self.volume_azs
diff --git a/openstackclient/tests/unit/common/test_configuration.py b/openstackclient/tests/unit/common/test_configuration.py
index bdd3debf..148228ec 100644
--- a/openstackclient/tests/unit/common/test_configuration.py
+++ b/openstackclient/tests/unit/common/test_configuration.py
@@ -35,11 +35,14 @@ class TestConfiguration(utils.TestCommand):
fakes.REGION_NAME,
)
- opts = [mock.Mock(secret=True, dest="password"),
- mock.Mock(secret=True, dest="token")]
+ opts = [
+ mock.Mock(secret=True, dest="password"),
+ mock.Mock(secret=True, dest="token"),
+ ]
- @mock.patch("keystoneauth1.loading.base.get_plugin_options",
- return_value=opts)
+ @mock.patch(
+ "keystoneauth1.loading.base.get_plugin_options", return_value=opts
+ )
def test_show(self, m_get_plugin_opts):
arglist = []
verifylist = [('mask', True)]
@@ -51,12 +54,14 @@ class TestConfiguration(utils.TestCommand):
self.assertEqual(self.columns, columns)
self.assertEqual(self.datalist, data)
- @mock.patch("keystoneauth1.loading.base.get_plugin_options",
- return_value=opts)
+ @mock.patch(
+ "keystoneauth1.loading.base.get_plugin_options", return_value=opts
+ )
def test_show_unmask(self, m_get_plugin_opts):
arglist = ['--unmask']
verifylist = [('mask', False)]
cmd = configuration.ShowConfiguration(self.app, None)
+
parsed_args = self.check_parser(cmd, arglist, verifylist)
columns, data = cmd.take_action(parsed_args)
@@ -71,15 +76,49 @@ class TestConfiguration(utils.TestCommand):
)
self.assertEqual(datalist, data)
- @mock.patch("keystoneauth1.loading.base.get_plugin_options",
- return_value=opts)
- def test_show_mask(self, m_get_plugin_opts):
+ @mock.patch(
+ "keystoneauth1.loading.base.get_plugin_options", return_value=opts
+ )
+ def test_show_mask_with_cloud_config(self, m_get_plugin_opts):
arglist = ['--mask']
verifylist = [('mask', True)]
+ self.app.client_manager.configuration_type = "cloud_config"
cmd = configuration.ShowConfiguration(self.app, None)
+
parsed_args = self.check_parser(cmd, arglist, verifylist)
columns, data = cmd.take_action(parsed_args)
self.assertEqual(self.columns, columns)
self.assertEqual(self.datalist, data)
+
+ @mock.patch(
+ "keystoneauth1.loading.base.get_plugin_options", return_value=opts
+ )
+ def test_show_mask_with_global_env(self, m_get_plugin_opts):
+ arglist = ['--mask']
+ verifylist = [('mask', True)]
+ self.app.client_manager.configuration_type = "global_env"
+ column_list = (
+ 'identity_api_version',
+ 'password',
+ 'region',
+ 'token',
+ 'username',
+ )
+ datalist = (
+ fakes.VERSION,
+ configuration.REDACTED,
+ fakes.REGION_NAME,
+ configuration.REDACTED,
+ fakes.USERNAME,
+ )
+
+ cmd = configuration.ShowConfiguration(self.app, None)
+
+ parsed_args = self.check_parser(cmd, arglist, verifylist)
+
+ columns, data = cmd.take_action(parsed_args)
+
+ self.assertEqual(column_list, columns)
+ self.assertEqual(datalist, data)
diff --git a/openstackclient/tests/unit/common/test_extension.py b/openstackclient/tests/unit/common/test_extension.py
index 5093cbbb..bd90b32d 100644
--- a/openstackclient/tests/unit/common/test_extension.py
+++ b/openstackclient/tests/unit/common/test_extension.py
@@ -26,7 +26,7 @@ from openstackclient.tests.unit.volume.v2 import fakes as volume_fakes
class TestExtension(utils.TestCommand):
def setUp(self):
- super(TestExtension, self).setUp()
+ super().setUp()
identity_client = identity_fakes.FakeIdentityv2Client(
endpoint=fakes.AUTH_URL,
@@ -66,13 +66,13 @@ class TestExtensionList(TestExtension):
long_columns = ('Name', 'Alias', 'Description', 'Namespace', 'Updated',
'Links')
- volume_extension = volume_fakes.FakeExtension.create_one_extension()
+ volume_extension = volume_fakes.create_one_extension()
identity_extension = identity_fakes.FakeExtension.create_one_extension()
compute_extension = compute_fakes.FakeExtension.create_one_extension()
network_extension = network_fakes.FakeExtension.create_one_extension()
def setUp(self):
- super(TestExtensionList, self).setUp()
+ super().setUp()
self.identity_extensions_mock.list.return_value = [
self.identity_extension]
@@ -310,7 +310,7 @@ class TestExtensionShow(TestExtension):
)
def setUp(self):
- super(TestExtensionShow, self).setUp()
+ super().setUp()
self.cmd = extension.ShowExtension(self.app, None)
diff --git a/openstackclient/tests/unit/common/test_limits.py b/openstackclient/tests/unit/common/test_limits.py
index d73db2cb..e3cdcf45 100644
--- a/openstackclient/tests/unit/common/test_limits.py
+++ b/openstackclient/tests/unit/common/test_limits.py
@@ -33,7 +33,7 @@ class TestComputeLimits(compute_fakes.TestComputev2):
]
def setUp(self):
- super(TestComputeLimits, self).setUp()
+ super().setUp()
self.app.client_manager.volume_endpoint_enabled = False
self.compute = self.app.client_manager.compute
@@ -87,7 +87,7 @@ class TestVolumeLimits(volume_fakes.TestVolume):
]
def setUp(self):
- super(TestVolumeLimits, self).setUp()
+ super().setUp()
self.app.client_manager.compute_endpoint_enabled = False
self.volume = self.app.client_manager.volume
diff --git a/openstackclient/tests/unit/common/test_project_purge.py b/openstackclient/tests/unit/common/test_project_purge.py
index 5199093c..26333d70 100644
--- a/openstackclient/tests/unit/common/test_project_purge.py
+++ b/openstackclient/tests/unit/common/test_project_purge.py
@@ -26,7 +26,7 @@ from openstackclient.tests.unit.volume.v2 import fakes as volume_fakes
class TestProjectPurgeInit(tests_utils.TestCommand):
def setUp(self):
- super(TestProjectPurgeInit, self).setUp()
+ super().setUp()
compute_client = compute_fakes.FakeComputev2Client(
endpoint=fakes.AUTH_URL,
token=fakes.AUTH_TOKEN,
@@ -71,12 +71,12 @@ class TestProjectPurge(TestProjectPurgeInit):
project = identity_fakes.FakeProject.create_one_project()
server = compute_fakes.FakeServer.create_one_server()
image = image_fakes.create_one_image()
- volume = volume_fakes.FakeVolume.create_one_volume()
- backup = volume_fakes.FakeBackup.create_one_backup()
- snapshot = volume_fakes.FakeSnapshot.create_one_snapshot()
+ volume = volume_fakes.create_one_volume()
+ backup = volume_fakes.create_one_backup()
+ snapshot = volume_fakes.create_one_snapshot()
def setUp(self):
- super(TestProjectPurge, self).setUp()
+ super().setUp()
self.projects_mock.get.return_value = self.project
self.projects_mock.delete.return_value = None
self.images_mock.list.return_value = [self.image]
diff --git a/openstackclient/tests/unit/common/test_quota.py b/openstackclient/tests/unit/common/test_quota.py
index 70fd1436..2470a96f 100644
--- a/openstackclient/tests/unit/common/test_quota.py
+++ b/openstackclient/tests/unit/common/test_quota.py
@@ -62,6 +62,9 @@ class TestQuota(compute_fakes.TestComputev2):
self.app.client_manager.volume.quota_classes
self.volume_quotas_class_mock.reset_mock()
+ self.app.client_manager.network = mock.Mock()
+ self.network_mock = self.app.client_manager.network
+
self.app.client_manager.auth_ref = mock.Mock()
self.app.client_manager.auth_ref.service_catalog = mock.Mock()
self.service_catalog_mock = \
@@ -173,12 +176,12 @@ class TestQuotaList(TestQuota):
)
self.volume_quotas = [
- volume_fakes.FakeQuota.create_one_vol_quota(),
- volume_fakes.FakeQuota.create_one_vol_quota(),
+ volume_fakes.create_one_vol_quota(),
+ volume_fakes.create_one_vol_quota(),
]
self.volume_default_quotas = [
- volume_fakes.FakeQuota.create_one_default_vol_quota(),
- volume_fakes.FakeQuota.create_one_default_vol_quota(),
+ volume_fakes.create_one_default_vol_quota(),
+ volume_fakes.create_one_default_vol_quota(),
]
self.volume = self.app.client_manager.volume
self.volume.quotas.defaults = mock.Mock(
@@ -276,6 +279,36 @@ class TestQuotaList(TestQuota):
self.assertEqual(
sorted(detailed_reference_data), sorted(ret_quotas))
+ def test_quota_list_details_volume(self):
+ detailed_quota = volume_fakes.create_one_detailed_quota()
+
+ detailed_column_header = (
+ 'Resource',
+ 'In Use',
+ 'Reserved',
+ 'Limit',
+ )
+ detailed_reference_data = (
+ self._get_detailed_reference_data(detailed_quota))
+
+ self.volume.quotas.get = mock.Mock(return_value=detailed_quota)
+
+ arglist = [
+ '--detail',
+ '--volume',
+ ]
+ verifylist = [
+ ('detail', True),
+ ('volume', True),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ columns, data = self.cmd.take_action(parsed_args)
+ ret_quotas = list(data)
+
+ self.assertEqual(detailed_column_header, columns)
+ self.assertEqual(sorted(detailed_reference_data), sorted(ret_quotas))
+
def test_quota_list_compute(self):
# Two projects with non-default quotas
self.compute.quotas.get = mock.Mock(
@@ -533,7 +566,7 @@ class TestQuotaList(TestQuota):
self.volume.quotas.get = mock.Mock(
side_effect=[
self.volume_quotas[0],
- volume_fakes.FakeQuota.create_one_default_vol_quota(),
+ volume_fakes.create_one_default_vol_quota(),
],
)
@@ -557,7 +590,7 @@ class TestQuotaList(TestQuota):
self.volume.quotas.get = mock.Mock(
side_effect=[
self.volume_quotas[0],
- volume_fakes.FakeQuota.create_one_default_vol_quota(),
+ volume_fakes.create_one_default_vol_quota(),
],
)
@@ -627,7 +660,6 @@ class TestQuotaSet(TestQuota):
loaded=True,
)
- self.network_mock = self.app.client_manager.network
self.network_mock.update_quota = mock.Mock()
self.cmd = quota.SetQuota(self.app, None)
@@ -951,19 +983,19 @@ class TestQuotaSet(TestQuota):
)
self.assertIsNone(result)
- def test_quota_set_with_check_limit(self):
+ def test_quota_set_with_no_force(self):
arglist = [
'--subnets', str(network_fakes.QUOTA['subnet']),
'--volumes', str(volume_fakes.QUOTA['volumes']),
'--cores', str(compute_fakes.core_num),
- '--check-limit',
+ '--no-force',
self.projects[0].name,
]
verifylist = [
('subnet', network_fakes.QUOTA['subnet']),
('volumes', volume_fakes.QUOTA['volumes']),
('cores', compute_fakes.core_num),
- ('check_limit', True),
+ ('force', False),
('project', self.projects[0].name),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
@@ -972,6 +1004,7 @@ class TestQuotaSet(TestQuota):
kwargs_compute = {
'cores': compute_fakes.core_num,
+ 'force': False,
}
kwargs_volume = {
'volumes': volume_fakes.QUOTA['volumes'],
@@ -998,7 +1031,7 @@ class TestQuotaSet(TestQuota):
class TestQuotaShow(TestQuota):
def setUp(self):
- super(TestQuotaShow, self).setUp()
+ super().setUp()
self.compute_quota = compute_fakes.FakeQuota.create_one_comp_quota()
self.compute_quotas_mock.get.return_value = self.compute_quota
@@ -1012,10 +1045,9 @@ class TestQuotaShow(TestQuota):
loaded=True,
)
- self.volume_quota = volume_fakes.FakeQuota.create_one_vol_quota()
+ self.volume_quota = volume_fakes.create_one_vol_quota()
self.volume_quotas_mock.get.return_value = self.volume_quota
- self.volume_default_quota = \
- volume_fakes.FakeQuota.create_one_default_vol_quota()
+ self.volume_default_quota = volume_fakes.create_one_default_vol_quota()
self.volume_quotas_mock.defaults.return_value = \
self.volume_default_quota
self.volume_quotas_class_mock.get.return_value = FakeQuotaResource(
@@ -1053,6 +1085,7 @@ class TestQuotaShow(TestQuota):
self.projects[0].name,
]
verifylist = [
+ ('service', 'all'),
('project', self.projects[0].name),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
@@ -1060,17 +1093,81 @@ class TestQuotaShow(TestQuota):
self.cmd.take_action(parsed_args)
self.compute_quotas_mock.get.assert_called_once_with(
- self.projects[0].id, detail=False
+ self.projects[0].id,
+ detail=False,
)
self.volume_quotas_mock.get.assert_called_once_with(
self.projects[0].id,
+ usage=False,
)
self.network.get_quota.assert_called_once_with(
- self.projects[0].id, details=False
+ self.projects[0].id,
+ details=False,
)
self.assertNotCalled(self.network.get_quota_default)
- def test_quota_show_with_default(self):
+ def test_quota_show__with_compute(self):
+ arglist = [
+ '--compute',
+ self.projects[0].name,
+ ]
+ verifylist = [
+ ('service', 'compute'),
+ ('project', self.projects[0].name),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ self.cmd.take_action(parsed_args)
+
+ self.compute_quotas_mock.get.assert_called_once_with(
+ self.projects[0].id,
+ detail=False,
+ )
+ self.volume_quotas_mock.get.assert_not_called()
+ self.network.get_quota.assert_not_called()
+
+ def test_quota_show__with_volume(self):
+ arglist = [
+ '--volume',
+ self.projects[0].name,
+ ]
+ verifylist = [
+ ('service', 'volume'),
+ ('project', self.projects[0].name),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ self.cmd.take_action(parsed_args)
+
+ self.compute_quotas_mock.get.assert_not_called()
+ self.volume_quotas_mock.get.assert_called_once_with(
+ self.projects[0].id,
+ usage=False,
+ )
+ self.network.get_quota.assert_not_called()
+
+ def test_quota_show__with_network(self):
+ arglist = [
+ '--network',
+ self.projects[0].name,
+ ]
+ verifylist = [
+ ('service', 'network'),
+ ('project', self.projects[0].name),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ self.cmd.take_action(parsed_args)
+
+ self.compute_quotas_mock.get.assert_not_called()
+ self.volume_quotas_mock.get.assert_not_called()
+ self.network.get_quota.assert_called_once_with(
+ self.projects[0].id,
+ details=False,
+ )
+ self.assertNotCalled(self.network.get_quota_default)
+
+ def test_quota_show__with_default(self):
arglist = [
'--default',
self.projects[0].name,
@@ -1094,30 +1191,66 @@ class TestQuotaShow(TestQuota):
)
self.assertNotCalled(self.network.get_quota)
- def test_quota_show_with_class(self):
+ def test_quota_show__with_class(self):
arglist = [
'--class',
- self.projects[0].name,
+ 'default',
]
verifylist = [
('quota_class', True),
- ('project', self.projects[0].name),
+ ('project', 'default'), # project is actually a class here
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
self.cmd.take_action(parsed_args)
- self.compute_quotas_class_mock.get.assert_called_once_with(
+ self.compute_quotas_class_mock.get.assert_called_once_with('default')
+ self.volume_quotas_class_mock.get.assert_called_once_with('default')
+ # neutron doesn't have the concept of quota classes
+ self.assertNotCalled(self.network.get_quota)
+ self.assertNotCalled(self.network.get_quota_default)
+
+ def test_quota_show__with_usage(self):
+ # update mocks to return detailed quota instead
+ self.compute_quota = \
+ compute_fakes.FakeQuota.create_one_comp_detailed_quota()
+ self.compute_quotas_mock.get.return_value = self.compute_quota
+ self.volume_quota = volume_fakes.create_one_detailed_quota()
+ self.volume_quotas_mock.get.return_value = self.volume_quota
+ self.network.get_quota.return_value = \
+ network_fakes.FakeQuota.create_one_net_detailed_quota()
+
+ arglist = [
+ '--usage',
self.projects[0].name,
+ ]
+ verifylist = [
+ ('usage', True),
+ ('project', self.projects[0].name),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ self.cmd.take_action(parsed_args)
+
+ self.compute_quotas_mock.get.assert_called_once_with(
+ self.projects[0].id,
+ detail=True,
)
- self.volume_quotas_class_mock.get.assert_called_once_with(
- self.projects[0].name,
+ self.volume_quotas_mock.get.assert_called_once_with(
+ self.projects[0].id,
+ usage=True,
+ )
+ self.network.get_quota.assert_called_once_with(
+ self.projects[0].id,
+ details=True,
)
- self.assertNotCalled(self.network.get_quota)
- self.assertNotCalled(self.network.get_quota_default)
- def test_quota_show_no_project(self):
- parsed_args = self.check_parser(self.cmd, [], [])
+ def test_quota_show__no_project(self):
+ arglist = []
+ verifylist = [
+ ('project', None),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
self.cmd.take_action(parsed_args)
@@ -1125,32 +1258,115 @@ class TestQuotaShow(TestQuota):
identity_fakes.project_id, detail=False
)
self.volume_quotas_mock.get.assert_called_once_with(
- identity_fakes.project_id,
+ identity_fakes.project_id, usage=False
)
self.network.get_quota.assert_called_once_with(
identity_fakes.project_id, details=False
)
self.assertNotCalled(self.network.get_quota_default)
- def test_network_quota_show_remove_empty(self):
+
+class TestQuotaDelete(TestQuota):
+ """Test cases for quota delete command"""
+
+ def setUp(self):
+ super().setUp()
+
+ self.network_mock.delete_quota = mock.Mock()
+
+ self.cmd = quota.DeleteQuota(self.app, None)
+
+ def test_delete(self):
+ """Delete all quotas"""
arglist = [
- self.projects[0].name,
+ self.projects[0].id,
]
verifylist = [
- ('project', self.projects[0].name),
+ ('service', 'all'),
+ ('project', self.projects[0].id),
]
+
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
- # First check that all regular values are returned
- result = self.cmd.get_network_quota(parsed_args)
- self.assertEqual(len(network_fakes.QUOTA), len(result))
-
- # set 1 of the values to None, and verify it is not returned
- orig_get_quota = self.network.get_quota
- network_quotas = copy.copy(network_fakes.QUOTA)
- network_quotas['healthmonitor'] = None
- self.network.get_quota = mock.Mock(return_value=network_quotas)
- result = self.cmd.get_network_quota(parsed_args)
- self.assertEqual(len(network_fakes.QUOTA) - 1, len(result))
- # Go back to default mock
- self.network.get_quota = orig_get_quota
+ result = self.cmd.take_action(parsed_args)
+
+ self.assertIsNone(result)
+ self.projects_mock.get.assert_called_once_with(self.projects[0].id)
+ self.compute_quotas_mock.delete.assert_called_once_with(
+ self.projects[0].id,
+ )
+ self.volume_quotas_mock.delete.assert_called_once_with(
+ self.projects[0].id,
+ )
+ self.network_mock.delete_quota.assert_called_once_with(
+ self.projects[0].id,
+ )
+
+ def test_delete__compute(self):
+ """Delete compute quotas only"""
+ arglist = [
+ '--compute',
+ self.projects[0].id,
+ ]
+ verifylist = [
+ ('service', 'compute'),
+ ('project', self.projects[0].id),
+ ]
+
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ result = self.cmd.take_action(parsed_args)
+
+ self.assertIsNone(result)
+ self.projects_mock.get.assert_called_once_with(self.projects[0].id)
+ self.compute_quotas_mock.delete.assert_called_once_with(
+ self.projects[0].id,
+ )
+ self.volume_quotas_mock.delete.assert_not_called()
+ self.network_mock.delete_quota.assert_not_called()
+
+ def test_delete__volume(self):
+ """Delete volume quotas only"""
+ arglist = [
+ '--volume',
+ self.projects[0].id,
+ ]
+ verifylist = [
+ ('service', 'volume'),
+ ('project', self.projects[0].id),
+ ]
+
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ result = self.cmd.take_action(parsed_args)
+
+ self.assertIsNone(result)
+ self.projects_mock.get.assert_called_once_with(self.projects[0].id)
+ self.compute_quotas_mock.delete.assert_not_called()
+ self.volume_quotas_mock.delete.assert_called_once_with(
+ self.projects[0].id,
+ )
+ self.network_mock.delete_quota.assert_not_called()
+
+ def test_delete__network(self):
+ """Delete network quotas only"""
+ arglist = [
+ '--network',
+ self.projects[0].id,
+ ]
+ verifylist = [
+ ('service', 'network'),
+ ('project', self.projects[0].id),
+ ]
+
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ result = self.cmd.take_action(parsed_args)
+
+ self.assertIsNone(result)
+ self.projects_mock.get.assert_called_once_with(self.projects[0].id)
+ self.compute_quotas_mock.delete.assert_not_called()
+ self.volume_quotas_mock.delete.assert_not_called()
+ self.network_mock.delete_quota.assert_called_once_with(
+ self.projects[0].id,
+ )
diff --git a/openstackclient/tests/unit/compute/v2/fakes.py b/openstackclient/tests/unit/compute/v2/fakes.py
index 55572cd8..f7f07509 100644
--- a/openstackclient/tests/unit/compute/v2/fakes.py
+++ b/openstackclient/tests/unit/compute/v2/fakes.py
@@ -20,8 +20,12 @@ import uuid
from novaclient import api_versions
from openstack.compute.v2 import flavor as _flavor
-from openstack.compute.v2 import server
+from openstack.compute.v2 import hypervisor as _hypervisor
+from openstack.compute.v2 import migration as _migration
+from openstack.compute.v2 import server as _server
+from openstack.compute.v2 import server_group as _server_group
from openstack.compute.v2 import server_interface as _server_interface
+from openstack.compute.v2 import server_migration as _server_migration
from openstack.compute.v2 import service
from openstack.compute.v2 import volume_attachment
@@ -339,136 +343,6 @@ class FakeExtension(object):
return extension
-class FakeHypervisor(object):
- """Fake one or more hypervisor."""
-
- @staticmethod
- def create_one_hypervisor(attrs=None):
- """Create a fake hypervisor.
-
- :param dict attrs:
- A dictionary with all attributes
- :return:
- A FakeResource object, with id, hypervisor_hostname, and so on
- """
- attrs = attrs or {}
-
- # Set default attributes.
- hypervisor_info = {
- 'id': 'hypervisor-id-' + uuid.uuid4().hex,
- 'hypervisor_hostname': 'hypervisor-hostname-' + uuid.uuid4().hex,
- 'status': 'enabled',
- 'host_ip': '192.168.0.10',
- 'cpu_info': {
- 'aaa': 'aaa',
- },
- 'free_disk_gb': 50,
- 'hypervisor_version': 2004001,
- 'disk_available_least': 50,
- 'local_gb': 50,
- 'free_ram_mb': 1024,
- 'service': {
- 'host': 'aaa',
- 'disabled_reason': None,
- 'id': 1,
- },
- 'vcpus_used': 0,
- 'hypervisor_type': 'QEMU',
- 'local_gb_used': 0,
- 'vcpus': 4,
- 'memory_mb_used': 512,
- 'memory_mb': 1024,
- 'current_workload': 0,
- 'state': 'up',
- 'running_vms': 0,
- }
-
- # Overwrite default attributes.
- hypervisor_info.update(attrs)
-
- hypervisor = fakes.FakeResource(info=copy.deepcopy(hypervisor_info),
- loaded=True)
- return hypervisor
-
- @staticmethod
- def create_hypervisors(attrs=None, count=2):
- """Create multiple fake hypervisors.
-
- :param dict attrs:
- A dictionary with all attributes
- :param int count:
- The number of hypervisors to fake
- :return:
- A list of FakeResource objects faking the hypervisors
- """
- hypervisors = []
- for i in range(0, count):
- hypervisors.append(FakeHypervisor.create_one_hypervisor(attrs))
-
- return hypervisors
-
-
-class FakeHypervisorStats(object):
- """Fake one or more hypervisor stats."""
-
- @staticmethod
- def create_one_hypervisor_stats(attrs=None):
- """Create a fake hypervisor stats.
-
- :param dict attrs:
- A dictionary with all attributes
- :return:
- A FakeResource object, with count, current_workload, and so on
- """
- attrs = attrs or {}
-
- # Set default attributes.
- stats_info = {
- 'count': 2,
- 'current_workload': 0,
- 'disk_available_least': 50,
- 'free_disk_gb': 100,
- 'free_ram_mb': 23000,
- 'local_gb': 100,
- 'local_gb_used': 0,
- 'memory_mb': 23800,
- 'memory_mb_used': 1400,
- 'running_vms': 3,
- 'vcpus': 8,
- 'vcpus_used': 3,
- }
-
- # Overwrite default attributes.
- stats_info.update(attrs)
-
- # Set default method.
- hypervisor_stats_method = {'to_dict': stats_info}
-
- hypervisor_stats = fakes.FakeResource(
- info=copy.deepcopy(stats_info),
- methods=copy.deepcopy(hypervisor_stats_method),
- loaded=True)
- return hypervisor_stats
-
- @staticmethod
- def create_hypervisors_stats(attrs=None, count=2):
- """Create multiple fake hypervisors stats.
-
- :param dict attrs:
- A dictionary with all attributes
- :param int count:
- The number of hypervisors to fake
- :return:
- A list of FakeResource objects faking the hypervisors
- """
- hypervisors = []
- for i in range(0, count):
- hypervisors.append(
- FakeHypervisorStats.create_one_hypervisor_stats(attrs))
-
- return hypervisors
-
-
class FakeSecurityGroup(object):
"""Fake one or more security groups."""
@@ -672,7 +546,12 @@ class FakeServer(object):
# Overwrite default attributes.
server_info.update(attrs)
- return server.Server(**server_info)
+ server = _server.Server(**server_info)
+
+ # Override methods
+ server.trigger_crash_dump = mock.MagicMock()
+
+ return server
@staticmethod
def create_sdk_servers(attrs=None, methods=None, count=2):
@@ -1290,72 +1169,6 @@ class FakeHost(object):
return host_info
-class FakeServerGroup(object):
- """Fake one server group"""
-
- @staticmethod
- def _create_one_server_group(attrs=None):
- """Create a fake server group
-
- :param dict attrs:
- A dictionary with all attributes
- :return:
- A FakeResource object, with id and other attributes
- """
- if attrs is None:
- attrs = {}
-
- # Set default attributes.
- server_group_info = {
- 'id': 'server-group-id-' + uuid.uuid4().hex,
- 'members': [],
- 'metadata': {},
- 'name': 'server-group-name-' + uuid.uuid4().hex,
- 'project_id': 'server-group-project-id-' + uuid.uuid4().hex,
- 'user_id': 'server-group-user-id-' + uuid.uuid4().hex,
- }
-
- # Overwrite default attributes.
- server_group_info.update(attrs)
-
- server_group = fakes.FakeResource(
- info=copy.deepcopy(server_group_info),
- loaded=True)
- return server_group
-
- @staticmethod
- def create_one_server_group(attrs=None):
- """Create a fake server group
-
- :param dict attrs:
- A dictionary with all attributes
- :return:
- A FakeResource object, with id and other attributes
- """
- if attrs is None:
- attrs = {}
- attrs.setdefault('policies', ['policy1', 'policy2'])
- return FakeServerGroup._create_one_server_group(attrs)
-
-
-class FakeServerGroupV264(object):
- """Fake one server group fo API >= 2.64"""
-
- @staticmethod
- def create_one_server_group(attrs=None):
- """Create a fake server group
-
- :param dict attrs:
- A dictionary with all attributes
- :return:
- A FakeResource object, with id and other attributes
- """
- if attrs is None:
- attrs = {}
- attrs.setdefault('policy', 'policy1')
- return FakeServerGroup._create_one_server_group(attrs)
-
-
class FakeUsage(object):
"""Fake one or more usage."""
@@ -1454,7 +1267,7 @@ class FakeQuota(object):
@staticmethod
def create_one_default_comp_quota(attrs=None):
- """Crate one quota"""
+ """Create one quota"""
attrs = attrs or {}
@@ -1622,242 +1435,247 @@ class FakeRateLimit(object):
self.next_available = next_available
-class FakeMigration(object):
- """Fake one or more migrations."""
+def create_one_migration(attrs=None):
+ """Create a fake migration.
- @staticmethod
- def create_one_migration(attrs=None, methods=None):
- """Create a fake migration.
+ :param dict attrs: A dictionary with all attributes
+ :return: A fake openstack.compute.v2.migration.Migration object
+ """
+ attrs = attrs or {}
- :param dict attrs:
- A dictionary with all attributes
- :param dict methods:
- A dictionary with all methods
- :return:
- A FakeResource object, with id, type, and so on
- """
- attrs = attrs or {}
- methods = methods or {}
+ # Set default attributes.
+ migration_info = {
+ "created_at": "2017-01-31T08:03:21.000000",
+ "dest_compute": "compute-" + uuid.uuid4().hex,
+ "dest_host": "10.0.2.15",
+ "dest_node": "node-" + uuid.uuid4().hex,
+ "id": random.randint(1, 999),
+ "migration_type": "migration",
+ "new_flavor_id": uuid.uuid4().hex,
+ "old_flavor_id": uuid.uuid4().hex,
+ "project_id": uuid.uuid4().hex,
+ "server_id": uuid.uuid4().hex,
+ "source_compute": "compute-" + uuid.uuid4().hex,
+ "source_node": "node-" + uuid.uuid4().hex,
+ "status": "migrating",
+ "updated_at": "2017-01-31T08:03:25.000000",
+ "user_id": uuid.uuid4().hex,
+ "uuid": uuid.uuid4().hex,
+ }
- # Set default attributes.
- migration_info = {
- "dest_host": "10.0.2.15",
- "status": "migrating",
- "migration_type": "migration",
- "updated_at": "2017-01-31T08:03:25.000000",
- "created_at": "2017-01-31T08:03:21.000000",
- "dest_compute": "compute-" + uuid.uuid4().hex,
- "id": random.randint(1, 999),
- "source_node": "node-" + uuid.uuid4().hex,
- "instance_uuid": uuid.uuid4().hex,
- "dest_node": "node-" + uuid.uuid4().hex,
- "source_compute": "compute-" + uuid.uuid4().hex,
- "uuid": uuid.uuid4().hex,
- "old_instance_type_id": uuid.uuid4().hex,
- "new_instance_type_id": uuid.uuid4().hex,
- "project_id": uuid.uuid4().hex,
- "user_id": uuid.uuid4().hex
- }
+ # Overwrite default attributes.
+ migration_info.update(attrs)
- # Overwrite default attributes.
- migration_info.update(attrs)
+ migration = _migration.Migration(**migration_info)
+ return migration
- migration = fakes.FakeResource(info=copy.deepcopy(migration_info),
- methods=methods,
- loaded=True)
- return migration
- @staticmethod
- def create_migrations(attrs=None, methods=None, count=2):
- """Create multiple fake migrations.
+def create_migrations(attrs=None, count=2):
+ """Create multiple fake migrations.
- :param dict attrs:
- A dictionary with all attributes
- :param dict methods:
- A dictionary with all methods
- :param int count:
- The number of migrations to fake
- :return:
- A list of FakeResource objects faking the migrations
- """
- migrations = []
- for i in range(0, count):
- migrations.append(
- FakeMigration.create_one_migration(
- attrs, methods))
+ :param dict attrs: A dictionary with all attributes
+ :param int count: The number of migrations to fake
+ :return: A list of fake openstack.compute.v2.migration.Migration objects
+ """
+ migrations = []
+ for i in range(0, count):
+ migrations.append(create_one_migration(attrs))
- return migrations
+ return migrations
-class FakeServerMigration(object):
- """Fake one or more server migrations."""
+def create_one_server_migration(attrs=None):
+ """Create a fake server migration.
- @staticmethod
- def create_one_server_migration(attrs=None, methods=None):
- """Create a fake server migration.
+ :param dict attrs: A dictionary with all attributes
+ :return: A fake openstack.compute.v2.server_migration.ServerMigration object
+ """
+ attrs = attrs or {}
- :param dict attrs:
- A dictionary with all attributes
- :param dict methods:
- A dictionary with all methods
- :return:
- A FakeResource object, with id, type, and so on
- """
- attrs = attrs or {}
- methods = methods or {}
+ # Set default attributes.
- # Set default attributes.
+ migration_info = {
+ "created_at": "2016-01-29T13:42:02.000000",
+ "dest_compute": "compute2",
+ "dest_host": "1.2.3.4",
+ "dest_node": "node2",
+ "id": random.randint(1, 999),
+ "server_uuid": uuid.uuid4().hex,
+ "source_compute": "compute1",
+ "source_node": "node1",
+ "status": "running",
+ "memory_total_bytes": random.randint(1, 99999),
+ "memory_processed_bytes": random.randint(1, 99999),
+ "memory_remaining_bytes": random.randint(1, 99999),
+ "disk_total_bytes": random.randint(1, 99999),
+ "disk_processed_bytes": random.randint(1, 99999),
+ "disk_remaining_bytes": random.randint(1, 99999),
+ "updated_at": "2016-01-29T13:42:02.000000",
+ # added in 2.59
+ "uuid": uuid.uuid4().hex,
+ # added in 2.80
+ "user_id": uuid.uuid4().hex,
+ "project_id": uuid.uuid4().hex,
+ }
- migration_info = {
- "created_at": "2016-01-29T13:42:02.000000",
- "dest_compute": "compute2",
- "dest_host": "1.2.3.4",
- "dest_node": "node2",
- "id": random.randint(1, 999),
- "server_uuid": uuid.uuid4().hex,
- "source_compute": "compute1",
- "source_node": "node1",
- "status": "running",
- "memory_total_bytes": random.randint(1, 99999),
- "memory_processed_bytes": random.randint(1, 99999),
- "memory_remaining_bytes": random.randint(1, 99999),
- "disk_total_bytes": random.randint(1, 99999),
- "disk_processed_bytes": random.randint(1, 99999),
- "disk_remaining_bytes": random.randint(1, 99999),
- "updated_at": "2016-01-29T13:42:02.000000",
- # added in 2.59
- "uuid": uuid.uuid4().hex,
- # added in 2.80
- "user_id": uuid.uuid4().hex,
- "project_id": uuid.uuid4().hex,
- }
+ # Overwrite default attributes.
+ migration_info.update(attrs)
- # Overwrite default attributes.
- migration_info.update(attrs)
+ migration = _server_migration.ServerMigration(**migration_info)
+ return migration
- migration = fakes.FakeResource(
- info=copy.deepcopy(migration_info),
- methods=methods,
- loaded=True)
- return migration
+def create_server_migrations(attrs=None, count=2):
+ """Create multiple fake server migrations.
-class FakeVolumeAttachment(object):
- """Fake one or more volume attachments (BDMs)."""
+ :param dict attrs: A dictionary with all attributes
+ :param int count: The number of server migrations to fake
+ :return: A list of fake
+ openstack.compute.v2.server_migration.ServerMigration objects
+ """
+ migrations = []
+ for i in range(0, count):
+ migrations.append(
+ create_one_server_migration(attrs))
- @staticmethod
- def create_one_volume_attachment(attrs=None, methods=None):
- """Create a fake volume attachment.
+ return migrations
- :param dict attrs:
- A dictionary with all attributes
- :param dict methods:
- A dictionary with all methods
- :return:
- A FakeResource object, with id, device, and so on
- """
- attrs = attrs or {}
- methods = methods or {}
- # Set default attributes.
- volume_attachment_info = {
- "id": uuid.uuid4().hex,
- "device": "/dev/sdb",
- "serverId": uuid.uuid4().hex,
- "volumeId": uuid.uuid4().hex,
- # introduced in API microversion 2.70
- "tag": "foo",
- # introduced in API microversion 2.79
- "delete_on_termination": True,
- # introduced in API microversion 2.89
- "attachment_id": uuid.uuid4().hex,
- "bdm_uuid": uuid.uuid4().hex
- }
+def create_one_volume_attachment(attrs=None):
+ """Create a fake volume attachment.
- # Overwrite default attributes.
- volume_attachment_info.update(attrs)
+ :param dict attrs: A dictionary with all attributes
+ :return: A fake openstack.compute.v2.volume_attachment.VolumeAttachment
+ object
+ """
+ attrs = attrs or {}
- volume_attachment = fakes.FakeResource(
- info=copy.deepcopy(volume_attachment_info),
- methods=methods,
- loaded=True)
- return volume_attachment
+ # Set default attributes.
+ volume_attachment_info = {
+ "id": uuid.uuid4().hex,
+ "device": "/dev/sdb",
+ "server_id": uuid.uuid4().hex,
+ "volume_id": uuid.uuid4().hex,
+ # introduced in API microversion 2.70
+ "tag": "foo",
+ # introduced in API microversion 2.79
+ "delete_on_termination": True,
+ # introduced in API microversion 2.89
+ "attachment_id": uuid.uuid4().hex,
+ "bdm_id": uuid.uuid4().hex,
+ }
- @staticmethod
- def create_volume_attachments(attrs=None, methods=None, count=2):
- """Create multiple fake volume attachments (BDMs).
+ # Overwrite default attributes.
+ volume_attachment_info.update(attrs)
- :param dict attrs:
- A dictionary with all attributes
- :param dict methods:
- A dictionary with all methods
- :param int count:
- The number of volume attachments to fake
- :return:
- A list of FakeResource objects faking the volume attachments.
- """
- volume_attachments = []
- for i in range(0, count):
- volume_attachments.append(
- FakeVolumeAttachment.create_one_volume_attachment(
- attrs, methods))
+ return volume_attachment.VolumeAttachment(**volume_attachment_info)
- return volume_attachments
- @staticmethod
- def create_one_sdk_volume_attachment(attrs=None, methods=None):
- """Create a fake sdk VolumeAttachment.
+def create_volume_attachments(attrs=None, count=2):
+ """Create multiple fake volume attachments.
- :param dict attrs:
- A dictionary with all attributes
- :param dict methods:
- A dictionary with all methods
- :return:
- A fake VolumeAttachment object, with id, device, and so on
- """
- attrs = attrs or {}
- methods = methods or {}
+ :param dict attrs: A dictionary with all attributes
+ :param int count: The number of volume attachments to fake
+ :return: A list of fake
+ openstack.compute.v2.volume_attachment.VolumeAttachment objects
+ """
+ volume_attachments = []
+ for i in range(0, count):
+ volume_attachments.append(create_one_volume_attachment(attrs))
- # Set default attributes.
- volume_attachment_info = {
- "id": uuid.uuid4().hex,
- "device": "/dev/sdb",
- "server_id": uuid.uuid4().hex,
- "volume_id": uuid.uuid4().hex,
- # introduced in API microversion 2.70
- "tag": "foo",
- # introduced in API microversion 2.79
- "delete_on_termination": True,
- # introduced in API microversion 2.89
- "attachment_id": uuid.uuid4().hex,
- "bdm_uuid": uuid.uuid4().hex
- }
+ return volume_attachments
- # Overwrite default attributes.
- volume_attachment_info.update(attrs)
- return volume_attachment.VolumeAttachment(**volume_attachment_info)
+def create_one_hypervisor(attrs=None):
+ """Create a fake hypervisor.
- @staticmethod
- def create_sdk_volume_attachments(attrs=None, methods=None, count=2):
- """Create multiple fake VolumeAttachment objects (BDMs).
+ :param dict attrs:
+ A dictionary with all attributes
+ :return:
+ A fake Hypervisor object, with id, hypervisor_hostname, and so on
+ """
+ attrs = attrs or {}
- :param dict attrs:
- A dictionary with all attributes
- :param dict methods:
- A dictionary with all methods
- :param int count:
- The number of volume attachments to fake
- :return:
- A list of VolumeAttachment objects faking the volume attachments.
- """
- volume_attachments = []
- for i in range(0, count):
- volume_attachments.append(
- FakeVolumeAttachment.create_one_sdk_volume_attachment(
- attrs, methods))
+ # Set default attributes.
+ hypervisor_info = {
+ 'id': 'hypervisor-id-' + uuid.uuid4().hex,
+ 'hypervisor_hostname': 'hypervisor-hostname-' + uuid.uuid4().hex,
+ 'status': 'enabled',
+ 'host_ip': '192.168.0.10',
+ 'cpu_info': {
+ 'aaa': 'aaa',
+ },
+ 'free_disk_gb': 50,
+ 'hypervisor_version': 2004001,
+ 'disk_available_least': 50,
+ 'local_gb': 50,
+ 'free_ram_mb': 1024,
+ 'service': {
+ 'host': 'aaa',
+ 'disabled_reason': None,
+ 'id': 1,
+ },
+ 'vcpus_used': 0,
+ 'hypervisor_type': 'QEMU',
+ 'local_gb_used': 0,
+ 'vcpus': 4,
+ 'memory_mb_used': 512,
+ 'memory_mb': 1024,
+ 'current_workload': 0,
+ 'state': 'up',
+ 'running_vms': 0,
+ }
+
+ # Overwrite default attributes.
+ hypervisor_info.update(attrs)
+
+ hypervisor = _hypervisor.Hypervisor(**hypervisor_info, loaded=True)
+ return hypervisor
+
+
+def create_hypervisors(attrs=None, count=2):
+ """Create multiple fake hypervisors.
+
+ :param dict attrs:
+ A dictionary with all attributes
+ :param int count:
+ The number of hypervisors to fake
+ :return:
+ A list of fake Hypervisor objects
+ """
+ hypervisors = []
+ for i in range(0, count):
+ hypervisors.append(create_one_hypervisor(attrs))
+
+ return hypervisors
+
+
+def create_one_server_group(attrs=None):
+ """Create a fake server group
+
+ :param dict attrs:
+ A dictionary with all attributes
+ :return:
+ A fake ServerGroup object, with id and other attributes
+ """
+ if attrs is None:
+ attrs = {}
+
+ # Set default attributes.
+ server_group_info = {
+ 'id': 'server-group-id-' + uuid.uuid4().hex,
+ 'member_ids': '',
+ 'metadata': {},
+ 'name': 'server-group-name-' + uuid.uuid4().hex,
+ 'project_id': 'server-group-project-id-' + uuid.uuid4().hex,
+ 'user_id': 'server-group-user-id-' + uuid.uuid4().hex,
+ }
+
+ # Overwrite default attributes.
+ server_group_info.update(attrs)
- return volume_attachments
+ server_group = _server_group.ServerGroup(**server_group_info)
+ return server_group
def create_one_server_interface(attrs=None):
diff --git a/openstackclient/tests/unit/compute/v2/test_flavor.py b/openstackclient/tests/unit/compute/v2/test_flavor.py
index 14dd3df2..33ebf546 100644
--- a/openstackclient/tests/unit/compute/v2/test_flavor.py
+++ b/openstackclient/tests/unit/compute/v2/test_flavor.py
@@ -523,6 +523,7 @@ class TestFlavorList(TestFlavor):
self.sdk_client.flavors.assert_called_with(
**kwargs
)
+ self.sdk_client.fetch_flavor_extra_specs.assert_not_called()
self.assertEqual(self.columns, columns)
self.assertEqual(self.data, tuple(data))
@@ -550,6 +551,7 @@ class TestFlavorList(TestFlavor):
self.sdk_client.flavors.assert_called_with(
**kwargs
)
+ self.sdk_client.fetch_flavor_extra_specs.assert_not_called()
self.assertEqual(self.columns, columns)
self.assertEqual(self.data, tuple(data))
@@ -577,6 +579,7 @@ class TestFlavorList(TestFlavor):
self.sdk_client.flavors.assert_called_with(
**kwargs
)
+ self.sdk_client.fetch_flavor_extra_specs.assert_not_called()
self.assertEqual(self.columns, columns)
self.assertEqual(self.data, tuple(data))
@@ -604,6 +607,7 @@ class TestFlavorList(TestFlavor):
self.sdk_client.flavors.assert_called_with(
**kwargs
)
+ self.sdk_client.fetch_flavor_extra_specs.assert_not_called()
self.assertEqual(self.columns, columns)
self.assertEqual(self.data, tuple(data))
@@ -631,6 +635,58 @@ class TestFlavorList(TestFlavor):
self.sdk_client.flavors.assert_called_with(
**kwargs
)
+ self.sdk_client.fetch_flavor_extra_specs.assert_not_called()
+
+ self.assertEqual(self.columns_long, columns)
+ self.assertCountEqual(self.data_long, tuple(data))
+
+ def test_flavor_list_long_no_extra_specs(self):
+ # use flavor with no extra specs for this test
+ flavor = compute_fakes.FakeFlavor.create_one_flavor(
+ attrs={"extra_specs": {}})
+ self.data = ((
+ flavor.id,
+ flavor.name,
+ flavor.ram,
+ flavor.disk,
+ flavor.ephemeral,
+ flavor.vcpus,
+ flavor.is_public,
+ ),)
+ self.data_long = (self.data[0] + (
+ flavor.swap,
+ flavor.rxtx_factor,
+ format_columns.DictColumn(flavor.extra_specs)
+ ),)
+ self.api_mock.side_effect = [[flavor], [], ]
+
+ self.sdk_client.flavors = self.api_mock
+ self.sdk_client.fetch_flavor_extra_specs = mock.Mock(return_value=None)
+
+ arglist = [
+ '--long',
+ ]
+ verifylist = [
+ ('long', True),
+ ]
+
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ # In base command class Lister in cliff, abstract method take_action()
+ # returns a tuple containing the column names and an iterable
+ # containing the data to be listed.
+ columns, data = self.cmd.take_action(parsed_args)
+
+ # Set expected values
+ kwargs = {
+ 'is_public': True,
+ }
+
+ self.sdk_client.flavors.assert_called_with(
+ **kwargs
+ )
+ self.sdk_client.fetch_flavor_extra_specs.assert_called_once_with(
+ flavor)
self.assertEqual(self.columns_long, columns)
self.assertCountEqual(self.data_long, tuple(data))
@@ -662,6 +718,7 @@ class TestFlavorList(TestFlavor):
self.sdk_client.flavors.assert_called_with(
**kwargs
)
+ self.sdk_client.fetch_flavor_extra_specs.assert_not_called()
self.assertEqual(self.columns, columns)
self.assertEqual(tuple(self.data), tuple(data))
diff --git a/openstackclient/tests/unit/compute/v2/test_hypervisor.py b/openstackclient/tests/unit/compute/v2/test_hypervisor.py
index 7dbd6e19..e5804665 100644
--- a/openstackclient/tests/unit/compute/v2/test_hypervisor.py
+++ b/openstackclient/tests/unit/compute/v2/test_hypervisor.py
@@ -13,41 +13,37 @@
# under the License.
#
-import copy
import json
+from unittest import mock
-from novaclient import api_versions
from novaclient import exceptions as nova_exceptions
+from openstack import utils as sdk_utils
from osc_lib.cli import format_columns
from osc_lib import exceptions
from openstackclient.compute.v2 import hypervisor
from openstackclient.tests.unit.compute.v2 import fakes as compute_fakes
-from openstackclient.tests.unit import fakes
class TestHypervisor(compute_fakes.TestComputev2):
def setUp(self):
- super(TestHypervisor, self).setUp()
+ super().setUp()
- # Get a shortcut to the compute client hypervisors mock
- self.hypervisors_mock = self.app.client_manager.compute.hypervisors
- self.hypervisors_mock.reset_mock()
-
- # Get a shortcut to the compute client aggregates mock
- self.aggregates_mock = self.app.client_manager.compute.aggregates
- self.aggregates_mock.reset_mock()
+ # Create and get a shortcut to the compute client mock
+ self.app.client_manager.sdk_connection = mock.Mock()
+ self.sdk_client = self.app.client_manager.sdk_connection.compute
+ self.sdk_client.reset_mock()
class TestHypervisorList(TestHypervisor):
def setUp(self):
- super(TestHypervisorList, self).setUp()
+ super().setUp()
# Fake hypervisors to be listed up
- self.hypervisors = compute_fakes.FakeHypervisor.create_hypervisors()
- self.hypervisors_mock.list.return_value = self.hypervisors
+ self.hypervisors = compute_fakes.create_hypervisors()
+ self.sdk_client.hypervisors.return_value = self.hypervisors
self.columns = (
"ID",
@@ -70,14 +66,14 @@ class TestHypervisorList(TestHypervisor):
self.data = (
(
self.hypervisors[0].id,
- self.hypervisors[0].hypervisor_hostname,
+ self.hypervisors[0].name,
self.hypervisors[0].hypervisor_type,
self.hypervisors[0].host_ip,
self.hypervisors[0].state
),
(
self.hypervisors[1].id,
- self.hypervisors[1].hypervisor_hostname,
+ self.hypervisors[1].name,
self.hypervisors[1].hypervisor_type,
self.hypervisors[1].host_ip,
self.hypervisors[1].state
@@ -87,25 +83,25 @@ class TestHypervisorList(TestHypervisor):
self.data_long = (
(
self.hypervisors[0].id,
- self.hypervisors[0].hypervisor_hostname,
+ self.hypervisors[0].name,
self.hypervisors[0].hypervisor_type,
self.hypervisors[0].host_ip,
self.hypervisors[0].state,
self.hypervisors[0].vcpus_used,
self.hypervisors[0].vcpus,
- self.hypervisors[0].memory_mb_used,
- self.hypervisors[0].memory_mb
+ self.hypervisors[0].memory_used,
+ self.hypervisors[0].memory_size
),
(
self.hypervisors[1].id,
- self.hypervisors[1].hypervisor_hostname,
+ self.hypervisors[1].name,
self.hypervisors[1].hypervisor_type,
self.hypervisors[1].host_ip,
self.hypervisors[1].state,
self.hypervisors[1].vcpus_used,
self.hypervisors[1].vcpus,
- self.hypervisors[1].memory_mb_used,
- self.hypervisors[1].memory_mb
+ self.hypervisors[1].memory_used,
+ self.hypervisors[1].memory_size
),
)
# Get the command object to test
@@ -121,25 +117,25 @@ class TestHypervisorList(TestHypervisor):
# containing the data to be listed.
columns, data = self.cmd.take_action(parsed_args)
- self.hypervisors_mock.list.assert_called_with()
+ self.sdk_client.hypervisors.assert_called_with(details=True)
self.assertEqual(self.columns, columns)
self.assertEqual(self.data, tuple(data))
def test_hypervisor_list_matching_option_found(self):
arglist = [
- '--matching', self.hypervisors[0].hypervisor_hostname,
+ '--matching', self.hypervisors[0].name,
]
verifylist = [
- ('matching', self.hypervisors[0].hypervisor_hostname),
+ ('matching', self.hypervisors[0].name),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
# Fake the return value of search()
- self.hypervisors_mock.search.return_value = [self.hypervisors[0]]
+ self.sdk_client.find_hypervisor.return_value = [self.hypervisors[0]]
self.data = (
(
self.hypervisors[0].id,
- self.hypervisors[0].hypervisor_hostname,
+ self.hypervisors[0].name,
self.hypervisors[1].hypervisor_type,
self.hypervisors[1].host_ip,
self.hypervisors[1].state,
@@ -151,8 +147,9 @@ class TestHypervisorList(TestHypervisor):
# containing the data to be listed.
columns, data = self.cmd.take_action(parsed_args)
- self.hypervisors_mock.search.assert_called_with(
- self.hypervisors[0].hypervisor_hostname
+ self.sdk_client.find_hypervisor.assert_called_with(
+ self.hypervisors[0].name,
+ ignore_missing=False
)
self.assertEqual(self.columns, columns)
self.assertEqual(self.data, tuple(data))
@@ -167,25 +164,25 @@ class TestHypervisorList(TestHypervisor):
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
# Fake exception raised from search()
- self.hypervisors_mock.search.side_effect = exceptions.NotFound(None)
+ self.sdk_client.find_hypervisor.side_effect = \
+ exceptions.NotFound(None)
self.assertRaises(exceptions.NotFound,
self.cmd.take_action,
parsed_args)
- def test_hypervisor_list_with_matching_and_pagination_options(self):
- self.app.client_manager.compute.api_version = \
- api_versions.APIVersion('2.32')
-
+ @mock.patch.object(sdk_utils, 'supports_microversion', return_value=False)
+ def test_hypervisor_list_with_matching_and_pagination_options(
+ self, sm_mock):
arglist = [
- '--matching', self.hypervisors[0].hypervisor_hostname,
+ '--matching', self.hypervisors[0].name,
'--limit', '1',
- '--marker', self.hypervisors[0].hypervisor_hostname,
+ '--marker', self.hypervisors[0].name,
]
verifylist = [
- ('matching', self.hypervisors[0].hypervisor_hostname),
+ ('matching', self.hypervisors[0].name),
('limit', 1),
- ('marker', self.hypervisors[0].hypervisor_hostname),
+ ('marker', self.hypervisors[0].name),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
@@ -197,7 +194,8 @@ class TestHypervisorList(TestHypervisor):
self.assertIn(
'--matching is not compatible with --marker or --limit', str(ex))
- def test_hypervisor_list_long_option(self):
+ @mock.patch.object(sdk_utils, 'supports_microversion', return_value=False)
+ def test_hypervisor_list_long_option(self, sm_mock):
arglist = [
'--long',
]
@@ -211,14 +209,12 @@ class TestHypervisorList(TestHypervisor):
# containing the data to be listed.
columns, data = self.cmd.take_action(parsed_args)
- self.hypervisors_mock.list.assert_called_with()
+ self.sdk_client.hypervisors.assert_called_with(details=True)
self.assertEqual(self.columns_long, columns)
self.assertEqual(self.data_long, tuple(data))
- def test_hypervisor_list_with_limit(self):
- self.app.client_manager.compute.api_version = \
- api_versions.APIVersion('2.33')
-
+ @mock.patch.object(sdk_utils, 'supports_microversion', return_value=True)
+ def test_hypervisor_list_with_limit(self, sm_mock):
arglist = [
'--limit', '1',
]
@@ -229,12 +225,10 @@ class TestHypervisorList(TestHypervisor):
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
self.cmd.take_action(parsed_args)
- self.hypervisors_mock.list.assert_called_with(limit=1)
-
- def test_hypervisor_list_with_limit_pre_v233(self):
- self.app.client_manager.compute.api_version = \
- api_versions.APIVersion('2.32')
+ self.sdk_client.hypervisors.assert_called_with(limit=1, details=True)
+ @mock.patch.object(sdk_utils, 'supports_microversion', return_value=False)
+ def test_hypervisor_list_with_limit_pre_v233(self, sm_mock):
arglist = [
'--limit', '1',
]
@@ -251,10 +245,8 @@ class TestHypervisorList(TestHypervisor):
self.assertIn(
'--os-compute-api-version 2.33 or greater is required', str(ex))
- def test_hypervisor_list_with_marker(self):
- self.app.client_manager.compute.api_version = \
- api_versions.APIVersion('2.33')
-
+ @mock.patch.object(sdk_utils, 'supports_microversion', return_value=True)
+ def test_hypervisor_list_with_marker(self, sm_mock):
arglist = [
'--marker', 'test_hyp',
]
@@ -265,12 +257,11 @@ class TestHypervisorList(TestHypervisor):
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
self.cmd.take_action(parsed_args)
- self.hypervisors_mock.list.assert_called_with(marker='test_hyp')
-
- def test_hypervisor_list_with_marker_pre_v233(self):
- self.app.client_manager.compute.api_version = \
- api_versions.APIVersion('2.32')
+ self.sdk_client.hypervisors.assert_called_with(
+ marker='test_hyp', details=True)
+ @mock.patch.object(sdk_utils, 'supports_microversion', return_value=False)
+ def test_hypervisor_list_with_marker_pre_v233(self, sm_mock):
arglist = [
'--marker', 'test_hyp',
]
@@ -291,29 +282,66 @@ class TestHypervisorList(TestHypervisor):
class TestHypervisorShow(TestHypervisor):
def setUp(self):
- super(TestHypervisorShow, self).setUp()
+ super().setUp()
+
+ uptime_string = (' 01:28:24 up 3 days, 11:15, 1 user, '
+ ' load average: 0.94, 0.62, 0.50\n')
# Fake hypervisors to be listed up
- self.hypervisor = compute_fakes.FakeHypervisor.create_one_hypervisor()
+ self.hypervisor = compute_fakes.create_one_hypervisor(attrs={
+ 'uptime': uptime_string,
+ })
- # Return value of utils.find_resource()
- self.hypervisors_mock.get.return_value = self.hypervisor
+ # Return value of compute_client.find_hypervisor
+ self.sdk_client.find_hypervisor.return_value = self.hypervisor
- # Return value of compute_client.aggregates.list()
- self.aggregates_mock.list.return_value = []
+ # Return value of compute_client.aggregates()
+ self.sdk_client.aggregates.return_value = []
- # Return value of compute_client.hypervisors.uptime()
+ # Return value of compute_client.get_hypervisor_uptime()
uptime_info = {
'status': self.hypervisor.status,
'state': self.hypervisor.state,
'id': self.hypervisor.id,
- 'hypervisor_hostname': self.hypervisor.hypervisor_hostname,
- 'uptime': ' 01:28:24 up 3 days, 11:15, 1 user, '
- ' load average: 0.94, 0.62, 0.50\n',
+ 'hypervisor_hostname': self.hypervisor.name,
+ 'uptime': uptime_string,
}
- self.hypervisors_mock.uptime.return_value = fakes.FakeResource(
- info=copy.deepcopy(uptime_info),
- loaded=True
+ self.sdk_client.get_hypervisor_uptime.return_value = uptime_info
+
+ self.columns_v288 = (
+ 'aggregates',
+ 'cpu_info',
+ 'host_ip',
+ 'host_time',
+ 'hypervisor_hostname',
+ 'hypervisor_type',
+ 'hypervisor_version',
+ 'id',
+ 'load_average',
+ 'service_host',
+ 'service_id',
+ 'state',
+ 'status',
+ 'uptime',
+ 'users',
+ )
+
+ self.data_v288 = (
+ [],
+ format_columns.DictColumn({'aaa': 'aaa'}),
+ '192.168.0.10',
+ '01:28:24',
+ self.hypervisor.name,
+ 'QEMU',
+ 2004001,
+ self.hypervisor.id,
+ '0.94, 0.62, 0.50',
+ 'aaa',
+ 1,
+ 'up',
+ 'enabled',
+ '3 days, 11:15',
+ '1',
)
self.columns = (
@@ -353,7 +381,7 @@ class TestHypervisorShow(TestHypervisor):
1024,
'192.168.0.10',
'01:28:24',
- self.hypervisor.hypervisor_hostname,
+ self.hypervisor.name,
'QEMU',
2004001,
self.hypervisor.id,
@@ -376,15 +404,32 @@ class TestHypervisorShow(TestHypervisor):
# Get the command object to test
self.cmd = hypervisor.ShowHypervisor(self.app, None)
- def test_hypervisor_show(self):
- self.app.client_manager.compute.api_version = \
- api_versions.APIVersion('2.28')
+ @mock.patch.object(sdk_utils, 'supports_microversion', return_value=True)
+ def test_hypervisor_show(self, sm_mock):
+ arglist = [
+ self.hypervisor.name,
+ ]
+ verifylist = [
+ ('hypervisor', self.hypervisor.name),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+ # In base command class ShowOne in cliff, abstract method take_action()
+ # returns a two-part tuple with a tuple of column names and a tuple of
+ # data to be shown.
+ columns, data = self.cmd.take_action(parsed_args)
+
+ self.assertEqual(self.columns_v288, columns)
+ self.assertCountEqual(self.data_v288, data)
+
+ @mock.patch.object(sdk_utils, 'supports_microversion',
+ side_effect=[False, True, False])
+ def test_hypervisor_show_pre_v288(self, sm_mock):
arglist = [
- self.hypervisor.hypervisor_hostname,
+ self.hypervisor.name,
]
verifylist = [
- ('hypervisor', self.hypervisor.hypervisor_hostname),
+ ('hypervisor', self.hypervisor.name),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
@@ -396,21 +441,19 @@ class TestHypervisorShow(TestHypervisor):
self.assertEqual(self.columns, columns)
self.assertCountEqual(self.data, data)
- def test_hypervisor_show_pre_v228(self):
- self.app.client_manager.compute.api_version = \
- api_versions.APIVersion('2.27')
-
+ @mock.patch.object(sdk_utils, 'supports_microversion', return_value=False)
+ def test_hypervisor_show_pre_v228(self, sm_mock):
# before microversion 2.28, nova returned a stringified version of this
# field
- self.hypervisor._info['cpu_info'] = json.dumps(
- self.hypervisor._info['cpu_info'])
- self.hypervisors_mock.get.return_value = self.hypervisor
+ self.hypervisor.cpu_info = json.dumps(
+ self.hypervisor.cpu_info)
+ self.sdk_client.find_hypervisor.return_value = self.hypervisor
arglist = [
- self.hypervisor.hypervisor_hostname,
+ self.hypervisor.name,
]
verifylist = [
- ('hypervisor', self.hypervisor.hypervisor_hostname),
+ ('hypervisor', self.hypervisor.name),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
@@ -422,19 +465,18 @@ class TestHypervisorShow(TestHypervisor):
self.assertEqual(self.columns, columns)
self.assertCountEqual(self.data, data)
- def test_hypervisor_show_uptime_not_implemented(self):
- self.app.client_manager.compute.api_version = \
- api_versions.APIVersion('2.28')
-
+ @mock.patch.object(sdk_utils, 'supports_microversion',
+ side_effect=[False, True, False])
+ def test_hypervisor_show_uptime_not_implemented(self, sm_mock):
arglist = [
- self.hypervisor.hypervisor_hostname,
+ self.hypervisor.name,
]
verifylist = [
- ('hypervisor', self.hypervisor.hypervisor_hostname),
+ ('hypervisor', self.hypervisor.name),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
- self.hypervisors_mock.uptime.side_effect = (
+ self.sdk_client.get_hypervisor_uptime.side_effect = (
nova_exceptions.HTTPNotImplemented(501))
# In base command class ShowOne in cliff, abstract method take_action()
@@ -474,7 +516,7 @@ class TestHypervisorShow(TestHypervisor):
50,
1024,
'192.168.0.10',
- self.hypervisor.hypervisor_hostname,
+ self.hypervisor.name,
'QEMU',
2004001,
self.hypervisor.id,
diff --git a/openstackclient/tests/unit/compute/v2/test_hypervisor_stats.py b/openstackclient/tests/unit/compute/v2/test_hypervisor_stats.py
index 40086f9b..7bc7468a 100644
--- a/openstackclient/tests/unit/compute/v2/test_hypervisor_stats.py
+++ b/openstackclient/tests/unit/compute/v2/test_hypervisor_stats.py
@@ -12,9 +12,11 @@
# License for the specific language governing permissions and limitations
# under the License.
#
+from unittest import mock
from openstackclient.compute.v2 import hypervisor_stats
from openstackclient.tests.unit.compute.v2 import fakes as compute_fakes
+from openstackclient.tests.unit import fakes
class TestHypervisorStats(compute_fakes.TestComputev2):
@@ -23,20 +25,55 @@ class TestHypervisorStats(compute_fakes.TestComputev2):
super(TestHypervisorStats, self).setUp()
- # Get a shortcut to the compute client hypervisors mock
+ # Create and get a shortcut to the compute client mock
- self.hypervisors_mock = self.app.client_manager.compute.hypervisors
- self.hypervisors_mock.reset_mock()
+ self.app.client_manager.sdk_connection = mock.Mock()
+ self.app.client_manager.sdk_connection.compute = mock.Mock()
+ self.sdk_client = self.app.client_manager.sdk_connection.compute
+ self.sdk_client.get = mock.Mock()
+
+
+# Not in fakes.py because hypervisor stats has been deprecated
+
+def create_one_hypervisor_stats(attrs=None):
+ """Create a fake hypervisor stats.
+
+ :param dict attrs:
+ A dictionary with all attributes
+ :return:
+ A dictionary that contains hypervisor stats information keys
+ """
+ attrs = attrs or {}
+
+ # Set default attributes.
+ stats_info = {
+ 'count': 2,
+ 'current_workload': 0,
+ 'disk_available_least': 50,
+ 'free_disk_gb': 100,
+ 'free_ram_mb': 23000,
+ 'local_gb': 100,
+ 'local_gb_used': 0,
+ 'memory_mb': 23800,
+ 'memory_mb_used': 1400,
+ 'running_vms': 3,
+ 'vcpus': 8,
+ 'vcpus_used': 3,
+ }
+
+ # Overwrite default attributes.
+ stats_info.update(attrs)
+
+ return stats_info
class TestHypervisorStatsShow(TestHypervisorStats):
+ _stats = create_one_hypervisor_stats()
+
def setUp(self):
super(TestHypervisorStatsShow, self).setUp()
- self.hypervisor_stats = \
- compute_fakes.FakeHypervisorStats.create_one_hypervisor_stats()
-
- self.hypervisors_mock.statistics.return_value =\
- self.hypervisor_stats
+ self.sdk_client.get.return_value = fakes.FakeResponse(
+ data={'hypervisor_statistics': self._stats})
self.cmd = hypervisor_stats.ShowHypervisorStats(self.app, None)
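
The hunk above swaps the novaclient statistics() mock for a raw GET that returns a FakeResponse. A minimal, self-contained sketch of that pattern follows (plain Python, not part of the patch; the FakeResponse class below is a simplified stand-in for the test helper of the same name, and the .json() interface is an assumption of this sketch):

from unittest import mock


class FakeResponse:
    # Stand-in for the unit-test FakeResponse helper: for this sketch it only
    # needs to hand back the payload it was constructed with.
    def __init__(self, data):
        self._data = data

    def json(self):
        return self._data


stats = {'count': 2, 'vcpus': 8, 'vcpus_used': 3}
sdk_client = mock.Mock()
sdk_client.get.return_value = FakeResponse(
    data={'hypervisor_statistics': stats})

# The command under test is expected to issue a bare GET and unwrap the
# 'hypervisor_statistics' key from the response body.
body = sdk_client.get('/os-hypervisors/statistics').json()
assert body['hypervisor_statistics'] == stats
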
diff --git a/openstackclient/tests/unit/compute/v2/test_server.py b/openstackclient/tests/unit/compute/v2/test_server.py
index 46ace579..a5d5a43f 100644
--- a/openstackclient/tests/unit/compute/v2/test_server.py
+++ b/openstackclient/tests/unit/compute/v2/test_server.py
@@ -117,6 +117,21 @@ class TestServer(compute_fakes.TestComputev2):
# Set object methods to be tested. Could be overwritten in subclass.
self.methods = {}
+ patcher = mock.patch.object(
+ sdk_utils, 'supports_microversion', return_value=True)
+ self.addCleanup(patcher.stop)
+ self.supports_microversion_mock = patcher.start()
+ self._set_mock_microversion(
+ self.app.client_manager.compute.api_version.get_string())
+
+ def _set_mock_microversion(self, mock_v):
+ """Set a specific microversion for the mock supports_microversion()."""
+ self.supports_microversion_mock.reset_mock(return_value=True)
+
+ self.supports_microversion_mock.side_effect = (
+ lambda _, v:
+ api_versions.APIVersion(v) <= api_versions.APIVersion(mock_v))
+
def setup_servers_mock(self, count):
# If we are creating more than one server, make one of them
# boot-from-volume
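
For reference, here is a standalone sketch of how the supports_microversion() patch above behaves once a microversion is pinned via _set_mock_microversion(). The pinned_to() helper is a hypothetical reimplementation of the installed side_effect, assuming novaclient and openstacksdk are importable; it is not part of the patch:

from unittest import mock

from novaclient import api_versions
from openstack import utils as sdk_utils


def pinned_to(mock_v):
    # Mirrors the side_effect installed by _set_mock_microversion(): a
    # requested version is "supported" only if it does not exceed mock_v.
    return lambda _adapter, v: (
        api_versions.APIVersion(v) <= api_versions.APIVersion(mock_v))


with mock.patch.object(
        sdk_utils, 'supports_microversion', side_effect=pinned_to('2.26')):
    assert sdk_utils.supports_microversion(mock.Mock(), '2.25')
    assert not sdk_utils.supports_microversion(mock.Mock(), '2.77')
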
@@ -153,7 +168,7 @@ class TestServer(compute_fakes.TestComputev2):
return servers
def setup_sdk_volumes_mock(self, count):
- volumes = volume_fakes.FakeVolume.create_sdk_volumes(count=count)
+ volumes = volume_fakes.create_sdk_volumes(count=count)
# This is the return value for volume_client.find_volume()
self.sdk_volume_client.find_volume.side_effect = volumes
@@ -161,6 +176,10 @@ class TestServer(compute_fakes.TestComputev2):
return volumes
def run_method_with_servers(self, method_name, server_count):
+ # Starting with v2.91, the nova API needs to be called with a sentinel
+ # as availability_zone=None will unpin the server az.
+ _sentinel = object()
+
servers = self.setup_servers_mock(server_count)
arglist = []
@@ -183,7 +202,11 @@ class TestServer(compute_fakes.TestComputev2):
method.assert_called_with(reason=None)
elif method_name == 'unshelve':
version = self.app.client_manager.compute.api_version
- if version >= api_versions.APIVersion('2.77'):
+ if version >= api_versions.APIVersion('2.91'):
+ method.assert_called_with(availability_zone=_sentinel,
+ host=None)
+ elif (version >= api_versions.APIVersion('2.77') and
+ version < api_versions.APIVersion('2.91')):
method.assert_called_with(availability_zone=None)
else:
method.assert_called_with()
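
Illustrative only (not the openstackclient implementation): a hedged sketch of why a sentinel rather than None marks "availability zone not supplied" in the 2.91+ assertions above, since an explicit None is itself meaningful and unpins the server from its AZ:

_sentinel = object()  # distinct from None, which at 2.91+ means "unpin the AZ"


def unshelve_kwargs(az=_sentinel, host=None):
    # Hypothetical helper showing the >= 2.91 call shape: omitting the
    # argument forwards the sentinel untouched, while an explicit None is a
    # real request to unpin the availability zone.
    return {'availability_zone': az, 'host': host}


assert unshelve_kwargs()['availability_zone'] is _sentinel
assert unshelve_kwargs(az=None)['availability_zone'] is None
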
@@ -400,8 +423,7 @@ class TestServerAddFixedIP(TestServer):
self.assertEqual(expected_data, tuple(data))
self.sdk_client.create_server_interface.assert_called_once_with(
servers[0].id,
- net_id=network['id'],
- fixed_ip=None
+ net_id=network['id']
)
@mock.patch.object(sdk_utils, 'supports_microversion')
@@ -456,7 +478,7 @@ class TestServerAddFixedIP(TestServer):
self.sdk_client.create_server_interface.assert_called_once_with(
servers[0].id,
net_id=network['id'],
- fixed_ip='5.6.7.8'
+ fixed_ips=[{'ip_address': '5.6.7.8'}]
)
@mock.patch.object(sdk_utils, 'supports_microversion')
@@ -513,7 +535,7 @@ class TestServerAddFixedIP(TestServer):
self.sdk_client.create_server_interface.assert_called_once_with(
servers[0].id,
net_id=network['id'],
- fixed_ip='5.6.7.8',
+ fixed_ips=[{'ip_address': '5.6.7.8'}],
tag='tag1',
)
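
The updated assertion reflects the openstacksdk call shape, which takes fixed_ips as a list of dicts rather than novaclient's single fixed_ip string. A small runnable sketch of that shape, using a Mock in place of a real compute proxy (all identifiers are placeholders):

from unittest import mock

compute = mock.Mock()  # stand-in for an openstacksdk compute proxy
compute.create_server_interface(
    'server-id',
    net_id='net-id',
    fixed_ips=[{'ip_address': '5.6.7.8'}],  # list of dicts, not fixed_ip='...'
    tag='tag1',
)
compute.create_server_interface.assert_called_once_with(
    'server-id',
    net_id='net-id',
    fixed_ips=[{'ip_address': '5.6.7.8'}],
    tag='tag1',
)
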
@@ -592,7 +614,7 @@ class TestServerAddFloatingIPNetwork(
def test_server_add_floating_ip(self):
_server = compute_fakes.FakeServer.create_one_server()
self.servers_mock.get.return_value = _server
- _port = network_fakes.FakePort.create_one_port()
+ _port = network_fakes.create_one_port()
_floating_ip = network_fakes.FakeFloatingIP.create_one_floating_ip()
self.network.find_ip = mock.Mock(return_value=_floating_ip)
self.network.ports = mock.Mock(return_value=[_port])
@@ -661,7 +683,7 @@ class TestServerAddFloatingIPNetwork(
def test_server_add_floating_ip_no_external_gateway(self, success=False):
_server = compute_fakes.FakeServer.create_one_server()
self.servers_mock.get.return_value = _server
- _port = network_fakes.FakePort.create_one_port()
+ _port = network_fakes.create_one_port()
_floating_ip = network_fakes.FakeFloatingIP.create_one_floating_ip()
self.network.find_ip = mock.Mock(return_value=_floating_ip)
return_value = [_port]
@@ -717,7 +739,7 @@ class TestServerAddFloatingIPNetwork(
def test_server_add_floating_ip_with_fixed_ip(self):
_server = compute_fakes.FakeServer.create_one_server()
self.servers_mock.get.return_value = _server
- _port = network_fakes.FakePort.create_one_port()
+ _port = network_fakes.create_one_port()
_floating_ip = network_fakes.FakeFloatingIP.create_one_floating_ip()
self.network.find_ip = mock.Mock(return_value=_floating_ip)
self.network.ports = mock.Mock(return_value=[_port])
@@ -759,7 +781,7 @@ class TestServerAddFloatingIPNetwork(
def test_server_add_floating_ip_with_fixed_ip_no_port_found(self):
_server = compute_fakes.FakeServer.create_one_server()
self.servers_mock.get.return_value = _server
- _port = network_fakes.FakePort.create_one_port()
+ _port = network_fakes.create_one_port()
_floating_ip = network_fakes.FakeFloatingIP.create_one_floating_ip()
self.network.find_ip = mock.Mock(return_value=_floating_ip)
self.network.ports = mock.Mock(return_value=[_port])
@@ -824,7 +846,7 @@ class TestServerAddPort(TestServer):
result = self.cmd.take_action(parsed_args)
self.sdk_client.create_server_interface.assert_called_once_with(
- servers[0], port_id=port_id, fixed_ip=None)
+ servers[0], port_id=port_id)
self.assertIsNone(result)
def test_server_add_port(self):
@@ -862,7 +884,6 @@ class TestServerAddPort(TestServer):
self.sdk_client.create_server_interface.assert_called_once_with(
servers[0],
port_id='fake-port',
- fixed_ip=None,
tag='tag1')
@mock.patch.object(sdk_utils, 'supports_microversion', return_value=False)
@@ -910,8 +931,7 @@ class TestServerVolume(TestServer):
'volume_id': self.volumes[0].id,
}
self.volume_attachment = \
- compute_fakes.FakeVolumeAttachment.\
- create_one_sdk_volume_attachment(attrs=attrs)
+ compute_fakes.create_one_volume_attachment(attrs=attrs)
self.sdk_client.create_volume_attachment.return_value = \
self.volume_attachment
@@ -1265,7 +1285,7 @@ class TestServerAddNetwork(TestServer):
result = self.cmd.take_action(parsed_args)
self.sdk_client.create_server_interface.assert_called_once_with(
- servers[0], net_id=net_id, fixed_ip=None)
+ servers[0], net_id=net_id)
self.assertIsNone(result)
def test_server_add_network(self):
@@ -1304,7 +1324,6 @@ class TestServerAddNetwork(TestServer):
self.sdk_client.create_server_interface.assert_called_once_with(
servers[0],
net_id='fake-network',
- fixed_ip=None,
tag='tag1'
)
@@ -1436,10 +1455,11 @@ class TestServerCreate(TestServer):
self.flavor = compute_fakes.FakeFlavor.create_one_flavor()
self.flavors_mock.get.return_value = self.flavor
- self.volume = volume_fakes.FakeVolume.create_one_volume()
+ self.volume = volume_fakes.create_one_volume()
+ self.volume_alt = volume_fakes.create_one_volume()
self.volumes_mock.get.return_value = self.volume
- self.snapshot = volume_fakes.FakeSnapshot.create_one_snapshot()
+ self.snapshot = volume_fakes.create_one_snapshot()
self.snapshots_mock.get.return_value = self.snapshot
# Get the command object to test
@@ -1667,6 +1687,7 @@ class TestServerCreate(TestServer):
'--nic', 'net-id=net1,v4-fixed-ip=10.0.0.2',
'--port', 'port1',
'--network', 'net1',
+ '--network', 'auto', # this is a network called 'auto'
'--nic', 'port-id=port2',
self.new_server.name,
]
@@ -1675,24 +1696,40 @@ class TestServerCreate(TestServer):
('flavor', 'flavor1'),
('nics', [
{
- 'net-id': 'net1', 'port-id': '',
- 'v4-fixed-ip': '', 'v6-fixed-ip': '',
+ 'net-id': 'net1',
+ 'port-id': '',
+ 'v4-fixed-ip': '',
+ 'v6-fixed-ip': '',
},
{
- 'net-id': 'net1', 'port-id': '',
- 'v4-fixed-ip': '10.0.0.2', 'v6-fixed-ip': '',
+ 'net-id': 'net1',
+ 'port-id': '',
+ 'v4-fixed-ip': '10.0.0.2',
+ 'v6-fixed-ip': '',
},
{
- 'net-id': '', 'port-id': 'port1',
- 'v4-fixed-ip': '', 'v6-fixed-ip': '',
+ 'net-id': '',
+ 'port-id': 'port1',
+ 'v4-fixed-ip': '',
+ 'v6-fixed-ip': '',
},
{
- 'net-id': 'net1', 'port-id': '',
- 'v4-fixed-ip': '', 'v6-fixed-ip': '',
+ 'net-id': 'net1',
+ 'port-id': '',
+ 'v4-fixed-ip': '',
+ 'v6-fixed-ip': '',
},
{
- 'net-id': '', 'port-id': 'port2',
- 'v4-fixed-ip': '', 'v6-fixed-ip': '',
+ 'net-id': 'auto',
+ 'port-id': '',
+ 'v4-fixed-ip': '',
+ 'v6-fixed-ip': '',
+ },
+ {
+ 'net-id': '',
+ 'port-id': 'port2',
+ 'v4-fixed-ip': '',
+ 'v6-fixed-ip': '',
},
]),
('config_drive', False),
@@ -1721,12 +1758,16 @@ class TestServerCreate(TestServer):
"port2": port2_resource}[port_id])
# Mock sdk APIs.
- _network = mock.Mock(id='net1_uuid')
+ _network_1 = mock.Mock(id='net1_uuid')
+ _network_auto = mock.Mock(id='auto_uuid')
_port1 = mock.Mock(id='port1_uuid')
_port2 = mock.Mock(id='port2_uuid')
find_network = mock.Mock()
find_port = mock.Mock()
- find_network.return_value = _network
+ find_network.side_effect = lambda net_id, ignore_missing: {
+ "net1": _network_1,
+ "auto": _network_auto,
+ }[net_id]
find_port.side_effect = (lambda port_id, ignore_missing:
{"port1": _port1,
"port2": _port2}[port_id])
@@ -1767,6 +1808,10 @@ class TestServerCreate(TestServer):
'v4-fixed-ip': '',
'v6-fixed-ip': '',
'port-id': ''},
+ {'net-id': 'auto_uuid',
+ 'v4-fixed-ip': '',
+ 'v6-fixed-ip': '',
+ 'port-id': ''},
{'net-id': '',
'v4-fixed-ip': '',
'v6-fixed-ip': '',
@@ -1892,13 +1937,11 @@ class TestServerCreate(TestServer):
self.assertRaises(
exceptions.CommandError, self.cmd.take_action, parsed_args)
- def test_server_create_with_auto_network(self):
- arglist = [
- '--image', 'image1',
- '--flavor', 'flavor1',
- '--nic', 'auto',
- self.new_server.name,
- ]
+ def _test_server_create_with_auto_network(self, arglist):
+ # requires API microversion 2.37 or later
+ self.app.client_manager.compute.api_version = api_versions.APIVersion(
+ '2.37')
+
verifylist = [
('image', 'image1'),
('flavor', 'flavor1'),
@@ -1938,8 +1981,66 @@ class TestServerCreate(TestServer):
self.assertEqual(self.columns, columns)
self.assertEqual(self.datalist(), data)
+ # NOTE(stephenfin): '--auto-network' is an alias for '--nic auto' so the
+ # tests are nearly identical
+
+ def test_server_create_with_auto_network_legacy(self):
+ arglist = [
+ '--image', 'image1',
+ '--flavor', 'flavor1',
+ '--nic', 'auto',
+ self.new_server.name,
+ ]
+ self._test_server_create_with_auto_network(arglist)
+
+ def test_server_create_with_auto_network(self):
+ arglist = [
+ '--image', 'image1',
+ '--flavor', 'flavor1',
+ '--auto-network',
+ self.new_server.name,
+ ]
+ self._test_server_create_with_auto_network(arglist)
+
+ def test_server_create_with_auto_network_pre_v237(self):
+ # use an API microversion that's too old
+ self.app.client_manager.compute.api_version = api_versions.APIVersion(
+ '2.36')
+
+ arglist = [
+ '--image', 'image1',
+ '--flavor', 'flavor1',
+ '--nic', 'auto',
+ self.new_server.name,
+ ]
+ verifylist = [
+ ('image', 'image1'),
+ ('flavor', 'flavor1'),
+ ('nics', ['auto']),
+ ('config_drive', False),
+ ('server_name', self.new_server.name),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ exc = self.assertRaises(
+ exceptions.CommandError,
+ self.cmd.take_action,
+ parsed_args,
+ )
+ self.assertIn(
+ '--os-compute-api-version 2.37 or greater is required to support '
+ 'explicit auto-allocation of a network or to disable network '
+ 'allocation',
+ str(exc),
+ )
+ self.assertNotCalled(self.servers_mock.create)
+
def test_server_create_with_auto_network_default_v2_37(self):
"""Tests creating a server without specifying --nic using 2.37."""
+ # requires API microversion 2.37 or later
+ self.app.client_manager.compute.api_version = api_versions.APIVersion(
+ '2.37')
+
arglist = [
'--image', 'image1',
'--flavor', 'flavor1',
@@ -1953,12 +2054,7 @@ class TestServerCreate(TestServer):
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
- # Since check_parser doesn't handle compute global options like
- # --os-compute-api-version, we have to mock the construction of
- # the novaclient client object with our own APIVersion.
- with mock.patch.object(self.app.client_manager.compute, 'api_version',
- api_versions.APIVersion('2.37')):
- columns, data = self.cmd.take_action(parsed_args)
+ columns, data = self.cmd.take_action(parsed_args)
# Set expected values
kwargs = dict(
@@ -1988,13 +2084,11 @@ class TestServerCreate(TestServer):
self.assertEqual(self.columns, columns)
self.assertEqual(self.datalist(), data)
- def test_server_create_with_none_network(self):
- arglist = [
- '--image', 'image1',
- '--flavor', 'flavor1',
- '--nic', 'none',
- self.new_server.name,
- ]
+ def _test_server_create_with_none_network(self, arglist):
+ # requires API microversion 2.37 or later
+ self.app.client_manager.compute.api_version = api_versions.APIVersion(
+ '2.37')
+
verifylist = [
('image', 'image1'),
('flavor', 'flavor1'),
@@ -2034,6 +2128,61 @@ class TestServerCreate(TestServer):
self.assertEqual(self.columns, columns)
self.assertEqual(self.datalist(), data)
+ # NOTE(stephenfin): '--no-network' is an alias for '--nic none' so the
+ # tests are nearly identical
+
+ def test_server_create_with_none_network_legacy(self):
+ arglist = [
+ '--image', 'image1',
+ '--flavor', 'flavor1',
+ '--nic', 'none',
+ self.new_server.name,
+ ]
+ self._test_server_create_with_none_network(arglist)
+
+ def test_server_create_with_none_network(self):
+ arglist = [
+ '--image', 'image1',
+ '--flavor', 'flavor1',
+ '--no-network',
+ self.new_server.name,
+ ]
+ self._test_server_create_with_none_network(arglist)
+
+ def test_server_create_with_none_network_pre_v237(self):
+ # use an API microversion that's too old
+ self.app.client_manager.compute.api_version = api_versions.APIVersion(
+ '2.36')
+
+ arglist = [
+ '--image', 'image1',
+ '--flavor', 'flavor1',
+ '--nic', 'none',
+ self.new_server.name,
+ ]
+
+ verifylist = [
+ ('image', 'image1'),
+ ('flavor', 'flavor1'),
+ ('nics', ['none']),
+ ('config_drive', False),
+ ('server_name', self.new_server.name),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ exc = self.assertRaises(
+ exceptions.CommandError,
+ self.cmd.take_action,
+ parsed_args,
+ )
+ self.assertIn(
+ '--os-compute-api-version 2.37 or greater is required to support '
+ 'explicit auto-allocation of a network or to disable network '
+ 'allocation',
+ str(exc),
+ )
+ self.assertNotCalled(self.servers_mock.create)
+
def test_server_create_with_conflict_network_options(self):
arglist = [
'--image', 'image1',
@@ -2317,7 +2466,7 @@ class TestServerCreate(TestServer):
'admin_pass': None,
'block_device_mapping_v2': [{
'uuid': self.volume.id,
- 'boot_index': '0',
+ 'boot_index': 0,
'source_type': 'volume',
'destination_type': 'volume',
}],
@@ -2368,7 +2517,7 @@ class TestServerCreate(TestServer):
'admin_pass': None,
'block_device_mapping_v2': [{
'uuid': self.snapshot.id,
- 'boot_index': '0',
+ 'boot_index': 0,
'source_type': 'snapshot',
'destination_type': 'volume',
'delete_on_termination': False,
@@ -2391,20 +2540,20 @@ class TestServerCreate(TestServer):
self.assertEqual(self.datalist(), data)
def test_server_create_with_block_device(self):
- block_device = f'uuid={self.volume.id},source_type=volume'
+ block_device = f'uuid={self.volume.id},source_type=volume,boot_index=0'
arglist = [
- '--image', 'image1',
'--flavor', self.flavor.id,
'--block-device', block_device,
self.new_server.name,
]
verifylist = [
- ('image', 'image1'),
+ ('image', None),
('flavor', self.flavor.id),
('block_devices', [
{
'uuid': self.volume.id,
'source_type': 'volume',
+ 'boot_index': '0',
},
]),
('server_name', self.new_server.name),
@@ -2426,11 +2575,14 @@ class TestServerCreate(TestServer):
'key_name': None,
'availability_zone': None,
'admin_pass': None,
- 'block_device_mapping_v2': [{
- 'uuid': self.volume.id,
- 'source_type': 'volume',
- 'destination_type': 'volume',
- }],
+ 'block_device_mapping_v2': [
+ {
+ 'uuid': self.volume.id,
+ 'source_type': 'volume',
+ 'destination_type': 'volume',
+ 'boot_index': 0,
+ },
+ ],
'nics': [],
'scheduler_hints': {},
'config_drive': None,
@@ -2438,7 +2590,7 @@ class TestServerCreate(TestServer):
# ServerManager.create(name, image, flavor, **kwargs)
self.servers_mock.create.assert_called_with(
self.new_server.name,
- self.image,
+ None,
self.flavor,
**kwargs
)
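
As a rough, hypothetical illustration of the key=value syntax exercised by the --block-device tests above (the real parser lives in openstackclient and handles more keys and validation than this stand-in):

def parse_block_device(spec):
    # Hypothetical stand-in for the --block-device parser: split the
    # comma-separated key=value pairs into a dict of strings.
    return dict(item.split('=', 1) for item in spec.split(','))


bd = parse_block_device('uuid=vol-1,source_type=volume,boot_index=0')
assert bd == {'uuid': 'vol-1', 'source_type': 'volume', 'boot_index': '0'}
# The command later coerces boot_index to an int and fills in
# destination_type='volume' when building block_device_mapping_v2, which is
# what the expected kwargs above assert.
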
@@ -2457,11 +2609,13 @@ class TestServerCreate(TestServer):
f'volume_type=foo,boot_index=1,delete_on_termination=true,'
f'tag=foo'
)
+ block_device_alt = f'uuid={self.volume_alt.id},source_type=volume'
arglist = [
'--image', 'image1',
'--flavor', self.flavor.id,
'--block-device', block_device,
+ '--block-device', block_device_alt,
self.new_server.name,
]
verifylist = [
@@ -2482,6 +2636,10 @@ class TestServerCreate(TestServer):
'delete_on_termination': 'true',
'tag': 'foo',
},
+ {
+ 'uuid': self.volume_alt.id,
+ 'source_type': 'volume',
+ },
]),
('server_name', self.new_server.name),
]
@@ -2502,20 +2660,27 @@ class TestServerCreate(TestServer):
'key_name': None,
'availability_zone': None,
'admin_pass': None,
- 'block_device_mapping_v2': [{
- 'uuid': self.volume.id,
- 'source_type': 'volume',
- 'destination_type': 'volume',
- 'disk_bus': 'ide',
- 'device_name': 'sdb',
- 'volume_size': '64',
- 'guest_format': 'ext4',
- 'boot_index': 1,
- 'device_type': 'disk',
- 'delete_on_termination': True,
- 'tag': 'foo',
- 'volume_type': 'foo',
- }],
+ 'block_device_mapping_v2': [
+ {
+ 'uuid': self.volume.id,
+ 'source_type': 'volume',
+ 'destination_type': 'volume',
+ 'disk_bus': 'ide',
+ 'device_name': 'sdb',
+ 'volume_size': '64',
+ 'guest_format': 'ext4',
+ 'boot_index': 1,
+ 'device_type': 'disk',
+ 'delete_on_termination': True,
+ 'tag': 'foo',
+ 'volume_type': 'foo',
+ },
+ {
+ 'uuid': self.volume_alt.id,
+ 'source_type': 'volume',
+ 'destination_type': 'volume',
+ },
+ ],
'nics': 'auto',
'scheduler_hints': {},
'config_drive': None,
@@ -3164,13 +3329,11 @@ class TestServerCreate(TestServer):
arglist = [
'--image-property', 'hypervisor_type=qemu',
'--flavor', 'flavor1',
- '--nic', 'none',
self.new_server.name,
]
verifylist = [
('image_properties', {'hypervisor_type': 'qemu'}),
('flavor', 'flavor1'),
- ('nics', ['none']),
('config_drive', False),
('server_name', self.new_server.name),
]
@@ -3198,7 +3361,7 @@ class TestServerCreate(TestServer):
availability_zone=None,
admin_pass=None,
block_device_mapping_v2=[],
- nics='none',
+ nics=[],
meta=None,
scheduler_hints={},
config_drive=None,
@@ -3219,14 +3382,12 @@ class TestServerCreate(TestServer):
'--image-property', 'hypervisor_type=qemu',
'--image-property', 'hw_disk_bus=ide',
'--flavor', 'flavor1',
- '--nic', 'none',
self.new_server.name,
]
verifylist = [
('image_properties', {'hypervisor_type': 'qemu',
'hw_disk_bus': 'ide'}),
('flavor', 'flavor1'),
- ('nics', ['none']),
('config_drive', False),
('server_name', self.new_server.name),
]
@@ -3254,7 +3415,7 @@ class TestServerCreate(TestServer):
availability_zone=None,
admin_pass=None,
block_device_mapping_v2=[],
- nics='none',
+ nics=[],
meta=None,
scheduler_hints={},
config_drive=None,
@@ -3275,14 +3436,12 @@ class TestServerCreate(TestServer):
'--image-property', 'hypervisor_type=qemu',
'--image-property', 'hw_disk_bus=virtio',
'--flavor', 'flavor1',
- '--nic', 'none',
self.new_server.name,
]
verifylist = [
('image_properties', {'hypervisor_type': 'qemu',
'hw_disk_bus': 'virtio'}),
('flavor', 'flavor1'),
- ('nics', ['none']),
('config_drive', False),
('server_name', self.new_server.name),
]
@@ -3306,7 +3465,6 @@ class TestServerCreate(TestServer):
'--image-property',
'owner_specified.openstack.object=image/cirros',
'--flavor', 'flavor1',
- '--nic', 'none',
self.new_server.name,
]
@@ -3314,7 +3472,6 @@ class TestServerCreate(TestServer):
('image_properties',
{'owner_specified.openstack.object': 'image/cirros'}),
('flavor', 'flavor1'),
- ('nics', ['none']),
('server_name', self.new_server.name),
]
# create a image_info as the side_effect of the fake image_list()
@@ -3344,7 +3501,7 @@ class TestServerCreate(TestServer):
availability_zone=None,
admin_pass=None,
block_device_mapping_v2=[],
- nics='none',
+ nics=[],
meta=None,
scheduler_hints={},
config_drive=None,
@@ -3361,6 +3518,37 @@ class TestServerCreate(TestServer):
self.assertEqual(self.columns, columns)
self.assertEqual(self.datalist(), data)
+ def test_server_create_no_boot_device(self):
+ block_device = f'uuid={self.volume.id},source_type=volume,boot_index=1'
+ arglist = [
+ '--block-device', block_device,
+ '--flavor', self.flavor.id,
+ self.new_server.name,
+ ]
+ verifylist = [
+ ('image', None),
+ ('flavor', self.flavor.id),
+ ('block_devices', [
+ {
+ 'uuid': self.volume.id,
+ 'source_type': 'volume',
+ 'boot_index': '1',
+ },
+ ]),
+ ('server_name', self.new_server.name),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+ exc = self.assertRaises(
+ exceptions.CommandError,
+ self.cmd.take_action,
+ parsed_args,
+ )
+ self.assertIn(
+ 'An image (--image, --image-property) or bootable volume '
+ '(--volume, --snapshot, --block-device) is required',
+ str(exc),
+ )
+
def test_server_create_with_swap(self):
arglist = [
'--image', 'image1',
@@ -4272,21 +4460,36 @@ class TestServerDelete(TestServer):
class TestServerDumpCreate(TestServer):
def setUp(self):
- super(TestServerDumpCreate, self).setUp()
+ super().setUp()
# Get the command object to test
self.cmd = server.CreateServerDump(self.app, None)
- # Set methods to be tested.
- self.methods = {
- 'trigger_crash_dump': None,
- }
+ def run_test_server_dump(self, server_count):
+ servers = self.setup_sdk_servers_mock(server_count)
+
+ arglist = []
+ verifylist = []
+
+ for s in servers:
+ arglist.append(s.id)
+
+ verifylist = [
+ ('server', arglist),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ result = self.cmd.take_action(parsed_args)
+
+ self.assertIsNone(result)
+ for s in servers:
+ s.trigger_crash_dump.assert_called_once_with(self.sdk_client)
def test_server_dump_one_server(self):
- self.run_method_with_servers('trigger_crash_dump', 1)
+ self.run_test_server_dump(1)
def test_server_dump_multi_servers(self):
- self.run_method_with_servers('trigger_crash_dump', 3)
+ self.run_test_server_dump(3)
class _TestServerList(TestServer):
@@ -4319,32 +4522,25 @@ class _TestServerList(TestServer):
def setUp(self):
super(_TestServerList, self).setUp()
- self.search_opts = {
+ # Default params of the core function of the command when no
+ # command-line options are specified.
+ self.kwargs = {
'reservation_id': None,
'ip': None,
'ip6': None,
'name': None,
- 'instance_name': None,
'status': None,
'flavor': None,
'image': None,
'host': None,
- 'tenant_id': None,
- 'all_tenants': False,
+ 'project_id': None,
+ 'all_projects': False,
'user_id': None,
'deleted': False,
'changes-since': None,
'changes-before': None,
}
- # Default params of the core function of the command in the case of no
- # commandline option specified.
- self.kwargs = {
- 'search_opts': self.search_opts,
- 'marker': None,
- 'limit': None,
- }
-
 # The fake servers' attributes. Use the original attribute names from
 # nova, not the ones printed by the "server list" command.
self.attrs = {
@@ -4359,10 +4555,6 @@ class _TestServerList(TestServer):
'Metadata': format_columns.DictColumn({}),
}
- # The servers to be listed.
- self.servers = self.setup_servers_mock(3)
- self.servers_mock.list.return_value = self.servers
-
self.image = image_fakes.create_one_image()
# self.images_mock.return_value = [self.image]
@@ -4370,7 +4562,12 @@ class _TestServerList(TestServer):
self.get_image_mock.return_value = self.image
self.flavor = compute_fakes.FakeFlavor.create_one_flavor()
- self.flavors_mock.get.return_value = self.flavor
+ self.sdk_client.find_flavor.return_value = self.flavor
+ self.attrs['flavor'] = {'original_name': self.flavor.name}
+
+ # The servers to be listed.
+ self.servers = self.setup_sdk_servers_mock(3)
+ self.sdk_client.servers.return_value = self.servers
# Get the command object to test
self.cmd = server.ListServer(self.app, None)
@@ -4389,7 +4586,7 @@ class TestServerList(_TestServerList):
]
Flavor = collections.namedtuple('Flavor', 'id name')
- self.flavors_mock.list.return_value = [
+ self.sdk_client.flavors.return_value = [
Flavor(id=s.flavor['id'], name=self.flavor.name)
for s in self.servers
]
@@ -4399,7 +4596,7 @@ class TestServerList(_TestServerList):
s.id,
s.name,
s.status,
- format_columns.DictListColumn(s.networks),
+ server.AddressesColumn(s.addresses),
# Image will be an empty string if boot-from-volume
self.image.name if s.image else server.IMAGE_STRING_FOR_BFV,
self.flavor.name,
@@ -4418,9 +4615,9 @@ class TestServerList(_TestServerList):
columns, data = self.cmd.take_action(parsed_args)
- self.servers_mock.list.assert_called_with(**self.kwargs)
+ self.sdk_client.servers.assert_called_with(**self.kwargs)
self.images_mock.assert_called()
- self.flavors_mock.list.assert_called()
+ self.sdk_client.flavors.assert_called()
# we did not pass image or flavor, so gets on those must be absent
self.assertFalse(self.flavors_mock.get.call_count)
self.assertFalse(self.get_image_mock.call_count)
@@ -4435,14 +4632,14 @@ class TestServerList(_TestServerList):
('deleted', False),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
- self.servers_mock.list.return_value = []
+ self.sdk_client.servers.return_value = []
self.data = ()
columns, data = self.cmd.take_action(parsed_args)
- self.servers_mock.list.assert_called_with(**self.kwargs)
- self.assertEqual(0, self.images_mock.list.call_count)
- self.assertEqual(0, self.flavors_mock.list.call_count)
+ self.sdk_client.servers.assert_called_with(**self.kwargs)
+ self.images_mock.assert_not_called()
+ self.sdk_client.flavors.assert_not_called()
self.assertEqual(self.columns, columns)
self.assertEqual(self.data, tuple(data))
@@ -4452,20 +4649,21 @@ class TestServerList(_TestServerList):
s.id,
s.name,
s.status,
- getattr(s, 'OS-EXT-STS:task_state'),
+ getattr(s, 'task_state'),
server.PowerStateColumn(
- getattr(s, 'OS-EXT-STS:power_state')
+ getattr(s, 'power_state')
),
- format_columns.DictListColumn(s.networks),
+ server.AddressesColumn(s.addresses),
# Image will be an empty string if boot-from-volume
self.image.name if s.image else server.IMAGE_STRING_FOR_BFV,
s.image['id'] if s.image else server.IMAGE_STRING_FOR_BFV,
self.flavor.name,
s.flavor['id'],
- getattr(s, 'OS-EXT-AZ:availability_zone'),
- getattr(s, 'OS-EXT-SRV-ATTR:host'),
- s.Metadata,
- ) for s in self.servers)
+ getattr(s, 'availability_zone'),
+ server.HostColumn(getattr(s, 'hypervisor_hostname')),
+ format_columns.DictColumn(s.metadata),
+ ) for s in self.servers
+ )
arglist = [
'--long',
]
@@ -4476,7 +4674,12 @@ class TestServerList(_TestServerList):
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
- self.servers_mock.list.assert_called_with(**self.kwargs)
+ self.sdk_client.servers.assert_called_with(**self.kwargs)
+ image_ids = {s.image['id'] for s in self.servers if s.image}
+ self.images_mock.assert_called_once_with(
+ id=f'in:{",".join(image_ids)}',
+ )
+ self.sdk_client.flavors.assert_called_once_with(is_public=None)
self.assertEqual(self.columns_long, columns)
self.assertEqual(self.data, tuple(data))
@@ -4486,6 +4689,13 @@ class TestServerList(_TestServerList):
'-c', 'User ID',
'-c', 'Created At',
'-c', 'Security Groups',
+ '-c', 'Task State',
+ '-c', 'Power State',
+ '-c', 'Image ID',
+ '-c', 'Flavor ID',
+ '-c', 'Availability Zone',
+ '-c', 'Host',
+ '-c', 'Properties',
'--long'
]
verifylist = [
@@ -4495,11 +4705,19 @@ class TestServerList(_TestServerList):
columns, data = self.cmd.take_action(parsed_args)
- self.servers_mock.list.assert_called_with(**self.kwargs)
+ self.sdk_client.servers.assert_called_with(**self.kwargs)
self.assertIn('Project ID', columns)
self.assertIn('User ID', columns)
self.assertIn('Created At', columns)
self.assertIn('Security Groups', columns)
+ self.assertIn('Task State', columns)
+ self.assertIn('Power State', columns)
+ self.assertIn('Image ID', columns)
+ self.assertIn('Flavor ID', columns)
+ self.assertIn('Availability Zone', columns)
+ self.assertIn('Host', columns)
+ self.assertIn('Properties', columns)
+ self.assertCountEqual(columns, set(columns))
def test_server_list_no_name_lookup_option(self):
self.data = tuple(
@@ -4507,7 +4725,7 @@ class TestServerList(_TestServerList):
s.id,
s.name,
s.status,
- format_columns.DictListColumn(s.networks),
+ server.AddressesColumn(s.addresses),
# Image will be an empty string if boot-from-volume
s.image['id'] if s.image else server.IMAGE_STRING_FOR_BFV,
s.flavor['id']
@@ -4525,7 +4743,9 @@ class TestServerList(_TestServerList):
columns, data = self.cmd.take_action(parsed_args)
- self.servers_mock.list.assert_called_with(**self.kwargs)
+ self.sdk_client.servers.assert_called_with(**self.kwargs)
+ self.images_mock.assert_not_called()
+ self.sdk_client.flavors.assert_not_called()
self.assertEqual(self.columns, columns)
self.assertEqual(self.data, tuple(data))
@@ -4535,7 +4755,7 @@ class TestServerList(_TestServerList):
s.id,
s.name,
s.status,
- format_columns.DictListColumn(s.networks),
+ server.AddressesColumn(s.addresses),
# Image will be an empty string if boot-from-volume
s.image['id'] if s.image else server.IMAGE_STRING_FOR_BFV,
s.flavor['id']
@@ -4553,7 +4773,9 @@ class TestServerList(_TestServerList):
columns, data = self.cmd.take_action(parsed_args)
- self.servers_mock.list.assert_called_with(**self.kwargs)
+ self.sdk_client.servers.assert_called_with(**self.kwargs)
+ self.images_mock.assert_not_called()
+ self.sdk_client.flavors.assert_not_called()
self.assertEqual(self.columns, columns)
self.assertEqual(self.data, tuple(data))
@@ -4570,11 +4792,11 @@ class TestServerList(_TestServerList):
columns, data = self.cmd.take_action(parsed_args)
- self.servers_mock.list.assert_called_with(**self.kwargs)
- self.assertFalse(self.images_mock.list.call_count)
- self.assertFalse(self.flavors_mock.list.call_count)
+ self.sdk_client.servers.assert_called_with(**self.kwargs)
+ self.images_mock.assert_not_called()
+ self.sdk_client.flavors.assert_not_called()
self.get_image_mock.assert_called()
- self.flavors_mock.get.assert_called()
+ self.sdk_client.find_flavor.assert_called()
self.assertEqual(self.columns, columns)
self.assertEqual(self.data, tuple(data))
@@ -4594,8 +4816,10 @@ class TestServerList(_TestServerList):
self.find_image_mock.assert_called_with(self.image.id,
ignore_missing=False)
- self.search_opts['image'] = self.image.id
- self.servers_mock.list.assert_called_with(**self.kwargs)
+ self.kwargs['image'] = self.image.id
+ self.sdk_client.servers.assert_called_with(**self.kwargs)
+ self.images_mock.assert_not_called()
+ self.sdk_client.flavors.assert_called_once()
self.assertEqual(self.columns, columns)
self.assertEqual(self.data, tuple(data))
@@ -4612,10 +4836,13 @@ class TestServerList(_TestServerList):
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
- self.flavors_mock.get.has_calls(self.flavor.id)
+ self.sdk_client.find_flavor.assert_has_calls(
+ [mock.call(self.flavor.id)])
- self.search_opts['flavor'] = self.flavor.id
- self.servers_mock.list.assert_called_with(**self.kwargs)
+ self.kwargs['flavor'] = self.flavor.id
+ self.sdk_client.servers.assert_called_with(**self.kwargs)
+ self.images_mock.assert_called_once()
+ self.sdk_client.flavors.assert_not_called()
self.assertEqual(self.columns, columns)
self.assertEqual(self.data, tuple(data))
@@ -4634,9 +4861,9 @@ class TestServerList(_TestServerList):
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
- self.search_opts['changes-since'] = '2016-03-04T06:27:59Z'
- self.search_opts['deleted'] = True
- self.servers_mock.list.assert_called_with(**self.kwargs)
+ self.kwargs['changes-since'] = '2016-03-04T06:27:59Z'
+ self.kwargs['deleted'] = True
+ self.sdk_client.servers.assert_called_with(**self.kwargs)
self.assertEqual(self.columns, columns)
self.assertEqual(self.data, tuple(data))
@@ -4663,8 +4890,7 @@ class TestServerList(_TestServerList):
)
def test_server_list_with_tag(self):
- self.app.client_manager.compute.api_version = api_versions.APIVersion(
- '2.26')
+ self._set_mock_microversion('2.26')
arglist = [
'--tag', 'tag1',
@@ -4677,16 +4903,15 @@ class TestServerList(_TestServerList):
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
- self.search_opts['tags'] = 'tag1,tag2'
+ self.kwargs['tags'] = 'tag1,tag2'
- self.servers_mock.list.assert_called_with(**self.kwargs)
+ self.sdk_client.servers.assert_called_with(**self.kwargs)
self.assertEqual(self.columns, columns)
self.assertEqual(self.data, tuple(data))
def test_server_list_with_tag_pre_v225(self):
- self.app.client_manager.compute.api_version = api_versions.APIVersion(
- '2.25')
+ self._set_mock_microversion('2.25')
arglist = [
'--tag', 'tag1',
@@ -4706,9 +4931,7 @@ class TestServerList(_TestServerList):
str(ex))
def test_server_list_with_not_tag(self):
- self.app.client_manager.compute.api_version = api_versions.APIVersion(
- '2.26')
-
+ self._set_mock_microversion('2.26')
arglist = [
'--not-tag', 'tag1',
'--not-tag', 'tag2',
@@ -4720,16 +4943,15 @@ class TestServerList(_TestServerList):
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
- self.search_opts['not-tags'] = 'tag1,tag2'
+ self.kwargs['not-tags'] = 'tag1,tag2'
- self.servers_mock.list.assert_called_with(**self.kwargs)
+ self.sdk_client.servers.assert_called_with(**self.kwargs)
self.assertEqual(self.columns, columns)
self.assertEqual(self.data, tuple(data))
def test_server_list_with_not_tag_pre_v226(self):
- self.app.client_manager.compute.api_version = api_versions.APIVersion(
- '2.25')
+ self._set_mock_microversion('2.25')
arglist = [
'--not-tag', 'tag1',
@@ -4759,8 +4981,8 @@ class TestServerList(_TestServerList):
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
- self.search_opts['availability_zone'] = 'test-az'
- self.servers_mock.list.assert_called_with(**self.kwargs)
+ self.kwargs['availability_zone'] = 'test-az'
+ self.sdk_client.servers.assert_called_with(**self.kwargs)
self.assertEqual(self.columns, columns)
self.assertEqual(tuple(self.data), tuple(data))
@@ -4775,8 +4997,8 @@ class TestServerList(_TestServerList):
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
- self.search_opts['key_name'] = 'test-key'
- self.servers_mock.list.assert_called_with(**self.kwargs)
+ self.kwargs['key_name'] = 'test-key'
+ self.sdk_client.servers.assert_called_with(**self.kwargs)
self.assertEqual(self.columns, columns)
self.assertEqual(tuple(self.data), tuple(data))
@@ -4791,8 +5013,8 @@ class TestServerList(_TestServerList):
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
- self.search_opts['config_drive'] = True
- self.servers_mock.list.assert_called_with(**self.kwargs)
+ self.kwargs['config_drive'] = True
+ self.sdk_client.servers.assert_called_with(**self.kwargs)
self.assertEqual(self.columns, columns)
self.assertEqual(tuple(self.data), tuple(data))
@@ -4807,8 +5029,8 @@ class TestServerList(_TestServerList):
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
- self.search_opts['config_drive'] = False
- self.servers_mock.list.assert_called_with(**self.kwargs)
+ self.kwargs['config_drive'] = False
+ self.sdk_client.servers.assert_called_with(**self.kwargs)
self.assertEqual(self.columns, columns)
self.assertEqual(tuple(self.data), tuple(data))
@@ -4823,8 +5045,8 @@ class TestServerList(_TestServerList):
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
- self.search_opts['progress'] = '100'
- self.servers_mock.list.assert_called_with(**self.kwargs)
+ self.kwargs['progress'] = '100'
+ self.sdk_client.servers.assert_called_with(**self.kwargs)
self.assertEqual(self.columns, columns)
self.assertEqual(tuple(self.data), tuple(data))
@@ -4848,8 +5070,8 @@ class TestServerList(_TestServerList):
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
- self.search_opts['vm_state'] = 'active'
- self.servers_mock.list.assert_called_with(**self.kwargs)
+ self.kwargs['vm_state'] = 'active'
+ self.sdk_client.servers.assert_called_with(**self.kwargs)
self.assertEqual(self.columns, columns)
self.assertEqual(tuple(self.data), tuple(data))
@@ -4864,8 +5086,8 @@ class TestServerList(_TestServerList):
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
- self.search_opts['task_state'] = 'deleting'
- self.servers_mock.list.assert_called_with(**self.kwargs)
+ self.kwargs['task_state'] = 'deleting'
+ self.sdk_client.servers.assert_called_with(**self.kwargs)
self.assertEqual(self.columns, columns)
self.assertEqual(tuple(self.data), tuple(data))
@@ -4880,11 +5102,96 @@ class TestServerList(_TestServerList):
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
- self.search_opts['power_state'] = 1
- self.servers_mock.list.assert_called_with(**self.kwargs)
+ self.kwargs['power_state'] = 1
+ self.sdk_client.servers.assert_called_with(**self.kwargs)
self.assertEqual(self.columns, columns)
self.assertEqual(tuple(self.data), tuple(data))
+ def test_server_list_long_with_host_status_v216(self):
+ self._set_mock_microversion('2.16')
+ self.data1 = tuple(
+ (
+ s.id,
+ s.name,
+ s.status,
+ getattr(s, 'task_state'),
+ server.PowerStateColumn(
+ getattr(s, 'power_state')
+ ),
+ server.AddressesColumn(s.addresses),
+ # Image will be an empty string if boot-from-volume
+ self.image.name if s.image else server.IMAGE_STRING_FOR_BFV,
+ s.image['id'] if s.image else server.IMAGE_STRING_FOR_BFV,
+ self.flavor.name,
+ s.flavor['id'],
+ getattr(s, 'availability_zone'),
+ server.HostColumn(getattr(s, 'hypervisor_hostname')),
+ format_columns.DictColumn(s.metadata),
+ ) for s in self.servers)
+
+ arglist = [
+ '--long'
+ ]
+ verifylist = [
+ ('long', True),
+ ]
+
+ # First test without host_status in the data -- the column should not
+ # be present in this case.
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+ columns, data = self.cmd.take_action(parsed_args)
+
+ self.sdk_client.servers.assert_called_with(**self.kwargs)
+
+ self.assertEqual(self.columns_long, columns)
+ self.assertEqual(tuple(self.data1), tuple(data))
+
+ # Next test with host_status in the data -- the column should be
+ # present in this case.
+ self.sdk_client.servers.reset_mock()
+
+ self.attrs['host_status'] = 'UP'
+ servers = self.setup_sdk_servers_mock(3)
+ self.sdk_client.servers.return_value = servers
+
+ # Make sure the returned image and flavor IDs match the servers.
+ Image = collections.namedtuple('Image', 'id name')
+ self.images_mock.return_value = [
+ Image(id=s.image['id'], name=self.image.name)
+ # Image will be an empty string if boot-from-volume
+ for s in servers if s.image
+ ]
+
+ # Add the expected host_status column and data.
+ columns_long = self.columns_long + ('Host Status',)
+ self.data2 = tuple(
+ (
+ s.id,
+ s.name,
+ s.status,
+ getattr(s, 'task_state'),
+ server.PowerStateColumn(
+ getattr(s, 'power_state')
+ ),
+ server.AddressesColumn(s.addresses),
+ # Image will be an empty string if boot-from-volume
+ self.image.name if s.image else server.IMAGE_STRING_FOR_BFV,
+ s.image['id'] if s.image else server.IMAGE_STRING_FOR_BFV,
+ self.flavor.name,
+ s.flavor['id'],
+ getattr(s, 'availability_zone'),
+ server.HostColumn(getattr(s, 'hypervisor_hostname')),
+ format_columns.DictColumn(s.metadata),
+ s.host_status,
+ ) for s in servers)
+
+ columns, data = self.cmd.take_action(parsed_args)
+
+ self.sdk_client.servers.assert_called_with(**self.kwargs)
+
+ self.assertEqual(columns_long, columns)
+ self.assertEqual(tuple(self.data2), tuple(data))
+
class TestServerListV273(_TestServerList):
@@ -4928,8 +5235,8 @@ class TestServerListV273(_TestServerList):
}
# The servers to be listed.
- self.servers = self.setup_servers_mock(3)
- self.servers_mock.list.return_value = self.servers
+ self.servers = self.setup_sdk_servers_mock(3)
+ self.sdk_client.servers.return_value = self.servers
Image = collections.namedtuple('Image', 'id name')
self.images_mock.return_value = [
@@ -4940,14 +5247,14 @@ class TestServerListV273(_TestServerList):
 # The flavor information is embedded, so there is no reason for this to
 # be called
- self.flavors_mock.list = mock.NonCallableMock()
+ self.sdk_client.flavors = mock.NonCallableMock()
self.data = tuple(
(
s.id,
s.name,
s.status,
- format_columns.DictListColumn(s.networks),
+ server.AddressesColumn(s.addresses),
# Image will be an empty string if boot-from-volume
self.image.name if s.image else server.IMAGE_STRING_FOR_BFV,
self.flavor.name,
@@ -4971,8 +5278,7 @@ class TestServerListV273(_TestServerList):
def test_server_list_with_locked(self):
- self.app.client_manager.compute.api_version = \
- api_versions.APIVersion('2.73')
+ self._set_mock_microversion('2.73')
arglist = [
'--locked'
]
@@ -4983,16 +5289,15 @@ class TestServerListV273(_TestServerList):
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
- self.search_opts['locked'] = True
- self.servers_mock.list.assert_called_with(**self.kwargs)
+ self.kwargs['locked'] = True
+ self.sdk_client.servers.assert_called_with(**self.kwargs)
- self.assertItemsEqual(self.columns, columns)
- self.assertItemsEqual(self.data, tuple(data))
+ self.assertCountEqual(self.columns, columns)
+ self.assertCountEqual(self.data, tuple(data))
def test_server_list_with_unlocked_v273(self):
+ self._set_mock_microversion('2.73')
- self.app.client_manager.compute.api_version = \
- api_versions.APIVersion('2.73')
arglist = [
'--unlocked'
]
@@ -5003,16 +5308,15 @@ class TestServerListV273(_TestServerList):
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
- self.search_opts['locked'] = False
- self.servers_mock.list.assert_called_with(**self.kwargs)
+ self.kwargs['locked'] = False
+ self.sdk_client.servers.assert_called_with(**self.kwargs)
- self.assertItemsEqual(self.columns, columns)
- self.assertItemsEqual(self.data, tuple(data))
+ self.assertCountEqual(self.columns, columns)
+ self.assertCountEqual(self.data, tuple(data))
def test_server_list_with_locked_and_unlocked(self):
- self.app.client_manager.compute.api_version = \
- api_versions.APIVersion('2.73')
+ self._set_mock_microversion('2.73')
arglist = [
'--locked',
'--unlocked'
@@ -5028,8 +5332,7 @@ class TestServerListV273(_TestServerList):
self.assertIn('Argument parse failed', str(ex))
def test_server_list_with_changes_before(self):
- self.app.client_manager.compute.api_version = (
- api_versions.APIVersion('2.66'))
+ self._set_mock_microversion('2.66')
arglist = [
'--changes-before', '2016-03-05T06:27:59Z',
'--deleted'
@@ -5042,20 +5345,18 @@ class TestServerListV273(_TestServerList):
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
- self.search_opts['changes-before'] = '2016-03-05T06:27:59Z'
- self.search_opts['deleted'] = True
+ self.kwargs['changes-before'] = '2016-03-05T06:27:59Z'
+ self.kwargs['deleted'] = True
- self.servers_mock.list.assert_called_with(**self.kwargs)
+ self.sdk_client.servers.assert_called_with(**self.kwargs)
- self.assertItemsEqual(self.columns, columns)
- self.assertItemsEqual(self.data, tuple(data))
+ self.assertCountEqual(self.columns, columns)
+ self.assertCountEqual(self.data, tuple(data))
@mock.patch.object(iso8601, 'parse_date', side_effect=iso8601.ParseError)
def test_server_list_with_invalid_changes_before(
self, mock_parse_isotime):
- self.app.client_manager.compute.api_version = (
- api_versions.APIVersion('2.66'))
-
+ self._set_mock_microversion('2.66')
arglist = [
'--changes-before', 'Invalid time value',
]
@@ -5075,8 +5376,7 @@ class TestServerListV273(_TestServerList):
)
def test_server_with_changes_before_pre_v266(self):
- self.app.client_manager.compute.api_version = (
- api_versions.APIVersion('2.65'))
+ self._set_mock_microversion('2.65')
arglist = [
'--changes-before', '2016-03-05T06:27:59Z',
@@ -5094,8 +5394,7 @@ class TestServerListV273(_TestServerList):
parsed_args)
def test_server_list_v269_with_partial_constructs(self):
- self.app.client_manager.compute.api_version = \
- api_versions.APIVersion('2.69')
+ self._set_mock_microversion('2.69')
arglist = []
verifylist = []
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
@@ -5121,10 +5420,10 @@ class TestServerListV273(_TestServerList):
# it will fail at formatting the networks info later on.
"networks": {}
}
- server = compute_fakes.fakes.FakeResource(
+ fake_server = compute_fakes.fakes.FakeResource(
info=server_dict,
)
- self.servers.append(server)
+ self.servers.append(fake_server)
columns, data = self.cmd.take_action(parsed_args)
# get the first three servers out since our interest is in the partial
# server.
@@ -5134,7 +5433,7 @@ class TestServerListV273(_TestServerList):
partial_server = next(data)
expected_row = (
'server-id-95a56bfc4xxxxxx28d7e418bfd97813a', '',
- 'UNKNOWN', format_columns.DictListColumn({}), '', '')
+ 'UNKNOWN', server.AddressesColumn(''), '', '')
self.assertEqual(expected_row, partial_server)
@@ -5666,6 +5965,25 @@ class TestServerRebuild(TestServer):
self.get_image_mock.assert_called_with(self.image.id)
self.server.rebuild.assert_called_with(self.image, None)
+ def test_rebuild_with_volume_backed_server_no_image(self):
+ # the volume-backed server will have the image attribute set to an
+ # empty string, not null/None
+ self.server.image = ''
+
+ arglist = [
+ self.server.id,
+ ]
+ verifylist = [
+ ('server', self.server.id),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ exc = self.assertRaises(
+ exceptions.CommandError,
+ self.cmd.take_action,
+ parsed_args)
+ self.assertIn('The --image option is required', str(exc))
+
def test_rebuild_with_name(self):
name = 'test-server-xxx'
arglist = [
@@ -6160,6 +6478,103 @@ class TestServerRebuild(TestServer):
parsed_args)
+class TestServerRebuildVolumeBacked(TestServer):
+
+ def setUp(self):
+ super().setUp()
+
+ self.new_image = image_fakes.create_one_image()
+ self.find_image_mock.return_value = self.new_image
+
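+        # A volume-backed server reports an empty string (not None) for its
+        # image attribute.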
+ attrs = {
+ 'image': '',
+ 'networks': {},
+ 'adminPass': 'passw0rd',
+ }
+ new_server = compute_fakes.FakeServer.create_one_server(attrs=attrs)
+
+        # Fake the server to be rebuilt; it must share the new server's ID.
+ attrs['id'] = new_server.id
+ methods = {
+ 'rebuild': new_server,
+ }
+ self.server = compute_fakes.FakeServer.create_one_server(
+ attrs=attrs,
+ methods=methods
+ )
+
+ # Return value for utils.find_resource for server.
+ self.servers_mock.get.return_value = self.server
+
+ self.cmd = server.RebuildServer(self.app, None)
+
+ def test_rebuild_with_reimage_boot_volume(self):
+ self.app.client_manager.compute.api_version = \
+ api_versions.APIVersion('2.93')
+
+ arglist = [
+ self.server.id,
+ '--reimage-boot-volume',
+ '--image', self.new_image.id
+ ]
+ verifylist = [
+ ('server', self.server.id),
+ ('reimage_boot_volume', True),
+ ('image', self.new_image.id)
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ self.cmd.take_action(parsed_args)
+
+ self.servers_mock.get.assert_called_with(self.server.id)
+ self.server.rebuild.assert_called_with(
+ self.new_image, None)
+
+ def test_rebuild_with_no_reimage_boot_volume(self):
+ self.app.client_manager.compute.api_version = \
+ api_versions.APIVersion('2.93')
+
+ arglist = [
+ self.server.id,
+ '--no-reimage-boot-volume',
+ '--image', self.new_image.id
+ ]
+ verifylist = [
+ ('server', self.server.id),
+ ('reimage_boot_volume', False),
+ ('image', self.new_image.id)
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ exc = self.assertRaises(
+ exceptions.CommandError,
+ self.cmd.take_action,
+ parsed_args)
+ self.assertIn('--reimage-boot-volume is required', str(exc))
+
+ def test_rebuild_with_reimage_boot_volume_pre_v293(self):
+ self.app.client_manager.compute.api_version = \
+ api_versions.APIVersion('2.92')
+
+ arglist = [
+ self.server.id,
+ '--reimage-boot-volume',
+ '--image', self.new_image.id
+ ]
+ verifylist = [
+ ('server', self.server.id),
+ ('reimage_boot_volume', True)
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ exc = self.assertRaises(
+ exceptions.CommandError,
+ self.cmd.take_action,
+ parsed_args)
+ self.assertIn(
+ '--os-compute-api-version 2.93 or greater is required', str(exc))
+
+
class TestEvacuateServer(TestServer):
def setUp(self):
@@ -7530,20 +7945,15 @@ class TestServerShow(TestServer):
'tenant_id': 'tenant-id-xxx',
'networks': {'public': ['10.20.30.40', '2001:db8::f']},
}
- # Fake the server.diagnostics() method. The return value contains http
- # response and data. The data is a dict. Sincce this method itself is
- # faked, we don't need to fake everything of the return value exactly.
- resp = mock.Mock()
- resp.status_code = 200
+ self.sdk_client.get_server_diagnostics.return_value = {'test': 'test'}
server_method = {
- 'diagnostics': (resp, {'test': 'test'}),
- 'topology': self.topology,
+ 'fetch_topology': self.topology,
}
self.server = compute_fakes.FakeServer.create_one_server(
attrs=server_info, methods=server_method)
# This is the return value for utils.find_resource()
- self.servers_mock.get.return_value = self.server
+ self.sdk_client.get_server.return_value = self.server
self.get_image_mock.return_value = self.image
self.flavors_mock.get.return_value = self.flavor
@@ -7644,8 +8054,7 @@ class TestServerShow(TestServer):
self.assertEqual(('test',), data)
def test_show_topology(self):
- self.app.client_manager.compute.api_version = \
- api_versions.APIVersion('2.78')
+ self._set_mock_microversion('2.78')
arglist = [
'--topology',
@@ -7667,8 +8076,7 @@ class TestServerShow(TestServer):
self.assertCountEqual(self.data, data)
def test_show_topology_pre_v278(self):
- self.app.client_manager.compute.api_version = \
- api_versions.APIVersion('2.77')
+ self._set_mock_microversion('2.77')
arglist = [
'--topology',
@@ -8097,7 +8505,23 @@ class TestServerUnshelve(TestServer):
def test_unshelve_multi_servers(self):
self.run_method_with_servers('unshelve', 3)
- def test_unshelve_with_specified_az(self):
+ def test_unshelve_v277(self):
+ self.app.client_manager.compute.api_version = \
+ api_versions.APIVersion('2.77')
+
+ server = compute_fakes.FakeServer.create_one_server(
+ attrs=self.attrs, methods=self.methods)
+ self.servers_mock.get.return_value = server
+ arglist = [server.id]
+ verifylist = [('server', [server.id])]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ self.cmd.take_action(parsed_args)
+
+ self.servers_mock.get.assert_called_with(server.id)
+ server.unshelve.assert_called_with()
+
+ def test_unshelve_with_specified_az_v277(self):
self.app.client_manager.compute.api_version = \
api_versions.APIVersion('2.77')
@@ -8141,6 +8565,157 @@ class TestServerUnshelve(TestServer):
self.assertIn(
'--os-compute-api-version 2.77 or greater is required', str(ex))
+ def test_unshelve_v291(self):
+ self.app.client_manager.compute.api_version = (
+ api_versions.APIVersion('2.91'))
+
+ server = compute_fakes.FakeServer.create_one_server(
+ attrs=self.attrs, methods=self.methods)
+ self.servers_mock.get.return_value = server
+ arglist = [server.id]
+ verifylist = [('server', [server.id])]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ self.cmd.take_action(parsed_args)
+
+ self.servers_mock.get.assert_called_with(server.id)
+ server.unshelve.assert_called_with()
+
+ def test_unshelve_with_specified_az_v291(self):
+ self.app.client_manager.compute.api_version = (
+ api_versions.APIVersion('2.91'))
+
+ server = compute_fakes.FakeServer.create_one_server(
+ attrs=self.attrs, methods=self.methods)
+ self.servers_mock.get.return_value = server
+ arglist = [
+ '--availability-zone', "foo-az",
+ server.id,
+ ]
+ verifylist = [
+ ('availability_zone', "foo-az"),
+ ('server', [server.id])
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ self.cmd.take_action(parsed_args)
+
+ self.servers_mock.get.assert_called_with(server.id)
+ server.unshelve.assert_called_with(availability_zone="foo-az")
+
+ def test_unshelve_with_specified_host_v291(self):
+ self.app.client_manager.compute.api_version = (
+ api_versions.APIVersion('2.91'))
+
+ server = compute_fakes.FakeServer.create_one_server(
+ attrs=self.attrs, methods=self.methods)
+ self.servers_mock.get.return_value = server
+ arglist = [
+ '--host', "server1",
+ server.id,
+ ]
+ verifylist = [
+ ('host', "server1"),
+ ('server', [server.id])
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ self.cmd.take_action(parsed_args)
+
+ self.servers_mock.get.assert_called_with(server.id)
+ server.unshelve.assert_called_with(host="server1")
+
+ def test_unshelve_with_unpin_az_v291(self):
+ self.app.client_manager.compute.api_version = (
+ api_versions.APIVersion('2.91'))
+
+ server = compute_fakes.FakeServer.create_one_server(
+ attrs=self.attrs, methods=self.methods)
+ self.servers_mock.get.return_value = server
+ arglist = ['--no-availability-zone', server.id]
+ verifylist = [
+ ('no_availability_zone', True),
+ ('server', [server.id])
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ self.cmd.take_action(parsed_args)
+
+ self.servers_mock.get.assert_called_with(server.id)
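+        # --no-availability-zone is passed through as availability_zone=None,
+        # i.e. unpin the server from its AZ.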
+ server.unshelve.assert_called_with(availability_zone=None)
+
+ def test_unshelve_with_specified_az_and_host_v291(self):
+ self.app.client_manager.compute.api_version = (
+ api_versions.APIVersion('2.91'))
+
+ server = compute_fakes.FakeServer.create_one_server(
+ attrs=self.attrs, methods=self.methods)
+ self.servers_mock.get.return_value = server
+ arglist = [
+ '--host', "server1",
+ '--availability-zone', "foo-az",
+ server.id,
+ ]
+ verifylist = [
+ ('host', "server1"),
+ ('availability_zone', "foo-az"),
+ ('server', [server.id])
+ ]
+
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ self.cmd.take_action(parsed_args)
+
+ self.servers_mock.get.assert_called_with(server.id)
+
+ def test_unshelve_with_unpin_az_and_host_v291(self):
+ self.app.client_manager.compute.api_version = (
+ api_versions.APIVersion('2.91'))
+
+ server = compute_fakes.FakeServer.create_one_server(
+ attrs=self.attrs, methods=self.methods)
+ self.servers_mock.get.return_value = server
+ arglist = [
+ '--host', "server1",
+ '--no-availability-zone',
+ server.id,
+ ]
+ verifylist = [
+ ('host', "server1"),
+ ('no_availability_zone', True),
+ ('server', [server.id])
+ ]
+
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ self.cmd.take_action(parsed_args)
+
+ self.servers_mock.get.assert_called_with(server.id)
+
+ def test_unshelve_fails_with_unpin_az_and_az_v291(self):
+ self.app.client_manager.compute.api_version = (
+ api_versions.APIVersion('2.91'))
+
+ server = compute_fakes.FakeServer.create_one_server(
+ attrs=self.attrs, methods=self.methods)
+ self.servers_mock.get.return_value = server
+ arglist = [
+ '--availability-zone', "foo-az",
+ '--no-availability-zone',
+ server.id,
+ ]
+ verifylist = [
+ ('availability_zone', "foo-az"),
+ ('no_availability_zone', True),
+ ('server', [server.id])
+ ]
+
+ ex = self.assertRaises(utils.ParserException,
+ self.check_parser,
+ self.cmd, arglist, verifylist)
+ self.assertIn('argument --no-availability-zone: not allowed '
+ 'with argument --availability-zone', str(ex))
+
@mock.patch.object(common_utils, 'wait_for_status', return_value=True)
def test_unshelve_with_wait(self, mock_wait_for_status):
server = compute_fakes.FakeServer.create_one_server(
diff --git a/openstackclient/tests/unit/compute/v2/test_server_group.py b/openstackclient/tests/unit/compute/v2/test_server_group.py
index 3ed19e27..655366a8 100644
--- a/openstackclient/tests/unit/compute/v2/test_server_group.py
+++ b/openstackclient/tests/unit/compute/v2/test_server_group.py
@@ -15,10 +15,9 @@
from unittest import mock
-from novaclient import api_versions
+from openstack import utils as sdk_utils
from osc_lib.cli import format_columns
from osc_lib import exceptions
-from osc_lib import utils
from openstackclient.compute.v2 import server_group
from openstackclient.tests.unit.compute.v2 import fakes as compute_fakes
@@ -27,38 +26,7 @@ from openstackclient.tests.unit import utils as tests_utils
class TestServerGroup(compute_fakes.TestComputev2):
- fake_server_group = compute_fakes.FakeServerGroup.create_one_server_group()
-
- columns = (
- 'id',
- 'members',
- 'name',
- 'policies',
- 'project_id',
- 'user_id',
- )
-
- data = (
- fake_server_group.id,
- format_columns.ListColumn(fake_server_group.members),
- fake_server_group.name,
- format_columns.ListColumn(fake_server_group.policies),
- fake_server_group.project_id,
- fake_server_group.user_id,
- )
-
- def setUp(self):
- super(TestServerGroup, self).setUp()
-
- # Get a shortcut to the ServerGroupsManager Mock
- self.server_groups_mock = self.app.client_manager.compute.server_groups
- self.server_groups_mock.reset_mock()
-
-
-class TestServerGroupV264(TestServerGroup):
-
- fake_server_group = \
- compute_fakes.FakeServerGroupV264.create_one_server_group()
+ fake_server_group = compute_fakes.create_one_server_group()
columns = (
'id',
@@ -66,31 +34,40 @@ class TestServerGroupV264(TestServerGroup):
'name',
'policy',
'project_id',
+ 'rules',
'user_id',
)
data = (
fake_server_group.id,
- format_columns.ListColumn(fake_server_group.members),
+ format_columns.ListColumn(fake_server_group.member_ids),
fake_server_group.name,
fake_server_group.policy,
fake_server_group.project_id,
+ format_columns.DictColumn(fake_server_group.rules),
fake_server_group.user_id,
)
def setUp(self):
- super(TestServerGroupV264, self).setUp()
+ super().setUp()
+
+ # Create and get a shortcut to the compute client mock
+ self.app.client_manager.sdk_connection = mock.Mock()
+ self.sdk_client = self.app.client_manager.sdk_connection.compute
+ self.sdk_client.reset_mock()
class TestServerGroupCreate(TestServerGroup):
def setUp(self):
- super(TestServerGroupCreate, self).setUp()
+ super().setUp()
- self.server_groups_mock.create.return_value = self.fake_server_group
+ self.sdk_client.create_server_group.return_value = \
+ self.fake_server_group
self.cmd = server_group.CreateServerGroup(self.app, None)
- def test_server_group_create(self):
+ @mock.patch.object(sdk_utils, 'supports_microversion', return_value=True)
+ def test_server_group_create(self, sm_mock):
arglist = [
'--policy', 'anti-affinity',
'affinity_group',
@@ -101,18 +78,16 @@ class TestServerGroupCreate(TestServerGroup):
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
- self.server_groups_mock.create.assert_called_once_with(
+ self.sdk_client.create_server_group.assert_called_once_with(
name=parsed_args.name,
- policies=[parsed_args.policy],
+ policy=parsed_args.policy,
)
self.assertCountEqual(self.columns, columns)
self.assertCountEqual(self.data, data)
- def test_server_group_create_with_soft_policies(self):
- self.app.client_manager.compute.api_version = api_versions.APIVersion(
- '2.15')
-
+ @mock.patch.object(sdk_utils, 'supports_microversion', return_value=True)
+ def test_server_group_create_with_soft_policies(self, sm_mock):
arglist = [
'--policy', 'soft-anti-affinity',
'affinity_group',
@@ -123,18 +98,16 @@ class TestServerGroupCreate(TestServerGroup):
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
- self.server_groups_mock.create.assert_called_once_with(
+ self.sdk_client.create_server_group.assert_called_once_with(
name=parsed_args.name,
- policies=[parsed_args.policy],
+ policy=parsed_args.policy,
)
self.assertCountEqual(self.columns, columns)
self.assertCountEqual(self.data, data)
- def test_server_group_create_with_soft_policies_pre_v215(self):
- self.app.client_manager.compute.api_version = api_versions.APIVersion(
- '2.14')
-
+ @mock.patch.object(sdk_utils, 'supports_microversion', return_value=False)
+ def test_server_group_create_with_soft_policies_pre_v215(self, sm_mock):
arglist = [
'--policy', 'soft-anti-affinity',
'affinity_group',
@@ -152,10 +125,8 @@ class TestServerGroupCreate(TestServerGroup):
'--os-compute-api-version 2.15 or greater is required',
str(ex))
- def test_server_group_create_with_rules(self):
- self.app.client_manager.compute.api_version = api_versions.APIVersion(
- '2.64')
-
+ @mock.patch.object(sdk_utils, 'supports_microversion', return_value=True)
+ def test_server_group_create_with_rules(self, sm_mock):
arglist = [
'--policy', 'soft-anti-affinity',
'--rule', 'max_server_per_host=2',
@@ -168,19 +139,18 @@ class TestServerGroupCreate(TestServerGroup):
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
- self.server_groups_mock.create.assert_called_once_with(
+ self.sdk_client.create_server_group.assert_called_once_with(
name=parsed_args.name,
- policy=parsed_args.policy, # should be 'policy', not 'policies'
+ policy=parsed_args.policy,
rules=parsed_args.rules,
)
self.assertCountEqual(self.columns, columns)
self.assertCountEqual(self.data, data)
- def test_server_group_create_with_rules_pre_v264(self):
- self.app.client_manager.compute.api_version = api_versions.APIVersion(
- '2.63')
-
+ @mock.patch.object(
+ sdk_utils, 'supports_microversion', side_effect=[True, False])
+ def test_server_group_create_with_rules_pre_v264(self, sm_mock):
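+        # side_effect above: the soft-policy (2.15) check is assumed to run
+        # first and pass, while the later rules (2.64) check fails.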
arglist = [
'--policy', 'soft-anti-affinity',
'--rule', 'max_server_per_host=2',
@@ -205,9 +175,9 @@ class TestServerGroupCreate(TestServerGroup):
class TestServerGroupDelete(TestServerGroup):
def setUp(self):
- super(TestServerGroupDelete, self).setUp()
+ super().setUp()
- self.server_groups_mock.get.return_value = self.fake_server_group
+ self.sdk_client.find_server_group.return_value = self.fake_server_group
self.cmd = server_group.DeleteServerGroup(self.app, None)
def test_server_group_delete(self):
@@ -219,8 +189,10 @@ class TestServerGroupDelete(TestServerGroup):
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
result = self.cmd.take_action(parsed_args)
- self.server_groups_mock.get.assert_called_once_with('affinity_group')
- self.server_groups_mock.delete.assert_called_once_with(
+ self.sdk_client.find_server_group.assert_called_once_with(
+ 'affinity_group'
+ )
+ self.sdk_client.delete_server_group.assert_called_once_with(
self.fake_server_group.id
)
self.assertIsNone(result)
@@ -235,13 +207,15 @@ class TestServerGroupDelete(TestServerGroup):
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
result = self.cmd.take_action(parsed_args)
- self.server_groups_mock.get.assert_any_call('affinity_group')
- self.server_groups_mock.get.assert_any_call('anti_affinity_group')
- self.server_groups_mock.delete.assert_called_with(
+ self.sdk_client.find_server_group.assert_any_call('affinity_group')
+ self.sdk_client.find_server_group.assert_any_call(
+ 'anti_affinity_group'
+ )
+ self.sdk_client.delete_server_group.assert_called_with(
self.fake_server_group.id
)
- self.assertEqual(2, self.server_groups_mock.get.call_count)
- self.assertEqual(2, self.server_groups_mock.delete.call_count)
+ self.assertEqual(2, self.sdk_client.find_server_group.call_count)
+ self.assertEqual(2, self.sdk_client.delete_server_group.call_count)
self.assertIsNone(result)
def test_server_group_delete_no_input(self):
@@ -262,25 +236,23 @@ class TestServerGroupDelete(TestServerGroup):
('server_group', ['affinity_group', 'anti_affinity_group']),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
- find_mock_result = [self.fake_server_group, exceptions.CommandError]
- with mock.patch.object(utils, 'find_resource',
- side_effect=find_mock_result) as find_mock:
- try:
- self.cmd.take_action(parsed_args)
- self.fail('CommandError should be raised.')
- except exceptions.CommandError as e:
- self.assertEqual('1 of 2 server groups failed to delete.',
- str(e))
-
- find_mock.assert_any_call(self.server_groups_mock,
- 'affinity_group')
- find_mock.assert_any_call(self.server_groups_mock,
- 'anti_affinity_group')
-
- self.assertEqual(2, find_mock.call_count)
- self.server_groups_mock.delete.assert_called_once_with(
- self.fake_server_group.id
- )
+
+ self.sdk_client.find_server_group.side_effect = [
+ self.fake_server_group, exceptions.CommandError]
+ try:
+ self.cmd.take_action(parsed_args)
+ self.fail('CommandError should be raised.')
+ except exceptions.CommandError as e:
+ self.assertEqual('1 of 2 server groups failed to delete.', str(e))
+
+ self.sdk_client.find_server_group.assert_any_call('affinity_group')
+ self.sdk_client.find_server_group.assert_any_call(
+ 'anti_affinity_group'
+ )
+ self.assertEqual(2, self.sdk_client.find_server_group.call_count)
+ self.sdk_client.delete_server_group.assert_called_once_with(
+ self.fake_server_group.id
+ )
class TestServerGroupList(TestServerGroup):
@@ -300,28 +272,67 @@ class TestServerGroupList(TestServerGroup):
'User Id',
)
+ list_columns_v264 = (
+ 'ID',
+ 'Name',
+ 'Policy',
+ )
+
+ list_columns_v264_long = (
+ 'ID',
+ 'Name',
+ 'Policy',
+ 'Members',
+ 'Project Id',
+ 'User Id',
+ )
+
list_data = ((
TestServerGroup.fake_server_group.id,
TestServerGroup.fake_server_group.name,
- format_columns.ListColumn(TestServerGroup.fake_server_group.policies),
+ format_columns.ListColumn(
+ TestServerGroup.fake_server_group.policies
+ ),
),)
list_data_long = ((
TestServerGroup.fake_server_group.id,
TestServerGroup.fake_server_group.name,
- format_columns.ListColumn(TestServerGroup.fake_server_group.policies),
- format_columns.ListColumn(TestServerGroup.fake_server_group.members),
+ format_columns.ListColumn(
+ TestServerGroup.fake_server_group.policies
+ ),
+ format_columns.ListColumn(
+ TestServerGroup.fake_server_group.member_ids
+ ),
+ TestServerGroup.fake_server_group.project_id,
+ TestServerGroup.fake_server_group.user_id,
+ ),)
+
+ list_data_v264 = ((
+ TestServerGroup.fake_server_group.id,
+ TestServerGroup.fake_server_group.name,
+ TestServerGroup.fake_server_group.policy,
+ ),)
+
+ list_data_v264_long = ((
+ TestServerGroup.fake_server_group.id,
+ TestServerGroup.fake_server_group.name,
+ TestServerGroup.fake_server_group.policy,
+ format_columns.ListColumn(
+ TestServerGroup.fake_server_group.member_ids
+ ),
TestServerGroup.fake_server_group.project_id,
TestServerGroup.fake_server_group.user_id,
),)
def setUp(self):
- super(TestServerGroupList, self).setUp()
+ super().setUp()
- self.server_groups_mock.list.return_value = [self.fake_server_group]
+ self.sdk_client.server_groups.return_value = [self.fake_server_group]
self.cmd = server_group.ListServerGroup(self.app, None)
- def test_server_group_list(self):
+ @mock.patch.object(sdk_utils, 'supports_microversion', return_value=False)
+ def test_server_group_list(self, sm_mock):
arglist = []
verifylist = [
('all_projects', False),
@@ -332,12 +343,13 @@ class TestServerGroupList(TestServerGroup):
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
- self.server_groups_mock.list.assert_called_once_with()
+ self.sdk_client.server_groups.assert_called_once_with()
self.assertCountEqual(self.list_columns, columns)
self.assertCountEqual(self.list_data, tuple(data))
- def test_server_group_list_with_all_projects_and_long(self):
+ @mock.patch.object(sdk_utils, 'supports_microversion', return_value=False)
+ def test_server_group_list_with_all_projects_and_long(self, sm_mock):
arglist = [
'--all-projects',
'--long',
@@ -350,13 +362,14 @@ class TestServerGroupList(TestServerGroup):
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
- self.server_groups_mock.list.assert_called_once_with(
+ self.sdk_client.server_groups.assert_called_once_with(
all_projects=True)
self.assertCountEqual(self.list_columns_long, columns)
self.assertCountEqual(self.list_data_long, tuple(data))
- def test_server_group_list_with_limit(self):
+ @mock.patch.object(sdk_utils, 'supports_microversion', return_value=True)
+ def test_server_group_list_with_limit(self, sm_mock):
arglist = [
'--limit', '1',
]
@@ -370,9 +383,10 @@ class TestServerGroupList(TestServerGroup):
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
self.cmd.take_action(parsed_args)
- self.server_groups_mock.list.assert_called_once_with(limit=1)
+ self.sdk_client.server_groups.assert_called_once_with(limit=1)
- def test_server_group_list_with_offset(self):
+ @mock.patch.object(sdk_utils, 'supports_microversion', return_value=True)
+ def test_server_group_list_with_offset(self, sm_mock):
arglist = [
'--offset', '5',
]
@@ -386,51 +400,10 @@ class TestServerGroupList(TestServerGroup):
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
self.cmd.take_action(parsed_args)
- self.server_groups_mock.list.assert_called_once_with(offset=5)
-
-
-class TestServerGroupListV264(TestServerGroupV264):
-
- list_columns = (
- 'ID',
- 'Name',
- 'Policy',
- )
+ self.sdk_client.server_groups.assert_called_once_with(offset=5)
- list_columns_long = (
- 'ID',
- 'Name',
- 'Policy',
- 'Members',
- 'Project Id',
- 'User Id',
- )
-
- list_data = ((
- TestServerGroupV264.fake_server_group.id,
- TestServerGroupV264.fake_server_group.name,
- TestServerGroupV264.fake_server_group.policy,
- ),)
-
- list_data_long = ((
- TestServerGroupV264.fake_server_group.id,
- TestServerGroupV264.fake_server_group.name,
- TestServerGroupV264.fake_server_group.policy,
- format_columns.ListColumn(
- TestServerGroupV264.fake_server_group.members),
- TestServerGroupV264.fake_server_group.project_id,
- TestServerGroupV264.fake_server_group.user_id,
- ),)
-
- def setUp(self):
- super(TestServerGroupListV264, self).setUp()
-
- self.server_groups_mock.list.return_value = [self.fake_server_group]
- self.cmd = server_group.ListServerGroup(self.app, None)
- self.app.client_manager.compute.api_version = api_versions.APIVersion(
- '2.64')
-
- def test_server_group_list(self):
+ @mock.patch.object(sdk_utils, 'supports_microversion', return_value=True)
+ def test_server_group_list_v264(self, sm_mock):
arglist = []
verifylist = [
('all_projects', False),
@@ -438,12 +411,13 @@ class TestServerGroupListV264(TestServerGroupV264):
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
- self.server_groups_mock.list.assert_called_once_with()
+ self.sdk_client.server_groups.assert_called_once_with()
- self.assertCountEqual(self.list_columns, columns)
- self.assertCountEqual(self.list_data, tuple(data))
+ self.assertCountEqual(self.list_columns_v264, columns)
+ self.assertCountEqual(self.list_data_v264, tuple(data))
- def test_server_group_list_with_all_projects_and_long(self):
+ @mock.patch.object(sdk_utils, 'supports_microversion', return_value=True)
+ def test_server_group_list_with_all_projects_and_long_v264(self, sm_mock):
arglist = [
'--all-projects',
'--long',
@@ -454,22 +428,23 @@ class TestServerGroupListV264(TestServerGroupV264):
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
- self.server_groups_mock.list.assert_called_once_with(
+ self.sdk_client.server_groups.assert_called_once_with(
all_projects=True)
- self.assertCountEqual(self.list_columns_long, columns)
- self.assertCountEqual(self.list_data_long, tuple(data))
+ self.assertCountEqual(self.list_columns_v264_long, columns)
+ self.assertCountEqual(self.list_data_v264_long, tuple(data))
class TestServerGroupShow(TestServerGroup):
def setUp(self):
- super(TestServerGroupShow, self).setUp()
+ super().setUp()
- self.server_groups_mock.get.return_value = self.fake_server_group
+ self.sdk_client.find_server_group.return_value = self.fake_server_group
self.cmd = server_group.ShowServerGroup(self.app, None)
- def test_server_group_show(self):
+ @mock.patch.object(sdk_utils, 'supports_microversion', return_value=True)
+ def test_server_group_show(self, sm_mock):
arglist = [
'affinity_group',
]
diff --git a/openstackclient/tests/unit/compute/v2/test_server_migration.py b/openstackclient/tests/unit/compute/v2/test_server_migration.py
index c4cbac47..afe868d9 100644
--- a/openstackclient/tests/unit/compute/v2/test_server_migration.py
+++ b/openstackclient/tests/unit/compute/v2/test_server_migration.py
@@ -13,6 +13,7 @@
from unittest import mock
from novaclient import api_versions
+from openstack import utils as sdk_utils
from osc_lib import exceptions
from osc_lib import utils as common_utils
@@ -35,14 +36,22 @@ class TestServerMigration(compute_fakes.TestComputev2):
self.app.client_manager.compute.server_migrations
self.server_migrations_mock.reset_mock()
- # Get a shortcut to the compute client MigrationManager mock
- self.migrations_mock = self.app.client_manager.compute.migrations
- self.migrations_mock.reset_mock()
-
self.app.client_manager.sdk_connection = mock.Mock()
self.app.client_manager.sdk_connection.compute = mock.Mock()
self.sdk_client = self.app.client_manager.sdk_connection.compute
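+        # Default to supporting every microversion; individual tests pin a
+        # specific version via _set_mock_microversion() below.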
+ patcher = mock.patch.object(
+ sdk_utils, 'supports_microversion', return_value=True)
+ self.addCleanup(patcher.stop)
+ self.supports_microversion_mock = patcher.start()
+
+ def _set_mock_microversion(self, mock_v):
+ """Set a specific microversion for the mock supports_microversion()."""
+ self.supports_microversion_mock.reset_mock(return_value=True)
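+        # After this, supports_microversion(client, v) returns True only when
+        # v is at or below the pinned version.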
+ self.supports_microversion_mock.side_effect = (
+ lambda _, v:
+ api_versions.APIVersion(v) <= api_versions.APIVersion(mock_v))
+
class TestListMigration(TestServerMigration):
"""Test fetch all migrations."""
@@ -53,22 +62,22 @@ class TestListMigration(TestServerMigration):
'Old Flavor', 'New Flavor', 'Created At', 'Updated At'
]
- # These are the fields that come back in the response from the REST API.
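+    # These are the Migration object fields.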
MIGRATION_FIELDS = [
- 'source_node', 'dest_node', 'source_compute', 'dest_compute',
- 'dest_host', 'status', 'instance_uuid', 'old_instance_type_id',
- 'new_instance_type_id', 'created_at', 'updated_at'
+ 'source_node', 'dest_node', 'source_compute',
+ 'dest_compute', 'dest_host', 'status', 'server_id', 'old_flavor_id',
+ 'new_flavor_id', 'created_at', 'updated_at'
]
def setUp(self):
super().setUp()
- self.server = compute_fakes.FakeServer.create_one_server()
- self.servers_mock.get.return_value = self.server
+ self._set_mock_microversion('2.1')
+
+ self.server = compute_fakes.FakeServer.create_one_sdk_server()
+ self.sdk_client.find_server.return_value = self.server
- self.migrations = compute_fakes.FakeMigration.create_migrations(
- count=3)
- self.migrations_mock.list.return_value = self.migrations
+ self.migrations = compute_fakes.create_migrations(count=3)
+ self.sdk_client.migrations.return_value = self.migrations
self.data = (common_utils.get_item_properties(
s, self.MIGRATION_FIELDS) for s in self.migrations)
@@ -84,12 +93,9 @@ class TestListMigration(TestServerMigration):
columns, data = self.cmd.take_action(parsed_args)
# Set expected values
- kwargs = {
- 'status': None,
- 'host': None,
- }
+ kwargs = {}
- self.migrations_mock.list.assert_called_with(**kwargs)
+ self.sdk_client.migrations.assert_called_with(**kwargs)
self.assertEqual(self.MIGRATION_COLUMNS, columns)
self.assertEqual(tuple(self.data), tuple(data))
@@ -117,8 +123,8 @@ class TestListMigration(TestServerMigration):
'migration_type': 'migration',
}
- self.servers_mock.get.assert_called_with('server1')
- self.migrations_mock.list.assert_called_with(**kwargs)
+ self.sdk_client.find_server.assert_called_with('server1')
+ self.sdk_client.migrations.assert_called_with(**kwargs)
self.assertEqual(self.MIGRATION_COLUMNS, columns)
self.assertEqual(tuple(self.data), tuple(data))
@@ -133,18 +139,17 @@ class TestListMigrationV223(TestListMigration):
'Type', 'Created At', 'Updated At'
]
- # These are the fields that come back in the response from the REST API.
+ # These are the Migration object fields.
MIGRATION_FIELDS = [
'id', 'source_node', 'dest_node', 'source_compute', 'dest_compute',
- 'dest_host', 'status', 'instance_uuid', 'old_instance_type_id',
- 'new_instance_type_id', 'migration_type', 'created_at', 'updated_at'
+ 'dest_host', 'status', 'server_id', 'old_flavor_id',
+ 'new_flavor_id', 'migration_type', 'created_at', 'updated_at'
]
def setUp(self):
super().setUp()
- self.app.client_manager.compute.api_version = api_versions.APIVersion(
- '2.23')
+ self._set_mock_microversion('2.23')
def test_server_migration_list(self):
arglist = [
@@ -159,10 +164,9 @@ class TestListMigrationV223(TestListMigration):
# Set expected values
kwargs = {
'status': 'migrating',
- 'host': None,
}
- self.migrations_mock.list.assert_called_with(**kwargs)
+ self.sdk_client.migrations.assert_called_with(**kwargs)
self.assertEqual(self.MIGRATION_COLUMNS, columns)
self.assertEqual(tuple(self.data), tuple(data))
@@ -177,19 +181,18 @@ class TestListMigrationV259(TestListMigration):
'Old Flavor', 'New Flavor', 'Type', 'Created At', 'Updated At'
]
- # These are the fields that come back in the response from the REST API.
+ # These are the Migration object fields.
MIGRATION_FIELDS = [
'id', 'uuid', 'source_node', 'dest_node', 'source_compute',
- 'dest_compute', 'dest_host', 'status', 'instance_uuid',
- 'old_instance_type_id', 'new_instance_type_id', 'migration_type',
+ 'dest_compute', 'dest_host', 'status', 'server_id',
+ 'old_flavor_id', 'new_flavor_id', 'migration_type',
'created_at', 'updated_at'
]
def setUp(self):
super().setUp()
- self.app.client_manager.compute.api_version = api_versions.APIVersion(
- '2.59')
+ self._set_mock_microversion('2.59')
def test_server_migration_list(self):
arglist = [
@@ -211,19 +214,18 @@ class TestListMigrationV259(TestListMigration):
kwargs = {
'status': 'migrating',
'limit': 1,
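+            # a --limit request is expected to disable SDK-side pagination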
+ 'paginated': False,
'marker': 'test_kp',
- 'host': None,
'changes_since': '2019-08-09T08:03:25Z',
}
- self.migrations_mock.list.assert_called_with(**kwargs)
+ self.sdk_client.migrations.assert_called_with(**kwargs)
self.assertEqual(self.MIGRATION_COLUMNS, columns)
self.assertEqual(tuple(self.data), tuple(data))
def test_server_migration_list_with_limit_pre_v259(self):
- self.app.client_manager.compute.api_version = api_versions.APIVersion(
- '2.58')
+ self._set_mock_microversion('2.58')
arglist = [
'--status', 'migrating',
'--limit', '1'
@@ -242,8 +244,7 @@ class TestListMigrationV259(TestListMigration):
str(ex))
def test_server_migration_list_with_marker_pre_v259(self):
- self.app.client_manager.compute.api_version = api_versions.APIVersion(
- '2.58')
+ self._set_mock_microversion('2.58')
arglist = [
'--status', 'migrating',
'--marker', 'test_kp'
@@ -262,8 +263,7 @@ class TestListMigrationV259(TestListMigration):
str(ex))
def test_server_migration_list_with_changes_since_pre_v259(self):
- self.app.client_manager.compute.api_version = api_versions.APIVersion(
- '2.58')
+ self._set_mock_microversion('2.58')
arglist = [
'--status', 'migrating',
'--changes-since', '2019-08-09T08:03:25Z'
@@ -291,19 +291,18 @@ class TestListMigrationV266(TestListMigration):
'Old Flavor', 'New Flavor', 'Type', 'Created At', 'Updated At'
]
- # These are the fields that come back in the response from the REST API.
+ # These are the Migration object fields.
MIGRATION_FIELDS = [
'id', 'uuid', 'source_node', 'dest_node', 'source_compute',
- 'dest_compute', 'dest_host', 'status', 'instance_uuid',
- 'old_instance_type_id', 'new_instance_type_id', 'migration_type',
+ 'dest_compute', 'dest_host', 'status', 'server_id',
+ 'old_flavor_id', 'new_flavor_id', 'migration_type',
'created_at', 'updated_at'
]
def setUp(self):
super().setUp()
- self.app.client_manager.compute.api_version = api_versions.APIVersion(
- '2.66')
+ self._set_mock_microversion('2.66')
def test_server_migration_list_with_changes_before(self):
arglist = [
@@ -327,20 +326,19 @@ class TestListMigrationV266(TestListMigration):
kwargs = {
'status': 'migrating',
'limit': 1,
+ 'paginated': False,
'marker': 'test_kp',
- 'host': None,
'changes_since': '2019-08-07T08:03:25Z',
'changes_before': '2019-08-09T08:03:25Z',
}
- self.migrations_mock.list.assert_called_with(**kwargs)
+ self.sdk_client.migrations.assert_called_with(**kwargs)
self.assertEqual(self.MIGRATION_COLUMNS, columns)
self.assertEqual(tuple(self.data), tuple(data))
def test_server_migration_list_with_changes_before_pre_v266(self):
- self.app.client_manager.compute.api_version = api_versions.APIVersion(
- '2.65')
+ self._set_mock_microversion('2.65')
arglist = [
'--status', 'migrating',
'--changes-before', '2019-08-09T08:03:25Z'
@@ -368,11 +366,11 @@ class TestListMigrationV280(TestListMigration):
'Old Flavor', 'New Flavor', 'Type', 'Created At', 'Updated At'
]
- # These are the fields that come back in the response from the REST API.
+ # These are the Migration object fields.
MIGRATION_FIELDS = [
'id', 'uuid', 'source_node', 'dest_node', 'source_compute',
- 'dest_compute', 'dest_host', 'status', 'instance_uuid',
- 'old_instance_type_id', 'new_instance_type_id', 'migration_type',
+ 'dest_compute', 'dest_host', 'status', 'server_id',
+ 'old_flavor_id', 'new_flavor_id', 'migration_type',
'created_at', 'updated_at'
]
@@ -391,8 +389,7 @@ class TestListMigrationV280(TestListMigration):
self.projects_mock.get.return_value = self.project
self.users_mock.get.return_value = self.user
- self.app.client_manager.compute.api_version = api_versions.APIVersion(
- '2.80')
+ self._set_mock_microversion('2.80')
def test_server_migration_list_with_project(self):
arglist = [
@@ -418,14 +415,14 @@ class TestListMigrationV280(TestListMigration):
kwargs = {
'status': 'migrating',
'limit': 1,
+ 'paginated': False,
'marker': 'test_kp',
- 'host': None,
'project_id': self.project.id,
'changes_since': '2019-08-07T08:03:25Z',
'changes_before': "2019-08-09T08:03:25Z",
}
- self.migrations_mock.list.assert_called_with(**kwargs)
+ self.sdk_client.migrations.assert_called_with(**kwargs)
self.MIGRATION_COLUMNS.insert(
len(self.MIGRATION_COLUMNS) - 2, "Project")
@@ -439,8 +436,7 @@ class TestListMigrationV280(TestListMigration):
self.MIGRATION_FIELDS.remove('project_id')
def test_get_migrations_with_project_pre_v280(self):
- self.app.client_manager.compute.api_version = api_versions.APIVersion(
- '2.79')
+ self._set_mock_microversion('2.79')
arglist = [
'--status', 'migrating',
'--changes-before', '2019-08-09T08:03:25Z',
@@ -478,20 +474,21 @@ class TestListMigrationV280(TestListMigration):
('user', self.user.id),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
columns, data = self.cmd.take_action(parsed_args)
# Set expected values
kwargs = {
'status': 'migrating',
'limit': 1,
+ 'paginated': False,
'marker': 'test_kp',
- 'host': None,
'user_id': self.user.id,
'changes_since': '2019-08-07T08:03:25Z',
'changes_before': "2019-08-09T08:03:25Z",
}
- self.migrations_mock.list.assert_called_with(**kwargs)
+ self.sdk_client.migrations.assert_called_with(**kwargs)
self.MIGRATION_COLUMNS.insert(
len(self.MIGRATION_COLUMNS) - 2, "User")
@@ -505,8 +502,7 @@ class TestListMigrationV280(TestListMigration):
self.MIGRATION_FIELDS.remove('user_id')
def test_get_migrations_with_user_pre_v280(self):
- self.app.client_manager.compute.api_version = api_versions.APIVersion(
- '2.79')
+ self._set_mock_microversion('2.79')
arglist = [
'--status', 'migrating',
'--changes-before', '2019-08-09T08:03:25Z',
@@ -550,14 +546,14 @@ class TestListMigrationV280(TestListMigration):
kwargs = {
'status': 'migrating',
'limit': 1,
- 'host': None,
+ 'paginated': False,
'project_id': self.project.id,
'user_id': self.user.id,
'changes_since': '2019-08-07T08:03:25Z',
'changes_before': "2019-08-09T08:03:25Z",
}
- self.migrations_mock.list.assert_called_with(**kwargs)
+ self.sdk_client.migrations.assert_called_with(**kwargs)
self.MIGRATION_COLUMNS.insert(
len(self.MIGRATION_COLUMNS) - 2, "Project")
@@ -576,8 +572,7 @@ class TestListMigrationV280(TestListMigration):
self.MIGRATION_FIELDS.remove('user_id')
def test_get_migrations_with_project_and_user_pre_v280(self):
- self.app.client_manager.compute.api_version = api_versions.APIVersion(
- '2.79')
+ self._set_mock_microversion('2.79')
arglist = [
'--status', 'migrating',
'--changes-before', '2019-08-09T08:03:25Z',
@@ -605,12 +600,15 @@ class TestServerMigrationShow(TestServerMigration):
def setUp(self):
super().setUp()
- self.server = compute_fakes.FakeServer.create_one_server()
- self.servers_mock.get.return_value = self.server
+ self.server = compute_fakes.FakeServer.create_one_sdk_server()
+ self.sdk_client.find_server.return_value = self.server
- self.server_migration = compute_fakes.FakeServerMigration\
- .create_one_server_migration()
- self.server_migrations_mock.get.return_value = self.server_migration
+ self.server_migration = compute_fakes.create_one_server_migration()
+ self.sdk_client.get_server_migration.return_value =\
+ self.server_migration
+ self.sdk_client.server_migrations.return_value = iter(
+ [self.server_migration]
+ )
self.columns = (
'ID',
@@ -633,7 +631,7 @@ class TestServerMigrationShow(TestServerMigration):
self.data = (
self.server_migration.id,
- self.server_migration.server_uuid,
+ self.server_migration.server_id,
self.server_migration.status,
self.server_migration.source_compute,
self.server_migration.source_node,
@@ -666,19 +664,18 @@ class TestServerMigrationShow(TestServerMigration):
self.assertEqual(self.columns, columns)
self.assertEqual(self.data, data)
- self.servers_mock.get.assert_called_with(self.server.id)
- self.server_migrations_mock.get.assert_called_with(
- self.server.id, '2',)
+ self.sdk_client.find_server.assert_called_with(
+ self.server.id, ignore_missing=False)
+ self.sdk_client.get_server_migration.assert_called_with(
+ self.server.id, '2', ignore_missing=False)
def test_server_migration_show(self):
- self.app.client_manager.compute.api_version = api_versions.APIVersion(
- '2.24')
+ self._set_mock_microversion('2.24')
self._test_server_migration_show()
def test_server_migration_show_v259(self):
- self.app.client_manager.compute.api_version = api_versions.APIVersion(
- '2.59')
+ self._set_mock_microversion('2.59')
self.columns += ('UUID',)
self.data += (self.server_migration.uuid,)
@@ -686,8 +683,7 @@ class TestServerMigrationShow(TestServerMigration):
self._test_server_migration_show()
def test_server_migration_show_v280(self):
- self.app.client_manager.compute.api_version = api_versions.APIVersion(
- '2.80')
+ self._set_mock_microversion('2.80')
self.columns += ('UUID', 'User ID', 'Project ID')
self.data += (
@@ -699,8 +695,7 @@ class TestServerMigrationShow(TestServerMigration):
self._test_server_migration_show()
def test_server_migration_show_pre_v224(self):
- self.app.client_manager.compute.api_version = api_versions.APIVersion(
- '2.23')
+ self._set_mock_microversion('2.23')
arglist = [
self.server.id,
@@ -718,9 +713,11 @@ class TestServerMigrationShow(TestServerMigration):
str(ex))
def test_server_migration_show_by_uuid(self):
- self.app.client_manager.compute.api_version = api_versions.APIVersion(
- '2.59')
- self.server_migrations_mock.list.return_value = [self.server_migration]
+ self._set_mock_microversion('2.59')
+
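+        # Looking up by migration UUID lists the server's migrations and
+        # matches locally; get_server_migration() must not be called
+        # (asserted below).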
+ self.sdk_client.server_migrations.return_value = iter(
+ [self.server_migration]
+ )
self.columns += ('UUID',)
self.data += (self.server_migration.uuid,)
@@ -737,14 +734,14 @@ class TestServerMigrationShow(TestServerMigration):
self.assertEqual(self.columns, columns)
self.assertEqual(self.data, data)
- self.servers_mock.get.assert_called_with(self.server.id)
- self.server_migrations_mock.list.assert_called_with(self.server.id)
- self.server_migrations_mock.get.assert_not_called()
+ self.sdk_client.find_server.assert_called_with(
+ self.server.id, ignore_missing=False)
+ self.sdk_client.server_migrations.assert_called_with(self.server.id)
+ self.sdk_client.get_server_migration.assert_not_called()
def test_server_migration_show_by_uuid_no_matches(self):
- self.app.client_manager.compute.api_version = api_versions.APIVersion(
- '2.59')
- self.server_migrations_mock.list.return_value = []
+ self._set_mock_microversion('2.59')
+ self.sdk_client.server_migrations.return_value = iter([])
arglist = [
self.server.id,
@@ -762,8 +759,7 @@ class TestServerMigrationShow(TestServerMigration):
str(ex))
def test_server_migration_show_by_uuid_pre_v259(self):
- self.app.client_manager.compute.api_version = api_versions.APIVersion(
- '2.58')
+ self._set_mock_microversion('2.58')
arglist = [
self.server.id,
@@ -781,8 +777,7 @@ class TestServerMigrationShow(TestServerMigration):
str(ex))
def test_server_migration_show_invalid_id(self):
- self.app.client_manager.compute.api_version = api_versions.APIVersion(
- '2.24')
+ self._set_mock_microversion('2.24')
arglist = [
self.server.id,
@@ -805,17 +800,16 @@ class TestServerMigrationAbort(TestServerMigration):
def setUp(self):
super().setUp()
- self.server = compute_fakes.FakeServer.create_one_server()
+ self.server = compute_fakes.FakeServer.create_one_sdk_server()
# Return value for utils.find_resource for server.
- self.servers_mock.get.return_value = self.server
+ self.sdk_client.find_server.return_value = self.server
# Get the command object to test
self.cmd = server_migration.AbortMigration(self.app, None)
def test_migration_abort(self):
- self.app.client_manager.compute.api_version = api_versions.APIVersion(
- '2.24')
+ self._set_mock_microversion('2.24')
arglist = [
self.server.id,
@@ -826,14 +820,14 @@ class TestServerMigrationAbort(TestServerMigration):
result = self.cmd.take_action(parsed_args)
- self.servers_mock.get.assert_called_with(self.server.id)
- self.server_migrations_mock.live_migration_abort.assert_called_with(
- self.server.id, '2',)
+ self.sdk_client.find_server.assert_called_with(
+ self.server.id, ignore_missing=False)
+ self.sdk_client.abort_server_migration.assert_called_with(
+ '2', self.server.id, ignore_missing=False)
self.assertIsNone(result)
def test_migration_abort_pre_v224(self):
- self.app.client_manager.compute.api_version = api_versions.APIVersion(
- '2.23')
+ self._set_mock_microversion('2.23')
arglist = [
self.server.id,
@@ -851,12 +845,12 @@ class TestServerMigrationAbort(TestServerMigration):
str(ex))
def test_server_migration_abort_by_uuid(self):
- self.app.client_manager.compute.api_version = api_versions.APIVersion(
- '2.59')
+ self._set_mock_microversion('2.59')
- self.server_migration = compute_fakes.FakeServerMigration\
- .create_one_server_migration()
- self.server_migrations_mock.list.return_value = [self.server_migration]
+ self.server_migration = compute_fakes.create_one_server_migration()
+ self.sdk_client.server_migrations.return_value = iter(
+ [self.server_migration]
+ )
arglist = [
self.server.id,
@@ -867,17 +861,19 @@ class TestServerMigrationAbort(TestServerMigration):
result = self.cmd.take_action(parsed_args)
- self.servers_mock.get.assert_called_with(self.server.id)
- self.server_migrations_mock.list.assert_called_with(self.server.id)
- self.server_migrations_mock.live_migration_abort.assert_called_with(
- self.server.id, self.server_migration.id)
+ self.sdk_client.find_server.assert_called_with(
+ self.server.id, ignore_missing=False)
+ self.sdk_client.server_migrations.assert_called_with(self.server.id)
+ self.sdk_client.abort_server_migration.assert_called_with(
+ self.server_migration.id, self.server.id, ignore_missing=False)
self.assertIsNone(result)
def test_server_migration_abort_by_uuid_no_matches(self):
- self.app.client_manager.compute.api_version = api_versions.APIVersion(
- '2.59')
+ self._set_mock_microversion('2.59')
- self.server_migrations_mock.list.return_value = []
+ self.sdk_client.server_migrations.return_value = iter(
+ []
+ )
arglist = [
self.server.id,
@@ -895,8 +891,7 @@ class TestServerMigrationAbort(TestServerMigration):
str(ex))
def test_server_migration_abort_by_uuid_pre_v259(self):
- self.app.client_manager.compute.api_version = api_versions.APIVersion(
- '2.58')
+ self._set_mock_microversion('2.58')
arglist = [
self.server.id,
@@ -919,17 +914,16 @@ class TestServerMigrationForceComplete(TestServerMigration):
def setUp(self):
super().setUp()
- self.server = compute_fakes.FakeServer.create_one_server()
+ self.server = compute_fakes.FakeServer.create_one_sdk_server()
# Return value for utils.find_resource for server.
- self.servers_mock.get.return_value = self.server
+ self.sdk_client.find_server.return_value = self.server
# Get the command object to test
self.cmd = server_migration.ForceCompleteMigration(self.app, None)
def test_migration_force_complete(self):
- self.app.client_manager.compute.api_version = api_versions.APIVersion(
- '2.22')
+ self._set_mock_microversion('2.22')
arglist = [
self.server.id,
@@ -940,14 +934,14 @@ class TestServerMigrationForceComplete(TestServerMigration):
result = self.cmd.take_action(parsed_args)
- self.servers_mock.get.assert_called_with(self.server.id)
- self.server_migrations_mock.live_migrate_force_complete\
- .assert_called_with(self.server.id, '2',)
+ self.sdk_client.find_server.assert_called_with(
+ self.server.id, ignore_missing=False)
+ self.sdk_client.force_complete_server_migration\
+ .assert_called_with('2', self.server.id)
self.assertIsNone(result)
def test_migration_force_complete_pre_v222(self):
- self.app.client_manager.compute.api_version = api_versions.APIVersion(
- '2.21')
+ self._set_mock_microversion('2.21')
arglist = [
self.server.id,
@@ -965,12 +959,12 @@ class TestServerMigrationForceComplete(TestServerMigration):
str(ex))
def test_server_migration_force_complete_by_uuid(self):
- self.app.client_manager.compute.api_version = api_versions.APIVersion(
- '2.59')
+ self._set_mock_microversion('2.59')
- self.server_migration = compute_fakes.FakeServerMigration\
- .create_one_server_migration()
- self.server_migrations_mock.list.return_value = [self.server_migration]
+ self.server_migration = compute_fakes.create_one_server_migration()
+ self.sdk_client.server_migrations.return_value = iter(
+ [self.server_migration]
+ )
arglist = [
self.server.id,
@@ -981,17 +975,17 @@ class TestServerMigrationForceComplete(TestServerMigration):
result = self.cmd.take_action(parsed_args)
- self.servers_mock.get.assert_called_with(self.server.id)
- self.server_migrations_mock.list.assert_called_with(self.server.id)
- self.server_migrations_mock.live_migrate_force_complete\
- .assert_called_with(self.server.id, self.server_migration.id)
+ self.sdk_client.find_server.assert_called_with(
+ self.server.id, ignore_missing=False)
+ self.sdk_client.server_migrations.assert_called_with(self.server.id)
+ self.sdk_client.force_complete_server_migration.\
+ assert_called_with(self.server_migration.id, self.server.id)
self.assertIsNone(result)
def test_server_migration_force_complete_by_uuid_no_matches(self):
- self.app.client_manager.compute.api_version = api_versions.APIVersion(
- '2.59')
+ self._set_mock_microversion('2.59')
- self.server_migrations_mock.list.return_value = []
+ self.sdk_client.server_migrations.return_value = iter([])
arglist = [
self.server.id,
@@ -1009,8 +1003,7 @@ class TestServerMigrationForceComplete(TestServerMigration):
str(ex))
def test_server_migration_force_complete_by_uuid_pre_v259(self):
- self.app.client_manager.compute.api_version = api_versions.APIVersion(
- '2.58')
+ self._set_mock_microversion('2.58')
arglist = [
self.server.id,
diff --git a/openstackclient/tests/unit/compute/v2/test_server_volume.py b/openstackclient/tests/unit/compute/v2/test_server_volume.py
index 02d378f8..f86bc7dd 100644
--- a/openstackclient/tests/unit/compute/v2/test_server_volume.py
+++ b/openstackclient/tests/unit/compute/v2/test_server_volume.py
@@ -11,11 +11,15 @@
# under the License.
#
+from unittest import mock
+
from novaclient import api_versions
+from openstack import utils as sdk_utils
from osc_lib import exceptions
from openstackclient.compute.v2 import server_volume
from openstackclient.tests.unit.compute.v2 import fakes as compute_fakes
+from openstackclient.tests.unit.volume.v2 import fakes as volume_fakes
class TestServerVolume(compute_fakes.TestComputev2):
@@ -23,13 +27,11 @@ class TestServerVolume(compute_fakes.TestComputev2):
def setUp(self):
super().setUp()
- # Get a shortcut to the compute client ServerManager Mock
- self.servers_mock = self.app.client_manager.compute.servers
- self.servers_mock.reset_mock()
-
- # Get a shortcut to the compute client VolumeManager mock
- self.servers_volumes_mock = self.app.client_manager.compute.volumes
- self.servers_volumes_mock.reset_mock()
+ self.app.client_manager.sdk_connection = mock.Mock()
+ self.app.client_manager.sdk_connection.compute = mock.Mock()
+ self.app.client_manager.sdk_connection.volume = mock.Mock()
+ self.compute_client = self.app.client_manager.sdk_connection.compute
+ self.volume_client = self.app.client_manager.sdk_connection.volume
class TestServerVolumeList(TestServerVolume):
@@ -37,20 +39,21 @@ class TestServerVolumeList(TestServerVolume):
def setUp(self):
super().setUp()
- self.server = compute_fakes.FakeServer.create_one_server()
- self.volume_attachments = (
- compute_fakes.FakeVolumeAttachment.create_volume_attachments())
+ self.server = compute_fakes.FakeServer.create_one_sdk_server()
+ self.volume_attachments = compute_fakes.create_volume_attachments()
- self.servers_mock.get.return_value = self.server
- self.servers_volumes_mock.get_server_volumes.return_value = (
+ self.compute_client.find_server.return_value = self.server
+ self.compute_client.volume_attachments.return_value = (
self.volume_attachments)
# Get the command object to test
self.cmd = server_volume.ListServerVolume(self.app, None)
- def test_server_volume_list(self):
+ @mock.patch.object(sdk_utils, 'supports_microversion')
+ def test_server_volume_list(self, sm_mock):
self.app.client_manager.compute.api_version = \
api_versions.APIVersion('2.1')
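+        # One boolean per supports_microversion() check the command makes;
+        # the exact check order assumed here follows the optional columns
+        # (attachment IDs, tags, delete-on-termination, ...).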
+ sm_mock.side_effect = [False, False, False, False]
arglist = [
self.server.id,
@@ -68,24 +71,25 @@ class TestServerVolumeList(TestServerVolume):
(
self.volume_attachments[0].id,
self.volume_attachments[0].device,
- self.volume_attachments[0].serverId,
- self.volume_attachments[0].volumeId,
+ self.volume_attachments[0].server_id,
+ self.volume_attachments[0].volume_id,
),
(
self.volume_attachments[1].id,
self.volume_attachments[1].device,
- self.volume_attachments[1].serverId,
- self.volume_attachments[1].volumeId,
+ self.volume_attachments[1].server_id,
+ self.volume_attachments[1].volume_id,
),
),
tuple(data),
)
- self.servers_volumes_mock.get_server_volumes.assert_called_once_with(
- self.server.id)
+ self.compute_client.volume_attachments.assert_called_once_with(
+ self.server,
+ )
- def test_server_volume_list_with_tags(self):
- self.app.client_manager.compute.api_version = \
- api_versions.APIVersion('2.70')
+ @mock.patch.object(sdk_utils, 'supports_microversion')
+ def test_server_volume_list_with_tags(self, sm_mock):
+ sm_mock.side_effect = [False, True, False, False]
arglist = [
self.server.id,
@@ -105,27 +109,27 @@ class TestServerVolumeList(TestServerVolume):
(
self.volume_attachments[0].id,
self.volume_attachments[0].device,
- self.volume_attachments[0].serverId,
- self.volume_attachments[0].volumeId,
+ self.volume_attachments[0].server_id,
+ self.volume_attachments[0].volume_id,
self.volume_attachments[0].tag,
),
(
self.volume_attachments[1].id,
self.volume_attachments[1].device,
- self.volume_attachments[1].serverId,
- self.volume_attachments[1].volumeId,
+ self.volume_attachments[1].server_id,
+ self.volume_attachments[1].volume_id,
self.volume_attachments[1].tag,
),
),
tuple(data),
)
- self.servers_volumes_mock.get_server_volumes.assert_called_once_with(
- self.server.id)
-
- def test_server_volume_list_with_delete_on_attachment(self):
- self.app.client_manager.compute.api_version = \
- api_versions.APIVersion('2.79')
+ self.compute_client.volume_attachments.assert_called_once_with(
+ self.server,
+ )
+ @mock.patch.object(sdk_utils, 'supports_microversion')
+ def test_server_volume_list_with_delete_on_attachment(self, sm_mock):
+ sm_mock.side_effect = [False, True, True, False]
arglist = [
self.server.id,
]
@@ -148,29 +152,30 @@ class TestServerVolumeList(TestServerVolume):
(
self.volume_attachments[0].id,
self.volume_attachments[0].device,
- self.volume_attachments[0].serverId,
- self.volume_attachments[0].volumeId,
+ self.volume_attachments[0].server_id,
+ self.volume_attachments[0].volume_id,
self.volume_attachments[0].tag,
self.volume_attachments[0].delete_on_termination,
),
(
self.volume_attachments[1].id,
self.volume_attachments[1].device,
- self.volume_attachments[1].serverId,
- self.volume_attachments[1].volumeId,
+ self.volume_attachments[1].server_id,
+ self.volume_attachments[1].volume_id,
self.volume_attachments[1].tag,
self.volume_attachments[1].delete_on_termination,
),
),
tuple(data),
)
- self.servers_volumes_mock.get_server_volumes.assert_called_once_with(
- self.server.id)
+ self.compute_client.volume_attachments.assert_called_once_with(
+ self.server,
+ )
- def test_server_volume_list_with_attachment_ids(self):
- self.app.client_manager.compute.api_version = \
- api_versions.APIVersion('2.89')
+ @mock.patch.object(sdk_utils, 'supports_microversion')
+ def test_server_volume_list_with_attachment_ids(self, sm_mock):
+ sm_mock.side_effect = [True, True, True, True]
arglist = [
self.server.id,
]
@@ -193,28 +198,29 @@ class TestServerVolumeList(TestServerVolume):
(
(
self.volume_attachments[0].device,
- self.volume_attachments[0].serverId,
- self.volume_attachments[0].volumeId,
+ self.volume_attachments[0].server_id,
+ self.volume_attachments[0].volume_id,
self.volume_attachments[0].tag,
self.volume_attachments[0].delete_on_termination,
self.volume_attachments[0].attachment_id,
- self.volume_attachments[0].bdm_uuid
+ self.volume_attachments[0].bdm_id
),
(
self.volume_attachments[1].device,
- self.volume_attachments[1].serverId,
- self.volume_attachments[1].volumeId,
+ self.volume_attachments[1].server_id,
+ self.volume_attachments[1].volume_id,
self.volume_attachments[1].tag,
self.volume_attachments[1].delete_on_termination,
self.volume_attachments[1].attachment_id,
- self.volume_attachments[1].bdm_uuid
+ self.volume_attachments[1].bdm_id
),
),
tuple(data),
)
- self.servers_volumes_mock.get_server_volumes.assert_called_once_with(
- self.server.id)
+ self.compute_client.volume_attachments.assert_called_once_with(
+ self.server,
+ )
class TestServerVolumeUpdate(TestServerVolume):
@@ -222,21 +228,23 @@ class TestServerVolumeUpdate(TestServerVolume):
def setUp(self):
super().setUp()
- self.server = compute_fakes.FakeServer.create_one_server()
- self.servers_mock.get.return_value = self.server
+ self.server = compute_fakes.FakeServer.create_one_sdk_server()
+ self.compute_client.find_server.return_value = self.server
+
+ self.volume = volume_fakes.create_one_sdk_volume()
+ self.volume_client.find_volume.return_value = self.volume
# Get the command object to test
self.cmd = server_volume.UpdateServerVolume(self.app, None)
def test_server_volume_update(self):
-
arglist = [
self.server.id,
- 'foo',
+ self.volume.id,
]
verifylist = [
('server', self.server.id),
- ('volume', 'foo'),
+ ('volume', self.volume.id),
('delete_on_termination', None),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
@@ -244,67 +252,73 @@ class TestServerVolumeUpdate(TestServerVolume):
result = self.cmd.take_action(parsed_args)
# This is a no-op
- self.servers_volumes_mock.update_server_volume.assert_not_called()
+ self.compute_client.update_volume_attachment.assert_not_called()
self.assertIsNone(result)
- def test_server_volume_update_with_delete_on_termination(self):
- self.app.client_manager.compute.api_version = \
- api_versions.APIVersion('2.85')
+ @mock.patch.object(sdk_utils, 'supports_microversion')
+ def test_server_volume_update_with_delete_on_termination(self, sm_mock):
+ sm_mock.return_value = True
arglist = [
self.server.id,
- 'foo',
+ self.volume.id,
'--delete-on-termination',
]
verifylist = [
('server', self.server.id),
- ('volume', 'foo'),
+ ('volume', self.volume.id),
('delete_on_termination', True),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
result = self.cmd.take_action(parsed_args)
- self.servers_volumes_mock.update_server_volume.assert_called_once_with(
- self.server.id, 'foo', 'foo',
- delete_on_termination=True)
+ self.compute_client.update_volume_attachment.assert_called_once_with(
+ self.server,
+ self.volume,
+ delete_on_termination=True,
+ )
self.assertIsNone(result)
- def test_server_volume_update_with_preserve_on_termination(self):
- self.app.client_manager.compute.api_version = \
- api_versions.APIVersion('2.85')
+ @mock.patch.object(sdk_utils, 'supports_microversion')
+ def test_server_volume_update_with_preserve_on_termination(self, sm_mock):
+ sm_mock.return_value = True
arglist = [
self.server.id,
- 'foo',
+ self.volume.id,
'--preserve-on-termination',
]
verifylist = [
('server', self.server.id),
- ('volume', 'foo'),
+ ('volume', self.volume.id),
('delete_on_termination', False),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
result = self.cmd.take_action(parsed_args)
- self.servers_volumes_mock.update_server_volume.assert_called_once_with(
- self.server.id, 'foo', 'foo',
- delete_on_termination=False)
+ self.compute_client.update_volume_attachment.assert_called_once_with(
+ self.server,
+ self.volume,
+ delete_on_termination=False
+ )
self.assertIsNone(result)
- def test_server_volume_update_with_delete_on_termination_pre_v285(self):
- self.app.client_manager.compute.api_version = \
- api_versions.APIVersion('2.84')
+ @mock.patch.object(sdk_utils, 'supports_microversion')
+ def test_server_volume_update_with_delete_on_termination_pre_v285(
+ self, sm_mock,
+ ):
+ sm_mock.return_value = False
arglist = [
self.server.id,
- 'foo',
+ self.volume.id,
'--delete-on-termination',
]
verifylist = [
('server', self.server.id),
- ('volume', 'foo'),
+ ('volume', self.volume.id),
('delete_on_termination', True),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
@@ -312,20 +326,24 @@ class TestServerVolumeUpdate(TestServerVolume):
self.assertRaises(
exceptions.CommandError,
self.cmd.take_action,
- parsed_args)
+ parsed_args,
+ )
+ self.compute_client.update_volume_attachment.assert_not_called()
- def test_server_volume_update_with_preserve_on_termination_pre_v285(self):
- self.app.client_manager.compute.api_version = \
- api_versions.APIVersion('2.84')
+ @mock.patch.object(sdk_utils, 'supports_microversion')
+ def test_server_volume_update_with_preserve_on_termination_pre_v285(
+ self, sm_mock,
+ ):
+ sm_mock.return_value = False
arglist = [
self.server.id,
- 'foo',
+ self.volume.id,
'--preserve-on-termination',
]
verifylist = [
('server', self.server.id),
- ('volume', 'foo'),
+ ('volume', self.volume.id),
('delete_on_termination', False),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
@@ -333,4 +351,6 @@ class TestServerVolumeUpdate(TestServerVolume):
self.assertRaises(
exceptions.CommandError,
self.cmd.take_action,
- parsed_args)
+ parsed_args,
+ )
+ self.compute_client.update_volume_attachment.assert_not_called()
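
The rewritten server-volume tests patch sdk_utils.supports_microversion instead of pinning a novaclient APIVersion; each microversion check the command makes consumes one entry of the mock's side_effect list, in call order. A minimal, illustrative sketch of that mechanism (the versions and their ordering below are examples, not taken from the command's implementation):

from unittest import mock

# Each call returns the next queued boolean, mirroring how the tests above
# drive successive supports_microversion() checks.
supports_microversion = mock.Mock(side_effect=[False, True, True, False])
results = [supports_microversion('client', v) for v in ('2.89', '2.70', '2.79', '2.85')]
print(results)  # [False, True, True, False]
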
diff --git a/openstackclient/tests/unit/compute/v2/test_usage.py b/openstackclient/tests/unit/compute/v2/test_usage.py
index bbccb9bd..85b45e1b 100644
--- a/openstackclient/tests/unit/compute/v2/test_usage.py
+++ b/openstackclient/tests/unit/compute/v2/test_usage.py
@@ -11,11 +11,8 @@
# under the License.
#
-import datetime
from unittest import mock
-from novaclient import api_versions
-
from openstackclient.compute.v2 import usage as usage_cmds
from openstackclient.tests.unit.compute.v2 import fakes as compute_fakes
from openstackclient.tests.unit.identity.v3 import fakes as identity_fakes
@@ -26,8 +23,9 @@ class TestUsage(compute_fakes.TestComputev2):
def setUp(self):
super(TestUsage, self).setUp()
- self.usage_mock = self.app.client_manager.compute.usage
- self.usage_mock.reset_mock()
+ self.app.client_manager.sdk_connection = mock.Mock()
+ self.app.client_manager.sdk_connection.compute = mock.Mock()
+ self.sdk_client = self.app.client_manager.sdk_connection.compute
self.projects_mock = self.app.client_manager.identity.projects
self.projects_mock.reset_mock()
@@ -38,7 +36,7 @@ class TestUsageList(TestUsage):
project = identity_fakes.FakeProject.create_one_project()
- # Return value of self.usage_mock.list().
+ # Return value of self.sdk_client.usages().
usages = compute_fakes.FakeUsage.create_usages(
- attrs={'tenant_id': project.name}, count=1)
+ attrs={'project_id': project.name}, count=1)
columns = (
"Project",
@@ -49,7 +47,7 @@ class TestUsageList(TestUsage):
)
data = [(
- usage_cmds.ProjectColumn(usages[0].tenant_id),
+ usage_cmds.ProjectColumn(usages[0].project_id),
usage_cmds.CountColumn(usages[0].server_usages),
usage_cmds.FloatColumn(usages[0].total_memory_mb_usage),
usage_cmds.FloatColumn(usages[0].total_vcpus_usage),
@@ -59,7 +57,7 @@ class TestUsageList(TestUsage):
def setUp(self):
super(TestUsageList, self).setUp()
- self.usage_mock.list.return_value = self.usages
+ self.sdk_client.usages.return_value = self.usages
self.projects_mock.list.return_value = [self.project]
# Get the command object to test
@@ -97,9 +95,9 @@ class TestUsageList(TestUsage):
columns, data = self.cmd.take_action(parsed_args)
self.projects_mock.list.assert_called_with()
- self.usage_mock.list.assert_called_with(
- datetime.datetime(2016, 11, 11, 0, 0),
- datetime.datetime(2016, 12, 20, 0, 0),
+ self.sdk_client.usages.assert_called_with(
+ start='2016-11-11T00:00:00',
+ end='2016-12-20T00:00:00',
detailed=True)
self.assertCountEqual(self.columns, columns)
@@ -112,20 +110,13 @@ class TestUsageList(TestUsage):
('end', None),
]
- self.app.client_manager.compute.api_version = api_versions.APIVersion(
- '2.40')
- self.usage_mock.list.reset_mock()
- self.usage_mock.list.side_effect = [self.usages, []]
-
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
self.projects_mock.list.assert_called_with()
- self.usage_mock.list.assert_has_calls([
- mock.call(mock.ANY, mock.ANY, detailed=True),
- mock.call(mock.ANY, mock.ANY, detailed=True,
- marker=self.usages[0]['server_usages'][0]['instance_id'])
+ self.sdk_client.usages.assert_has_calls([
+ mock.call(start=mock.ANY, end=mock.ANY, detailed=True)
])
self.assertCountEqual(self.columns, columns)
self.assertCountEqual(tuple(self.data), tuple(data))
@@ -136,7 +127,7 @@ class TestUsageShow(TestUsage):
project = identity_fakes.FakeProject.create_one_project()
- # Return value of self.usage_mock.list().
+ # Return value of self.sdk_client.get_usage().
usage = compute_fakes.FakeUsage.create_one_usage(
- attrs={'tenant_id': project.name})
+ attrs={'project_id': project.name})
columns = (
'Project',
@@ -147,7 +138,7 @@ class TestUsageShow(TestUsage):
)
data = (
- usage_cmds.ProjectColumn(usage.tenant_id),
+ usage_cmds.ProjectColumn(usage.project_id),
usage_cmds.CountColumn(usage.server_usages),
usage_cmds.FloatColumn(usage.total_memory_mb_usage),
usage_cmds.FloatColumn(usage.total_vcpus_usage),
@@ -157,7 +148,7 @@ class TestUsageShow(TestUsage):
def setUp(self):
super(TestUsageShow, self).setUp()
- self.usage_mock.get.return_value = self.usage
+ self.sdk_client.get_usage.return_value = self.usage
self.projects_mock.get.return_value = self.project
# Get the command object to test
@@ -199,10 +190,10 @@ class TestUsageShow(TestUsage):
columns, data = self.cmd.take_action(parsed_args)
- self.usage_mock.get.assert_called_with(
- self.project.id,
- datetime.datetime(2016, 11, 11, 0, 0),
- datetime.datetime(2016, 12, 20, 0, 0))
+ self.sdk_client.get_usage.assert_called_with(
+ project=self.project.id,
+ start='2016-11-11T00:00:00',
+ end='2016-12-20T00:00:00')
self.assertEqual(self.columns, columns)
self.assertEqual(self.data, data)
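
The usage tests now expect ISO 8601 strings rather than datetime objects in the SDK calls. Whether the command builds them with isoformat() or by hand is not shown here, but the standard library produces exactly the asserted form:

from datetime import datetime

start = datetime(2016, 11, 11)
end = datetime(2016, 12, 20)
print(start.isoformat(), end.isoformat())
# 2016-11-11T00:00:00 2016-12-20T00:00:00
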
diff --git a/openstackclient/tests/unit/fakes.py b/openstackclient/tests/unit/fakes.py
index 00e0c129..086c2466 100644
--- a/openstackclient/tests/unit/fakes.py
+++ b/openstackclient/tests/unit/fakes.py
@@ -11,7 +11,6 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
-#
import json
import sys
@@ -49,21 +48,6 @@ TEST_RESPONSE_DICT_V3.set_project_scope()
TEST_VERSIONS = fixture.DiscoveryList(href=AUTH_URL)
-def to_unicode_dict(catalog_dict):
- """Converts dict to unicode dict
-
- """
- if isinstance(catalog_dict, dict):
- return {to_unicode_dict(key): to_unicode_dict(value)
- for key, value in catalog_dict.items()}
- elif isinstance(catalog_dict, list):
- return [to_unicode_dict(element) for element in catalog_dict]
- elif isinstance(catalog_dict, str):
- return catalog_dict + u""
- else:
- return catalog_dict
-
-
class FakeStdout(object):
def __init__(self):
@@ -142,18 +126,30 @@ class FakeClientManager(object):
self.network_endpoint_enabled = True
self.compute_endpoint_enabled = True
self.volume_endpoint_enabled = True
+ # The source of configuration. This is either 'cloud_config' (a
+ # clouds.yaml file) or 'global_env' ('OS_'-prefixed envvars)
+ self.configuration_type = 'cloud_config'
def get_configuration(self):
- return {
- 'auth': {
- 'username': USERNAME,
- 'password': PASSWORD,
- 'token': AUTH_TOKEN,
- },
+
+ config = {
'region': REGION_NAME,
'identity_api_version': VERSION,
}
+ if self.configuration_type == 'cloud_config':
+ config['auth'] = {
+ 'username': USERNAME,
+ 'password': PASSWORD,
+ 'token': AUTH_TOKEN,
+ }
+ elif self.configuration_type == 'global_env':
+ config['username'] = USERNAME
+ config['password'] = PASSWORD
+ config['token'] = AUTH_TOKEN
+
+ return config
+
def is_network_endpoint_enabled(self):
return self.network_endpoint_enabled
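
The reworked FakeClientManager lets a test flip configuration_type to exercise both shapes of get_configuration(). A short usage sketch, assuming the fake can be constructed without arguments as the attribute assignments above suggest:

from openstackclient.tests.unit.fakes import USERNAME, FakeClientManager

cm = FakeClientManager()  # assumption: no constructor arguments required
assert 'auth' in cm.get_configuration()      # default 'cloud_config' shape

cm.configuration_type = 'global_env'
cfg = cm.get_configuration()
assert 'auth' not in cfg                     # credentials flattened...
assert cfg['username'] == USERNAME           # ...to top-level keys
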
diff --git a/openstackclient/tests/unit/identity/v3/test_identity_provider.py b/openstackclient/tests/unit/identity/v3/test_identity_provider.py
index 1a9a7991..480bae59 100644
--- a/openstackclient/tests/unit/identity/v3/test_identity_provider.py
+++ b/openstackclient/tests/unit/identity/v3/test_identity_provider.py
@@ -15,9 +15,12 @@
import copy
from unittest import mock
+from osc_lib import exceptions
+
from openstackclient.identity.v3 import identity_provider
from openstackclient.tests.unit import fakes
from openstackclient.tests.unit.identity.v3 import fakes as identity_fakes
+from openstackclient.tests.unit import utils as test_utils
class TestIdentityProvider(identity_fakes.TestFederatedIdentity):
@@ -308,6 +311,86 @@ class TestIdentityProviderCreate(TestIdentityProvider):
self.assertEqual(self.columns, columns)
self.assertCountEqual(self.datalist, data)
+ def test_create_identity_provider_authttl_positive(self):
+ arglist = [
+ '--authorization-ttl', '60',
+ identity_fakes.idp_id,
+ ]
+ verifylist = [
+ ('identity_provider_id', identity_fakes.idp_id),
+ ('authorization_ttl', 60),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+ columns, data = self.cmd.take_action(parsed_args)
+
+ # Set expected values
+ kwargs = {
+ 'remote_ids': None,
+ 'description': None,
+ 'domain_id': None,
+ 'enabled': True,
+ 'authorization_ttl': 60,
+ }
+
+ self.identity_providers_mock.create.assert_called_with(
+ id=identity_fakes.idp_id,
+ **kwargs
+ )
+
+ self.assertEqual(self.columns, columns)
+ self.assertCountEqual(self.datalist, data)
+
+ def test_create_identity_provider_authttl_zero(self):
+ arglist = [
+ '--authorization-ttl', '0',
+ identity_fakes.idp_id,
+ ]
+ verifylist = [
+ ('identity_provider_id', identity_fakes.idp_id),
+ ('authorization_ttl', 0),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+ columns, data = self.cmd.take_action(parsed_args)
+
+ # Set expected values
+ kwargs = {
+ 'remote_ids': None,
+ 'description': None,
+ 'domain_id': None,
+ 'enabled': True,
+ 'authorization_ttl': 0,
+ }
+
+ self.identity_providers_mock.create.assert_called_with(
+ id=identity_fakes.idp_id,
+ **kwargs
+ )
+
+ self.assertEqual(self.columns, columns)
+ self.assertCountEqual(self.datalist, data)
+
+ def test_create_identity_provider_authttl_negative(self):
+ arglist = [
+ '--authorization-ttl', '-60',
+ identity_fakes.idp_id,
+ ]
+ verifylist = [
+ ('identity_provider_id', identity_fakes.idp_id),
+ ('authorization_ttl', -60),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+ self.assertRaises(exceptions.CommandError, self.cmd.take_action,
+ parsed_args)
+
+ def test_create_identity_provider_authttl_not_int(self):
+ arglist = [
+ '--authorization-ttl', 'spam',
+ identity_fakes.idp_id,
+ ]
+ verifylist = []
+ self.assertRaises(test_utils.ParserException, self.check_parser,
+ self.cmd, arglist, verifylist)
+
class TestIdentityProviderDelete(TestIdentityProvider):
@@ -678,6 +761,93 @@ class TestIdentityProviderSet(TestIdentityProvider):
self.cmd.take_action(parsed_args)
+ def test_identity_provider_set_authttl_positive(self):
+ def prepare(self):
+ """Prepare fake return objects before the test is executed"""
+ updated_idp = copy.deepcopy(identity_fakes.IDENTITY_PROVIDER)
+ updated_idp['authorization_ttl'] = 60
+ resources = fakes.FakeResource(
+ None,
+ updated_idp,
+ loaded=True
+ )
+ self.identity_providers_mock.update.return_value = resources
+
+ prepare(self)
+ arglist = [
+ '--authorization-ttl', '60',
+ identity_fakes.idp_id
+ ]
+ verifylist = [
+ ('identity_provider', identity_fakes.idp_id),
+ ('enable', False),
+ ('disable', False),
+ ('remote_id', None),
+ ('authorization_ttl', 60),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+ self.cmd.take_action(parsed_args)
+ self.identity_providers_mock.update.assert_called_with(
+ identity_fakes.idp_id,
+ authorization_ttl=60,
+ )
+
+ def test_identity_provider_set_authttl_zero(self):
+ def prepare(self):
+ """Prepare fake return objects before the test is executed"""
+ updated_idp = copy.deepcopy(identity_fakes.IDENTITY_PROVIDER)
+ updated_idp['authorization_ttl'] = 0
+ resources = fakes.FakeResource(
+ None,
+ updated_idp,
+ loaded=True
+ )
+ self.identity_providers_mock.update.return_value = resources
+
+ prepare(self)
+ arglist = [
+ '--authorization-ttl', '0',
+ identity_fakes.idp_id
+ ]
+ verifylist = [
+ ('identity_provider', identity_fakes.idp_id),
+ ('enable', False),
+ ('disable', False),
+ ('remote_id', None),
+ ('authorization_ttl', 0),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+ self.cmd.take_action(parsed_args)
+ self.identity_providers_mock.update.assert_called_with(
+ identity_fakes.idp_id,
+ authorization_ttl=0,
+ )
+
+ def test_identity_provider_set_authttl_negative(self):
+ arglist = [
+ '--authorization-ttl', '-1',
+ identity_fakes.idp_id
+ ]
+ verifylist = [
+ ('identity_provider', identity_fakes.idp_id),
+ ('enable', False),
+ ('disable', False),
+ ('remote_id', None),
+ ('authorization_ttl', -1),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+ self.assertRaises(exceptions.CommandError, self.cmd.take_action,
+ parsed_args)
+
+ def test_identity_provider_set_authttl_not_int(self):
+ arglist = [
+ '--authorization-ttl', 'spam',
+ identity_fakes.idp_id
+ ]
+ verifylist = []
+ self.assertRaises(test_utils.ParserException, self.check_parser,
+ self.cmd, arglist, verifylist)
+
class TestIdentityProviderShow(TestIdentityProvider):
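
The new --authorization-ttl tests split validation between the parser and the command: a non-integer value fails argparse itself (surfaced as ParserException by the test harness), while a negative integer parses successfully and is only rejected in take_action() as a CommandError. A minimal argparse sketch of that split, with illustrative names only:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--authorization-ttl', type=int, dest='authorization_ttl')

# 'spam' never reaches the command; argparse rejects it at parse time.
args = parser.parse_args(['--authorization-ttl', '-60'])
if args.authorization_ttl is not None and args.authorization_ttl < 0:
    # the real command raises osc_lib exceptions.CommandError here
    raise ValueError('authorization-ttl must be a non-negative integer')
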
diff --git a/openstackclient/tests/unit/identity/v3/test_trust.py b/openstackclient/tests/unit/identity/v3/test_trust.py
index d8cfc59f..d530adf5 100644
--- a/openstackclient/tests/unit/identity/v3/test_trust.py
+++ b/openstackclient/tests/unit/identity/v3/test_trust.py
@@ -206,7 +206,113 @@ class TestTrustList(TestTrust):
# containing the data to be listed.
columns, data = self.cmd.take_action(parsed_args)
- self.trusts_mock.list.assert_called_with()
+ self.trusts_mock.list.assert_called_with(
+ trustor_user=None,
+ trustee_user=None,
+ )
+
+ collist = ('ID', 'Expires At', 'Impersonation', 'Project ID',
+ 'Trustee User ID', 'Trustor User ID')
+ self.assertEqual(collist, columns)
+ datalist = ((
+ identity_fakes.trust_id,
+ identity_fakes.trust_expires,
+ identity_fakes.trust_impersonation,
+ identity_fakes.project_id,
+ identity_fakes.user_id,
+ identity_fakes.user_id
+ ), )
+ self.assertEqual(datalist, tuple(data))
+
+ def test_trust_list_auth_user(self):
+ auth_ref = self.app.client_manager.auth_ref = mock.Mock()
+ auth_ref.user_id.return_value = identity_fakes.user_id
+
+ arglist = ['--auth-user']
+ verifylist = [
+ ('trustor', None),
+ ('trustee', None),
+ ('authuser', True),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ # In base command class Lister in cliff, abstract method take_action()
+ # returns a tuple containing the column names and an iterable
+ # containing the data to be listed.
+ columns, data = self.cmd.take_action(parsed_args)
+
+ self.trusts_mock.list.assert_any_call(
+ trustor_user=self.users_mock.get()
+ )
+ self.trusts_mock.list.assert_any_call(
+ trustee_user=self.users_mock.get()
+ )
+
+ collist = ('ID', 'Expires At', 'Impersonation', 'Project ID',
+ 'Trustee User ID', 'Trustor User ID')
+ self.assertEqual(collist, columns)
+ datalist = ((
+ identity_fakes.trust_id,
+ identity_fakes.trust_expires,
+ identity_fakes.trust_impersonation,
+ identity_fakes.project_id,
+ identity_fakes.user_id,
+ identity_fakes.user_id
+ ), )
+ self.assertEqual(datalist, tuple(data))
+
+ def test_trust_list_trustee(self):
+ arglist = ['--trustee', identity_fakes.user_name]
+ verifylist = [
+ ('trustor', None),
+ ('trustee', identity_fakes.user_name),
+ ('authuser', False),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ # In base command class Lister in cliff, abstract method take_action()
+ # returns a tuple containing the column names and an iterable
+ # containing the data to be listed.
+ columns, data = self.cmd.take_action(parsed_args)
+
+ print(self.trusts_mock.list.call_args_list)
+ self.trusts_mock.list.assert_any_call(
+ trustee_user=self.users_mock.get(),
+ trustor_user=None,
+ )
+
+ collist = ('ID', 'Expires At', 'Impersonation', 'Project ID',
+ 'Trustee User ID', 'Trustor User ID')
+ self.assertEqual(collist, columns)
+ datalist = ((
+ identity_fakes.trust_id,
+ identity_fakes.trust_expires,
+ identity_fakes.trust_impersonation,
+ identity_fakes.project_id,
+ identity_fakes.user_id,
+ identity_fakes.user_id
+ ), )
+ self.assertEqual(datalist, tuple(data))
+
+ def test_trust_list_trustor(self):
+ arglist = ['--trustor', identity_fakes.user_name]
+ verifylist = [
+ ('trustee', None),
+ ('trustor', identity_fakes.user_name),
+ ('authuser', False),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ # In base command class Lister in cliff, abstract method take_action()
+ # returns a tuple containing the column names and an iterable
+ # containing the data to be listed.
+ columns, data = self.cmd.take_action(parsed_args)
+
+ print(self.trusts_mock.list.call_args_list)
+ self.trusts_mock.list.assert_any_call(
+ trustor_user=self.users_mock.get(),
+ trustee_user=None,
+ )
collist = ('ID', 'Expires At', 'Impersonation', 'Project ID',
'Trustee User ID', 'Trustor User ID')
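
Read together, the assert_any_call() checks imply the listing flow: with --auth-user the command issues two trusts.list() calls, once filtering by trustor and once by trustee, while --trustor/--trustee pass a single resolved user through. A rough sketch of that control flow, not the actual command code:

def _list_trusts(identity_client, user_id=None, trustor=None, trustee=None, auth_user=False):
    # Sketch implied by the assertions above.
    if auth_user:
        data = list(identity_client.trusts.list(trustor_user=user_id))
        data += list(identity_client.trusts.list(trustee_user=user_id))
        return data
    return identity_client.trusts.list(trustor_user=trustor, trustee_user=trustee)
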
diff --git a/openstackclient/tests/unit/image/v2/fakes.py b/openstackclient/tests/unit/image/v2/fakes.py
index a0eda6d2..8ddd9a09 100644
--- a/openstackclient/tests/unit/image/v2/fakes.py
+++ b/openstackclient/tests/unit/image/v2/fakes.py
@@ -18,6 +18,9 @@ import uuid
from openstack.image.v2 import image
from openstack.image.v2 import member
+from openstack.image.v2 import metadef_namespace
+from openstack.image.v2 import service_info as _service_info
+from openstack.image.v2 import task
from openstackclient.tests.unit import fakes
from openstackclient.tests.unit.identity.v3 import fakes as identity_fakes
@@ -36,6 +39,8 @@ class FakeImagev2Client:
self.download_image = mock.Mock()
self.reactivate_image = mock.Mock()
self.deactivate_image = mock.Mock()
+ self.stage_image = mock.Mock()
+ self.import_image = mock.Mock()
self.members = mock.Mock()
self.add_member = mock.Mock()
@@ -43,6 +48,13 @@ class FakeImagev2Client:
self.update_member = mock.Mock()
self.remove_tag = mock.Mock()
+ self.metadef_namespaces = mock.Mock()
+
+ self.tasks = mock.Mock()
+ self.tasks.resource_class = fakes.FakeResource(None, {})
+ self.get_task = mock.Mock()
+
+ self.get_import_info = mock.Mock()
self.auth_token = kwargs['token']
self.management_url = kwargs['endpoint']
@@ -129,3 +141,151 @@ def create_one_image_member(attrs=None):
image_member_info.update(attrs)
return member.Member(**image_member_info)
+
+
+def create_one_import_info(attrs=None):
+ """Create a fake import info.
+
+ :param attrs: A dictionary with all attributes of import info
+ :type attrs: dict
+ :return: A fake Import object.
+ :rtype: `openstack.image.v2.service_info.Import`
+ """
+ attrs = attrs or {}
+
+ import_info = {
+ 'import-methods': {
+ 'description': 'Import methods available.',
+ 'type': 'array',
+ 'value': [
+ 'glance-direct',
+ 'web-download',
+ 'glance-download',
+ 'copy-image',
+ ]
+ }
+ }
+ import_info.update(attrs)
+
+ return _service_info.Import(**import_info)
+
+
+def create_one_task(attrs=None):
+ """Create a fake task.
+
+ :param attrs: A dictionary with all attributes of task
+ :type attrs: dict
+ :return: A fake Task object.
+ :rtype: `openstack.image.v2.task.Task`
+ """
+ attrs = attrs or {}
+
+ # Set default attribute
+ task_info = {
+ 'created_at': '2016-06-29T16:13:07Z',
+ 'expires_at': '2016-07-01T16:13:07Z',
+ 'id': str(uuid.uuid4()),
+ 'input': {
+ 'image_properties': {
+ 'container_format': 'ovf',
+ 'disk_format': 'vhd'
+ },
+ 'import_from': 'https://apps.openstack.org/excellent-image',
+ 'import_from_format': 'qcow2'
+ },
+ 'message': '',
+ 'owner': str(uuid.uuid4()),
+ 'result': {
+ 'image_id': str(uuid.uuid4()),
+ },
+ 'schema': '/v2/schemas/task',
+ 'status': random.choice(
+ [
+ 'pending',
+ 'processing',
+ 'success',
+ 'failure',
+ ]
+ ),
+ # though not documented, the API only allows 'import'
+ # https://github.com/openstack/glance/blob/24.0.0/glance/api/v2/tasks.py#L186-L190
+ 'type': 'import',
+ 'updated_at': '2016-06-29T16:13:07Z',
+ }
+
+ # Overwrite default attributes if there are some attributes set
+ task_info.update(attrs)
+
+ return task.Task(**task_info)
+
+
+def create_tasks(attrs=None, count=2):
+ """Create multiple fake tasks.
+
+ :param attrs: A dictionary with all attributes of Task
+ :type attrs: dict
+ :param count: The number of tasks to be faked
+ :type count: int
+ :return: A list of fake Task objects
+ :rtype: list
+ """
+ tasks = []
+ for n in range(0, count):
+ tasks.append(create_one_task(attrs))
+
+ return tasks
+
+
+class FakeMetadefNamespaceClient:
+
+ def __init__(self, **kwargs):
+ self.create_metadef_namespace = mock.Mock()
+ self.delete_metadef_namespace = mock.Mock()
+ self.metadef_namespaces = mock.Mock()
+ self.get_metadef_namespace = mock.Mock()
+ self.update_metadef_namespace = mock.Mock()
+
+ self.auth_token = kwargs['token']
+ self.management_url = kwargs['endpoint']
+ self.version = 2.0
+
+
+class TestMetadefNamespaces(utils.TestCommand):
+
+ def setUp(self):
+ super().setUp()
+
+ self.app.client_manager.image = FakeMetadefNamespaceClient(
+ endpoint=fakes.AUTH_URL,
+ token=fakes.AUTH_TOKEN,
+ )
+
+ self.app.client_manager.identity = identity_fakes.FakeIdentityv3Client(
+ endpoint=fakes.AUTH_URL,
+ token=fakes.AUTH_TOKEN,
+ )
+
+
+def create_one_metadef_namespace(attrs=None):
+ """Create a fake MetadefNamespace member.
+
+ :param attrs: A dictionary with all attributes of metadef_namespace member
+ :type attrs: dict
+ :return: a list of MetadefNamespace objects
+ :rtype: list of `metadef_namespace.MetadefNamespace`
+ """
+ attrs = attrs or {}
+
+ metadef_namespace_list = {
+ 'created_at': '2022-08-17T11:30:22Z',
+ 'display_name': 'Flavor Quota',
+ 'namespace': 'OS::Compute::Quota',
+ 'owner': 'admin',
+ # 'resource_type_associations': ['OS::Nova::Flavor'],
+        # Handling of this list-type attribute is not implemented in the fake.
+ 'visibility': 'public',
+ }
+
+ # Overwrite default attributes if there are some attributes set
+ metadef_namespace_list.update(attrs)
+ return metadef_namespace.MetadefNamespace(**metadef_namespace_list)
diff --git a/openstackclient/tests/unit/image/v2/test_image.py b/openstackclient/tests/unit/image/v2/test_image.py
index 7ccc9f0f..019b4d9d 100644
--- a/openstackclient/tests/unit/image/v2/test_image.py
+++ b/openstackclient/tests/unit/image/v2/test_image.py
@@ -14,23 +14,24 @@
import copy
import io
-import os
import tempfile
from unittest import mock
+from cinderclient import api_versions
from openstack import exceptions as sdk_exceptions
from osc_lib.cli import format_columns
from osc_lib import exceptions
-from openstackclient.image.v2 import image
+from openstackclient.image.v2 import image as _image
from openstackclient.tests.unit.identity.v3 import fakes as identity_fakes
from openstackclient.tests.unit.image.v2 import fakes as image_fakes
+from openstackclient.tests.unit.volume.v3 import fakes as volume_fakes
-class TestImage(image_fakes.TestImagev2):
+class TestImage(image_fakes.TestImagev2, volume_fakes.TestVolume):
def setUp(self):
- super(TestImage, self).setUp()
+ super().setUp()
# Get shortcuts to mocked image client
self.client = self.app.client_manager.image
@@ -40,6 +41,13 @@ class TestImage(image_fakes.TestImagev2):
self.project_mock.reset_mock()
self.domain_mock = self.app.client_manager.identity.domains
self.domain_mock.reset_mock()
+ self.volumes_mock = self.app.client_manager.volume.volumes
+ fake_body = {
+ 'os-volume_upload_image':
+ {'volume_type': {'name': 'fake_type'}}}
+ self.volumes_mock.upload_to_image.return_value = (
+ 200, fake_body)
+ self.volumes_mock.reset_mock()
def setup_images_mock(self, count):
images = image_fakes.create_images(count=count)
@@ -53,7 +61,7 @@ class TestImageCreate(TestImage):
domain = identity_fakes.FakeDomain.create_one_domain()
def setUp(self):
- super(TestImageCreate, self).setUp()
+ super().setUp()
self.new_image = image_fakes.create_one_image()
self.client.create_image.return_value = self.new_image
@@ -65,10 +73,10 @@ class TestImageCreate(TestImage):
self.client.update_image.return_value = self.new_image
(self.expected_columns, self.expected_data) = zip(
- *sorted(image._format_image(self.new_image).items()))
+ *sorted(_image._format_image(self.new_image).items()))
# Get the command object to test
- self.cmd = image.CreateImage(self.app, None)
+ self.cmd = _image.CreateImage(self.app, None)
@mock.patch("sys.stdin", side_effect=[None])
def test_image_reserve_no_options(self, raw_input):
@@ -76,8 +84,8 @@ class TestImageCreate(TestImage):
self.new_image.name
]
verifylist = [
- ('container_format', image.DEFAULT_CONTAINER_FORMAT),
- ('disk_format', image.DEFAULT_DISK_FORMAT),
+ ('container_format', _image.DEFAULT_CONTAINER_FORMAT),
+ ('disk_format', _image.DEFAULT_DISK_FORMAT),
('name', self.new_image.name),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
@@ -91,19 +99,12 @@ class TestImageCreate(TestImage):
self.client.create_image.assert_called_with(
name=self.new_image.name,
allow_duplicates=True,
- container_format=image.DEFAULT_CONTAINER_FORMAT,
- disk_format=image.DEFAULT_DISK_FORMAT,
+ container_format=_image.DEFAULT_CONTAINER_FORMAT,
+ disk_format=_image.DEFAULT_DISK_FORMAT,
)
- # Verify update() was not called, if it was show the args
- self.assertEqual(self.client.update_image.call_args_list, [])
-
- self.assertEqual(
- self.expected_columns,
- columns)
- self.assertCountEqual(
- self.expected_data,
- data)
+ self.assertEqual(self.expected_columns, columns)
+ self.assertCountEqual(self.expected_data, data)
@mock.patch('sys.stdin', side_effect=[None])
def test_image_reserve_options(self, raw_input):
@@ -112,10 +113,11 @@ class TestImageCreate(TestImage):
'--disk-format', 'ami',
'--min-disk', '10',
'--min-ram', '4',
- ('--protected'
- if self.new_image.is_protected else '--unprotected'),
- ('--private'
- if self.new_image.visibility == 'private' else '--public'),
+ '--protected' if self.new_image.is_protected else '--unprotected',
+ (
+ '--private'
+ if self.new_image.visibility == 'private' else '--public'
+ ),
'--project', self.new_image.owner_id,
'--project-domain', self.domain.id,
self.new_image.name,
@@ -125,10 +127,8 @@ class TestImageCreate(TestImage):
('disk_format', 'ami'),
('min_disk', 10),
('min_ram', 4),
- ('protected', self.new_image.is_protected),
- ('unprotected', not self.new_image.is_protected),
- ('public', self.new_image.visibility == 'public'),
- ('private', self.new_image.visibility == 'private'),
+ ('is_protected', self.new_image.is_protected),
+ ('visibility', self.new_image.visibility),
('project', self.new_image.owner_id),
('project_domain', self.domain.id),
('name', self.new_image.name),
@@ -153,12 +153,8 @@ class TestImageCreate(TestImage):
visibility=self.new_image.visibility,
)
- self.assertEqual(
- self.expected_columns,
- columns)
- self.assertCountEqual(
- self.expected_data,
- data)
+ self.assertEqual(self.expected_columns, columns)
+ self.assertCountEqual(self.expected_data, data)
def test_image_create_with_unexist_project(self):
self.project_mock.get.side_effect = exceptions.NotFound(None)
@@ -179,10 +175,8 @@ class TestImageCreate(TestImage):
('disk_format', 'ami'),
('min_disk', 10),
('min_ram', 4),
- ('protected', True),
- ('unprotected', False),
- ('public', False),
- ('private', True),
+ ('is_protected', True),
+ ('visibility', 'private'),
('project', 'unexist_owner'),
('name', 'graven'),
]
@@ -212,11 +206,9 @@ class TestImageCreate(TestImage):
self.new_image.name,
]
verifylist = [
- ('file', imagefile.name),
- ('protected', self.new_image.is_protected),
- ('unprotected', not self.new_image.is_protected),
- ('public', self.new_image.visibility == 'public'),
- ('private', self.new_image.visibility == 'private'),
+ ('filename', imagefile.name),
+ ('is_protected', self.new_image.is_protected),
+ ('visibility', self.new_image.visibility),
('properties', {'Alpha': '1', 'Beta': '2'}),
('tags', self.new_image.tags),
('name', self.new_image.name),
@@ -232,8 +224,8 @@ class TestImageCreate(TestImage):
self.client.create_image.assert_called_with(
name=self.new_image.name,
allow_duplicates=True,
- container_format=image.DEFAULT_CONTAINER_FORMAT,
- disk_format=image.DEFAULT_DISK_FORMAT,
+ container_format=_image.DEFAULT_CONTAINER_FORMAT,
+ disk_format=_image.DEFAULT_DISK_FORMAT,
is_protected=self.new_image.is_protected,
visibility=self.new_image.visibility,
Alpha='1',
@@ -249,6 +241,37 @@ class TestImageCreate(TestImage):
self.expected_data,
data)
+ @mock.patch('openstackclient.image.v2.image.get_data_from_stdin')
+ def test_image_create__progress_ignore_with_stdin(
+ self, mock_get_data_from_stdin,
+ ):
+ fake_stdin = io.BytesIO(b'some fake data')
+ mock_get_data_from_stdin.return_value = fake_stdin
+
+ arglist = [
+ '--progress',
+ self.new_image.name,
+ ]
+ verifylist = [
+ ('progress', True),
+ ('name', self.new_image.name),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ columns, data = self.cmd.take_action(parsed_args)
+
+ self.client.create_image.assert_called_with(
+ name=self.new_image.name,
+ allow_duplicates=True,
+ container_format=_image.DEFAULT_CONTAINER_FORMAT,
+ disk_format=_image.DEFAULT_DISK_FORMAT,
+ data=fake_stdin,
+ validate_checksum=False,
+ )
+
+ self.assertEqual(self.expected_columns, columns)
+ self.assertCountEqual(self.expected_data, data)
+
def test_image_create_dead_options(self):
arglist = [
@@ -282,11 +305,106 @@ class TestImageCreate(TestImage):
self.client.create_image.assert_called_with(
name=self.new_image.name,
allow_duplicates=True,
- container_format=image.DEFAULT_CONTAINER_FORMAT,
- disk_format=image.DEFAULT_DISK_FORMAT,
+ container_format=_image.DEFAULT_CONTAINER_FORMAT,
+ disk_format=_image.DEFAULT_DISK_FORMAT,
use_import=True
)
+ @mock.patch('osc_lib.utils.find_resource')
+ @mock.patch('openstackclient.image.v2.image.get_data_from_stdin')
+ def test_image_create_from_volume(self, mock_get_data_f, mock_get_vol):
+
+ fake_vol_id = 'fake-volume-id'
+ mock_get_data_f.return_value = None
+
+ class FakeVolume:
+ id = fake_vol_id
+
+ mock_get_vol.return_value = FakeVolume()
+
+ arglist = [
+ '--volume', fake_vol_id,
+ self.new_image.name,
+ ]
+ verifylist = [
+ ('name', self.new_image.name),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ columns, data = self.cmd.take_action(parsed_args)
+
+ self.volumes_mock.upload_to_image.assert_called_with(
+ fake_vol_id,
+ False,
+ self.new_image.name,
+ 'bare',
+ 'raw'
+ )
+
+ @mock.patch('osc_lib.utils.find_resource')
+ @mock.patch('openstackclient.image.v2.image.get_data_from_stdin')
+ def test_image_create_from_volume_fail(self, mock_get_data_f,
+ mock_get_vol):
+
+ fake_vol_id = 'fake-volume-id'
+ mock_get_data_f.return_value = None
+
+ class FakeVolume:
+ id = fake_vol_id
+
+ mock_get_vol.return_value = FakeVolume()
+
+ arglist = [
+ '--volume', fake_vol_id,
+ self.new_image.name,
+ '--public'
+ ]
+ verifylist = [
+ ('name', self.new_image.name),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ self.assertRaises(exceptions.CommandError, self.cmd.take_action,
+ parsed_args)
+
+ @mock.patch('osc_lib.utils.find_resource')
+ @mock.patch('openstackclient.image.v2.image.get_data_from_stdin')
+ def test_image_create_from_volume_v31(self, mock_get_data_f,
+ mock_get_vol):
+
+ self.app.client_manager.volume.api_version = (
+ api_versions.APIVersion('3.1'))
+
+ fake_vol_id = 'fake-volume-id'
+ mock_get_data_f.return_value = None
+
+ class FakeVolume:
+ id = fake_vol_id
+
+ mock_get_vol.return_value = FakeVolume()
+
+ arglist = [
+ '--volume', fake_vol_id,
+ self.new_image.name,
+ '--public'
+ ]
+ verifylist = [
+ ('name', self.new_image.name),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ columns, data = self.cmd.take_action(parsed_args)
+
+ self.volumes_mock.upload_to_image.assert_called_with(
+ fake_vol_id,
+ False,
+ self.new_image.name,
+ 'bare',
+ 'raw',
+ visibility='public',
+ protected=False
+ )
+
class TestAddProjectToImage(TestImage):
@@ -317,7 +435,7 @@ class TestAddProjectToImage(TestImage):
)
def setUp(self):
- super(TestAddProjectToImage, self).setUp()
+ super().setUp()
# This is the return value for utils.find_resource()
self.client.find_image.return_value = self._image
@@ -327,7 +445,7 @@ class TestAddProjectToImage(TestImage):
self.project_mock.get.return_value = self.project
self.domain_mock.get.return_value = self.domain
# Get the command object to test
- self.cmd = image.AddProjectToImage(self.app, None)
+ self.cmd = _image.AddProjectToImage(self.app, None)
def test_add_project_to_image_no_option(self):
arglist = [
@@ -381,12 +499,12 @@ class TestAddProjectToImage(TestImage):
class TestImageDelete(TestImage):
def setUp(self):
- super(TestImageDelete, self).setUp()
+ super().setUp()
self.client.delete_image.return_value = None
# Get the command object to test
- self.cmd = image.DeleteImage(self.app, None)
+ self.cmd = _image.DeleteImage(self.app, None)
def test_image_delete_no_options(self):
images = self.setup_images_mock(count=1)
@@ -472,20 +590,17 @@ class TestImageList(TestImage):
),
def setUp(self):
- super(TestImageList, self).setUp()
+ super().setUp()
self.client.images.side_effect = [[self._image], []]
# Get the command object to test
- self.cmd = image.ListImage(self.app, None)
+ self.cmd = _image.ListImage(self.app, None)
def test_image_list_no_options(self):
arglist = []
verifylist = [
- ('public', False),
- ('private', False),
- ('community', False),
- ('shared', False),
+ ('visibility', None),
('long', False),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
@@ -506,10 +621,7 @@ class TestImageList(TestImage):
'--public',
]
verifylist = [
- ('public', True),
- ('private', False),
- ('community', False),
- ('shared', False),
+ ('visibility', 'public'),
('long', False),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
@@ -530,10 +642,7 @@ class TestImageList(TestImage):
'--private',
]
verifylist = [
- ('public', False),
- ('private', True),
- ('community', False),
- ('shared', False),
+ ('visibility', 'private'),
('long', False),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
@@ -554,10 +663,7 @@ class TestImageList(TestImage):
'--community',
]
verifylist = [
- ('public', False),
- ('private', False),
- ('community', True),
- ('shared', False),
+ ('visibility', 'community'),
('long', False),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
@@ -578,10 +684,7 @@ class TestImageList(TestImage):
'--shared',
]
verifylist = [
- ('public', False),
- ('private', False),
- ('community', False),
- ('shared', True),
+ ('visibility', 'shared'),
('long', False),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
@@ -597,16 +700,34 @@ class TestImageList(TestImage):
self.assertEqual(self.columns, columns)
self.assertCountEqual(self.datalist, tuple(data))
+ def test_image_list_all_option(self):
+ arglist = [
+ '--all',
+ ]
+ verifylist = [
+ ('visibility', 'all'),
+ ('long', False),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ # In base command class Lister in cliff, abstract method take_action()
+ # returns a tuple containing the column names and an iterable
+ # containing the data to be listed.
+ columns, data = self.cmd.take_action(parsed_args)
+ self.client.images.assert_called_with(
+ visibility='all',
+ )
+
+ self.assertEqual(self.columns, columns)
+ self.assertCountEqual(self.datalist, tuple(data))
+
def test_image_list_shared_member_status_option(self):
arglist = [
'--shared',
'--member-status', 'all'
]
verifylist = [
- ('public', False),
- ('private', False),
- ('community', False),
- ('shared', True),
+ ('visibility', 'shared'),
('long', False),
('member_status', 'all')
]
@@ -630,10 +751,7 @@ class TestImageList(TestImage):
'--member-status', 'ALl'
]
verifylist = [
- ('public', False),
- ('private', False),
- ('community', False),
- ('shared', True),
+ ('visibility', 'shared'),
('long', False),
('member_status', 'all')
]
@@ -787,7 +905,10 @@ class TestImageList(TestImage):
marker=self._image.id,
)
- self.client.find_image.assert_called_with('graven')
+ self.client.find_image.assert_called_with(
+ 'graven',
+ ignore_missing=False,
+ )
def test_image_list_name_option(self):
arglist = [
@@ -823,7 +944,7 @@ class TestImageList(TestImage):
'--hidden',
]
verifylist = [
- ('hidden', True),
+ ('is_hidden', True),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
@@ -835,15 +956,16 @@ class TestImageList(TestImage):
def test_image_list_tag_option(self):
arglist = [
'--tag', 'abc',
+ '--tag', 'cba'
]
verifylist = [
- ('tag', 'abc'),
+ ('tag', ['abc', 'cba']),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
self.client.images.assert_called_with(
- tag='abc'
+ tag=['abc', 'cba']
)
@@ -869,12 +991,12 @@ class TestListImageProjects(TestImage):
)]
def setUp(self):
- super(TestListImageProjects, self).setUp()
+ super().setUp()
self.client.find_image.return_value = self._image
self.client.members.return_value = [self.member]
- self.cmd = image.ListImageProjects(self.app, None)
+ self.cmd = _image.ListImageProjects(self.app, None)
def test_image_member_list(self):
arglist = [
@@ -899,7 +1021,7 @@ class TestRemoveProjectImage(TestImage):
domain = identity_fakes.FakeDomain.create_one_domain()
def setUp(self):
- super(TestRemoveProjectImage, self).setUp()
+ super().setUp()
self._image = image_fakes.create_one_image()
# This is the return value for utils.find_resource()
@@ -909,7 +1031,7 @@ class TestRemoveProjectImage(TestImage):
self.domain_mock.get.return_value = self.domain
self.client.remove_member.return_value = None
# Get the command object to test
- self.cmd = image.RemoveProjectImage(self.app, None)
+ self.cmd = _image.RemoveProjectImage(self.app, None)
def test_remove_project_image_no_options(self):
arglist = [
@@ -963,7 +1085,7 @@ class TestImageSet(TestImage):
_image = image_fakes.create_one_image({'tags': []})
def setUp(self):
- super(TestImageSet, self).setUp()
+ super().setUp()
self.project_mock.get.return_value = self.project
@@ -976,7 +1098,7 @@ class TestImageSet(TestImage):
)
# Get the command object to test
- self.cmd = image.SetImage(self.app, None)
+ self.cmd = _image.SetImage(self.app, None)
def test_image_set_no_options(self):
arglist = [
@@ -991,7 +1113,7 @@ class TestImageSet(TestImage):
self.assertIsNone(result)
# we'll have called this but not set anything
- self.app.client_manager.image.update_image.called_once_with(
+ self.app.client_manager.image.update_image.assert_called_once_with(
self._image.id,
)
@@ -1145,10 +1267,8 @@ class TestImageSet(TestImage):
'graven',
]
verifylist = [
- ('protected', True),
- ('unprotected', False),
- ('public', False),
- ('private', True),
+ ('is_protected', True),
+ ('visibility', 'private'),
('image', 'graven'),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
@@ -1173,10 +1293,8 @@ class TestImageSet(TestImage):
'graven',
]
verifylist = [
- ('protected', False),
- ('unprotected', True),
- ('public', True),
- ('private', False),
+ ('is_protected', False),
+ ('visibility', 'public'),
('image', 'graven'),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
@@ -1395,7 +1513,7 @@ class TestImageSet(TestImage):
'graven',
]
verifylist = [
- ('visibility', '1-mile'),
+ ('dead_visibility', '1-mile'),
('image', 'graven'),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
@@ -1437,9 +1555,8 @@ class TestImageSet(TestImage):
'graven',
]
verifylist = [
- ('hidden', True),
- ('public', True),
- ('private', False),
+ ('is_hidden', True),
+ ('visibility', 'public'),
('image', 'graven'),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
@@ -1464,9 +1581,8 @@ class TestImageSet(TestImage):
'graven',
]
verifylist = [
- ('hidden', False),
- ('public', True),
- ('private', False),
+ ('is_hidden', False),
+ ('visibility', 'public'),
('image', 'graven'),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
@@ -1506,12 +1622,12 @@ class TestImageShow(TestImage):
)
def setUp(self):
- super(TestImageShow, self).setUp()
+ super().setUp()
self.client.find_image = mock.Mock(return_value=self._data)
# Get the command object to test
- self.cmd = image.ShowImage(self.app, None)
+ self.cmd = _image.ShowImage(self.app, None)
def test_image_show(self):
arglist = [
@@ -1562,7 +1678,7 @@ class TestImageShow(TestImage):
class TestImageUnset(TestImage):
def setUp(self):
- super(TestImageUnset, self).setUp()
+ super().setUp()
attrs = {}
attrs['tags'] = ['test']
@@ -1576,7 +1692,7 @@ class TestImageUnset(TestImage):
self.client.update_image.return_value = self.image
# Get the command object to test
- self.cmd = image.UnsetImage(self.app, None)
+ self.cmd = _image.UnsetImage(self.app, None)
def test_image_unset_no_options(self):
arglist = [
@@ -1656,25 +1772,282 @@ class TestImageUnset(TestImage):
self.assertIsNone(result)
+class TestImageStage(TestImage):
+
+ image = image_fakes.create_one_image({})
+
+ def setUp(self):
+ super().setUp()
+
+ self.client.find_image.return_value = self.image
+
+ self.cmd = _image.StageImage(self.app, None)
+
+ def test_stage_image__from_file(self):
+ imagefile = tempfile.NamedTemporaryFile(delete=False)
+ imagefile.write(b'\0')
+ imagefile.close()
+
+ arglist = [
+ '--file', imagefile.name,
+ self.image.name,
+ ]
+ verifylist = [
+ ('filename', imagefile.name),
+ ('image', self.image.name),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ self.cmd.take_action(parsed_args)
+
+ self.client.stage_image.assert_called_once_with(
+ self.image,
+ filename=imagefile.name,
+ )
+
+ @mock.patch('openstackclient.image.v2.image.get_data_from_stdin')
+ def test_stage_image__from_stdin(self, mock_get_data_from_stdin):
+ fake_stdin = io.BytesIO(b"some initial binary data: \x00\x01")
+ mock_get_data_from_stdin.return_value = fake_stdin
+
+ arglist = [
+ self.image.name,
+ ]
+ verifylist = [
+ ('image', self.image.name),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ self.cmd.take_action(parsed_args)
+
+ self.client.stage_image.assert_called_once_with(
+ self.image,
+ data=fake_stdin,
+ )
+
+
+class TestImageImport(TestImage):
+
+ image = image_fakes.create_one_image(
+ {
+ 'container_format': 'bare',
+ 'disk_format': 'qcow2',
+ }
+ )
+ import_info = image_fakes.create_one_import_info()
+
+ def setUp(self):
+ super().setUp()
+
+ self.client.find_image.return_value = self.image
+ self.client.get_import_info.return_value = self.import_info
+
+ self.cmd = _image.ImportImage(self.app, None)
+
+ def test_import_image__glance_direct(self):
+ self.image.status = 'uploading'
+ arglist = [
+ self.image.name,
+ ]
+ verifylist = [
+ ('image', self.image.name),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ self.cmd.take_action(parsed_args)
+
+ self.client.import_image.assert_called_once_with(
+ self.image,
+ method='glance-direct',
+ uri=None,
+ remote_region=None,
+ remote_image=None,
+ remote_service_interface=None,
+ stores=None,
+ all_stores=None,
+ all_stores_must_succeed=False,
+ )
+
+ def test_import_image__web_download(self):
+ self.image.status = 'queued'
+ arglist = [
+ self.image.name,
+ '--method', 'web-download',
+ '--uri', 'https://example.com/',
+ ]
+ verifylist = [
+ ('image', self.image.name),
+ ('import_method', 'web-download'),
+ ('uri', 'https://example.com/'),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ self.cmd.take_action(parsed_args)
+
+ self.client.import_image.assert_called_once_with(
+ self.image,
+ method='web-download',
+ uri='https://example.com/',
+ remote_region=None,
+ remote_image=None,
+ remote_service_interface=None,
+ stores=None,
+ all_stores=None,
+ all_stores_must_succeed=False,
+ )
+
+ # NOTE(stephenfin): We don't do this for all combinations since that would
+ # be tedious af. You get the idea...
+ def test_import_image__web_download_missing_options(self):
+ arglist = [
+ self.image.name,
+ '--method', 'web-download',
+ ]
+ verifylist = [
+ ('image', self.image.name),
+ ('import_method', 'web-download'),
+ ('uri', None),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ exc = self.assertRaises(
+ exceptions.CommandError,
+ self.cmd.take_action,
+ parsed_args,
+ )
+ self.assertIn("The '--uri' option is required ", str(exc))
+
+ self.client.import_image.assert_not_called()
+
+ # NOTE(stephenfin): Ditto
+ def test_import_image__web_download_invalid_options(self):
+ arglist = [
+ self.image.name,
+ '--method', 'glance-direct', # != web-download
+ '--uri', 'https://example.com/',
+ ]
+ verifylist = [
+ ('image', self.image.name),
+ ('import_method', 'glance-direct'),
+ ('uri', 'https://example.com/'),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ exc = self.assertRaises(
+ exceptions.CommandError,
+ self.cmd.take_action,
+ parsed_args,
+ )
+ self.assertIn("The '--uri' option is only supported ", str(exc))
+
+ self.client.import_image.assert_not_called()
+
+ def test_import_image__web_download_invalid_image_state(self):
+ self.image.status = 'uploading' # != 'queued'
+ arglist = [
+ self.image.name,
+ '--method', 'web-download',
+ '--uri', 'https://example.com/',
+ ]
+ verifylist = [
+ ('image', self.image.name),
+ ('import_method', 'web-download'),
+ ('uri', 'https://example.com/'),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ exc = self.assertRaises(
+ exceptions.CommandError,
+ self.cmd.take_action,
+ parsed_args,
+ )
+ self.assertIn(
+ "The 'web-download' import method can only be used with "
+ "an image in status 'queued'",
+ str(exc),
+ )
+
+ self.client.import_image.assert_not_called()
+
+ def test_import_image__copy_image(self):
+ self.image.status = 'active'
+ arglist = [
+ self.image.name,
+ '--method', 'copy-image',
+ '--store', 'fast',
+ ]
+ verifylist = [
+ ('image', self.image.name),
+ ('import_method', 'copy-image'),
+ ('stores', ['fast']),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ self.cmd.take_action(parsed_args)
+
+ self.client.import_image.assert_called_once_with(
+ self.image,
+ method='copy-image',
+ uri=None,
+ remote_region=None,
+ remote_image=None,
+ remote_service_interface=None,
+ stores=['fast'],
+ all_stores=None,
+ all_stores_must_succeed=False,
+ )
+
+ def test_import_image__glance_download(self):
+ arglist = [
+ self.image.name,
+ '--method', 'glance-download',
+ '--remote-region', 'eu/dublin',
+ '--remote-image', 'remote-image-id',
+ '--remote-service-interface', 'private',
+ ]
+ verifylist = [
+ ('image', self.image.name),
+ ('import_method', 'glance-download'),
+ ('remote_region', 'eu/dublin'),
+ ('remote_image', 'remote-image-id'),
+ ('remote_service_interface', 'private'),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ self.cmd.take_action(parsed_args)
+
+ self.client.import_image.assert_called_once_with(
+ self.image,
+ method='glance-download',
+ uri=None,
+ remote_region='eu/dublin',
+ remote_image='remote-image-id',
+ remote_service_interface='private',
+ stores=None,
+ all_stores=None,
+ all_stores_must_succeed=False,
+ )
+
+
class TestImageSave(TestImage):
image = image_fakes.create_one_image({})
def setUp(self):
- super(TestImageSave, self).setUp()
+ super().setUp()
self.client.find_image.return_value = self.image
self.client.download_image.return_value = self.image
# Get the command object to test
- self.cmd = image.SaveImage(self.app, None)
+ self.cmd = _image.SaveImage(self.app, None)
def test_save_data(self):
arglist = ['--file', '/path/to/file', self.image.id]
verifylist = [
- ('file', '/path/to/file'),
+ ('filename', '/path/to/file'),
('image', self.image.id),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
@@ -1689,49 +2062,26 @@ class TestImageSave(TestImage):
class TestImageGetData(TestImage):
- def setUp(self):
- super(TestImageGetData, self).setUp()
- self.args = mock.Mock()
-
- def test_get_data_file_file(self):
- (fd, fname) = tempfile.mkstemp(prefix='osc_test_image')
- self.args.file = fname
-
- (test_fd, test_name) = image.get_data_file(self.args)
-
- self.assertEqual(fname, test_name)
- test_fd.close()
-
- os.unlink(fname)
-
- def test_get_data_file_2(self):
-
- self.args.file = None
-
- f = io.BytesIO(b"some initial binary data: \x00\x01")
+ def test_get_data_from_stdin(self):
+ fd = io.BytesIO(b"some initial binary data: \x00\x01")
with mock.patch('sys.stdin') as stdin:
- stdin.return_value = f
+ stdin.return_value = fd
stdin.isatty.return_value = False
- stdin.buffer = f
+ stdin.buffer = fd
- (test_fd, test_name) = image.get_data_file(self.args)
+ test_fd = _image.get_data_from_stdin()
# Ensure data written to temp file is correct
- self.assertEqual(f, test_fd)
- self.assertIsNone(test_name)
-
- def test_get_data_file_3(self):
-
- self.args.file = None
+ self.assertEqual(fd, test_fd)
- f = io.BytesIO(b"some initial binary data: \x00\x01")
+ def test_get_data_from_stdin__interactive(self):
+ fd = io.BytesIO(b"some initial binary data: \x00\x01")
with mock.patch('sys.stdin') as stdin:
# There is stdin, but interactive
- stdin.return_value = f
+ stdin.return_value = fd
- (test_fd, test_fname) = image.get_data_file(self.args)
+ test_fd = _image.get_data_from_stdin()
self.assertIsNone(test_fd)
- self.assertIsNone(test_fname)
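
The two TestImageGetData cases pin down the new helper's contract: data is read from sys.stdin.buffer only when stdin is not an interactive terminal, otherwise the helper returns None. A reconstruction consistent with those tests (the real implementation in openstackclient.image.v2.image may differ in detail):

import sys

def get_data_from_stdin():
    # Reconstructed from the tests above, not copied from the module.
    if sys.stdin and not sys.stdin.isatty():
        return sys.stdin.buffer
    return None
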
diff --git a/openstackclient/tests/unit/image/v2/test_metadef_namespaces.py b/openstackclient/tests/unit/image/v2/test_metadef_namespaces.py
new file mode 100644
index 00000000..7ed11838
--- /dev/null
+++ b/openstackclient/tests/unit/image/v2/test_metadef_namespaces.py
@@ -0,0 +1,215 @@
+# Copyright 2013 Nebula Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from openstackclient.image.v2 import metadef_namespaces
+from openstackclient.tests.unit.image.v2 import fakes as md_namespace_fakes
+
+
+class TestMetadefNamespaces(md_namespace_fakes.TestMetadefNamespaces):
+ def setUp(self):
+ super().setUp()
+
+ # Get shortcuts to mocked image client
+ self.client = self.app.client_manager.image
+
+ # Get shortcut to the Mocks in identity client
+ self.project_mock = self.app.client_manager.identity.projects
+ self.project_mock.reset_mock()
+ self.domain_mock = self.app.client_manager.identity.domains
+ self.domain_mock.reset_mock()
+
+
+class TestMetadefNamespaceCreate(TestMetadefNamespaces):
+ _metadef_namespace = md_namespace_fakes.create_one_metadef_namespace()
+
+ expected_columns = (
+ 'created_at',
+ 'description',
+ 'display_name',
+ 'id',
+ 'is_protected',
+ 'location',
+ 'name',
+ 'namespace',
+ 'owner',
+ 'resource_type_associations',
+ 'updated_at',
+ 'visibility'
+ )
+ expected_data = (
+ _metadef_namespace.created_at,
+ _metadef_namespace.description,
+ _metadef_namespace.display_name,
+ _metadef_namespace.id,
+ _metadef_namespace.is_protected,
+ _metadef_namespace.location,
+ _metadef_namespace.name,
+ _metadef_namespace.namespace,
+ _metadef_namespace.owner,
+ _metadef_namespace.resource_type_associations,
+ _metadef_namespace.updated_at,
+ _metadef_namespace.visibility
+ )
+
+ def setUp(self):
+ super().setUp()
+
+ self.client.create_metadef_namespace.return_value \
+ = self._metadef_namespace
+ self.cmd = metadef_namespaces.CreateMetadefNameSpace(self.app, None)
+ self.datalist = self._metadef_namespace
+
+ def test_namespace_create(self):
+ arglist = [
+ self._metadef_namespace.namespace
+ ]
+
+        verifylist = []
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ columns, data = self.cmd.take_action(parsed_args)
+
+ self.assertEqual(self.expected_columns, columns)
+ self.assertEqual(self.expected_data, data)
+
+
+class TestMetadefNamespaceDelete(TestMetadefNamespaces):
+ _metadef_namespace = md_namespace_fakes.create_one_metadef_namespace()
+
+ def setUp(self):
+ super().setUp()
+
+ self.client.delete_metadef_namespace.return_value \
+ = self._metadef_namespace
+ self.cmd = metadef_namespaces.DeleteMetadefNameSpace(self.app, None)
+ self.datalist = self._metadef_namespace
+
+    def test_namespace_delete(self):
+ arglist = [
+ self._metadef_namespace.namespace
+ ]
+
+        verifylist = []
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ result = self.cmd.take_action(parsed_args)
+
+ self.assertIsNone(result)
+
+
+class TestMetadefNamespaceList(TestMetadefNamespaces):
+ _metadef_namespace = [md_namespace_fakes.create_one_metadef_namespace()]
+
+ columns = [
+ 'namespace'
+ ]
+
+ datalist = []
+
+ def setUp(self):
+ super().setUp()
+
+ self.client.metadef_namespaces.side_effect = [
+ self._metadef_namespace, []]
+
+        self.client.metadef_namespaces.return_value = iter(
+            self._metadef_namespace
+        )
+
+        # Get the command object to test
+        self.cmd = metadef_namespaces.ListMetadefNameSpaces(self.app, None)
+ self.datalist = self._metadef_namespace
+
+ def test_namespace_list_no_options(self):
+ arglist = []
+ parsed_args = self.check_parser(self.cmd, arglist, [])
+
+ # In base command class Lister in cliff, abstract method take_action()
+ # returns a tuple containing the column names and an iterable
+ # containing the data to be listed.
+ columns, data = self.cmd.take_action(parsed_args)
+
+ self.assertEqual(self.columns, columns)
+        self.assertEqual(self.datalist[0].namespace, next(data)[0])
+
+
+class TestMetadefNamespaceSet(TestMetadefNamespaces):
+ _metadef_namespace = md_namespace_fakes.create_one_metadef_namespace()
+
+ def setUp(self):
+ super().setUp()
+
+ self.client.update_metadef_namespace.return_value \
+ = self._metadef_namespace
+ self.cmd = metadef_namespaces.SetMetadefNameSpace(self.app, None)
+ self.datalist = self._metadef_namespace
+
+ def test_namespace_set_no_options(self):
+ arglist = [
+ self._metadef_namespace.namespace
+ ]
+ verifylist = [
+ ('namespace', self._metadef_namespace.namespace),
+ ]
+
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ result = self.cmd.take_action(parsed_args)
+
+ self.assertIsNone(result)
+
+
+class TestMetadefNamespaceShow(TestMetadefNamespaces):
+ _metadef_namespace = md_namespace_fakes.create_one_metadef_namespace()
+
+ expected_columns = (
+ 'created_at',
+ 'display_name',
+ 'namespace',
+ 'owner',
+ 'visibility'
+ )
+ expected_data = (
+ _metadef_namespace.created_at,
+ _metadef_namespace.display_name,
+ _metadef_namespace.namespace,
+ _metadef_namespace.owner,
+ _metadef_namespace.visibility
+ )
+
+ def setUp(self):
+ super().setUp()
+
+ self.client.get_metadef_namespace.return_value \
+ = self._metadef_namespace
+ self.cmd = metadef_namespaces.ShowMetadefNameSpace(self.app, None)
+
+ def test_namespace_show_no_options(self):
+ arglist = [
+ self._metadef_namespace.namespace
+ ]
+
+        verifylist = []
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ columns, data = self.cmd.take_action(parsed_args)
+
+ self.assertEqual(self.expected_columns, columns)
+ self.assertEqual(self.expected_data, data)
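
The metadef namespace tests above all follow the same cliff command-test
pattern: build an argument list, validate it with check_parser(), run
take_action(), and compare the returned (columns, data) pair. A minimal
sketch of that contract, independent of the OSC test harness, is shown
below; ExampleShow and its columns are illustrative assumptions, not part
of this patch.

import argparse


class ExampleShow:
    """Mimics the cliff ShowOne contract: take_action() returns a tuple of
    column names and a matching tuple of data."""

    def get_parser(self, prog_name):
        parser = argparse.ArgumentParser(prog=prog_name)
        parser.add_argument('namespace', help='Namespace to display')
        return parser

    def take_action(self, parsed_args):
        # A real command would call the image client here.
        columns = ('namespace', 'visibility')
        data = (parsed_args.namespace, 'public')
        return columns, data


cmd = ExampleShow()
args = cmd.get_parser('example show').parse_args(['ns1'])
assert cmd.take_action(args) == (('namespace', 'visibility'), ('ns1', 'public'))
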
diff --git a/openstackclient/tests/unit/image/v2/test_task.py b/openstackclient/tests/unit/image/v2/test_task.py
new file mode 100644
index 00000000..e077e2b1
--- /dev/null
+++ b/openstackclient/tests/unit/image/v2/test_task.py
@@ -0,0 +1,187 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from osc_lib.cli import format_columns
+
+from openstackclient.image.v2 import task
+from openstackclient.tests.unit.image.v2 import fakes as image_fakes
+
+
+class TestTask(image_fakes.TestImagev2):
+ def setUp(self):
+ super().setUp()
+
+ # Get shortcuts to mocked image client
+ self.client = self.app.client_manager.image
+
+
+class TestTaskShow(TestTask):
+
+ task = image_fakes.create_one_task()
+
+ columns = (
+ 'created_at',
+ 'expires_at',
+ 'id',
+ 'input',
+ 'message',
+ 'owner_id',
+ 'properties',
+ 'result',
+ 'status',
+ 'type',
+ 'updated_at',
+ )
+ data = (
+ task.created_at,
+ task.expires_at,
+ task.id,
+ task.input,
+ task.message,
+ task.owner_id,
+ format_columns.DictColumn({}),
+ task.result,
+ task.status,
+ task.type,
+ task.updated_at,
+ )
+
+ def setUp(self):
+ super().setUp()
+
+ self.client.get_task.return_value = self.task
+
+ # Get the command object to test
+ self.cmd = task.ShowTask(self.app, None)
+
+ def test_task_show(self):
+ arglist = [self.task.id]
+ verifylist = [
+ ('task', self.task.id),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ # In base command class ShowOne in cliff, abstract method take_action()
+ # returns a two-part tuple with a tuple of column names and a tuple of
+ # data to be shown.
+ columns, data = self.cmd.take_action(parsed_args)
+ self.client.get_task.assert_called_with(self.task.id)
+
+ self.assertEqual(self.columns, columns)
+ self.assertCountEqual(self.data, data)
+
+
+class TestTaskList(TestTask):
+
+ tasks = image_fakes.create_tasks()
+
+ columns = (
+ 'ID',
+ 'Type',
+ 'Status',
+ 'Owner',
+ )
+ datalist = [
+ (
+ task.id,
+ task.type,
+ task.status,
+ task.owner_id,
+ )
+ for task in tasks
+ ]
+
+ def setUp(self):
+ super().setUp()
+
+ self.client.tasks.side_effect = [self.tasks, []]
+
+ # Get the command object to test
+ self.cmd = task.ListTask(self.app, None)
+
+ def test_task_list_no_options(self):
+ arglist = []
+ verifylist = [
+ ('sort_key', None),
+ ('sort_dir', None),
+ ('limit', None),
+ ('marker', None),
+ ('type', None),
+ ('status', None),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ columns, data = self.cmd.take_action(parsed_args)
+
+ self.client.tasks.assert_called_with()
+
+ self.assertEqual(self.columns, columns)
+ self.assertCountEqual(self.datalist, data)
+
+ def test_task_list_sort_key_option(self):
+ arglist = ['--sort-key', 'created_at']
+ verifylist = [('sort_key', 'created_at')]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ columns, data = self.cmd.take_action(parsed_args)
+
+ self.client.tasks.assert_called_with(
+ sort_key=parsed_args.sort_key,
+ )
+
+ self.assertEqual(self.columns, columns)
+ self.assertCountEqual(self.datalist, data)
+
+ def test_task_list_sort_dir_option(self):
+ arglist = ['--sort-dir', 'desc']
+ verifylist = [('sort_dir', 'desc')]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ self.cmd.take_action(parsed_args)
+
+ self.client.tasks.assert_called_with(
+ sort_dir=parsed_args.sort_dir,
+ )
+
+ def test_task_list_pagination_options(self):
+ arglist = ['--limit', '1', '--marker', self.tasks[0].id]
+ verifylist = [('limit', 1), ('marker', self.tasks[0].id)]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ self.cmd.take_action(parsed_args)
+
+ self.client.tasks.assert_called_with(
+ limit=parsed_args.limit,
+ marker=parsed_args.marker,
+ )
+
+ def test_task_list_type_option(self):
+ arglist = ['--type', self.tasks[0].type]
+ verifylist = [('type', self.tasks[0].type)]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ self.cmd.take_action(parsed_args)
+
+ self.client.tasks.assert_called_with(
+ type=self.tasks[0].type,
+ )
+
+ def test_task_list_status_option(self):
+ arglist = ['--status', self.tasks[0].status]
+ verifylist = [('status', self.tasks[0].status)]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ self.cmd.take_action(parsed_args)
+
+ self.client.tasks.assert_called_with(
+ status=self.tasks[0].status,
+ )
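
The ListTask tests assert that client.tasks() receives only the filters the
user actually supplied (for example, just sort_key when only --sort-key is
given). A hedged sketch of that filter-forwarding behaviour follows;
build_task_filters is a hypothetical helper, not the implementation in
openstackclient/image/v2/task.py.

def build_task_filters(parsed_args):
    """Collect only the options that were explicitly provided."""
    filters = {}
    for attr in ('sort_key', 'sort_dir', 'limit', 'marker', 'type', 'status'):
        value = getattr(parsed_args, attr, None)
        if value is not None:
            filters[attr] = value
    return filters


class _Args:
    sort_key = 'created_at'
    sort_dir = None
    limit = None
    marker = None
    type = None
    status = None


# Only the supplied option is forwarded, matching the assert_called_with
# expectations in TestTaskList above.
assert build_task_filters(_Args()) == {'sort_key': 'created_at'}
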
diff --git a/openstackclient/tests/unit/network/v2/fakes.py b/openstackclient/tests/unit/network/v2/fakes.py
index bb113d3c..6d922008 100644
--- a/openstackclient/tests/unit/network/v2/fakes.py
+++ b/openstackclient/tests/unit/network/v2/fakes.py
@@ -20,15 +20,21 @@ import uuid
from openstack.network.v2 import address_group as _address_group
from openstack.network.v2 import address_scope as _address_scope
+from openstack.network.v2 import agent as network_agent
from openstack.network.v2 import auto_allocated_topology as allocated_topology
from openstack.network.v2 import availability_zone as _availability_zone
from openstack.network.v2 import flavor as _flavor
from openstack.network.v2 import local_ip as _local_ip
from openstack.network.v2 import local_ip_association as _local_ip_association
+from openstack.network.v2 import ndp_proxy as _ndp_proxy
from openstack.network.v2 import network as _network
from openstack.network.v2 import network_ip_availability as _ip_availability
from openstack.network.v2 import network_segment_range as _segment_range
+from openstack.network.v2 import port as _port
+from openstack.network.v2 import rbac_policy as network_rbac
from openstack.network.v2 import segment as _segment
+from openstack.network.v2 import service_profile as _flavor_profile
+from openstack.network.v2 import trunk as _trunk
from openstackclient.tests.unit import fakes
from openstackclient.tests.unit.identity.v3 import fakes as identity_fakes_v3
@@ -54,9 +60,11 @@ QUOTA = {
RULE_TYPE_BANDWIDTH_LIMIT = 'bandwidth-limit'
RULE_TYPE_DSCP_MARKING = 'dscp-marking'
RULE_TYPE_MINIMUM_BANDWIDTH = 'minimum-bandwidth'
+RULE_TYPE_MINIMUM_PACKET_RATE = 'minimum-packet-rate'
VALID_QOS_RULES = [RULE_TYPE_BANDWIDTH_LIMIT,
RULE_TYPE_DSCP_MARKING,
- RULE_TYPE_MINIMUM_BANDWIDTH]
+ RULE_TYPE_MINIMUM_BANDWIDTH,
+ RULE_TYPE_MINIMUM_PACKET_RATE]
VALID_DSCP_MARKS = [0, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32,
34, 36, 38, 40, 46, 48, 56]
@@ -128,297 +136,6 @@ class FakeExtension(object):
return extension
-class FakePort(object):
- """Fake one or more ports."""
-
- @staticmethod
- def create_one_port(attrs=None):
- """Create a fake port.
-
- :param Dictionary attrs:
- A dictionary with all attributes
- :return:
- A FakeResource object, with id, name, etc.
- """
- attrs = attrs or {}
-
- # Set default attributes.
- port_attrs = {
- 'admin_state_up': True,
- 'allowed_address_pairs': [{}],
- 'binding:host_id': 'binding-host-id-' + uuid.uuid4().hex,
- 'binding:profile': {},
- 'binding:vif_details': {},
- 'binding:vif_type': 'ovs',
- 'binding:vnic_type': 'normal',
- 'data_plane_status': None,
- 'description': 'description-' + uuid.uuid4().hex,
- 'device_id': 'device-id-' + uuid.uuid4().hex,
- 'device_owner': 'compute:nova',
- 'device_profile': 'cyborg_device_profile_1',
- 'dns_assignment': [{}],
- 'dns_domain': 'dns-domain-' + uuid.uuid4().hex,
- 'dns_name': 'dns-name-' + uuid.uuid4().hex,
- 'extra_dhcp_opts': [{}],
- 'fixed_ips': [{'ip_address': '10.0.0.3',
- 'subnet_id': 'subnet-id-' + uuid.uuid4().hex}],
- 'id': 'port-id-' + uuid.uuid4().hex,
- 'mac_address': 'fa:16:3e:a9:4e:72',
- 'name': 'port-name-' + uuid.uuid4().hex,
- 'network_id': 'network-id-' + uuid.uuid4().hex,
- 'numa_affinity_policy': 'required',
- 'port_security_enabled': True,
- 'security_group_ids': [],
- 'status': 'ACTIVE',
- 'project_id': 'project-id-' + uuid.uuid4().hex,
- 'qos_network_policy_id': 'qos-policy-id-' + uuid.uuid4().hex,
- 'qos_policy_id': 'qos-policy-id-' + uuid.uuid4().hex,
- 'tags': [],
- 'propagate_uplink_status': False,
- 'location': 'MUNCHMUNCHMUNCH',
- }
-
- # Overwrite default attributes.
- port_attrs.update(attrs)
-
- port = fakes.FakeResource(info=copy.deepcopy(port_attrs),
- loaded=True)
-
- # Set attributes with special mappings in OpenStack SDK.
- port.binding_host_id = port_attrs['binding:host_id']
- port.binding_profile = port_attrs['binding:profile']
- port.binding_vif_details = port_attrs['binding:vif_details']
- port.binding_vif_type = port_attrs['binding:vif_type']
- port.binding_vnic_type = port_attrs['binding:vnic_type']
- port.is_admin_state_up = port_attrs['admin_state_up']
- port.is_port_security_enabled = port_attrs['port_security_enabled']
-
- return port
-
- @staticmethod
- def create_ports(attrs=None, count=2):
- """Create multiple fake ports.
-
- :param Dictionary attrs:
- A dictionary with all attributes
- :param int count:
- The number of ports to fake
- :return:
- A list of FakeResource objects faking the ports
- """
- ports = []
- for i in range(0, count):
- ports.append(FakePort.create_one_port(attrs))
-
- return ports
-
- @staticmethod
- def get_ports(ports=None, count=2):
- """Get an iterable Mock object with a list of faked ports.
-
- If ports list is provided, then initialize the Mock object with the
- list. Otherwise create one.
-
- :param List ports:
- A list of FakeResource objects faking ports
- :param int count:
- The number of ports to fake
- :return:
- An iterable Mock object with side_effect set to a list of faked
- ports
- """
- if ports is None:
- ports = FakePort.create_ports(count)
- return mock.Mock(side_effect=ports)
-
-
-class FakeNetworkAgent(object):
- """Fake one or more network agents."""
-
- @staticmethod
- def create_one_network_agent(attrs=None):
- """Create a fake network agent
-
- :param Dictionary attrs:
- A dictionary with all attributes
- :return:
- A FakeResource object, with id, agent_type, and so on.
- """
- attrs = attrs or {}
-
- # Set default attributes
- agent_attrs = {
- 'id': 'agent-id-' + uuid.uuid4().hex,
- 'agent_type': 'agent-type-' + uuid.uuid4().hex,
- 'host': 'host-' + uuid.uuid4().hex,
- 'availability_zone': 'zone-' + uuid.uuid4().hex,
- 'alive': True,
- 'admin_state_up': True,
- 'binary': 'binary-' + uuid.uuid4().hex,
- 'configurations': {'subnet': 2, 'networks': 1},
- 'location': 'MUNCHMUNCHMUNCH',
- }
- agent_attrs.update(attrs)
- agent = fakes.FakeResource(info=copy.deepcopy(agent_attrs),
- loaded=True)
- agent.is_admin_state_up = agent_attrs['admin_state_up']
- agent.is_alive = agent_attrs['alive']
- return agent
-
- @staticmethod
- def create_network_agents(attrs=None, count=2):
- """Create multiple fake network agents.
-
- :param Dictionary attrs:
- A dictionary with all attributes
- :param int count:
- The number of network agents to fake
- :return:
- A list of FakeResource objects faking the network agents
- """
- agents = []
- for i in range(0, count):
- agents.append(FakeNetworkAgent.create_one_network_agent(attrs))
-
- return agents
-
- @staticmethod
- def get_network_agents(agents=None, count=2):
- """Get an iterable Mock object with a list of faked network agents.
-
- If network agents list is provided, then initialize the Mock object
- with the list. Otherwise create one.
-
- :param List agents:
- A list of FakeResource objects faking network agents
- :param int count:
- The number of network agents to fake
- :return:
- An iterable Mock object with side_effect set to a list of faked
- network agents
- """
- if agents is None:
- agents = FakeNetworkAgent.create_network_agents(count)
- return mock.Mock(side_effect=agents)
-
-
-class FakeNetworkRBAC(object):
- """Fake one or more network rbac policies."""
-
- @staticmethod
- def create_one_network_rbac(attrs=None):
- """Create a fake network rbac
-
- :param Dictionary attrs:
- A dictionary with all attributes
- :return:
- A FakeResource object, with id, action, target_tenant,
- project_id, type
- """
- attrs = attrs or {}
-
- # Set default attributes
- rbac_attrs = {
- 'id': 'rbac-id-' + uuid.uuid4().hex,
- 'object_type': 'network',
- 'object_id': 'object-id-' + uuid.uuid4().hex,
- 'action': 'access_as_shared',
- 'target_tenant': 'target-tenant-' + uuid.uuid4().hex,
- 'project_id': 'project-id-' + uuid.uuid4().hex,
- 'location': 'MUNCHMUNCHMUNCH',
- }
- rbac_attrs.update(attrs)
- rbac = fakes.FakeResource(info=copy.deepcopy(rbac_attrs),
- loaded=True)
- # Set attributes with special mapping in OpenStack SDK.
- rbac.target_project_id = rbac_attrs['target_tenant']
- return rbac
-
- @staticmethod
- def create_network_rbacs(attrs=None, count=2):
- """Create multiple fake network rbac policies.
-
- :param Dictionary attrs:
- A dictionary with all attributes
- :param int count:
- The number of rbac policies to fake
- :return:
- A list of FakeResource objects faking the rbac policies
- """
- rbac_policies = []
- for i in range(0, count):
- rbac_policies.append(FakeNetworkRBAC.
- create_one_network_rbac(attrs))
-
- return rbac_policies
-
- @staticmethod
- def get_network_rbacs(rbac_policies=None, count=2):
- """Get an iterable Mock object with a list of faked rbac policies.
-
- If rbac policies list is provided, then initialize the Mock object
- with the list. Otherwise create one.
-
- :param List rbac_policies:
- A list of FakeResource objects faking rbac policies
- :param int count:
- The number of rbac policies to fake
- :return:
- An iterable Mock object with side_effect set to a list of faked
- rbac policies
- """
- if rbac_policies is None:
- rbac_policies = FakeNetworkRBAC.create_network_rbacs(count)
- return mock.Mock(side_effect=rbac_policies)
-
-
-class FakeNetworkFlavorProfile(object):
- """Fake network flavor profile."""
-
- @staticmethod
- def create_one_service_profile(attrs=None):
- """Create flavor profile."""
- attrs = attrs or {}
-
- flavor_profile_attrs = {
- 'id': 'flavor-profile-id' + uuid.uuid4().hex,
- 'description': 'flavor-profile-description-' + uuid.uuid4().hex,
- 'project_id': 'project-id-' + uuid.uuid4().hex,
- 'driver': 'driver-' + uuid.uuid4().hex,
- 'metainfo': 'metainfo-' + uuid.uuid4().hex,
- 'enabled': True,
- 'location': 'MUNCHMUNCHMUNCH',
- }
-
- flavor_profile_attrs.update(attrs)
-
- flavor_profile = fakes.FakeResource(
- info=copy.deepcopy(flavor_profile_attrs),
- loaded=True)
-
- flavor_profile.is_enabled = flavor_profile_attrs['enabled']
-
- return flavor_profile
-
- @staticmethod
- def create_service_profile(attrs=None, count=2):
- """Create multiple flavor profiles."""
-
- flavor_profiles = []
- for i in range(0, count):
- flavor_profiles.append(FakeNetworkFlavorProfile.
- create_one_service_profile(attrs))
- return flavor_profiles
-
- @staticmethod
- def get_service_profile(flavor_profile=None, count=2):
- """Get a list of flavor profiles."""
- if flavor_profile is None:
- flavor_profile = (FakeNetworkFlavorProfile.
- create_service_profile(count))
- return mock.Mock(side_effect=flavor_profile)
-
-
class FakeNetworkQosPolicy(object):
"""Fake one or more QoS policies."""
@@ -561,6 +278,9 @@ class FakeNetworkQosRule(object):
elif type == RULE_TYPE_MINIMUM_BANDWIDTH:
qos_rule_attrs['min_kbps'] = randint(1, 10000)
qos_rule_attrs['direction'] = 'egress'
+ elif type == RULE_TYPE_MINIMUM_PACKET_RATE:
+ qos_rule_attrs['min_kpps'] = randint(1, 10000)
+ qos_rule_attrs['direction'] = 'egress'
# Overwrite default attributes.
qos_rule_attrs.update(attrs)
@@ -1996,6 +1716,262 @@ def create_network_segment_ranges(attrs=None, count=2):
return network_segment_ranges
+def create_one_port(attrs=None):
+ """Create a fake port.
+
+ :param Dictionary attrs:
+ A dictionary with all attributes
+ :return:
+ A Port object, with id, name, etc.
+ """
+ attrs = attrs or {}
+
+ # Set default attributes.
+ port_attrs = {
+ 'is_admin_state_up': True,
+ 'allowed_address_pairs': [{}],
+ 'binding:host_id': 'binding-host-id-' + uuid.uuid4().hex,
+ 'binding:profile': {},
+ 'binding:vif_details': {},
+ 'binding:vif_type': 'ovs',
+ 'binding:vnic_type': 'normal',
+ 'data_plane_status': None,
+ 'description': 'description-' + uuid.uuid4().hex,
+ 'device_id': 'device-id-' + uuid.uuid4().hex,
+ 'device_owner': 'compute:nova',
+ 'device_profile': 'cyborg_device_profile_1',
+ 'dns_assignment': [{}],
+ 'dns_domain': 'dns-domain-' + uuid.uuid4().hex,
+ 'dns_name': 'dns-name-' + uuid.uuid4().hex,
+ 'extra_dhcp_opts': [{}],
+ 'fixed_ips': [{'ip_address': '10.0.0.3',
+ 'subnet_id': 'subnet-id-' + uuid.uuid4().hex}],
+ 'id': 'port-id-' + uuid.uuid4().hex,
+ 'mac_address': 'fa:16:3e:a9:4e:72',
+ 'name': 'port-name-' + uuid.uuid4().hex,
+ 'network_id': 'network-id-' + uuid.uuid4().hex,
+ 'numa_affinity_policy': 'required',
+ 'is_port_security_enabled': True,
+ 'security_group_ids': [],
+ 'status': 'ACTIVE',
+ 'project_id': 'project-id-' + uuid.uuid4().hex,
+ 'qos_network_policy_id': 'qos-policy-id-' + uuid.uuid4().hex,
+ 'qos_policy_id': 'qos-policy-id-' + uuid.uuid4().hex,
+ 'tags': [],
+ 'propagate_uplink_status': False,
+ 'location': 'MUNCHMUNCHMUNCH',
+ }
+
+ # Overwrite default attributes.
+ port_attrs.update(attrs)
+
+ port = _port.Port(**port_attrs)
+
+ return port
+
+
+def create_ports(attrs=None, count=2):
+ """Create multiple fake ports.
+
+ :param Dictionary attrs:
+ A dictionary with all attributes
+ :param int count:
+ The number of ports to fake
+ :return:
+ A list of Port objects faking the ports
+ """
+ ports = []
+ for i in range(0, count):
+ ports.append(create_one_port(attrs))
+
+ return ports
+
+
+def get_ports(ports=None, count=2):
+ """Get an iterable Mock object with a list of faked ports.
+
+ If ports list is provided, then initialize the Mock object with the
+ list. Otherwise create one.
+
+ :param List ports:
+ A list of Port objects faking ports
+ :param int count:
+ The number of ports to fake
+ :return:
+ An iterable Mock object with side_effect set to a list of faked
+ ports
+ """
+ if ports is None:
+ ports = create_ports(count)
+ return mock.Mock(side_effect=ports)
+
+
+def create_one_network_agent(attrs=None):
+ """Create a fake network agent
+
+ :param Dictionary attrs:
+ A dictionary with all attributes
+ :return:
+ An Agent object, with id, agent_type, and so on.
+ """
+ attrs = attrs or {}
+
+ # Set default attributes
+ agent_attrs = {
+ 'id': 'agent-id-' + uuid.uuid4().hex,
+ 'agent_type': 'agent-type-' + uuid.uuid4().hex,
+ 'host': 'host-' + uuid.uuid4().hex,
+ 'availability_zone': 'zone-' + uuid.uuid4().hex,
+ 'alive': True,
+ 'admin_state_up': True,
+ 'binary': 'binary-' + uuid.uuid4().hex,
+ 'configurations': {'subnet': 2, 'networks': 1},
+ 'location': 'MUNCHMUNCHMUNCH',
+ }
+ agent_attrs.update(attrs)
+ agent = network_agent.Agent(**agent_attrs)
+
+ return agent
+
+
+def create_network_agents(attrs=None, count=2):
+ """Create multiple fake network agents.
+
+ :param Dictionary attrs:
+ A dictionary with all attributes
+ :param int count:
+ The number of network agents to fake
+ :return:
+ A list of Agent objects faking the network agents
+ """
+ agents = []
+ for i in range(0, count):
+ agents.append(create_one_network_agent(attrs))
+
+ return agents
+
+
+def get_network_agents(agents=None, count=2):
+ """Get an iterable Mock object with a list of faked network agents.
+
+ If network agents list is provided, then initialize the Mock object
+ with the list. Otherwise create one.
+
+ :param List agents:
+ A list of Agent objects faking network agents
+ :param int count:
+ The number of network agents to fake
+ :return:
+ An iterable Mock object with side_effect set to a list of faked
+ network agents
+ """
+ if agents is None:
+ agents = create_network_agents(count)
+ return mock.Mock(side_effect=agents)
+
+
+def create_one_network_rbac(attrs=None):
+ """Create a fake network rbac
+
+ :param Dictionary attrs:
+ A dictionary with all attributes
+ :return:
+ A RBACPolicy object, with id, action, target_tenant,
+ project_id, type
+ """
+ attrs = attrs or {}
+
+ # Set default attributes
+ rbac_attrs = {
+ 'id': 'rbac-id-' + uuid.uuid4().hex,
+ 'object_type': 'network',
+ 'object_id': 'object-id-' + uuid.uuid4().hex,
+ 'action': 'access_as_shared',
+ 'target_tenant': 'target-tenant-' + uuid.uuid4().hex,
+ 'project_id': 'project-id-' + uuid.uuid4().hex,
+ 'location': 'MUNCHMUNCHMUNCH',
+ }
+
+ rbac_attrs.update(attrs)
+ rbac = network_rbac.RBACPolicy(**rbac_attrs)
+
+ return rbac
+
+
+def create_network_rbacs(attrs=None, count=2):
+ """Create multiple fake network rbac policies.
+
+ :param Dictionary attrs:
+ A dictionary with all attributes
+ :param int count:
+ The number of rbac policies to fake
+ :return:
+ A list of RBACPolicy objects faking the rbac policies
+ """
+ rbac_policies = []
+ for i in range(0, count):
+ rbac_policies.append(create_one_network_rbac(attrs))
+
+ return rbac_policies
+
+
+def get_network_rbacs(rbac_policies=None, count=2):
+ """Get an iterable Mock object with a list of faked rbac policies.
+
+ If rbac policies list is provided, then initialize the Mock object
+ with the list. Otherwise create one.
+
+ :param List rbac_policies:
+ A list of RBACPolicy objects faking rbac policies
+ :param int count:
+ The number of rbac policies to fake
+ :return:
+ An iterable Mock object with side_effect set to a list of faked
+ rbac policies
+ """
+ if rbac_policies is None:
+ rbac_policies = create_network_rbacs(count)
+ return mock.Mock(side_effect=rbac_policies)
+
+
+def create_one_service_profile(attrs=None):
+ """Create flavor profile."""
+ attrs = attrs or {}
+
+ flavor_profile_attrs = {
+ 'id': 'flavor-profile-id' + uuid.uuid4().hex,
+ 'description': 'flavor-profile-description-' + uuid.uuid4().hex,
+ 'project_id': 'project-id-' + uuid.uuid4().hex,
+ 'driver': 'driver-' + uuid.uuid4().hex,
+ 'metainfo': 'metainfo-' + uuid.uuid4().hex,
+ 'enabled': True,
+ 'location': 'MUNCHMUNCHMUNCH',
+ }
+
+ flavor_profile_attrs.update(attrs)
+
+ flavor_profile = _flavor_profile.ServiceProfile(**flavor_profile_attrs)
+
+ return flavor_profile
+
+
+def create_service_profile(attrs=None, count=2):
+ """Create multiple flavor profiles."""
+
+ flavor_profiles = []
+ for i in range(0, count):
+ flavor_profiles.append(create_one_service_profile(attrs))
+ return flavor_profiles
+
+
+def get_service_profile(flavor_profile=None, count=2):
+ """Get a list of flavor profiles."""
+ if flavor_profile is None:
+ flavor_profile = create_service_profile(count)
+
+ return mock.Mock(side_effect=flavor_profile)
+
+
def create_one_local_ip(attrs=None):
"""Create a fake local ip.
@@ -2129,3 +2105,143 @@ def get_local_ip_associations(local_ip_associations=None, count=2):
local_ip_associations = create_local_ip_associations(count)
return mock.Mock(side_effect=local_ip_associations)
+
+
+def create_one_ndp_proxy(attrs=None):
+ """Create a fake NDP proxy.
+
+ :param Dictionary attrs:
+ A dictionary with all attributes
+ :return:
+        An NDPProxy object with router_id, port_id, etc.
+ """
+ attrs = attrs or {}
+ router_id = (
+ attrs.get('router_id') or 'router-id-' + uuid.uuid4().hex
+ )
+ port_id = (
+ attrs.get('port_id') or 'port-id-' + uuid.uuid4().hex
+ )
+ # Set default attributes.
+ np_attrs = {
+ 'id': uuid.uuid4().hex,
+ 'name': 'ndp-proxy-name-' + uuid.uuid4().hex,
+ 'router_id': router_id,
+ 'port_id': port_id,
+ 'ip_address': '2001::1:2',
+ 'description': 'ndp-proxy-description-' + uuid.uuid4().hex,
+ 'project_id': 'project-id-' + uuid.uuid4().hex,
+ 'location': 'MUNCHMUNCHMUNCH',
+ }
+
+ # Overwrite default attributes.
+ np_attrs.update(attrs)
+
+ return _ndp_proxy.NDPProxy(**np_attrs)
+
+
+def create_ndp_proxies(attrs=None, count=2):
+ """Create multiple fake NDP proxies.
+
+ :param Dictionary attrs:
+ A dictionary with all attributes
+ :param int count:
+        The number of NDP proxies to fake
+ :return:
+        A list of NDPProxy objects faking the NDP proxies
+ """
+ ndp_proxies = []
+ for i in range(0, count):
+ ndp_proxies.append(
+ create_one_ndp_proxy(attrs)
+ )
+ return ndp_proxies
+
+
+def get_ndp_proxies(ndp_proxies=None, count=2):
+ """Get a list of faked NDP proxies.
+
+ If ndp_proxy list is provided, then initialize the Mock object
+ with the list. Otherwise create one.
+
+ :param List ndp_proxies:
+        A list of NDPProxy objects faking NDP proxies
+    :param int count:
+        The number of NDP proxies to fake
+    :return:
+        An iterable Mock object with side_effect set to a list of faked
+        NDP proxies
+ """
+ if ndp_proxies is None:
+ ndp_proxies = (
+ create_ndp_proxies(count)
+ )
+ return mock.Mock(side_effect=ndp_proxies)
+
+
+def create_one_trunk(attrs=None):
+ """Create a fake trunk.
+
+ :param Dictionary attrs:
+ A dictionary with all attributes
+ :return:
+        A Trunk object with name, id, etc.
+ """
+ attrs = attrs or {}
+
+ # Set default attributes.
+ trunk_attrs = {
+ 'id': 'trunk-id-' + uuid.uuid4().hex,
+ 'name': 'trunk-name-' + uuid.uuid4().hex,
+ 'description': '',
+ 'port_id': 'port-' + uuid.uuid4().hex,
+ 'admin_state_up': True,
+ 'project_id': 'project-id-' + uuid.uuid4().hex,
+ 'status': 'ACTIVE',
+ 'sub_ports': [{'port_id': 'subport-' +
+ uuid.uuid4().hex,
+ 'segmentation_type': 'vlan',
+ 'segmentation_id': 100}],
+ }
+ # Overwrite default attributes.
+ trunk_attrs.update(attrs)
+
+ trunk = _trunk.Trunk(**trunk_attrs)
+
+ return trunk
+
+
+def create_trunks(attrs=None, count=2):
+ """Create multiple fake trunks.
+
+ :param Dictionary attrs:
+ A dictionary with all attributes
+ :param int count:
+ The number of trunks to fake
+ :return:
+        A list of Trunk objects faking the trunks
+ """
+ trunks = []
+ for i in range(0, count):
+ trunks.append(create_one_trunk(attrs))
+
+ return trunks
+
+
+def get_trunks(trunks=None, count=2):
+ """Get an iterable Mock object with a list of faked trunks.
+
+ If trunk list is provided, then initialize the Mock object
+ with the list. Otherwise create one.
+
+ :param List trunks:
+        A list of Trunk objects faking trunks
+ :param int count:
+ The number of trunks to fake
+ :return:
+ An iterable Mock object with side_effect set to a list of faked
+ trunks
+ """
+ if trunks is None:
+ trunks = create_trunks(count)
+ return mock.Mock(side_effect=trunks)
diff --git a/openstackclient/tests/unit/network/v2/test_floating_ip_network.py b/openstackclient/tests/unit/network/v2/test_floating_ip_network.py
index a850045d..f76dcc79 100644
--- a/openstackclient/tests/unit/network/v2/test_floating_ip_network.py
+++ b/openstackclient/tests/unit/network/v2/test_floating_ip_network.py
@@ -42,7 +42,7 @@ class TestCreateFloatingIPNetwork(TestFloatingIPNetwork):
# Fake data for option tests.
floating_network = network_fakes.create_one_network()
subnet = network_fakes.FakeSubnet.create_one_subnet()
- port = network_fakes.FakePort.create_one_port()
+ port = network_fakes.create_one_port()
# The floating ip created.
floating_ip = network_fakes.FakeFloatingIP.create_one_floating_ip(
@@ -381,7 +381,7 @@ class TestListFloatingIPNetwork(TestFloatingIPNetwork):
fake_network = network_fakes.create_one_network({
'id': 'fake_network_id',
})
- fake_port = network_fakes.FakePort.create_one_port({
+ fake_port = network_fakes.create_one_port({
'id': 'fake_port_id',
})
fake_router = network_fakes.FakeRouter.create_one_router({
@@ -702,7 +702,7 @@ class TestSetFloatingIP(TestFloatingIPNetwork):
# Fake data for option tests.
floating_network = network_fakes.create_one_network()
subnet = network_fakes.FakeSubnet.create_one_subnet()
- port = network_fakes.FakePort.create_one_port()
+ port = network_fakes.create_one_port()
# The floating ip to be set.
floating_ip = network_fakes.FakeFloatingIP.create_one_floating_ip(
@@ -934,7 +934,7 @@ class TestUnsetFloatingIP(TestFloatingIPNetwork):
floating_network = network_fakes.create_one_network()
subnet = network_fakes.FakeSubnet.create_one_subnet()
- port = network_fakes.FakePort.create_one_port()
+ port = network_fakes.create_one_port()
# The floating ip to be unset.
floating_ip = network_fakes.FakeFloatingIP.create_one_floating_ip(
diff --git a/openstackclient/tests/unit/network/v2/test_floating_ip_port_forwarding.py b/openstackclient/tests/unit/network/v2/test_floating_ip_port_forwarding.py
index 4c82fd17..d0f5af8c 100644
--- a/openstackclient/tests/unit/network/v2/test_floating_ip_port_forwarding.py
+++ b/openstackclient/tests/unit/network/v2/test_floating_ip_port_forwarding.py
@@ -31,7 +31,7 @@ class TestFloatingIPPortForwarding(network_fakes.TestNetworkV2):
self.network = self.app.client_manager.network
self.floating_ip = (network_fakes.FakeFloatingIP.
create_one_floating_ip())
- self.port = network_fakes.FakePort.create_one_port()
+ self.port = network_fakes.create_one_port()
self.project = identity_fakes_v2.FakeProject.create_one_project()
self.network.find_port = mock.Mock(return_value=self.port)
diff --git a/openstackclient/tests/unit/network/v2/test_local_ip.py b/openstackclient/tests/unit/network/v2/test_local_ip.py
index 38e352f3..be23365e 100644
--- a/openstackclient/tests/unit/network/v2/test_local_ip.py
+++ b/openstackclient/tests/unit/network/v2/test_local_ip.py
@@ -41,7 +41,7 @@ class TestCreateLocalIP(TestLocalIP):
project = identity_fakes_v3.FakeProject.create_one_project()
domain = identity_fakes_v3.FakeDomain.create_one_domain()
local_ip_network = network_fakes.create_one_network()
- port = network_fakes.FakePort.create_one_port()
+ port = network_fakes.create_one_port()
# The new local ip created.
new_local_ip = network_fakes.create_one_local_ip(
attrs={'project_id': project.id,
@@ -96,7 +96,7 @@ class TestCreateLocalIP(TestLocalIP):
self.network.create_local_ip.assert_called_once_with(**{})
self.assertEqual(set(self.columns), set(columns))
- self.assertItemsEqual(self.data, data)
+ self.assertCountEqual(self.data, data)
def test_create_all_options(self):
arglist = [
@@ -130,7 +130,7 @@ class TestCreateLocalIP(TestLocalIP):
'ip_mode': self.new_local_ip.ip_mode,
})
self.assertEqual(set(self.columns), set(columns))
- self.assertItemsEqual(self.data, data)
+ self.assertCountEqual(self.data, data)
class TestDeleteLocalIP(TestLocalIP):
@@ -263,7 +263,7 @@ class TestListLocalIP(TestLocalIP):
self.network.local_ips.assert_called_once_with(**{})
self.assertEqual(self.columns, columns)
- self.assertItemsEqual(self.data, list(data))
+ self.assertCountEqual(self.data, list(data))
def test_local_ip_list_name(self):
arglist = [
@@ -278,7 +278,7 @@ class TestListLocalIP(TestLocalIP):
self.network.local_ips.assert_called_once_with(
**{'name': self.local_ips[0].name})
self.assertEqual(self.columns, columns)
- self.assertItemsEqual(self.data, list(data))
+ self.assertCountEqual(self.data, list(data))
def test_local_ip_list_project(self):
project = identity_fakes_v3.FakeProject.create_one_project()
@@ -295,7 +295,7 @@ class TestListLocalIP(TestLocalIP):
self.network.local_ips.assert_called_once_with(
**{'project_id': project.id})
self.assertEqual(self.columns, columns)
- self.assertItemsEqual(self.data, list(data))
+ self.assertCountEqual(self.data, list(data))
def test_local_ip_project_domain(self):
project = identity_fakes_v3.FakeProject.create_one_project()
@@ -314,7 +314,7 @@ class TestListLocalIP(TestLocalIP):
self.network.local_ips.assert_called_once_with(**filters)
self.assertEqual(self.columns, columns)
- self.assertItemsEqual(self.data, list(data))
+ self.assertCountEqual(self.data, list(data))
def test_local_ip_list_network(self):
arglist = [
@@ -477,4 +477,4 @@ class TestShowLocalIP(TestLocalIP):
self.network.find_local_ip.assert_called_once_with(
self._local_ip.name, ignore_missing=False)
self.assertEqual(set(self.columns), set(columns))
- self.assertItemsEqual(self.data, list(data))
+ self.assertCountEqual(self.data, list(data))
diff --git a/openstackclient/tests/unit/network/v2/test_local_ip_association.py b/openstackclient/tests/unit/network/v2/test_local_ip_association.py
index 97759302..0e453741 100644
--- a/openstackclient/tests/unit/network/v2/test_local_ip_association.py
+++ b/openstackclient/tests/unit/network/v2/test_local_ip_association.py
@@ -29,7 +29,7 @@ class TestLocalIPAssociation(network_fakes.TestNetworkV2):
super().setUp()
self.network = self.app.client_manager.network
self.local_ip = network_fakes.create_one_local_ip()
- self.fixed_port = network_fakes.FakePort.create_one_port()
+ self.fixed_port = network_fakes.create_one_port()
self.project = identity_fakes_v2.FakeProject.create_one_project()
self.network.find_port = mock.Mock(return_value=self.fixed_port)
diff --git a/openstackclient/tests/unit/network/v2/test_ndp_proxy.py b/openstackclient/tests/unit/network/v2/test_ndp_proxy.py
new file mode 100644
index 00000000..48c5deb2
--- /dev/null
+++ b/openstackclient/tests/unit/network/v2/test_ndp_proxy.py
@@ -0,0 +1,454 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+from unittest import mock
+from unittest.mock import call
+
+from osc_lib import exceptions
+
+from openstackclient.network.v2 import ndp_proxy
+from openstackclient.tests.unit.identity.v3 import fakes as identity_fakes_v3
+from openstackclient.tests.unit.network.v2 import fakes as network_fakes
+from openstackclient.tests.unit import utils as tests_utils
+
+
+class TestNDPProxy(network_fakes.TestNetworkV2):
+
+ def setUp(self):
+ super(TestNDPProxy, self).setUp()
+ # Get a shortcut to the ProjectManager Mock
+ self.projects_mock = self.app.client_manager.identity.projects
+ # Get a shortcut to the DomainManager Mock
+ self.domains_mock = self.app.client_manager.identity.domains
+ # Get a shortcut to the network client
+ self.network = self.app.client_manager.network
+ self.router = network_fakes.FakeRouter.create_one_router(
+ {'id': 'fake-router-id'})
+ self.network.find_router = mock.Mock(return_value=self.router)
+ self.port = network_fakes.create_one_port()
+ self.network.find_port = mock.Mock(return_value=self.port)
+
+
+class TestCreateNDPProxy(TestNDPProxy):
+ def setUp(self):
+ super(TestCreateNDPProxy, self).setUp()
+ attrs = {'router_id': self.router.id, 'port_id': self.port.id}
+ self.ndp_proxy = (
+ network_fakes.create_one_ndp_proxy(
+ attrs))
+ self.columns = (
+ 'created_at',
+ 'description',
+ 'id',
+ 'ip_address',
+ 'name',
+ 'port_id',
+ 'project_id',
+ 'revision_number',
+ 'router_id',
+ 'updated_at')
+
+ self.data = (
+ self.ndp_proxy.created_at,
+ self.ndp_proxy.description,
+ self.ndp_proxy.id,
+ self.ndp_proxy.ip_address,
+ self.ndp_proxy.name,
+ self.ndp_proxy.port_id,
+ self.ndp_proxy.project_id,
+ self.ndp_proxy.revision_number,
+ self.ndp_proxy.router_id,
+ self.ndp_proxy.updated_at
+ )
+ self.network.create_ndp_proxy = mock.Mock(
+ return_value=self.ndp_proxy)
+
+ # Get the command object to test
+ self.cmd = ndp_proxy.CreateNDPProxy(self.app, self.namespace)
+
+ def test_create_no_options(self):
+ arglist = []
+ verifylist = []
+
+ # Missing required args should bail here
+ self.assertRaises(tests_utils.ParserException, self.check_parser,
+ self.cmd, arglist, verifylist)
+
+ def test_create_all_options(self):
+ arglist = [
+ self.ndp_proxy.router_id,
+ '--name', self.ndp_proxy.name,
+ '--port', self.ndp_proxy.port_id,
+ '--ip-address', self.ndp_proxy.ip_address,
+ '--description', self.ndp_proxy.description,
+ ]
+ verifylist = [
+ ('name', self.ndp_proxy.name),
+ ('router', self.ndp_proxy.router_id),
+ ('port', self.ndp_proxy.port_id),
+ ('ip_address', self.ndp_proxy.ip_address),
+ ('description', self.ndp_proxy.description),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+ columns, data = self.cmd.take_action(parsed_args)
+
+ self.network.create_ndp_proxy.assert_called_once_with(
+ **{'name': self.ndp_proxy.name,
+ 'router_id': self.ndp_proxy.router_id,
+ 'ip_address': self.ndp_proxy.ip_address,
+ 'port_id': self.ndp_proxy.port_id,
+ 'description': self.ndp_proxy.description})
+ self.assertEqual(self.columns, columns)
+ self.assertEqual(self.data, data)
+
+
+class TestDeleteNDPProxy(TestNDPProxy):
+
+ def setUp(self):
+ super(TestDeleteNDPProxy, self).setUp()
+ attrs = {'router_id': self.router.id, 'port_id': self.port.id}
+ self.ndp_proxies = (
+ network_fakes.create_ndp_proxies(attrs))
+ self.ndp_proxy = self.ndp_proxies[0]
+ self.network.delete_ndp_proxy = mock.Mock(
+ return_value=None)
+ self.network.find_ndp_proxy = mock.Mock(
+ return_value=self.ndp_proxy)
+
+ # Get the command object to test
+ self.cmd = ndp_proxy.DeleteNDPProxy(self.app, self.namespace)
+
+ def test_delete(self):
+ arglist = [
+ self.ndp_proxy.id
+ ]
+ verifylist = [
+ ('ndp_proxy', [self.ndp_proxy.id])
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+ result = self.cmd.take_action(parsed_args)
+ self.network.delete_ndp_proxy.assert_called_once_with(self.ndp_proxy)
+ self.assertIsNone(result)
+
+ def test_delete_error(self):
+ arglist = [
+ self.ndp_proxy.id,
+ ]
+ verifylist = [
+ ('ndp_proxy', [self.ndp_proxy.id])
+ ]
+ self.network.delete_ndp_proxy.side_effect = Exception(
+ 'Error message')
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+ self.assertRaises(
+ exceptions.CommandError,
+ self.cmd.take_action, parsed_args)
+
+ def test_multi_ndp_proxies_delete(self):
+ arglist = []
+ np_id = []
+
+ for a in self.ndp_proxies:
+ arglist.append(a.id)
+ np_id.append(a.id)
+
+ verifylist = [
+ ('ndp_proxy', np_id),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ result = self.cmd.take_action(parsed_args)
+
+ self.network.delete_ndp_proxy.assert_has_calls(
+ [call(self.ndp_proxy), call(self.ndp_proxy)])
+ self.assertIsNone(result)
+
+
+class TestListNDPProxy(TestNDPProxy):
+
+ def setUp(self):
+ super(TestListNDPProxy, self).setUp()
+ attrs = {'router_id': self.router.id, 'port_id': self.port.id}
+ ndp_proxies = (
+ network_fakes.create_ndp_proxies(attrs, count=3))
+ self.columns = (
+ 'ID',
+ 'Name',
+ 'Router ID',
+ 'IP Address',
+ 'Project',
+ )
+ self.data = []
+ for np in ndp_proxies:
+ self.data.append((
+ np.id,
+ np.name,
+ np.router_id,
+ np.ip_address,
+ np.project_id,
+ ))
+
+ self.network.ndp_proxies = mock.Mock(
+ return_value=ndp_proxies)
+
+ # Get the command object to test
+ self.cmd = ndp_proxy.ListNDPProxy(self.app, self.namespace)
+
+ def test_ndp_proxy_list(self):
+ arglist = []
+ verifylist = []
+
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+ columns, data = self.cmd.take_action(parsed_args)
+
+ self.network.ndp_proxies.assert_called_once_with()
+ self.assertEqual(self.columns, columns)
+ list_data = list(data)
+ self.assertEqual(len(self.data), len(list_data))
+ for index in range(len(list_data)):
+ self.assertEqual(self.data[index], list_data[index])
+
+ def test_ndp_proxy_list_router(self):
+ arglist = [
+ '--router', 'fake-router-name',
+ ]
+
+ verifylist = [
+ ('router', 'fake-router-name')
+ ]
+
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ columns, data = self.cmd.take_action(parsed_args)
+
+ self.network.ndp_proxies.assert_called_once_with(**{
+ 'router_id': 'fake-router-id'})
+ self.assertEqual(self.columns, columns)
+ self.assertCountEqual(self.data, list(data))
+
+ def test_ndp_proxy_list_port(self):
+ arglist = [
+ '--port', self.port.id,
+ ]
+
+ verifylist = [
+ ('port', self.port.id)
+ ]
+
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ columns, data = self.cmd.take_action(parsed_args)
+
+ self.network.ndp_proxies.assert_called_once_with(**{
+ 'port_id': self.port.id})
+ self.assertEqual(self.columns, columns)
+ self.assertCountEqual(self.data, list(data))
+
+ def test_ndp_proxy_list_name(self):
+ arglist = [
+ '--name', 'fake-ndp-proxy-name',
+ ]
+
+ verifylist = [
+ ('name', 'fake-ndp-proxy-name')
+ ]
+
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ columns, data = self.cmd.take_action(parsed_args)
+
+ self.network.ndp_proxies.assert_called_once_with(**{
+ 'name': 'fake-ndp-proxy-name'})
+ self.assertEqual(self.columns, columns)
+ self.assertCountEqual(self.data, list(data))
+
+ def test_ndp_proxy_list_ip_address(self):
+ arglist = [
+ '--ip-address', '2001::1:2',
+ ]
+
+ verifylist = [
+ ('ip_address', '2001::1:2')
+ ]
+
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ columns, data = self.cmd.take_action(parsed_args)
+
+ self.network.ndp_proxies.assert_called_once_with(**{
+ 'ip_address': '2001::1:2'})
+ self.assertEqual(self.columns, columns)
+ self.assertCountEqual(self.data, list(data))
+
+ def test_ndp_proxy_list_project(self):
+ project = identity_fakes_v3.FakeProject.create_one_project()
+ self.projects_mock.get.return_value = project
+ arglist = [
+ '--project', project.id,
+ ]
+ verifylist = [
+ ('project', project.id),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+ columns, data = self.cmd.take_action(parsed_args)
+
+ self.network.ndp_proxies.assert_called_once_with(
+ **{'project_id': project.id})
+ self.assertEqual(self.columns, columns)
+        self.assertCountEqual(self.data, list(data))
+
+ def test_ndp_proxy_list_project_domain(self):
+ project = identity_fakes_v3.FakeProject.create_one_project()
+ self.projects_mock.get.return_value = project
+ arglist = [
+ '--project', project.id,
+ '--project-domain', project.domain_id,
+ ]
+ verifylist = [
+ ('project', project.id),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ columns, data = self.cmd.take_action(parsed_args)
+ filters = {'project_id': project.id}
+
+ self.network.ndp_proxies.assert_called_once_with(**filters)
+ self.assertEqual(self.columns, columns)
+        self.assertCountEqual(self.data, list(data))
+
+
+class TestSetNDPProxy(TestNDPProxy):
+
+ def setUp(self):
+ super(TestSetNDPProxy, self).setUp()
+ attrs = {'router_id': self.router.id, 'port_id': self.port.id}
+ self.ndp_proxy = (
+ network_fakes.create_one_ndp_proxy(attrs))
+ self.network.update_ndp_proxy = mock.Mock(return_value=None)
+ self.network.find_ndp_proxy = mock.Mock(
+ return_value=self.ndp_proxy)
+
+ # Get the command object to test
+ self.cmd = ndp_proxy.SetNDPProxy(self.app, self.namespace)
+
+ def test_set_nothing(self):
+ arglist = [
+ self.ndp_proxy.id,
+ ]
+ verifylist = [
+ ('ndp_proxy', self.ndp_proxy.id),
+ ]
+
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+ result = (self.cmd.take_action(parsed_args))
+
+ self.network.update_ndp_proxy.assert_called_once_with(
+ self.ndp_proxy)
+ self.assertIsNone(result)
+
+ def test_set_name(self):
+ arglist = [
+ self.ndp_proxy.id,
+ '--name', 'fake-name',
+ ]
+ verifylist = [
+ ('ndp_proxy', self.ndp_proxy.id),
+ ('name', 'fake-name'),
+ ]
+
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+ result = (self.cmd.take_action(parsed_args))
+
+ self.network.update_ndp_proxy.assert_called_once_with(
+ self.ndp_proxy, name='fake-name')
+ self.assertIsNone(result)
+
+ def test_set_description(self):
+ arglist = [
+ self.ndp_proxy.id,
+ '--description', 'balala',
+ ]
+ verifylist = [
+ ('ndp_proxy', self.ndp_proxy.id),
+ ('description', 'balala'),
+ ]
+
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+ result = (self.cmd.take_action(parsed_args))
+
+ self.network.update_ndp_proxy.assert_called_once_with(
+ self.ndp_proxy, description='balala')
+ self.assertIsNone(result)
+
+
+class TestShowNDPProxy(TestNDPProxy):
+
+ def setUp(self):
+ super(TestShowNDPProxy, self).setUp()
+ attrs = {'router_id': self.router.id, 'port_id': self.port.id}
+ self.ndp_proxy = (
+ network_fakes.create_one_ndp_proxy(attrs))
+
+ self.columns = (
+ 'created_at',
+ 'description',
+ 'id',
+ 'ip_address',
+ 'name',
+ 'port_id',
+ 'project_id',
+ 'revision_number',
+ 'router_id',
+ 'updated_at')
+
+ self.data = (
+ self.ndp_proxy.created_at,
+ self.ndp_proxy.description,
+ self.ndp_proxy.id,
+ self.ndp_proxy.ip_address,
+ self.ndp_proxy.name,
+ self.ndp_proxy.port_id,
+ self.ndp_proxy.project_id,
+ self.ndp_proxy.revision_number,
+ self.ndp_proxy.router_id,
+ self.ndp_proxy.updated_at
+ )
+ self.network.get_ndp_proxy = mock.Mock(return_value=self.ndp_proxy)
+ self.network.find_ndp_proxy = mock.Mock(return_value=self.ndp_proxy)
+
+ # Get the command object to test
+ self.cmd = ndp_proxy.ShowNDPProxy(self.app, self.namespace)
+
+ def test_show_no_options(self):
+ arglist = []
+ verifylist = []
+
+ # Missing required args should bail here
+ self.assertRaises(tests_utils.ParserException, self.check_parser,
+ self.cmd, arglist, verifylist)
+
+ def test_show_default_options(self):
+ arglist = [
+ self.ndp_proxy.id,
+ ]
+ verifylist = [
+ ('ndp_proxy', self.ndp_proxy.id),
+ ]
+
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+ columns, data = (self.cmd.take_action(parsed_args))
+
+ self.network.find_ndp_proxy.assert_called_once_with(
+ self.ndp_proxy.id, ignore_missing=False)
+ self.assertEqual(self.columns, columns)
+ self.assertEqual(self.data, data)
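
The NDP proxy tests follow the usual mock-and-assert pattern: the SDK method
is replaced with a Mock, take_action() runs, and the test then checks exactly
which keyword arguments reached the client. A stripped-down sketch of that
pattern is below; create_proxy and its argument names are illustrative
assumptions rather than the command code under test.

from unittest import mock


def create_proxy(client, router_id, port_id, **extra):
    """Forward the supplied attributes to the (mocked) SDK call."""
    attrs = {'router_id': router_id, 'port_id': port_id}
    attrs.update(extra)
    return client.create_ndp_proxy(**attrs)


network = mock.Mock()
network.create_ndp_proxy.return_value = {'id': 'np-1'}

create_proxy(network, 'router-1', 'port-1', description='example')
network.create_ndp_proxy.assert_called_once_with(
    router_id='router-1', port_id='port-1', description='example')
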
diff --git a/openstackclient/tests/unit/network/v2/test_network.py b/openstackclient/tests/unit/network/v2/test_network.py
index 8edfbef1..6adb9e16 100644
--- a/openstackclient/tests/unit/network/v2/test_network.py
+++ b/openstackclient/tests/unit/network/v2/test_network.py
@@ -568,7 +568,7 @@ class TestListNetwork(TestNetwork):
self.network.networks = mock.Mock(return_value=self._network)
self._agent = \
- network_fakes.FakeNetworkAgent.create_one_network_agent()
+ network_fakes.create_one_network_agent()
self.network.get_agent = mock.Mock(return_value=self._agent)
self.network.dhcp_agent_hosting_networks = mock.Mock(
@@ -859,13 +859,12 @@ class TestListNetwork(TestNetwork):
('agent_id', self._agent.id),
]
- attrs = {self._agent, }
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
self.network.dhcp_agent_hosting_networks.assert_called_once_with(
- *attrs)
+ self._agent)
self.assertEqual(self.columns, columns)
self.assertCountEqual(list(data), list(self.data))
diff --git a/openstackclient/tests/unit/network/v2/test_network_agent.py b/openstackclient/tests/unit/network/v2/test_network_agent.py
index 6d3d882b..15c4c5de 100644
--- a/openstackclient/tests/unit/network/v2/test_network_agent.py
+++ b/openstackclient/tests/unit/network/v2/test_network_agent.py
@@ -34,7 +34,7 @@ class TestNetworkAgent(network_fakes.TestNetworkV2):
class TestAddNetworkToAgent(TestNetworkAgent):
net = network_fakes.create_one_network()
- agent = network_fakes.FakeNetworkAgent.create_one_network_agent()
+ agent = network_fakes.create_one_network_agent()
def setUp(self):
super(TestAddNetworkToAgent, self).setUp()
@@ -76,7 +76,7 @@ class TestAddNetworkToAgent(TestNetworkAgent):
class TestAddRouterAgent(TestNetworkAgent):
_router = network_fakes.FakeRouter.create_one_router()
- _agent = network_fakes.FakeNetworkAgent.create_one_network_agent()
+ _agent = network_fakes.create_one_network_agent()
def setUp(self):
super(TestAddRouterAgent, self).setUp()
@@ -115,8 +115,7 @@ class TestAddRouterAgent(TestNetworkAgent):
class TestDeleteNetworkAgent(TestNetworkAgent):
- network_agents = (
- network_fakes.FakeNetworkAgent.create_network_agents(count=2))
+ network_agents = network_fakes.create_network_agents(count=2)
def setUp(self):
super(TestDeleteNetworkAgent, self).setUp()
@@ -188,8 +187,7 @@ class TestDeleteNetworkAgent(TestNetworkAgent):
class TestListNetworkAgent(TestNetworkAgent):
- network_agents = (
- network_fakes.FakeNetworkAgent.create_network_agents(count=3))
+ network_agents = network_fakes.create_network_agents(count=3)
columns = (
'ID',
@@ -207,8 +205,8 @@ class TestListNetworkAgent(TestNetworkAgent):
agent.agent_type,
agent.host,
agent.availability_zone,
- network_agent.AliveColumn(agent.alive),
- network_agent.AdminStateColumn(agent.admin_state_up),
+ network_agent.AliveColumn(agent.is_alive),
+ network_agent.AdminStateColumn(agent.is_admin_state_up),
agent.binary,
))
@@ -217,8 +215,7 @@ class TestListNetworkAgent(TestNetworkAgent):
self.network.agents = mock.Mock(
return_value=self.network_agents)
- _testagent = \
- network_fakes.FakeNetworkAgent.create_one_network_agent()
+ _testagent = network_fakes.create_one_network_agent()
self.network.get_agent = mock.Mock(return_value=_testagent)
self._testnetwork = network_fakes.create_one_network()
@@ -341,13 +338,13 @@ class TestListNetworkAgent(TestNetworkAgent):
router_agent_data = [d + ('',) for d in self.data]
self.assertEqual(router_agent_columns, columns)
- self.assertCountEqual(router_agent_data, list(data))
+ self.assertEqual(len(router_agent_data), len(list(data)))
class TestRemoveNetworkFromAgent(TestNetworkAgent):
net = network_fakes.create_one_network()
- agent = network_fakes.FakeNetworkAgent.create_one_network_agent()
+ agent = network_fakes.create_one_network_agent()
def setUp(self):
super(TestRemoveNetworkFromAgent, self).setUp()
@@ -398,7 +395,7 @@ class TestRemoveNetworkFromAgent(TestNetworkAgent):
class TestRemoveRouterAgent(TestNetworkAgent):
_router = network_fakes.FakeRouter.create_one_router()
- _agent = network_fakes.FakeNetworkAgent.create_one_network_agent()
+ _agent = network_fakes.create_one_network_agent()
def setUp(self):
super(TestRemoveRouterAgent, self).setUp()
@@ -438,8 +435,7 @@ class TestRemoveRouterAgent(TestNetworkAgent):
class TestSetNetworkAgent(TestNetworkAgent):
- _network_agent = (
- network_fakes.FakeNetworkAgent.create_one_network_agent())
+ _network_agent = network_fakes.create_one_network_agent()
def setUp(self):
super(TestSetNetworkAgent, self).setUp()
@@ -515,8 +511,7 @@ class TestSetNetworkAgent(TestNetworkAgent):
class TestShowNetworkAgent(TestNetworkAgent):
- _network_agent = (
- network_fakes.FakeNetworkAgent.create_one_network_agent())
+ _network_agent = network_fakes.create_one_network_agent()
columns = (
'admin_state_up',
@@ -524,19 +519,33 @@ class TestShowNetworkAgent(TestNetworkAgent):
'alive',
'availability_zone',
'binary',
- 'configurations',
+ 'configuration',
+ 'created_at',
+ 'description',
'host',
+ 'ha_state',
'id',
+ 'last_heartbeat_at',
+ 'resources_synced',
+ 'started_at',
+ 'topic',
)
data = (
- network_agent.AdminStateColumn(_network_agent.admin_state_up),
+ network_agent.AdminStateColumn(_network_agent.is_admin_state_up),
_network_agent.agent_type,
network_agent.AliveColumn(_network_agent.is_alive),
_network_agent.availability_zone,
_network_agent.binary,
- format_columns.DictColumn(_network_agent.configurations),
+ format_columns.DictColumn(_network_agent.configuration),
+ _network_agent.created_at,
+ _network_agent.description,
+ _network_agent.ha_state,
_network_agent.host,
_network_agent.id,
+ _network_agent.last_heartbeat_at,
+ _network_agent.resources_synced,
+ _network_agent.started_at,
+ _network_agent.topic,
)
def setUp(self):
@@ -568,5 +577,5 @@ class TestShowNetworkAgent(TestNetworkAgent):
self.network.get_agent.assert_called_once_with(
self._network_agent.id)
- self.assertEqual(self.columns, columns)
- self.assertCountEqual(list(self.data), list(data))
+ self.assertEqual(set(self.columns), set(columns))
+ self.assertEqual(len(list(self.data)), len(list(data)))
diff --git a/openstackclient/tests/unit/network/v2/test_network_flavor.py b/openstackclient/tests/unit/network/v2/test_network_flavor.py
index 11364107..3149def6 100644
--- a/openstackclient/tests/unit/network/v2/test_network_flavor.py
+++ b/openstackclient/tests/unit/network/v2/test_network_flavor.py
@@ -40,8 +40,7 @@ class TestNetworkFlavor(network_fakes.TestNetworkV2):
class TestAddNetworkFlavorToProfile(TestNetworkFlavor):
network_flavor = network_fakes.create_one_network_flavor()
- service_profile = \
- network_fakes.FakeNetworkFlavorProfile.create_one_service_profile()
+ service_profile = network_fakes.create_one_service_profile()
def setUp(self):
super(TestAddNetworkFlavorToProfile, self).setUp()
@@ -320,8 +319,7 @@ class TestListNetworkFlavor(TestNetworkFlavor):
class TestRemoveNetworkFlavorFromProfile(TestNetworkFlavor):
network_flavor = network_fakes.create_one_network_flavor()
- service_profile = \
- network_fakes.FakeNetworkFlavorProfile.create_one_service_profile()
+ service_profile = network_fakes.create_one_service_profile()
def setUp(self):
super(TestRemoveNetworkFlavorFromProfile, self).setUp()
diff --git a/openstackclient/tests/unit/network/v2/test_network_flavor_profile.py b/openstackclient/tests/unit/network/v2/test_network_flavor_profile.py
index 1cbe30ba..5c2b9e2d 100644
--- a/openstackclient/tests/unit/network/v2/test_network_flavor_profile.py
+++ b/openstackclient/tests/unit/network/v2/test_network_flavor_profile.py
@@ -34,25 +34,23 @@ class TestFlavorProfile(network_fakes.TestNetworkV2):
class TestCreateFlavorProfile(TestFlavorProfile):
project = identity_fakes_v3.FakeProject.create_one_project()
domain = identity_fakes_v3.FakeDomain.create_one_domain()
- new_flavor_profile = (
- network_fakes.FakeNetworkFlavorProfile.
- create_one_service_profile()
- )
+ new_flavor_profile = network_fakes.create_one_service_profile()
+
columns = (
'description',
'driver',
'enabled',
'id',
- 'metainfo',
+ 'meta_info',
'project_id',
)
data = (
new_flavor_profile.description,
new_flavor_profile.driver,
- new_flavor_profile.enabled,
+ new_flavor_profile.is_enabled,
new_flavor_profile.id,
- new_flavor_profile.metainfo,
+ new_flavor_profile.meta_info,
new_flavor_profile.project_id,
)
@@ -72,7 +70,7 @@ class TestCreateFlavorProfile(TestFlavorProfile):
'--project-domain', self.domain.name,
"--enable",
"--driver", self.new_flavor_profile.driver,
- "--metainfo", self.new_flavor_profile.metainfo,
+ "--metainfo", self.new_flavor_profile.meta_info,
]
verifylist = [
@@ -81,7 +79,7 @@ class TestCreateFlavorProfile(TestFlavorProfile):
('project_domain', self.domain.name),
('enable', True),
('driver', self.new_flavor_profile.driver),
- ('metainfo', self.new_flavor_profile.metainfo)
+ ('metainfo', self.new_flavor_profile.meta_info)
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
@@ -90,9 +88,9 @@ class TestCreateFlavorProfile(TestFlavorProfile):
self.network.create_service_profile.assert_called_once_with(
**{'description': self.new_flavor_profile.description,
'project_id': self.project.id,
- 'enabled': self.new_flavor_profile.enabled,
+ 'enabled': self.new_flavor_profile.is_enabled,
'driver': self.new_flavor_profile.driver,
- 'metainfo': self.new_flavor_profile.metainfo}
+ 'metainfo': self.new_flavor_profile.meta_info}
)
self.assertEqual(self.columns, columns)
self.assertEqual(self.data, data)
@@ -103,7 +101,7 @@ class TestCreateFlavorProfile(TestFlavorProfile):
"--project", self.new_flavor_profile.project_id,
'--project-domain', self.domain.name,
"--enable",
- "--metainfo", self.new_flavor_profile.metainfo,
+ "--metainfo", self.new_flavor_profile.meta_info,
]
verifylist = [
@@ -111,7 +109,7 @@ class TestCreateFlavorProfile(TestFlavorProfile):
('project', self.new_flavor_profile.project_id),
('project_domain', self.domain.name),
('enable', True),
- ('metainfo', self.new_flavor_profile.metainfo)
+ ('metainfo', self.new_flavor_profile.meta_info)
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
@@ -120,8 +118,8 @@ class TestCreateFlavorProfile(TestFlavorProfile):
self.network.create_service_profile.assert_called_once_with(
**{'description': self.new_flavor_profile.description,
'project_id': self.project.id,
- 'enabled': self.new_flavor_profile.enabled,
- 'metainfo': self.new_flavor_profile.metainfo}
+ 'enabled': self.new_flavor_profile.is_enabled,
+ 'metainfo': self.new_flavor_profile.meta_info}
)
self.assertEqual(self.columns, columns)
self.assertEqual(self.data, data)
@@ -149,7 +147,7 @@ class TestCreateFlavorProfile(TestFlavorProfile):
self.network.create_service_profile.assert_called_once_with(
**{'description': self.new_flavor_profile.description,
'project_id': self.project.id,
- 'enabled': self.new_flavor_profile.enabled,
+ 'enabled': self.new_flavor_profile.is_enabled,
'driver': self.new_flavor_profile.driver,
}
)
@@ -203,14 +201,13 @@ class TestCreateFlavorProfile(TestFlavorProfile):
class TestDeleteFlavorProfile(TestFlavorProfile):
# The network flavor_profiles to delete.
- _network_flavor_profiles = (
- network_fakes.FakeNetworkFlavorProfile.create_service_profile(count=2))
+ _network_flavor_profiles = network_fakes.create_service_profile(count=2)
def setUp(self):
super(TestDeleteFlavorProfile, self).setUp()
self.network.delete_service_profile = mock.Mock(return_value=None)
self.network.find_service_profile = (
- network_fakes.FakeNetworkFlavorProfile.get_service_profile(
+ network_fakes.get_service_profile(
flavor_profile=self._network_flavor_profiles)
)
@@ -290,8 +287,7 @@ class TestDeleteFlavorProfile(TestFlavorProfile):
class TestListFlavorProfile(TestFlavorProfile):
# The network flavor profiles list
- _network_flavor_profiles = (
- network_fakes.FakeNetworkFlavorProfile.create_service_profile(count=2))
+ _network_flavor_profiles = network_fakes.create_service_profile(count=2)
columns = (
'ID',
@@ -305,8 +301,8 @@ class TestListFlavorProfile(TestFlavorProfile):
data.append((
flavor_profile.id,
flavor_profile.driver,
- flavor_profile.enabled,
- flavor_profile.metainfo,
+ flavor_profile.is_enabled,
+ flavor_profile.meta_info,
flavor_profile.description,
))
@@ -334,22 +330,21 @@ class TestListFlavorProfile(TestFlavorProfile):
class TestShowFlavorProfile(TestFlavorProfile):
# The network flavor profile to show.
- network_flavor_profile = (
- network_fakes.FakeNetworkFlavorProfile.create_one_service_profile())
+ network_flavor_profile = network_fakes.create_one_service_profile()
columns = (
'description',
'driver',
'enabled',
'id',
- 'metainfo',
+ 'meta_info',
'project_id',
)
data = (
network_flavor_profile.description,
network_flavor_profile.driver,
- network_flavor_profile.enabled,
+ network_flavor_profile.is_enabled,
network_flavor_profile.id,
- network_flavor_profile.metainfo,
+ network_flavor_profile.meta_info,
network_flavor_profile.project_id,
)
@@ -382,8 +377,7 @@ class TestShowFlavorProfile(TestFlavorProfile):
class TestSetFlavorProfile(TestFlavorProfile):
# The network flavor profile to set.
- network_flavor_profile = (
- network_fakes.FakeNetworkFlavorProfile.create_one_service_profile())
+ network_flavor_profile = network_fakes.create_one_service_profile()
def setUp(self):
super(TestSetFlavorProfile, self).setUp()
diff --git a/openstackclient/tests/unit/network/v2/test_network_qos_rule.py b/openstackclient/tests/unit/network/v2/test_network_qos_rule.py
index 217e481e..c7de8160 100644
--- a/openstackclient/tests/unit/network/v2/test_network_qos_rule.py
+++ b/openstackclient/tests/unit/network/v2/test_network_qos_rule.py
@@ -25,6 +25,7 @@ from openstackclient.tests.unit import utils as tests_utils
RULE_TYPE_BANDWIDTH_LIMIT = 'bandwidth-limit'
RULE_TYPE_DSCP_MARKING = 'dscp-marking'
RULE_TYPE_MINIMUM_BANDWIDTH = 'minimum-bandwidth'
+RULE_TYPE_MINIMUM_PACKET_RATE = 'minimum-packet-rate'
DSCP_VALID_MARKS = [0, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32,
34, 36, 38, 40, 46, 48, 56]
@@ -126,8 +127,101 @@ class TestCreateNetworkQosRuleMinimumBandwidth(TestNetworkQosRule):
try:
self.cmd.take_action(parsed_args)
except exceptions.CommandError as e:
- msg = ('"Create" rule command for type "minimum-bandwidth" '
- 'requires arguments: direction, min_kbps')
+ msg = ('Failed to create Network QoS rule: "Create" rule command '
+ 'for type "minimum-bandwidth" requires arguments: '
+ 'direction, min_kbps')
+ self.assertEqual(msg, str(e))
+
+
+class TestCreateNetworkQosRuleMinimumPacketRate(TestNetworkQosRule):
+
+ def test_check_type_parameters(self):
+ pass
+
+ def setUp(self):
+ super(TestCreateNetworkQosRuleMinimumPacketRate, self).setUp()
+ attrs = {'qos_policy_id': self.qos_policy.id,
+ 'type': RULE_TYPE_MINIMUM_PACKET_RATE}
+ self.new_rule = network_fakes.FakeNetworkQosRule.create_one_qos_rule(
+ attrs)
+ self.columns = (
+ 'direction',
+ 'id',
+ 'min_kpps',
+ 'project_id',
+ 'qos_policy_id',
+ 'type'
+ )
+
+ self.data = (
+ self.new_rule.direction,
+ self.new_rule.id,
+ self.new_rule.min_kpps,
+ self.new_rule.project_id,
+ self.new_rule.qos_policy_id,
+ self.new_rule.type,
+ )
+ self.network.create_qos_minimum_packet_rate_rule = mock.Mock(
+ return_value=self.new_rule)
+
+ # Get the command object to test
+ self.cmd = network_qos_rule.CreateNetworkQosRule(self.app,
+ self.namespace)
+
+ def test_create_no_options(self):
+ arglist = []
+ verifylist = []
+
+ # Missing required args should bail here
+ self.assertRaises(tests_utils.ParserException, self.check_parser,
+ self.cmd, arglist, verifylist)
+
+ def test_create_default_options(self):
+ arglist = [
+ '--type', RULE_TYPE_MINIMUM_PACKET_RATE,
+ '--min-kpps', str(self.new_rule.min_kpps),
+ '--egress',
+ self.new_rule.qos_policy_id,
+ ]
+
+ verifylist = [
+ ('type', RULE_TYPE_MINIMUM_PACKET_RATE),
+ ('min_kpps', self.new_rule.min_kpps),
+ ('egress', True),
+ ('qos_policy', self.new_rule.qos_policy_id),
+ ]
+
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+ columns, data = (self.cmd.take_action(parsed_args))
+
+ self.network.create_qos_minimum_packet_rate_rule.\
+ assert_called_once_with(
+ self.qos_policy.id,
+ **{'min_kpps': self.new_rule.min_kpps,
+ 'direction': self.new_rule.direction})
+ self.assertEqual(self.columns, columns)
+ self.assertEqual(self.data, data)
+
+ def test_create_wrong_options(self):
+ arglist = [
+ '--type', RULE_TYPE_MINIMUM_PACKET_RATE,
+ '--min-kbps', '10000',
+ self.new_rule.qos_policy_id,
+ ]
+
+ verifylist = [
+ ('type', RULE_TYPE_MINIMUM_PACKET_RATE),
+ ('min_kbps', 10000),
+ ('qos_policy', self.new_rule.qos_policy_id),
+ ]
+
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+ try:
+ self.cmd.take_action(parsed_args)
+ except exceptions.CommandError as e:
+ msg = ('Failed to create Network QoS rule: "Create" rule command '
+ 'for type "minimum-packet-rate" requires arguments: '
+ 'direction, min_kpps')
self.assertEqual(msg, str(e))
@@ -212,8 +306,8 @@ class TestCreateNetworkQosRuleDSCPMarking(TestNetworkQosRule):
try:
self.cmd.take_action(parsed_args)
except exceptions.CommandError as e:
- msg = ('"Create" rule command for type "dscp-marking" '
- 'requires arguments: dscp_mark')
+ msg = ('Failed to create Network QoS rule: "Create" rule command '
+ 'for type "dscp-marking" requires arguments: dscp_mark')
self.assertEqual(msg, str(e))
@@ -351,8 +445,8 @@ class TestCreateNetworkQosRuleBandwidtLimit(TestNetworkQosRule):
try:
self.cmd.take_action(parsed_args)
except exceptions.CommandError as e:
- msg = ('"Create" rule command for type "bandwidth-limit" '
- 'requires arguments: max_kbps')
+ msg = ('Failed to create Network QoS rule: "Create" rule command '
+ 'for type "bandwidth-limit" requires arguments: max_kbps')
self.assertEqual(msg, str(e))
@@ -415,6 +509,65 @@ class TestDeleteNetworkQosRuleMinimumBandwidth(TestNetworkQosRule):
self.assertEqual(msg, str(e))
+class TestDeleteNetworkQosRuleMinimumPacketRate(TestNetworkQosRule):
+
+ def setUp(self):
+ super(TestDeleteNetworkQosRuleMinimumPacketRate, self).setUp()
+ attrs = {'qos_policy_id': self.qos_policy.id,
+ 'type': RULE_TYPE_MINIMUM_PACKET_RATE}
+ self.new_rule = network_fakes.FakeNetworkQosRule.create_one_qos_rule(
+ attrs)
+ self.qos_policy.rules = [self.new_rule]
+ self.network.delete_qos_minimum_packet_rate_rule = mock.Mock(
+ return_value=None)
+ self.network.find_qos_minimum_packet_rate_rule = (
+ network_fakes.FakeNetworkQosRule.get_qos_rules(
+ qos_rules=self.new_rule)
+ )
+
+ # Get the command object to test
+ self.cmd = network_qos_rule.DeleteNetworkQosRule(self.app,
+ self.namespace)
+
+ def test_qos_policy_delete(self):
+ arglist = [
+ self.new_rule.qos_policy_id,
+ self.new_rule.id,
+ ]
+ verifylist = [
+ ('qos_policy', self.new_rule.qos_policy_id),
+ ('id', self.new_rule.id),
+ ]
+
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+ result = self.cmd.take_action(parsed_args)
+ self.network.find_qos_policy.assert_called_once_with(
+ self.qos_policy.id, ignore_missing=False)
+ self.network.delete_qos_minimum_packet_rate_rule.\
+ assert_called_once_with(self.new_rule.id, self.qos_policy.id)
+ self.assertIsNone(result)
+
+ def test_qos_policy_delete_error(self):
+ arglist = [
+ self.new_rule.qos_policy_id,
+ self.new_rule.id,
+ ]
+ verifylist = [
+ ('qos_policy', self.new_rule.qos_policy_id),
+ ('id', self.new_rule.id),
+ ]
+
+ self.network.delete_qos_minimum_packet_rate_rule.side_effect = \
+ Exception('Error message')
+ try:
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+ self.cmd.take_action(parsed_args)
+ except exceptions.CommandError as e:
+ msg = ('Failed to delete Network QoS rule ID "%(rule)s": %(e)s' %
+ {'rule': self.new_rule.id, 'e': 'Error message'})
+ self.assertEqual(msg, str(e))
+
+
class TestDeleteNetworkQosRuleDSCPMarking(TestNetworkQosRule):
def setUp(self):
@@ -627,6 +780,100 @@ class TestSetNetworkQosRuleMinimumBandwidth(TestNetworkQosRule):
self.assertEqual(msg, str(e))
+class TestSetNetworkQosRuleMinimumPacketRate(TestNetworkQosRule):
+
+ def setUp(self):
+ super(TestSetNetworkQosRuleMinimumPacketRate, self).setUp()
+ attrs = {'qos_policy_id': self.qos_policy.id,
+ 'type': RULE_TYPE_MINIMUM_PACKET_RATE}
+ self.new_rule = network_fakes.FakeNetworkQosRule.create_one_qos_rule(
+ attrs=attrs)
+ self.qos_policy.rules = [self.new_rule]
+ self.network.update_qos_minimum_packet_rate_rule = mock.Mock(
+ return_value=None)
+ self.network.find_qos_minimum_packet_rate_rule = mock.Mock(
+ return_value=self.new_rule)
+ self.network.find_qos_policy = mock.Mock(
+ return_value=self.qos_policy)
+
+ # Get the command object to test
+ self.cmd = (network_qos_rule.SetNetworkQosRule(self.app,
+ self.namespace))
+
+ def test_set_nothing(self):
+ arglist = [
+ self.new_rule.qos_policy_id,
+ self.new_rule.id,
+ ]
+ verifylist = [
+ ('qos_policy', self.new_rule.qos_policy_id),
+ ('id', self.new_rule.id),
+ ]
+
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+ result = self.cmd.take_action(parsed_args)
+
+ self.network.update_qos_minimum_packet_rate_rule.assert_called_with(
+ self.new_rule, self.qos_policy.id)
+ self.assertIsNone(result)
+
+ def test_set_min_kpps(self):
+ self._set_min_kpps()
+
+ def test_set_min_kpps_to_zero(self):
+ self._set_min_kpps(min_kpps=0)
+
+ def _set_min_kpps(self, min_kpps=None):
+ if min_kpps is not None:
+ previous_min_kpps = self.new_rule.min_kpps
+ self.new_rule.min_kpps = min_kpps
+
+ arglist = [
+ '--min-kpps', str(self.new_rule.min_kpps),
+ self.new_rule.qos_policy_id,
+ self.new_rule.id,
+ ]
+ verifylist = [
+ ('min_kpps', self.new_rule.min_kpps),
+ ('qos_policy', self.new_rule.qos_policy_id),
+ ('id', self.new_rule.id),
+ ]
+
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+ result = self.cmd.take_action(parsed_args)
+
+ attrs = {
+ 'min_kpps': self.new_rule.min_kpps,
+ }
+ self.network.update_qos_minimum_packet_rate_rule.assert_called_with(
+ self.new_rule, self.qos_policy.id, **attrs)
+ self.assertIsNone(result)
+
+ if min_kpps is not None:
+ self.new_rule.min_kpps = previous_min_kpps
+
+ def test_set_wrong_options(self):
+ arglist = [
+ '--min-kbps', str(10000),
+ self.new_rule.qos_policy_id,
+ self.new_rule.id,
+ ]
+ verifylist = [
+ ('min_kbps', 10000),
+ ('qos_policy', self.new_rule.qos_policy_id),
+ ('id', self.new_rule.id),
+ ]
+
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+ try:
+ self.cmd.take_action(parsed_args)
+ except exceptions.CommandError as e:
+ msg = ('Failed to set Network QoS rule ID "%(rule)s": Rule type '
+ '"minimum-packet-rate" only requires arguments: direction, '
+ 'min_kpps' % {'rule': self.new_rule.id})
+ self.assertEqual(msg, str(e))
+
+
class TestSetNetworkQosRuleDSCPMarking(TestNetworkQosRule):
def setUp(self):
@@ -893,6 +1140,9 @@ class TestListNetworkQosRule(TestNetworkQosRule):
'type': RULE_TYPE_MINIMUM_BANDWIDTH}
self.new_rule_min_bw = (network_fakes.FakeNetworkQosRule.
create_one_qos_rule(attrs=attrs))
+ attrs['type'] = RULE_TYPE_MINIMUM_PACKET_RATE
+ self.new_rule_min_pps = (network_fakes.FakeNetworkQosRule.
+ create_one_qos_rule(attrs=attrs))
attrs['type'] = RULE_TYPE_DSCP_MARKING
self.new_rule_dscp_mark = (network_fakes.FakeNetworkQosRule.
create_one_qos_rule(attrs=attrs))
@@ -900,10 +1150,13 @@ class TestListNetworkQosRule(TestNetworkQosRule):
self.new_rule_max_bw = (network_fakes.FakeNetworkQosRule.
create_one_qos_rule(attrs=attrs))
self.qos_policy.rules = [self.new_rule_min_bw,
+ self.new_rule_min_pps,
self.new_rule_dscp_mark,
self.new_rule_max_bw]
self.network.find_qos_minimum_bandwidth_rule = mock.Mock(
return_value=self.new_rule_min_bw)
+ self.network.find_qos_minimum_packet_rate_rule = mock.Mock(
+ return_value=self.new_rule_min_pps)
self.network.find_qos_dscp_marking_rule = mock.Mock(
return_value=self.new_rule_dscp_mark)
self.network.find_qos_bandwidth_limit_rule = mock.Mock(
@@ -915,6 +1168,7 @@ class TestListNetworkQosRule(TestNetworkQosRule):
'Max Kbps',
'Max Burst Kbits',
'Min Kbps',
+ 'Min Kpps',
'DSCP mark',
'Direction',
)
@@ -927,6 +1181,7 @@ class TestListNetworkQosRule(TestNetworkQosRule):
getattr(self.qos_policy.rules[index], 'max_kbps', ''),
getattr(self.qos_policy.rules[index], 'max_burst_kbps', ''),
getattr(self.qos_policy.rules[index], 'min_kbps', ''),
+ getattr(self.qos_policy.rules[index], 'min_kpps', ''),
getattr(self.qos_policy.rules[index], 'dscp_mark', ''),
getattr(self.qos_policy.rules[index], 'direction', ''),
))
@@ -1014,6 +1269,66 @@ class TestShowNetworkQosRuleMinimumBandwidth(TestNetworkQosRule):
self.assertEqual(list(self.data), list(data))
+class TestShowNetworkQosRuleMinimumPacketRate(TestNetworkQosRule):
+
+ def setUp(self):
+ super(TestShowNetworkQosRuleMinimumPacketRate, self).setUp()
+ attrs = {'qos_policy_id': self.qos_policy.id,
+ 'type': RULE_TYPE_MINIMUM_PACKET_RATE}
+ self.new_rule = network_fakes.FakeNetworkQosRule.create_one_qos_rule(
+ attrs)
+ self.qos_policy.rules = [self.new_rule]
+ self.columns = (
+ 'direction',
+ 'id',
+ 'min_kpps',
+ 'project_id',
+ 'qos_policy_id',
+ 'type'
+ )
+ self.data = (
+ self.new_rule.direction,
+ self.new_rule.id,
+ self.new_rule.min_kpps,
+ self.new_rule.project_id,
+ self.new_rule.qos_policy_id,
+ self.new_rule.type,
+ )
+
+ self.network.get_qos_minimum_packet_rate_rule = mock.Mock(
+ return_value=self.new_rule)
+
+ # Get the command object to test
+ self.cmd = network_qos_rule.ShowNetworkQosRule(self.app,
+ self.namespace)
+
+ def test_show_no_options(self):
+ arglist = []
+ verifylist = []
+
+ # Missing required args should bail here
+ self.assertRaises(tests_utils.ParserException, self.check_parser,
+ self.cmd, arglist, verifylist)
+
+ def test_show_all_options(self):
+ arglist = [
+ self.new_rule.qos_policy_id,
+ self.new_rule.id,
+ ]
+ verifylist = [
+ ('qos_policy', self.new_rule.qos_policy_id),
+ ('id', self.new_rule.id),
+ ]
+
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+ columns, data = self.cmd.take_action(parsed_args)
+
+ self.network.get_qos_minimum_packet_rate_rule.assert_called_once_with(
+ self.new_rule.id, self.qos_policy.id)
+ self.assertEqual(self.columns, columns)
+ self.assertEqual(list(self.data), list(data))
+
+
class TestShowNetworkQosDSCPMarking(TestNetworkQosRule):
def setUp(self):
diff --git a/openstackclient/tests/unit/network/v2/test_network_qos_rule_type.py b/openstackclient/tests/unit/network/v2/test_network_qos_rule_type.py
index 08a83fab..3aae822e 100644
--- a/openstackclient/tests/unit/network/v2/test_network_qos_rule_type.py
+++ b/openstackclient/tests/unit/network/v2/test_network_qos_rule_type.py
@@ -115,3 +115,37 @@ class TestListNetworkQosRuleType(TestNetworkQosRuleType):
self.network.qos_rule_types.assert_called_once_with(**{})
self.assertEqual(self.columns, columns)
self.assertEqual(self.data, list(data))
+
+ def test_qos_rule_type_list_all_supported(self):
+ arglist = [
+ '--all-supported'
+ ]
+ verifylist = [
+ ('all_supported', True),
+ ]
+
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+ columns, data = self.cmd.take_action(parsed_args)
+
+ self.network.qos_rule_types.assert_called_once_with(
+ **{'all_supported': True}
+ )
+ self.assertEqual(self.columns, columns)
+ self.assertEqual(self.data, list(data))
+
+ def test_qos_rule_type_list_all_rules(self):
+ arglist = [
+ '--all-rules'
+ ]
+ verifylist = [
+ ('all_rules', True),
+ ]
+
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+ columns, data = self.cmd.take_action(parsed_args)
+
+ self.network.qos_rule_types.assert_called_once_with(
+ **{'all_rules': True}
+ )
+ self.assertEqual(self.columns, columns)
+ self.assertEqual(self.data, list(data))
diff --git a/openstackclient/tests/unit/network/v2/test_network_rbac.py b/openstackclient/tests/unit/network/v2/test_network_rbac.py
index c7e3374d..7ce25205 100644
--- a/openstackclient/tests/unit/network/v2/test_network_rbac.py
+++ b/openstackclient/tests/unit/network/v2/test_network_rbac.py
@@ -44,7 +44,7 @@ class TestCreateNetworkRBAC(TestNetworkRBAC):
snp_object = network_fakes.FakeSubnetPool.create_one_subnet_pool()
ag_object = network_fakes.create_one_address_group()
project = identity_fakes_v3.FakeProject.create_one_project()
- rbac_policy = network_fakes.FakeNetworkRBAC.create_one_network_rbac(
+ rbac_policy = network_fakes.create_one_network_rbac(
attrs={'project_id': project.id,
'target_tenant': project.id,
'object_id': network_object.id}
@@ -65,7 +65,7 @@ class TestCreateNetworkRBAC(TestNetworkRBAC):
rbac_policy.object_id,
rbac_policy.object_type,
rbac_policy.project_id,
- rbac_policy.target_tenant,
+ rbac_policy.target_project_id,
]
def setUp(self):
@@ -120,13 +120,13 @@ class TestCreateNetworkRBAC(TestNetworkRBAC):
arglist = [
'--action', self.rbac_policy.action,
'--type', 'invalid_type',
- '--target-project', self.rbac_policy.target_tenant,
+ '--target-project', self.rbac_policy.target_project_id,
self.rbac_policy.object_id,
]
verifylist = [
('action', self.rbac_policy.action),
('type', 'invalid_type'),
- ('target-project', self.rbac_policy.target_tenant),
+ ('target-project', self.rbac_policy.target_project_id),
('rbac_policy', self.rbac_policy.id),
]
@@ -137,13 +137,13 @@ class TestCreateNetworkRBAC(TestNetworkRBAC):
arglist = [
'--type', self.rbac_policy.object_type,
'--action', 'invalid_action',
- '--target-project', self.rbac_policy.target_tenant,
+ '--target-project', self.rbac_policy.target_project_id,
self.rbac_policy.object_id,
]
verifylist = [
('type', self.rbac_policy.object_type),
('action', 'invalid_action'),
- ('target-project', self.rbac_policy.target_tenant),
+ ('target-project', self.rbac_policy.target_project_id),
('rbac_policy', self.rbac_policy.id),
]
@@ -154,13 +154,13 @@ class TestCreateNetworkRBAC(TestNetworkRBAC):
arglist = [
'--type', self.rbac_policy.object_type,
'--action', self.rbac_policy.action,
- '--target-project', self.rbac_policy.target_tenant,
+ '--target-project', self.rbac_policy.target_project_id,
self.rbac_policy.object_id,
]
verifylist = [
('type', self.rbac_policy.object_type),
('action', self.rbac_policy.action),
- ('target_project', self.rbac_policy.target_tenant),
+ ('target_project', self.rbac_policy.target_project_id),
('rbac_object', self.rbac_policy.object_id),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
@@ -172,7 +172,7 @@ class TestCreateNetworkRBAC(TestNetworkRBAC):
'object_id': self.rbac_policy.object_id,
'object_type': self.rbac_policy.object_type,
'action': self.rbac_policy.action,
- 'target_tenant': self.rbac_policy.target_tenant,
+ 'target_tenant': self.rbac_policy.target_project_id,
})
self.assertEqual(self.columns, columns)
self.assertEqual(self.data, list(data))
@@ -205,7 +205,7 @@ class TestCreateNetworkRBAC(TestNetworkRBAC):
arglist = [
'--type', self.rbac_policy.object_type,
'--action', self.rbac_policy.action,
- '--target-project', self.rbac_policy.target_tenant,
+ '--target-project', self.rbac_policy.target_project_id,
'--project', self.rbac_policy.project_id,
'--project-domain', self.project.domain_id,
'--target-project-domain', self.project.domain_id,
@@ -214,7 +214,7 @@ class TestCreateNetworkRBAC(TestNetworkRBAC):
verifylist = [
('type', self.rbac_policy.object_type),
('action', self.rbac_policy.action),
- ('target_project', self.rbac_policy.target_tenant),
+ ('target_project', self.rbac_policy.target_project_id),
('project', self.rbac_policy.project_id),
('project_domain', self.project.domain_id),
('target_project_domain', self.project.domain_id),
@@ -229,7 +229,7 @@ class TestCreateNetworkRBAC(TestNetworkRBAC):
'object_id': self.rbac_policy.object_id,
'object_type': self.rbac_policy.object_type,
'action': self.rbac_policy.action,
- 'target_tenant': self.rbac_policy.target_tenant,
+ 'target_tenant': self.rbac_policy.target_project_id,
'project_id': self.rbac_policy.project_id,
})
self.assertEqual(self.columns, columns)
@@ -251,13 +251,13 @@ class TestCreateNetworkRBAC(TestNetworkRBAC):
arglist = [
'--type', obj_type,
'--action', self.rbac_policy.action,
- '--target-project', self.rbac_policy.target_tenant,
+ '--target-project', self.rbac_policy.target_project_id,
obj_fake.name,
]
verifylist = [
('type', obj_type),
('action', self.rbac_policy.action),
- ('target_project', self.rbac_policy.target_tenant),
+ ('target_project', self.rbac_policy.target_project_id),
('rbac_object', obj_fake.name),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
@@ -269,7 +269,7 @@ class TestCreateNetworkRBAC(TestNetworkRBAC):
'object_id': obj_fake.id,
'object_type': obj_type,
'action': self.rbac_policy.action,
- 'target_tenant': self.rbac_policy.target_tenant,
+ 'target_tenant': self.rbac_policy.target_project_id,
})
self.data = [
self.rbac_policy.action,
@@ -277,7 +277,7 @@ class TestCreateNetworkRBAC(TestNetworkRBAC):
obj_fake.id,
obj_type,
self.rbac_policy.project_id,
- self.rbac_policy.target_tenant,
+ self.rbac_policy.target_project_id,
]
self.assertEqual(self.columns, columns)
self.assertEqual(self.data, list(data))
@@ -285,13 +285,13 @@ class TestCreateNetworkRBAC(TestNetworkRBAC):
class TestDeleteNetworkRBAC(TestNetworkRBAC):
- rbac_policies = network_fakes.FakeNetworkRBAC.create_network_rbacs(count=2)
+ rbac_policies = network_fakes.create_network_rbacs(count=2)
def setUp(self):
super(TestDeleteNetworkRBAC, self).setUp()
self.network.delete_rbac_policy = mock.Mock(return_value=None)
self.network.find_rbac_policy = (
- network_fakes.FakeNetworkRBAC.get_network_rbacs(
+ network_fakes.get_network_rbacs(
rbac_policies=self.rbac_policies)
)
@@ -368,7 +368,7 @@ class TestDeleteNetworkRBAC(TestNetworkRBAC):
class TestListNetworkRABC(TestNetworkRBAC):
# The network rbac policies to be listed.
- rbac_policies = network_fakes.FakeNetworkRBAC.create_network_rbacs(count=3)
+ rbac_policies = network_fakes.create_network_rbacs(count=3)
columns = (
'ID',
@@ -405,6 +405,9 @@ class TestListNetworkRABC(TestNetworkRBAC):
self.network.rbac_policies = mock.Mock(return_value=self.rbac_policies)
+ self.project = identity_fakes_v3.FakeProject.create_one_project()
+ self.projects_mock.get.return_value = self.project
+
def test_network_rbac_list(self):
arglist = []
verifylist = []
@@ -466,11 +469,27 @@ class TestListNetworkRABC(TestNetworkRBAC):
self.assertEqual(self.columns_long, columns)
self.assertEqual(self.data_long, list(data))
+ def test_network_rbac_list_target_project_opt(self):
+ arglist = [
+ '--target-project', self.rbac_policies[0].target_project_id, ]
+ verifylist = [
+ ('target_project', self.rbac_policies[0].target_project_id)]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ # DisplayCommandBase.take_action() returns two tuples
+ columns, data = self.cmd.take_action(parsed_args)
+
+ self.network.rbac_policies.assert_called_with(**{
+ 'target_project_id': self.project.id
+ })
+ self.assertEqual(self.columns, columns)
+ self.assertEqual(self.data, list(data))
+
class TestSetNetworkRBAC(TestNetworkRBAC):
project = identity_fakes_v3.FakeProject.create_one_project()
- rbac_policy = network_fakes.FakeNetworkRBAC.create_one_network_rbac(
+ rbac_policy = network_fakes.create_one_network_rbac(
attrs={'target_tenant': project.id})
def setUp(self):
@@ -525,7 +544,7 @@ class TestSetNetworkRBAC(TestNetworkRBAC):
class TestShowNetworkRBAC(TestNetworkRBAC):
- rbac_policy = network_fakes.FakeNetworkRBAC.create_one_network_rbac()
+ rbac_policy = network_fakes.create_one_network_rbac()
columns = (
'action',
@@ -542,7 +561,7 @@ class TestShowNetworkRBAC(TestNetworkRBAC):
rbac_policy.object_id,
rbac_policy.object_type,
rbac_policy.project_id,
- rbac_policy.target_tenant,
+ rbac_policy.target_project_id,
]
def setUp(self):
diff --git a/openstackclient/tests/unit/network/v2/test_network_trunk.py b/openstackclient/tests/unit/network/v2/test_network_trunk.py
new file mode 100644
index 00000000..fae70fb0
--- /dev/null
+++ b/openstackclient/tests/unit/network/v2/test_network_trunk.py
@@ -0,0 +1,851 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+import argparse
+import copy
+from unittest import mock
+from unittest.mock import call
+
+from osc_lib.cli import format_columns
+from osc_lib import exceptions
+import testtools
+
+from openstackclient.network.v2 import network_trunk
+from openstackclient.tests.unit.identity.v3 import fakes as identity_fakes_v3
+from openstackclient.tests.unit.network.v2 import fakes as network_fakes
+from openstackclient.tests.unit import utils as tests_utils
+
+
+# Tests for Neutron trunks
+#
+class TestNetworkTrunk(network_fakes.TestNetworkV2):
+
+ def setUp(self):
+ super().setUp()
+
+ # Get a shortcut to the network client
+ self.network = self.app.client_manager.network
+ # Get a shortcut to the ProjectManager Mock
+ self.projects_mock = self.app.client_manager.identity.projects
+ # Get a shortcut to the DomainManager Mock
+ self.domains_mock = self.app.client_manager.identity.domains
+
+
+class TestCreateNetworkTrunk(TestNetworkTrunk):
+ project = identity_fakes_v3.FakeProject.create_one_project()
+ domain = identity_fakes_v3.FakeDomain.create_one_domain()
+ trunk_networks = network_fakes.create_networks(count=2)
+ parent_port = network_fakes.create_one_port(
+ attrs={'project_id': project.id,
+ 'network_id': trunk_networks[0]['id']})
+ sub_port = network_fakes.create_one_port(
+ attrs={'project_id': project.id,
+ 'network_id': trunk_networks[1]['id']})
+
+ new_trunk = network_fakes.create_one_trunk(
+ attrs={'project_id': project.id,
+ 'port_id': parent_port['id'],
+ 'sub_ports': {
+ 'port_id': sub_port['id'],
+ 'segmentation_id': 42,
+ 'segmentation_type': 'vlan'}
+ })
+
+ columns = (
+ 'description',
+ 'id',
+ 'is_admin_state_up',
+ 'name',
+ 'port_id',
+ 'project_id',
+ 'status',
+ 'sub_ports',
+ 'tags'
+ )
+ data = (
+ new_trunk.description,
+ new_trunk.id,
+ new_trunk.is_admin_state_up,
+ new_trunk.name,
+ new_trunk.port_id,
+ new_trunk.project_id,
+ new_trunk.status,
+ format_columns.ListDictColumn(new_trunk.sub_ports),
+ [],
+ )
+
+ def setUp(self):
+ super().setUp()
+ self.network.create_trunk = mock.Mock(return_value=self.new_trunk)
+ self.network.find_port = mock.Mock(
+ side_effect=[self.parent_port, self.sub_port])
+
+ # Get the command object to test
+ self.cmd = network_trunk.CreateNetworkTrunk(self.app, self.namespace)
+
+ self.projects_mock.get.return_value = self.project
+ self.domains_mock.get.return_value = self.domain
+
+ def test_create_no_options(self):
+ arglist = []
+ verifylist = []
+
+ self.assertRaises(tests_utils.ParserException, self.check_parser,
+ self.cmd, arglist, verifylist)
+
+ def test_create_default_options(self):
+ arglist = [
+ "--parent-port", self.new_trunk['port_id'],
+ self.new_trunk['name'],
+ ]
+ verifylist = [
+ ('parent_port', self.new_trunk['port_id']),
+ ('name', self.new_trunk['name']),
+ ]
+
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+ columns, data = (self.cmd.take_action(parsed_args))
+
+ self.network.create_trunk.assert_called_once_with(**{
+ 'name': self.new_trunk['name'],
+ 'admin_state_up': self.new_trunk['admin_state_up'],
+ 'port_id': self.new_trunk['port_id'],
+ })
+ self.assertEqual(self.columns, columns)
+ self.assertEqual(self.data, data)
+
+ def test_create_full_options(self):
+ self.new_trunk['description'] = 'foo description'
+ subport = self.new_trunk.sub_ports[0]
+ arglist = [
+ "--disable",
+ "--description", self.new_trunk.description,
+ "--parent-port", self.new_trunk.port_id,
+ "--subport", 'port=%(port)s,segmentation-type=%(seg_type)s,'
+ 'segmentation-id=%(seg_id)s' % {
+ 'seg_id': subport['segmentation_id'],
+ 'seg_type': subport['segmentation_type'],
+ 'port': subport['port_id']},
+ self.new_trunk.name,
+ ]
+ verifylist = [
+ ('name', self.new_trunk.name),
+ ('description', self.new_trunk.description),
+ ('parent_port', self.new_trunk.port_id),
+ ('add_subports', [{
+ 'port': subport['port_id'],
+ 'segmentation-id': str(subport['segmentation_id']),
+ 'segmentation-type': subport['segmentation_type']}]),
+ ('disable', True),
+ ]
+
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ columns, data = (self.cmd.take_action(parsed_args))
+
+ self.network.create_trunk.assert_called_once_with(**{
+ 'name': self.new_trunk.name,
+ 'description': self.new_trunk.description,
+ 'admin_state_up': False,
+ 'port_id': self.new_trunk.port_id,
+ 'sub_ports': [subport],
+ })
+ self.assertEqual(self.columns, columns)
+ data_with_desc = list(self.data)
+ data_with_desc[0] = self.new_trunk['description']
+ data_with_desc = tuple(data_with_desc)
+ self.assertEqual(data_with_desc, data)
+
+ def test_create_trunk_with_subport_invalid_segmentation_id_fail(self):
+ subport = self.new_trunk.sub_ports[0]
+ arglist = [
+ "--parent-port", self.new_trunk.port_id,
+ "--subport", "port=%(port)s,segmentation-type=%(seg_type)s,"
+ "segmentation-id=boom" % {
+ 'seg_type': subport['segmentation_type'],
+ 'port': subport['port_id']},
+ self.new_trunk.name,
+ ]
+ verifylist = [
+ ('name', self.new_trunk.name),
+ ('parent_port', self.new_trunk.port_id),
+ ('add_subports', [{
+ 'port': subport['port_id'],
+ 'segmentation-id': 'boom',
+ 'segmentation-type': subport['segmentation_type']}]),
+ ]
+
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+ with testtools.ExpectedException(exceptions.CommandError) as e:
+ self.cmd.take_action(parsed_args)
+ self.assertEqual("Segmentation-id 'boom' is not an integer",
+ str(e))
+
+ def test_create_network_trunk_subports_without_optional_keys(self):
+ subport = copy.copy(self.new_trunk.sub_ports[0])
+ # Pop out the segmentation-id and segmentation-type
+ subport.pop('segmentation_type')
+ subport.pop('segmentation_id')
+ arglist = [
+ '--parent-port', self.new_trunk.port_id,
+ '--subport', 'port=%(port)s' % {'port': subport['port_id']},
+ self.new_trunk.name,
+ ]
+ verifylist = [
+ ('name', self.new_trunk.name),
+ ('parent_port', self.new_trunk.port_id),
+ ('add_subports', [{
+ 'port': subport['port_id']}]),
+ ]
+
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+ columns, data = (self.cmd.take_action(parsed_args))
+
+ self.network.create_trunk.assert_called_once_with(**{
+ 'name': self.new_trunk.name,
+ 'admin_state_up': True,
+ 'port_id': self.new_trunk.port_id,
+ 'sub_ports': [subport],
+ })
+ self.assertEqual(self.columns, columns)
+ data_with_desc = list(self.data)
+ data_with_desc[0] = self.new_trunk['description']
+ data_with_desc = tuple(data_with_desc)
+ self.assertEqual(data_with_desc, data)
+
+ def test_create_network_trunk_subports_without_required_key_fail(self):
+ subport = self.new_trunk.sub_ports[0]
+ arglist = [
+ '--parent-port', self.new_trunk.port_id,
+ '--subport', 'segmentation-type=%(seg_type)s,'
+ 'segmentation-id=%(seg_id)s' % {
+ 'seg_id': subport['segmentation_id'],
+ 'seg_type': subport['segmentation_type']},
+ self.new_trunk.name,
+ ]
+ verifylist = [
+ ('name', self.new_trunk.name),
+ ('parent_port', self.new_trunk.port_id),
+ ('add_subports', [{
+ 'segmentation_id': str(subport['segmentation_id']),
+ 'segmentation_type': subport['segmentation_type']}]),
+ ]
+
+ with testtools.ExpectedException(argparse.ArgumentTypeError):
+ self.check_parser(self.cmd, arglist, verifylist)
+
+
+class TestDeleteNetworkTrunk(TestNetworkTrunk):
+ # The trunks to be deleted.
+ project = identity_fakes_v3.FakeProject.create_one_project()
+ domain = identity_fakes_v3.FakeDomain.create_one_domain()
+ trunk_networks = network_fakes.create_networks(count=2)
+ parent_port = network_fakes.create_one_port(
+ attrs={'project_id': project.id,
+ 'network_id': trunk_networks[0]['id']})
+ sub_port = network_fakes.create_one_port(
+ attrs={'project_id': project.id,
+ 'network_id': trunk_networks[1]['id']})
+
+ new_trunks = network_fakes.create_trunks(
+ attrs={'project_id': project.id,
+ 'port_id': parent_port['id'],
+ 'sub_ports': {
+ 'port_id': sub_port['id'],
+ 'segmentation_id': 42,
+ 'segmentation_type': 'vlan'}
+ })
+
+ def setUp(self):
+ super().setUp()
+ self.network.find_trunk = mock.Mock(
+ side_effect=[self.new_trunks[0], self.new_trunks[1]])
+ self.network.delete_trunk = mock.Mock(return_value=None)
+ self.network.find_port = mock.Mock(
+ side_effect=[self.parent_port, self.sub_port])
+
+ self.projects_mock.get.return_value = self.project
+ self.domains_mock.get.return_value = self.domain
+
+ # Get the command object to test
+ self.cmd = network_trunk.DeleteNetworkTrunk(self.app, self.namespace)
+
+ def test_delete_trunk(self):
+ arglist = [
+ self.new_trunks[0].name,
+ ]
+ verifylist = [
+ ('trunk', [self.new_trunks[0].name]),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ result = self.cmd.take_action(parsed_args)
+ self.network.delete_trunk.assert_called_once_with(
+ self.new_trunks[0].id)
+ self.assertIsNone(result)
+
+ def test_delete_trunk_multiple(self):
+ arglist = []
+ verifylist = []
+
+ for t in self.new_trunks:
+ arglist.append(t['name'])
+ verifylist = [
+ ('trunk', arglist),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ result = self.cmd.take_action(parsed_args)
+
+ calls = []
+ for t in self.new_trunks:
+ calls.append(call(t.id))
+ self.network.delete_trunk.assert_has_calls(calls)
+ self.assertIsNone(result)
+
+ def test_delete_trunk_multiple_with_exception(self):
+ arglist = [
+ self.new_trunks[0].name,
+ 'unexist_trunk',
+ ]
+ verifylist = [
+ ('trunk',
+ [self.new_trunks[0].name, 'unexist_trunk']),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ self.network.find_trunk = mock.Mock(
+ side_effect=[self.new_trunks[0], exceptions.CommandError])
+ with testtools.ExpectedException(exceptions.CommandError) as e:
+ self.cmd.take_action(parsed_args)
+ self.assertEqual('1 of 2 trunks failed to delete.', str(e))
+ self.network.delete_trunk.assert_called_once_with(
+ self.new_trunks[0].id
+ )
+
+
+class TestShowNetworkTrunk(TestNetworkTrunk):
+
+ project = identity_fakes_v3.FakeProject.create_one_project()
+ domain = identity_fakes_v3.FakeDomain.create_one_domain()
+ # The trunk to show.
+ new_trunk = network_fakes.create_one_trunk()
+ columns = (
+ 'description',
+ 'id',
+ 'is_admin_state_up',
+ 'name',
+ 'port_id',
+ 'project_id',
+ 'status',
+ 'sub_ports',
+ 'tags'
+ )
+ data = (
+ new_trunk.description,
+ new_trunk.id,
+ new_trunk.is_admin_state_up,
+ new_trunk.name,
+ new_trunk.port_id,
+ new_trunk.project_id,
+ new_trunk.status,
+ format_columns.ListDictColumn(new_trunk.sub_ports),
+ [],
+ )
+
+ def setUp(self):
+ super().setUp()
+ self.network.find_trunk = mock.Mock(return_value=self.new_trunk)
+ self.network.get_trunk = mock.Mock(return_value=self.new_trunk)
+
+ self.projects_mock.get.return_value = self.project
+ self.domains_mock.get.return_value = self.domain
+
+ # Get the command object to test
+ self.cmd = network_trunk.ShowNetworkTrunk(self.app, self.namespace)
+
+ def test_show_no_options(self):
+ arglist = []
+ verifylist = []
+
+ self.assertRaises(tests_utils.ParserException, self.check_parser,
+ self.cmd, arglist, verifylist)
+
+ def test_show_all_options(self):
+ arglist = [
+ self.new_trunk.id,
+ ]
+ verifylist = [
+ ('trunk', self.new_trunk.id),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ columns, data = self.cmd.take_action(parsed_args)
+
+ self.network.get_trunk.assert_called_once_with(self.new_trunk.id)
+ self.assertEqual(self.columns, columns)
+ self.assertEqual(self.data, data)
+
+
+class TestListNetworkTrunk(TestNetworkTrunk):
+ project = identity_fakes_v3.FakeProject.create_one_project()
+ domain = identity_fakes_v3.FakeDomain.create_one_domain()
+ # Create trunks to be listed.
+ new_trunks = network_fakes.create_trunks(
+ {'created_at': '2001-01-01 00:00:00',
+ 'updated_at': '2001-01-01 00:00:00'}, count=3)
+
+ columns = (
+ 'ID',
+ 'Name',
+ 'Parent Port',
+ 'Description'
+ )
+ columns_long = columns + (
+ 'Status',
+ 'State',
+ 'Created At',
+ 'Updated At'
+ )
+ data = []
+ for t in new_trunks:
+ data.append((
+ t['id'],
+ t['name'],
+ t['port_id'],
+ t['description']
+ ))
+ data_long = []
+ for t in new_trunks:
+ data_long.append((
+ t['id'],
+ t['name'],
+ t['port_id'],
+ t['description'],
+ t['status'],
+ network_trunk.AdminStateColumn(''),
+ '2001-01-01 00:00:00',
+ '2001-01-01 00:00:00',
+ ))
+
+ def setUp(self):
+ super().setUp()
+ self.network.trunks = mock.Mock(return_value=self.new_trunks)
+
+ self.projects_mock.get.return_value = self.project
+ self.domains_mock.get.return_value = self.domain
+
+ # Get the command object to test
+ self.cmd = network_trunk.ListNetworkTrunk(self.app, self.namespace)
+
+ def test_trunk_list_no_option(self):
+ arglist = []
+ verifylist = []
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ columns, data = self.cmd.take_action(parsed_args)
+
+ self.network.trunks.assert_called_once_with()
+ self.assertEqual(self.columns, columns)
+ self.assertEqual(self.data, list(data))
+
+ def test_trunk_list_long(self):
+ arglist = [
+ '--long',
+ ]
+ verifylist = [
+ ('long', True),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ columns, data = self.cmd.take_action(parsed_args)
+
+ self.network.trunks.assert_called_once_with()
+ self.assertEqual(self.columns_long, columns)
+ self.assertEqual(self.data_long, list(data))
+
+
+class TestSetNetworkTrunk(TestNetworkTrunk):
+
+ project = identity_fakes_v3.FakeProject.create_one_project()
+ domain = identity_fakes_v3.FakeDomain.create_one_domain()
+ trunk_networks = network_fakes.create_networks(count=2)
+ parent_port = network_fakes.create_one_port(
+ attrs={'project_id': project.id,
+ 'network_id': trunk_networks[0]['id']})
+ sub_port = network_fakes.create_one_port(
+ attrs={'project_id': project.id,
+ 'network_id': trunk_networks[1]['id']})
+ # The trunk to set.
+ _trunk = network_fakes.create_one_trunk(
+ attrs={'project_id': project.id,
+ 'port_id': parent_port['id'],
+ 'sub_ports': {
+ 'port_id': sub_port['id'],
+ 'segmentation_id': 42,
+ 'segmentation_type': 'vlan'}
+ })
+ columns = (
+ 'admin_state_up',
+ 'id',
+ 'name',
+ 'description',
+ 'port_id',
+ 'project_id',
+ 'status',
+ 'sub_ports',
+ )
+ data = (
+ _trunk.id,
+ _trunk.name,
+ _trunk.description,
+ _trunk.port_id,
+ _trunk.project_id,
+ _trunk.status,
+ format_columns.ListDictColumn(_trunk.sub_ports),
+ )
+
+ def setUp(self):
+ super().setUp()
+ self.network.update_trunk = mock.Mock(return_value=self._trunk)
+ self.network.add_trunk_subports = mock.Mock(return_value=self._trunk)
+ self.network.find_trunk = mock.Mock(return_value=self._trunk)
+ self.network.find_port = mock.Mock(
+ side_effect=[self.sub_port, self.sub_port])
+
+ self.projects_mock.get.return_value = self.project
+ self.domains_mock.get.return_value = self.domain
+
+ # Get the command object to test
+ self.cmd = network_trunk.SetNetworkTrunk(self.app, self.namespace)
+
+ def _test_set_network_trunk_attr(self, attr, value):
+ arglist = [
+ '--%s' % attr, value,
+ self._trunk[attr],
+ ]
+ verifylist = [
+ (attr, value),
+ ('trunk', self._trunk[attr]),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ result = self.cmd.take_action(parsed_args)
+
+ attrs = {
+ attr: value,
+ }
+ self.network.update_trunk.assert_called_once_with(
+ self._trunk, **attrs)
+ self.assertIsNone(result)
+
+ def test_set_network_trunk_name(self):
+ self._test_set_network_trunk_attr('name', 'trunky')
+
+ def test_set_network_trunk_description(self):
+ self._test_set_network_trunk_attr('description', 'description')
+
+ def test_set_network_trunk_admin_state_up_disable(self):
+ arglist = [
+ '--disable',
+ self._trunk['name'],
+ ]
+ verifylist = [
+ ('disable', True),
+ ('trunk', self._trunk['name']),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ result = self.cmd.take_action(parsed_args)
+
+ attrs = {
+ 'admin_state_up': False,
+ }
+ self.network.update_trunk.assert_called_once_with(
+ self._trunk, **attrs)
+ self.assertIsNone(result)
+
+ def test_set_network_trunk_admin_state_up_enable(self):
+ arglist = [
+ '--enable',
+ self._trunk['name'],
+ ]
+ verifylist = [
+ ('enable', True),
+ ('trunk', self._trunk['name']),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ result = self.cmd.take_action(parsed_args)
+
+ attrs = {
+ 'admin_state_up': True,
+ }
+ self.network.update_trunk.assert_called_once_with(
+ self._trunk, **attrs)
+ self.assertIsNone(result)
+
+ def test_set_network_trunk_nothing(self):
+ arglist = [self._trunk['name'], ]
+ verifylist = [('trunk', self._trunk['name']), ]
+
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+ result = self.cmd.take_action(parsed_args)
+
+ attrs = {}
+ self.network.update_trunk.assert_called_once_with(
+ self._trunk, **attrs)
+ self.assertIsNone(result)
+
+ def test_set_network_trunk_subports(self):
+ subport = self._trunk['sub_ports'][0]
+ arglist = [
+ '--subport', 'port=%(port)s,segmentation-type=%(seg_type)s,'
+ 'segmentation-id=%(seg_id)s' % {
+ 'seg_id': subport['segmentation_id'],
+ 'seg_type': subport['segmentation_type'],
+ 'port': subport['port_id']},
+ self._trunk['name'],
+ ]
+ verifylist = [
+ ('trunk', self._trunk['name']),
+ ('set_subports', [{
+ 'port': subport['port_id'],
+ 'segmentation-id': str(subport['segmentation_id']),
+ 'segmentation-type': subport['segmentation_type']}]),
+ ]
+
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+ result = self.cmd.take_action(parsed_args)
+
+ self.network.add_trunk_subports.assert_called_once_with(
+ self._trunk, [subport])
+ self.assertIsNone(result)
+
+ def test_set_network_trunk_subports_without_optional_keys(self):
+ subport = copy.copy(self._trunk['sub_ports'][0])
+ # Pop out the segmentation-id and segmentation-type
+ subport.pop('segmentation_type')
+ subport.pop('segmentation_id')
+ arglist = [
+ '--subport', 'port=%(port)s' % {'port': subport['port_id']},
+ self._trunk['name'],
+ ]
+ verifylist = [
+ ('trunk', self._trunk['name']),
+ ('set_subports', [{
+ 'port': subport['port_id']}]),
+ ]
+
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+ result = self.cmd.take_action(parsed_args)
+
+ self.network.add_trunk_subports.assert_called_once_with(
+ self._trunk, [subport])
+ self.assertIsNone(result)
+
+ def test_set_network_trunk_subports_without_required_key_fail(self):
+ subport = self._trunk['sub_ports'][0]
+ arglist = [
+ '--subport', 'segmentation-type=%(seg_type)s,'
+ 'segmentation-id=%(seg_id)s' % {
+ 'seg_id': subport['segmentation_id'],
+ 'seg_type': subport['segmentation_type']},
+ self._trunk['name'],
+ ]
+ verifylist = [
+ ('trunk', self._trunk['name']),
+ ('set_subports', [{
+ 'segmentation-id': str(subport['segmentation_id']),
+ 'segmentation-type': subport['segmentation_type']}]),
+ ]
+
+ with testtools.ExpectedException(argparse.ArgumentTypeError):
+ self.check_parser(self.cmd, arglist, verifylist)
+
+ self.network.add_trunk_subports.assert_not_called()
+
+ def test_set_trunk_attrs_with_exception(self):
+ arglist = [
+ '--name', 'reallylongname',
+ self._trunk['name'],
+ ]
+ verifylist = [
+ ('trunk', self._trunk['name']),
+ ('name', 'reallylongname'),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ self.network.update_trunk = (
+ mock.Mock(side_effect=exceptions.CommandError)
+ )
+ with testtools.ExpectedException(exceptions.CommandError) as e:
+ self.cmd.take_action(parsed_args)
+ self.assertEqual(
+ "Failed to set trunk '%s': " % self._trunk['name'],
+ str(e))
+ attrs = {'name': 'reallylongname'}
+ self.network.update_trunk.assert_called_once_with(
+ self._trunk, **attrs)
+ self.network.add_trunk_subports.assert_not_called()
+
+ def test_set_trunk_add_subport_with_exception(self):
+ arglist = [
+ '--subport', 'port=invalid_subport',
+ self._trunk['name'],
+ ]
+ verifylist = [
+ ('trunk', self._trunk['name']),
+ ('set_subports', [{'port': 'invalid_subport'}]),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ self.network.add_trunk_subports = (
+ mock.Mock(side_effect=exceptions.CommandError)
+ )
+ self.network.find_port = (mock.Mock(
+ return_value={'id': 'invalid_subport'}))
+ with testtools.ExpectedException(exceptions.CommandError) as e:
+ self.cmd.take_action(parsed_args)
+ self.assertEqual(
+ "Failed to add subports to trunk '%s': " % self._trunk['name'],
+ str(e))
+ self.network.update_trunk.assert_called_once_with(
+ self._trunk)
+ self.network.add_trunk_subports.assert_called_once_with(
+ self._trunk, [{'port_id': 'invalid_subport'}])
+
+
+class TestListNetworkSubport(TestNetworkTrunk):
+
+ _trunk = network_fakes.create_one_trunk()
+ _subports = _trunk['sub_ports']
+
+ columns = (
+ 'Port',
+ 'Segmentation Type',
+ 'Segmentation ID',
+ )
+ data = []
+ for s in _subports:
+ data.append((
+ s['port_id'],
+ s['segmentation_type'],
+ s['segmentation_id'],
+ ))
+
+ def setUp(self):
+ super().setUp()
+
+ self.network.find_trunk = mock.Mock(return_value=self._trunk)
+ self.network.get_trunk_subports = mock.Mock(
+ return_value={network_trunk.SUB_PORTS: self._subports})
+
+ # Get the command object to test
+ self.cmd = network_trunk.ListNetworkSubport(self.app, self.namespace)
+
+ def test_subport_list(self):
+ arglist = [
+ '--trunk', self._trunk['name'],
+ ]
+ verifylist = [
+ ('trunk', self._trunk['name']),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+ columns, data = self.cmd.take_action(parsed_args)
+
+ self.network.get_trunk_subports.assert_called_once_with(self._trunk)
+ self.assertEqual(self.columns, columns)
+ self.assertEqual(self.data, list(data))
+
+
+class TestUnsetNetworkTrunk(TestNetworkTrunk):
+ project = identity_fakes_v3.FakeProject.create_one_project()
+ domain = identity_fakes_v3.FakeDomain.create_one_domain()
+ trunk_networks = network_fakes.create_networks(count=2)
+ parent_port = network_fakes.create_one_port(
+ attrs={'project_id': project.id,
+ 'network_id': trunk_networks[0]['id']})
+ sub_port = network_fakes.create_one_port(
+ attrs={'project_id': project.id,
+ 'network_id': trunk_networks[1]['id']})
+ _trunk = network_fakes.create_one_trunk(
+ attrs={'project_id': project.id,
+ 'port_id': parent_port['id'],
+ 'sub_ports': {
+ 'port_id': sub_port['id'],
+ 'segmentation_id': 42,
+ 'segmentation_type': 'vlan'}
+ })
+
+ columns = (
+ 'admin_state_up',
+ 'id',
+ 'name',
+ 'port_id',
+ 'project_id',
+ 'status',
+ 'sub_ports',
+ )
+ data = (
+ network_trunk.AdminStateColumn(_trunk['admin_state_up']),
+ _trunk['id'],
+ _trunk['name'],
+ _trunk['port_id'],
+ _trunk['project_id'],
+ _trunk['status'],
+ format_columns.ListDictColumn(_trunk['sub_ports']),
+ )
+
+ def setUp(self):
+ super().setUp()
+
+ self.network.find_trunk = mock.Mock(return_value=self._trunk)
+ self.network.find_port = mock.Mock(
+ side_effect=[self.sub_port, self.sub_port])
+ self.network.delete_trunk_subports = mock.Mock(return_value=None)
+
+ # Get the command object to test
+ self.cmd = network_trunk.UnsetNetworkTrunk(self.app, self.namespace)
+
+ def test_unset_network_trunk_subport(self):
+ subport = self._trunk['sub_ports'][0]
+ arglist = [
+ "--subport", subport['port_id'],
+ self._trunk['name'],
+ ]
+
+ verifylist = [
+ ('trunk', self._trunk['name']),
+ ('unset_subports', [subport['port_id']]),
+ ]
+
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ result = self.cmd.take_action(parsed_args)
+
+ self.network.delete_trunk_subports.assert_called_once_with(
+ self._trunk,
+ [{'port_id': subport['port_id']}]
+ )
+ self.assertIsNone(result)
+
+ def test_unset_subport_no_arguments_fail(self):
+ arglist = [
+ self._trunk['name'],
+ ]
+ verifylist = [
+ ('trunk', self._trunk['name']),
+ ]
+ self.assertRaises(tests_utils.ParserException,
+ self.check_parser, self.cmd, arglist, verifylist)
diff --git a/openstackclient/tests/unit/network/v2/test_port.py b/openstackclient/tests/unit/network/v2/test_port.py
index bde01800..04412c5a 100644
--- a/openstackclient/tests/unit/network/v2/test_port.py
+++ b/openstackclient/tests/unit/network/v2/test_port.py
@@ -50,6 +50,7 @@ class TestPort(network_fakes.TestNetworkV2):
'binding_vif_details',
'binding_vif_type',
'binding_vnic_type',
+ 'created_at',
'data_plane_status',
'description',
'device_id',
@@ -61,6 +62,7 @@ class TestPort(network_fakes.TestNetworkV2):
'extra_dhcp_opts',
'fixed_ips',
'id',
+ 'ip_allocation',
'mac_address',
'name',
'network_id',
@@ -68,21 +70,26 @@ class TestPort(network_fakes.TestNetworkV2):
'port_security_enabled',
'project_id',
'propagate_uplink_status',
+ 'resource_request',
+ 'revision_number',
'qos_network_policy_id',
'qos_policy_id',
'security_group_ids',
'status',
'tags',
+ 'trunk_details',
+ 'updated_at',
)
data = (
- port.AdminStateColumn(fake_port.admin_state_up),
+ port.AdminStateColumn(fake_port.is_admin_state_up),
format_columns.ListDictColumn(fake_port.allowed_address_pairs),
fake_port.binding_host_id,
format_columns.DictColumn(fake_port.binding_profile),
format_columns.DictColumn(fake_port.binding_vif_details),
fake_port.binding_vif_type,
fake_port.binding_vnic_type,
+ fake_port.created_at,
fake_port.data_plane_status,
fake_port.description,
fake_port.device_id,
@@ -94,18 +101,23 @@ class TestPort(network_fakes.TestNetworkV2):
format_columns.ListDictColumn(fake_port.extra_dhcp_opts),
format_columns.ListDictColumn(fake_port.fixed_ips),
fake_port.id,
+ fake_port.ip_allocation,
fake_port.mac_address,
fake_port.name,
fake_port.network_id,
fake_port.numa_affinity_policy,
- fake_port.port_security_enabled,
+ fake_port.is_port_security_enabled,
fake_port.project_id,
fake_port.propagate_uplink_status,
+ fake_port.resource_request,
+ fake_port.revision_number,
fake_port.qos_network_policy_id,
fake_port.qos_policy_id,
format_columns.ListColumn(fake_port.security_group_ids),
fake_port.status,
format_columns.ListColumn(fake_port.tags),
+ fake_port.trunk_details,
+ fake_port.updated_at,
)
return columns, data
@@ -113,7 +125,7 @@ class TestPort(network_fakes.TestNetworkV2):
class TestCreatePort(TestPort):
- _port = network_fakes.FakePort.create_one_port()
+ _port = network_fakes.create_one_port()
columns, data = TestPort._get_common_cols_data(_port)
def setUp(self):
@@ -152,7 +164,7 @@ class TestCreatePort(TestPort):
})
self.assertFalse(self.network.set_tags.called)
- self.assertEqual(self.columns, columns)
+ self.assertEqual(set(self.columns), set(columns))
self.assertCountEqual(self.data, data)
def test_create_full_options(self):
@@ -210,7 +222,7 @@ class TestCreatePort(TestPort):
'name': 'test-port',
})
- self.assertEqual(self.columns, columns)
+ self.assertEqual(set(self.columns), set(columns))
self.assertCountEqual(self.data, data)
def test_create_invalid_json_binding_profile(self):
@@ -261,7 +273,7 @@ class TestCreatePort(TestPort):
'name': 'test-port',
})
- self.assertEqual(self.columns, columns)
+ self.assertEqual(set(self.columns), set(columns))
self.assertCountEqual(self.data, data)
def test_create_with_security_group(self):
@@ -290,7 +302,7 @@ class TestCreatePort(TestPort):
'name': 'test-port',
})
- self.assertEqual(self.columns, columns)
+ self.assertEqual(set(self.columns), set(columns))
self.assertCountEqual(self.data, data)
def test_create_port_with_dns_name(self):
@@ -316,7 +328,7 @@ class TestCreatePort(TestPort):
'name': 'test-port',
})
- self.assertEqual(self.columns, columns)
+ self.assertEqual(set(self.columns), set(columns))
self.assertCountEqual(self.data, data)
def test_create_with_security_groups(self):
@@ -346,7 +358,7 @@ class TestCreatePort(TestPort):
'name': 'test-port',
})
- self.assertEqual(self.columns, columns)
+ self.assertEqual(set(self.columns), set(columns))
self.assertCountEqual(self.data, data)
def test_create_with_no_security_groups(self):
@@ -372,7 +384,7 @@ class TestCreatePort(TestPort):
'name': 'test-port',
})
- self.assertEqual(self.columns, columns)
+ self.assertEqual(set(self.columns), set(columns))
self.assertCountEqual(self.data, data)
def test_create_with_no_fixed_ips(self):
@@ -398,7 +410,7 @@ class TestCreatePort(TestPort):
'name': 'test-port',
})
- self.assertEqual(self.columns, columns)
+ self.assertEqual(set(self.columns), set(columns))
self.assertCountEqual(self.data, data)
def test_create_port_with_allowed_address_pair_ipaddr(self):
@@ -428,7 +440,7 @@ class TestCreatePort(TestPort):
'name': 'test-port',
})
- self.assertEqual(self.columns, columns)
+ self.assertEqual(set(self.columns), set(columns))
self.assertCountEqual(self.data, data)
def test_create_port_with_allowed_address_pair(self):
@@ -464,7 +476,7 @@ class TestCreatePort(TestPort):
'name': 'test-port',
})
- self.assertEqual(self.columns, columns)
+ self.assertEqual(set(self.columns), set(columns))
self.assertCountEqual(self.data, data)
def test_create_port_with_qos(self):
@@ -492,7 +504,7 @@ class TestCreatePort(TestPort):
'name': 'test-port',
})
- self.assertEqual(self.columns, columns)
+ self.assertEqual(set(self.columns), set(columns))
self.assertCountEqual(self.data, data)
def test_create_port_security_enabled(self):
@@ -601,7 +613,7 @@ class TestCreatePort(TestPort):
else:
self.assertFalse(self.network.set_tags.called)
- self.assertEqual(self.columns, columns)
+ self.assertEqual(set(self.columns), set(columns))
self.assertCountEqual(self.data, data)
def test_create_with_tags(self):
@@ -644,7 +656,7 @@ class TestCreatePort(TestPort):
'name': 'test-port',
})
- self.assertEqual(self.columns, columns)
+ self.assertEqual(set(self.columns), set(columns))
self.assertCountEqual(self.data, data)
def test_create_with_uplink_status_propagation_enabled(self):
@@ -724,7 +736,7 @@ class TestCreatePort(TestPort):
create_args['numa_affinity_policy'] = numa_affinity_policy
self.network.create_port.assert_called_once_with(**create_args)
- self.assertEqual(self.columns, columns)
+ self.assertEqual(set(self.columns), set(columns))
self.assertCountEqual(self.data, data)
def test_create_with_numa_affinity_policy_required(self):
@@ -763,20 +775,20 @@ class TestCreatePort(TestPort):
'device_profile': 'cyborg_device_profile_1',
}
self.network.create_port.assert_called_once_with(**create_args)
- self.assertEqual(self.columns, columns)
+ self.assertEqual(set(self.columns), set(columns))
self.assertCountEqual(self.data, data)
class TestDeletePort(TestPort):
# Ports to delete.
- _ports = network_fakes.FakePort.create_ports(count=2)
+ _ports = network_fakes.create_ports(count=2)
def setUp(self):
super(TestDeletePort, self).setUp()
self.network.delete_port = mock.Mock(return_value=None)
- self.network.find_port = network_fakes.FakePort.get_ports(
+ self.network.find_port = network_fakes.get_ports(
ports=self._ports)
# Get the command object to test
self.cmd = port.DeletePort(self.app, self.namespace)
@@ -848,7 +860,7 @@ class TestDeletePort(TestPort):
class TestListPort(TestPort):
- _ports = network_fakes.FakePort.create_ports(count=3)
+ _ports = network_fakes.create_ports(count=3)
columns = (
'ID',
@@ -1317,7 +1329,7 @@ class TestListPort(TestPort):
class TestSetPort(TestPort):
- _port = network_fakes.FakePort.create_one_port({'tags': ['green', 'red']})
+ _port = network_fakes.create_one_port({'tags': ['green', 'red']})
def setUp(self):
super(TestSetPort, self).setUp()
@@ -1345,7 +1357,7 @@ class TestSetPort(TestPort):
self.assertIsNone(result)
def test_set_port_fixed_ip(self):
- _testport = network_fakes.FakePort.create_one_port(
+ _testport = network_fakes.create_one_port(
{'fixed_ips': [{'ip_address': '0.0.0.1'}]})
self.network.find_port = mock.Mock(return_value=_testport)
arglist = [
@@ -1369,7 +1381,7 @@ class TestSetPort(TestPort):
self.assertIsNone(result)
def test_set_port_fixed_ip_clear(self):
- _testport = network_fakes.FakePort.create_one_port(
+ _testport = network_fakes.create_one_port(
{'fixed_ips': [{'ip_address': '0.0.0.1'}]})
self.network.find_port = mock.Mock(return_value=_testport)
arglist = [
@@ -1412,7 +1424,7 @@ class TestSetPort(TestPort):
self.assertIsNone(result)
def test_set_port_overwrite_binding_profile(self):
- _testport = network_fakes.FakePort.create_one_port(
+ _testport = network_fakes.create_one_port(
{'binding_profile': {'lok_i': 'visi_on'}})
self.network.find_port = mock.Mock(return_value=_testport)
arglist = [
@@ -1434,7 +1446,7 @@ class TestSetPort(TestPort):
self.assertIsNone(result)
def test_overwrite_mac_address(self):
- _testport = network_fakes.FakePort.create_one_port(
+ _testport = network_fakes.create_one_port(
{'mac_address': '11:22:33:44:55:66'})
self.network.find_port = mock.Mock(return_value=_testport)
arglist = [
@@ -1578,7 +1590,7 @@ class TestSetPort(TestPort):
sg_2 = network_fakes.FakeSecurityGroup.create_one_security_group()
sg_3 = network_fakes.FakeSecurityGroup.create_one_security_group()
self.network.find_security_group = mock.Mock(side_effect=[sg_2, sg_3])
- _testport = network_fakes.FakePort.create_one_port(
+ _testport = network_fakes.create_one_port(
{'security_group_ids': [sg_1.id]})
self.network.find_port = mock.Mock(return_value=_testport)
arglist = [
@@ -1620,7 +1632,7 @@ class TestSetPort(TestPort):
def test_set_port_security_group_replace(self):
sg1 = network_fakes.FakeSecurityGroup.create_one_security_group()
sg2 = network_fakes.FakeSecurityGroup.create_one_security_group()
- _testport = network_fakes.FakePort.create_one_port(
+ _testport = network_fakes.create_one_port(
{'security_group_ids': [sg1.id]})
self.network.find_port = mock.Mock(return_value=_testport)
self.network.find_security_group = mock.Mock(return_value=sg2)
@@ -1662,7 +1674,7 @@ class TestSetPort(TestPort):
self.assertIsNone(result)
def test_set_port_append_allowed_address_pair(self):
- _testport = network_fakes.FakePort.create_one_port(
+ _testport = network_fakes.create_one_port(
{'allowed_address_pairs': [{'ip_address': '192.168.1.123'}]})
self.network.find_port = mock.Mock(return_value=_testport)
arglist = [
@@ -1685,7 +1697,7 @@ class TestSetPort(TestPort):
self.assertIsNone(result)
def test_set_port_overwrite_allowed_address_pair(self):
- _testport = network_fakes.FakePort.create_one_port(
+ _testport = network_fakes.create_one_port(
{'allowed_address_pairs': [{'ip_address': '192.168.1.123'}]})
self.network.find_port = mock.Mock(return_value=_testport)
arglist = [
@@ -1787,7 +1799,7 @@ class TestSetPort(TestPort):
def test_set_port_with_qos(self):
qos_policy = network_fakes.FakeNetworkQosPolicy.create_one_qos_policy()
self.network.find_qos_policy = mock.Mock(return_value=qos_policy)
- _testport = network_fakes.FakePort.create_one_port(
+ _testport = network_fakes.create_one_port(
{'qos_policy_id': None})
self.network.find_port = mock.Mock(return_value=_testport)
arglist = [
@@ -1809,7 +1821,7 @@ class TestSetPort(TestPort):
self.assertIsNone(result)
def test_set_port_data_plane_status(self):
- _testport = network_fakes.FakePort.create_one_port(
+ _testport = network_fakes.create_one_port(
{'data_plane_status': None})
self.network.find_port = mock.Mock(return_value=_testport)
arglist = [
@@ -1900,7 +1912,7 @@ class TestSetPort(TestPort):
class TestShowPort(TestPort):
# The port to show.
- _port = network_fakes.FakePort.create_one_port()
+ _port = network_fakes.create_one_port()
columns, data = TestPort._get_common_cols_data(_port)
def setUp(self):
@@ -1932,7 +1944,7 @@ class TestShowPort(TestPort):
self.network.find_port.assert_called_once_with(
self._port.name, ignore_missing=False)
- self.assertEqual(self.columns, columns)
+ self.assertEqual(set(self.columns), set(columns))
self.assertCountEqual(self.data, data)
@@ -1940,7 +1952,7 @@ class TestUnsetPort(TestPort):
def setUp(self):
super(TestUnsetPort, self).setUp()
- self._testport = network_fakes.FakePort.create_one_port(
+ self._testport = network_fakes.create_one_port(
{'fixed_ips': [{'subnet_id': '042eb10a-3a18-4658-ab-cf47c8d03152',
'ip_address': '0.0.0.1'},
{'subnet_id': '042eb10a-3a18-4658-ab-cf47c8d03152',
@@ -2024,7 +2036,7 @@ class TestUnsetPort(TestPort):
def test_unset_security_group(self):
_fake_sg1 = network_fakes.FakeSecurityGroup.create_one_security_group()
_fake_sg2 = network_fakes.FakeSecurityGroup.create_one_security_group()
- _fake_port = network_fakes.FakePort.create_one_port(
+ _fake_port = network_fakes.create_one_port(
{'security_group_ids': [_fake_sg1.id, _fake_sg2.id]})
self.network.find_port = mock.Mock(return_value=_fake_port)
self.network.find_security_group = mock.Mock(return_value=_fake_sg2)
@@ -2049,7 +2061,7 @@ class TestUnsetPort(TestPort):
def test_unset_port_security_group_not_existent(self):
_fake_sg1 = network_fakes.FakeSecurityGroup.create_one_security_group()
_fake_sg2 = network_fakes.FakeSecurityGroup.create_one_security_group()
- _fake_port = network_fakes.FakePort.create_one_port(
+ _fake_port = network_fakes.create_one_port(
{'security_group_ids': [_fake_sg1.id]})
self.network.find_security_group = mock.Mock(return_value=_fake_sg2)
arglist = [
@@ -2066,7 +2078,7 @@ class TestUnsetPort(TestPort):
parsed_args)
def test_unset_port_allowed_address_pair(self):
- _fake_port = network_fakes.FakePort.create_one_port(
+ _fake_port = network_fakes.create_one_port(
{'allowed_address_pairs': [{'ip_address': '192.168.1.123'}]})
self.network.find_port = mock.Mock(return_value=_fake_port)
arglist = [
@@ -2088,7 +2100,7 @@ class TestUnsetPort(TestPort):
self.assertIsNone(result)
def test_unset_port_allowed_address_pair_not_existent(self):
- _fake_port = network_fakes.FakePort.create_one_port(
+ _fake_port = network_fakes.create_one_port(
{'allowed_address_pairs': [{'ip_address': '192.168.1.123'}]})
self.network.find_port = mock.Mock(return_value=_fake_port)
arglist = [
@@ -2105,7 +2117,7 @@ class TestUnsetPort(TestPort):
parsed_args)
def test_unset_port_data_plane_status(self):
- _fake_port = network_fakes.FakePort.create_one_port(
+ _fake_port = network_fakes.create_one_port(
{'data_plane_status': 'ACTIVE'})
self.network.find_port = mock.Mock(return_value=_fake_port)
arglist = [
@@ -2156,7 +2168,7 @@ class TestUnsetPort(TestPort):
self._test_unset_tags(with_tags=False)
def test_unset_numa_affinity_policy(self):
- _fake_port = network_fakes.FakePort.create_one_port(
+ _fake_port = network_fakes.create_one_port(
{'numa_affinity_policy': 'required'})
self.network.find_port = mock.Mock(return_value=_fake_port)
arglist = [
diff --git a/openstackclient/tests/unit/network/v2/test_router.py b/openstackclient/tests/unit/network/v2/test_router.py
index 14840e1d..fb9673cd 100644
--- a/openstackclient/tests/unit/network/v2/test_router.py
+++ b/openstackclient/tests/unit/network/v2/test_router.py
@@ -36,7 +36,7 @@ class TestRouter(network_fakes.TestNetworkV2):
class TestAddPortToRouter(TestRouter):
'''Add port to Router '''
- _port = network_fakes.FakePort.create_one_port()
+ _port = network_fakes.create_one_port()
_router = network_fakes.FakeRouter.create_one_router(
attrs={'port': _port.id})
@@ -512,8 +512,7 @@ class TestListRouter(TestRouter):
self.network.routers = mock.Mock(return_value=self.routers)
self.network.find_extension = mock.Mock(return_value=self._extensions)
self.network.find_router = mock.Mock(return_value=self.routers[0])
- self._testagent = \
- network_fakes.FakeNetworkAgent.create_one_network_agent()
+ self._testagent = network_fakes.create_one_network_agent()
self.network.get_agent = mock.Mock(return_value=self._testagent)
self.network.get_router = mock.Mock(return_value=self.routers[0])
@@ -739,7 +738,7 @@ class TestListRouter(TestRouter):
class TestRemovePortFromRouter(TestRouter):
'''Remove port from a Router '''
- _port = network_fakes.FakePort.create_one_port()
+ _port = network_fakes.create_one_port()
_router = network_fakes.FakeRouter.create_one_router(
attrs={'port': _port.id})
@@ -1364,7 +1363,7 @@ class TestShowRouter(TestRouter):
# The router to set.
_router = network_fakes.FakeRouter.create_one_router()
- _port = network_fakes.FakePort.create_one_port({
+ _port = network_fakes.create_one_port({
'device_owner': 'network:router_interface',
'device_id': _router.id
})
diff --git a/openstackclient/tests/unit/network/v2/test_subnet.py b/openstackclient/tests/unit/network/v2/test_subnet.py
index 6b3ab2cc..7aaa583d 100644
--- a/openstackclient/tests/unit/network/v2/test_subnet.py
+++ b/openstackclient/tests/unit/network/v2/test_subnet.py
@@ -918,7 +918,7 @@ class TestListSubnet(TestSubnet):
self.network.subnets.assert_called_once_with(**filters)
self.assertEqual(self.columns, columns)
- self.assertItemsEqual(self.data, list(data))
+ self.assertCountEqual(self.data, list(data))
def test_subnet_list_subnetpool_by_id(self):
subnet_pool = network_fakes.FakeSubnetPool.create_one_subnet_pool()
@@ -939,7 +939,7 @@ class TestListSubnet(TestSubnet):
self.network.subnets.assert_called_once_with(**filters)
self.assertEqual(self.columns, columns)
- self.assertItemsEqual(self.data, list(data))
+ self.assertCountEqual(self.data, list(data))
def test_list_with_tag_options(self):
arglist = [
diff --git a/openstackclient/tests/unit/volume/v1/fakes.py b/openstackclient/tests/unit/volume/v1/fakes.py
index 438a60ad..76b208b2 100644
--- a/openstackclient/tests/unit/volume/v1/fakes.py
+++ b/openstackclient/tests/unit/volume/v1/fakes.py
@@ -23,336 +23,7 @@ from openstackclient.tests.unit.identity.v2_0 import fakes as identity_fakes
from openstackclient.tests.unit import utils
-class FakeTransfer(object):
- """Fake one or more Transfer."""
-
- @staticmethod
- def create_one_transfer(attrs=None):
- """Create a fake transfer.
-
- :param Dictionary attrs:
- A dictionary with all attributes of Transfer Request
- :return:
- A FakeResource object with volume_id, name, id.
- """
- # Set default attribute
- transfer_info = {
- 'volume_id': 'volume-id-' + uuid.uuid4().hex,
- 'name': 'fake_transfer_name',
- 'id': 'id-' + uuid.uuid4().hex,
- 'links': 'links-' + uuid.uuid4().hex,
- }
-
- # Overwrite default attributes if there are some attributes set
- attrs = attrs or {}
-
- transfer_info.update(attrs)
-
- transfer = fakes.FakeResource(
- None,
- transfer_info,
- loaded=True)
-
- return transfer
-
- @staticmethod
- def create_transfers(attrs=None, count=2):
- """Create multiple fake transfers.
-
- :param Dictionary attrs:
- A dictionary with all attributes of transfer
- :param Integer count:
- The number of transfers to be faked
- :return:
- A list of FakeResource objects
- """
- transfers = []
- for n in range(0, count):
- transfers.append(FakeTransfer.create_one_transfer(attrs))
-
- return transfers
-
- @staticmethod
- def get_transfers(transfers=None, count=2):
- """Get an iterable MagicMock object with a list of faked transfers.
-
- If transfers list is provided, then initialize the Mock object with the
- list. Otherwise create one.
-
- :param List transfers:
- A list of FakeResource objects faking transfers
- :param Integer count:
- The number of transfers to be faked
- :return
- An iterable Mock object with side_effect set to a list of faked
- transfers
- """
- if transfers is None:
- transfers = FakeTransfer.create_transfers(count)
-
- return mock.Mock(side_effect=transfers)
-
-
-class FakeService(object):
- """Fake one or more Services."""
-
- @staticmethod
- def create_one_service(attrs=None):
- """Create a fake service.
-
- :param Dictionary attrs:
- A dictionary with all attributes of service
- :return:
- A FakeResource object with host, status, etc.
- """
- # Set default attribute
- service_info = {
- 'host': 'host_test',
- 'binary': 'cinder_test',
- 'status': 'enabled',
- 'disabled_reason': 'LongHoliday-GoldenWeek',
- 'zone': 'fake_zone',
- 'updated_at': 'fake_date',
- 'state': 'fake_state',
- }
-
- # Overwrite default attributes if there are some attributes set
- attrs = attrs or {}
-
- service_info.update(attrs)
-
- service = fakes.FakeResource(
- None,
- service_info,
- loaded=True)
-
- return service
-
- @staticmethod
- def create_services(attrs=None, count=2):
- """Create multiple fake services.
-
- :param Dictionary attrs:
- A dictionary with all attributes of service
- :param Integer count:
- The number of services to be faked
- :return:
- A list of FakeResource objects
- """
- services = []
- for n in range(0, count):
- services.append(FakeService.create_one_service(attrs))
-
- return services
-
- @staticmethod
- def get_services(services=None, count=2):
- """Get an iterable MagicMock object with a list of faked services.
-
- If services list is provided, then initialize the Mock object with the
- list. Otherwise create one.
-
- :param List services:
- A list of FakeResource objects faking services
- :param Integer count:
- The number of services to be faked
- :return
- An iterable Mock object with side_effect set to a list of faked
- services
- """
- if services is None:
- services = FakeService.create_services(count)
-
- return mock.Mock(side_effect=services)
-
-
-class FakeQos(object):
- """Fake one or more Qos specification."""
-
- @staticmethod
- def create_one_qos(attrs=None):
- """Create a fake Qos specification.
-
- :param Dictionary attrs:
- A dictionary with all attributes
- :return:
- A FakeResource object with id, name, consumer, etc.
- """
- attrs = attrs or {}
-
- # Set default attributes.
- qos_info = {
- "id": 'qos-id-' + uuid.uuid4().hex,
- "name": 'qos-name-' + uuid.uuid4().hex,
- "consumer": 'front-end',
- "specs": {"foo": "bar", "iops": "9001"},
- }
-
- # Overwrite default attributes.
- qos_info.update(attrs)
-
- qos = fakes.FakeResource(
- info=copy.deepcopy(qos_info),
- loaded=True)
- return qos
-
- @staticmethod
- def create_one_qos_association(attrs=None):
- """Create a fake Qos specification association.
-
- :param Dictionary attrs:
- A dictionary with all attributes
- :return:
- A FakeResource object with id, name, association_type, etc.
- """
- attrs = attrs or {}
-
- # Set default attributes.
- qos_association_info = {
- "id": 'type-id-' + uuid.uuid4().hex,
- "name": 'type-name-' + uuid.uuid4().hex,
- "association_type": 'volume_type',
- }
-
- # Overwrite default attributes.
- qos_association_info.update(attrs)
-
- qos_association = fakes.FakeResource(
- info=copy.deepcopy(qos_association_info),
- loaded=True)
- return qos_association
-
- @staticmethod
- def create_qoses(attrs=None, count=2):
- """Create multiple fake Qos specifications.
-
- :param Dictionary attrs:
- A dictionary with all attributes
- :param int count:
- The number of Qos specifications to fake
- :return:
- A list of FakeResource objects faking the Qos specifications
- """
- qoses = []
- for i in range(0, count):
- qos = FakeQos.create_one_qos(attrs)
- qoses.append(qos)
-
- return qoses
-
- @staticmethod
- def get_qoses(qoses=None, count=2):
- """Get an iterable MagicMock object with a list of faked qoses.
-
- If qoses list is provided, then initialize the Mock object with the
- list. Otherwise create one.
-
- :param List volumes:
- A list of FakeResource objects faking qoses
- :param Integer count:
- The number of qoses to be faked
- :return
- An iterable Mock object with side_effect set to a list of faked
- qoses
- """
- if qoses is None:
- qoses = FakeQos.create_qoses(count)
-
- return mock.Mock(side_effect=qoses)
-
-
-class FakeVolume(object):
- """Fake one or more volumes."""
-
- @staticmethod
- def create_one_volume(attrs=None):
- """Create a fake volume.
-
- :param Dictionary attrs:
- A dictionary with all attributes of volume
- :return:
- A FakeResource object with id, name, status, etc.
- """
- attrs = attrs or {}
-
- # Set default attribute
- volume_info = {
- 'id': 'volume-id' + uuid.uuid4().hex,
- 'display_name': 'volume-name' + uuid.uuid4().hex,
- 'display_description': 'description' + uuid.uuid4().hex,
- 'status': 'available',
- 'size': 10,
- 'volume_type':
- random.choice(['fake_lvmdriver-1', 'fake_lvmdriver-2']),
- 'bootable': 'true',
- 'metadata': {
- 'key' + uuid.uuid4().hex: 'val' + uuid.uuid4().hex,
- 'key' + uuid.uuid4().hex: 'val' + uuid.uuid4().hex,
- 'key' + uuid.uuid4().hex: 'val' + uuid.uuid4().hex},
- 'snapshot_id': 'snapshot-id-' + uuid.uuid4().hex,
- 'availability_zone': 'zone' + uuid.uuid4().hex,
- 'attachments': [{
- 'device': '/dev/' + uuid.uuid4().hex,
- 'server_id': uuid.uuid4().hex,
- }, ],
- 'created_at': 'time-' + uuid.uuid4().hex,
- }
-
- # Overwrite default attributes if there are some attributes set
- volume_info.update(attrs)
-
- volume = fakes.FakeResource(
- None,
- volume_info,
- loaded=True)
- return volume
-
- @staticmethod
- def create_volumes(attrs=None, count=2):
- """Create multiple fake volumes.
-
- :param Dictionary attrs:
- A dictionary with all attributes of volume
- :param Integer count:
- The number of volumes to be faked
- :return:
- A list of FakeResource objects
- """
- volumes = []
- for n in range(0, count):
- volumes.append(FakeVolume.create_one_volume(attrs))
-
- return volumes
-
- @staticmethod
- def get_volumes(volumes=None, count=2):
- """Get an iterable MagicMock object with a list of faked volumes.
-
- If volumes list is provided, then initialize the Mock object with the
- list. Otherwise create one.
-
- :param List volumes:
- A list of FakeResource objects faking volumes
- :param Integer count:
- The number of volumes to be faked
- :return
- An iterable Mock object with side_effect set to a list of faked
- volumes
- """
- if volumes is None:
- volumes = FakeVolume.create_volumes(count)
-
- return mock.Mock(side_effect=volumes)
-
-
-class FakeImagev1Client(object):
-
- def __init__(self, **kwargs):
- self.images = mock.Mock()
-
-
-class FakeVolumev1Client(object):
-
+class FakeVolumev1Client:
def __init__(self, **kwargs):
self.volumes = mock.Mock()
self.volumes.resource_class = fakes.FakeResource(None, {})
@@ -365,8 +36,9 @@ class FakeVolumev1Client(object):
self.volume_types = mock.Mock()
self.volume_types.resource_class = fakes.FakeResource(None, {})
self.volume_encryption_types = mock.Mock()
- self.volume_encryption_types.resource_class = (
- fakes.FakeResource(None, {}))
+ self.volume_encryption_types.resource_class = fakes.FakeResource(
+ None, {}
+ )
self.transfers = mock.Mock()
self.transfers.resource_class = fakes.FakeResource(None, {})
self.volume_snapshots = mock.Mock()
@@ -380,9 +52,8 @@ class FakeVolumev1Client(object):
class TestVolumev1(utils.TestCommand):
-
def setUp(self):
- super(TestVolumev1, self).setUp()
+ super().setUp()
self.app.client_manager.volume = FakeVolumev1Client(
endpoint=fakes.AUTH_URL,
@@ -394,261 +65,547 @@ class TestVolumev1(utils.TestCommand):
token=fakes.AUTH_TOKEN,
)
- self.app.client_manager.image = FakeImagev1Client(
+ # avoid circular imports
+ from openstackclient.tests.unit.image.v1 import fakes as image_fakes
+
+ self.app.client_manager.image = image_fakes.FakeImagev1Client(
endpoint=fakes.AUTH_URL,
token=fakes.AUTH_TOKEN,
)
-class FakeVolumeType(object):
- """Fake one or more type."""
-
- @staticmethod
- def create_one_volume_type(attrs=None, methods=None):
- """Create a fake volume type.
-
- :param Dictionary attrs:
- A dictionary with all attributes
- :param Dictionary methods:
- A dictionary with all methods
- :return:
- A FakeResource object with id, name, description, etc.
- """
- attrs = attrs or {}
- methods = methods or {}
-
- # Set default attributes.
- volume_type_info = {
- "id": 'type-id-' + uuid.uuid4().hex,
- "name": 'type-name-' + uuid.uuid4().hex,
- "description": 'type-description-' + uuid.uuid4().hex,
- "extra_specs": {"foo": "bar"},
- "is_public": True,
- }
-
- # Overwrite default attributes.
- volume_type_info.update(attrs)
-
- volume_type = fakes.FakeResource(
- info=copy.deepcopy(volume_type_info),
- methods=methods,
- loaded=True)
- return volume_type
-
- @staticmethod
- def create_volume_types(attrs=None, count=2):
- """Create multiple fake types.
-
- :param Dictionary attrs:
- A dictionary with all attributes
- :param int count:
- The number of types to fake
- :return:
- A list of FakeResource objects faking the types
- """
- volume_types = []
- for i in range(0, count):
- volume_type = FakeVolumeType.create_one_volume_type(attrs)
- volume_types.append(volume_type)
-
- return volume_types
-
- @staticmethod
- def get_volume_types(volume_types=None, count=2):
- """Get an iterable MagicMock object with a list of faked types.
-
- If types list is provided, then initialize the Mock object with the
- list. Otherwise create one.
-
- :param List volume_types:
- A list of FakeResource objects faking types
- :param Integer count:
- The number of types to be faked
- :return
- An iterable Mock object with side_effect set to a list of faked
- types
- """
- if volume_types is None:
- volume_types = FakeVolumeType.create_volume_types(count)
-
- return mock.Mock(side_effect=volume_types)
-
- @staticmethod
- def create_one_encryption_volume_type(attrs=None):
- """Create a fake encryption volume type.
-
- :param Dictionary attrs:
- A dictionary with all attributes
- :return:
- A FakeResource object with volume_type_id etc.
- """
- attrs = attrs or {}
-
- # Set default attributes.
- encryption_info = {
- "volume_type_id": 'type-id-' + uuid.uuid4().hex,
- 'provider': 'LuksEncryptor',
- 'cipher': None,
- 'key_size': None,
- 'control_location': 'front-end',
- }
-
- # Overwrite default attributes.
- encryption_info.update(attrs)
-
- encryption_type = fakes.FakeResource(
- info=copy.deepcopy(encryption_info),
- loaded=True)
- return encryption_type
-
-
-class FakeSnapshot(object):
- """Fake one or more snapshot."""
-
- @staticmethod
- def create_one_snapshot(attrs=None):
- """Create a fake snapshot.
-
- :param Dictionary attrs:
- A dictionary with all attributes
- :return:
- A FakeResource object with id, name, description, etc.
- """
- attrs = attrs or {}
-
- # Set default attributes.
- snapshot_info = {
- "id": 'snapshot-id-' + uuid.uuid4().hex,
- "display_name": 'snapshot-name-' + uuid.uuid4().hex,
- "display_description": 'snapshot-description-' + uuid.uuid4().hex,
- "size": 10,
- "status": "available",
- "metadata": {"foo": "bar"},
- "created_at": "2015-06-03T18:49:19.000000",
- "volume_id": 'vloume-id-' + uuid.uuid4().hex,
- }
-
- # Overwrite default attributes.
- snapshot_info.update(attrs)
-
- snapshot_method = {'update': None}
-
- snapshot = fakes.FakeResource(
- info=copy.deepcopy(snapshot_info),
- methods=copy.deepcopy(snapshot_method),
- loaded=True)
- return snapshot
-
- @staticmethod
- def create_snapshots(attrs=None, count=2):
- """Create multiple fake snapshots.
-
- :param Dictionary attrs:
- A dictionary with all attributes
- :param int count:
- The number of snapshots to fake
- :return:
- A list of FakeResource objects faking the snapshots
- """
- snapshots = []
- for i in range(0, count):
- snapshot = FakeSnapshot.create_one_snapshot(attrs)
- snapshots.append(snapshot)
-
- return snapshots
-
- @staticmethod
- def get_snapshots(snapshots=None, count=2):
- """Get an iterable MagicMock object with a list of faked snapshots.
-
- If snapshots list is provided, then initialize the Mock object with the
- list. Otherwise create one.
-
- :param List volumes:
- A list of FakeResource objects faking snapshots
- :param Integer count:
- The number of snapshots to be faked
- :return
- An iterable Mock object with side_effect set to a list of faked
- snapshots
- """
- if snapshots is None:
- snapshots = FakeSnapshot.create_snapshots(count)
-
- return mock.Mock(side_effect=snapshots)
-
-
-class FakeBackup(object):
- """Fake one or more backup."""
-
- @staticmethod
- def create_one_backup(attrs=None):
- """Create a fake backup.
-
- :param Dictionary attrs:
- A dictionary with all attributes
- :return:
- A FakeResource object with id, name, volume_id, etc.
- """
- attrs = attrs or {}
-
- # Set default attributes.
- backup_info = {
- "id": 'backup-id-' + uuid.uuid4().hex,
- "name": 'backup-name-' + uuid.uuid4().hex,
- "volume_id": 'volume-id-' + uuid.uuid4().hex,
- "snapshot_id": 'snapshot-id' + uuid.uuid4().hex,
- "description": 'description-' + uuid.uuid4().hex,
- "object_count": None,
- "container": 'container-' + uuid.uuid4().hex,
- "size": random.randint(1, 20),
- "status": "error",
- "availability_zone": 'zone' + uuid.uuid4().hex,
- "links": 'links-' + uuid.uuid4().hex,
- }
-
- # Overwrite default attributes.
- backup_info.update(attrs)
-
- backup = fakes.FakeResource(
- info=copy.deepcopy(backup_info),
- loaded=True)
- return backup
-
- @staticmethod
- def create_backups(attrs=None, count=2):
- """Create multiple fake backups.
-
- :param Dictionary attrs:
- A dictionary with all attributes
- :param int count:
- The number of backups to fake
- :return:
- A list of FakeResource objects faking the backups
- """
- backups = []
- for i in range(0, count):
- backup = FakeBackup.create_one_backup(attrs)
- backups.append(backup)
-
- return backups
-
- @staticmethod
- def get_backups(backups=None, count=2):
- """Get an iterable MagicMock object with a list of faked backups.
-
- If backups list is provided, then initialize the Mock object with the
- list. Otherwise create one.
-
- :param List volumes:
- A list of FakeResource objects faking backups
- :param Integer count:
- The number of backups to be faked
- :return
- An iterable Mock object with side_effect set to a list of faked
- backups
- """
- if backups is None:
- backups = FakeBackup.create_backups(count)
-
- return mock.Mock(side_effect=backups)
+def create_one_transfer(attrs=None):
+ """Create a fake transfer.
+
+ :param Dictionary attrs:
+ A dictionary with all attributes of Transfer Request
+ :return:
+ A FakeResource object with volume_id, name, id.
+ """
+ # Set default attribute
+ transfer_info = {
+ 'volume_id': 'volume-id-' + uuid.uuid4().hex,
+ 'name': 'fake_transfer_name',
+ 'id': 'id-' + uuid.uuid4().hex,
+ 'links': 'links-' + uuid.uuid4().hex,
+ }
+
+ # Overwrite default attributes if there are some attributes set
+ attrs = attrs or {}
+
+ transfer_info.update(attrs)
+
+ transfer = fakes.FakeResource(None, transfer_info, loaded=True)
+
+ return transfer
+
+
+def create_transfers(attrs=None, count=2):
+ """Create multiple fake transfers.
+
+ :param Dictionary attrs:
+ A dictionary with all attributes of transfer
+ :param Integer count:
+ The number of transfers to be faked
+ :return:
+ A list of FakeResource objects
+ """
+ transfers = []
+ for n in range(0, count):
+ transfers.append(create_one_transfer(attrs))
+
+ return transfers
+
+
+def get_transfers(transfers=None, count=2):
+ """Get an iterable MagicMock object with a list of faked transfers.
+
+ If transfers list is provided, then initialize the Mock object with the
+ list. Otherwise create one.
+
+ :param List transfers:
+ A list of FakeResource objects faking transfers
+ :param Integer count:
+ The number of transfers to be faked
+ :return:
+ An iterable Mock object with side_effect set to a list of faked
+ transfers
+ """
+ if transfers is None:
+ transfers = create_transfers(count)
+
+ return mock.Mock(side_effect=transfers)
+
+
+def create_one_service(attrs=None):
+ """Create a fake service.
+
+ :param Dictionary attrs:
+ A dictionary with all attributes of service
+ :return:
+ A FakeResource object with host, status, etc.
+ """
+ # Set default attribute
+ service_info = {
+ 'host': 'host_test',
+ 'binary': 'cinder_test',
+ 'status': 'enabled',
+ 'disabled_reason': 'LongHoliday-GoldenWeek',
+ 'zone': 'fake_zone',
+ 'updated_at': 'fake_date',
+ 'state': 'fake_state',
+ }
+
+ # Overwrite default attributes if there are some attributes set
+ attrs = attrs or {}
+
+ service_info.update(attrs)
+
+ service = fakes.FakeResource(None, service_info, loaded=True)
+
+ return service
+
+
+def create_services(attrs=None, count=2):
+ """Create multiple fake services.
+
+ :param Dictionary attrs:
+ A dictionary with all attributes of service
+ :param Integer count:
+ The number of services to be faked
+ :return:
+ A list of FakeResource objects
+ """
+ services = []
+ for n in range(0, count):
+ services.append(create_one_service(attrs))
+
+ return services
+
+
+def get_services(services=None, count=2):
+ """Get an iterable MagicMock object with a list of faked services.
+
+ If services list is provided, then initialize the Mock object with the
+ list. Otherwise create one.
+
+ :param List services:
+ A list of FakeResource objects faking services
+ :param Integer count:
+ The number of services to be faked
+ :return:
+ An iterable Mock object with side_effect set to a list of faked
+ services
+ """
+ if services is None:
+ services = create_services(count)
+
+ return mock.Mock(side_effect=services)
+
+
+def create_one_qos(attrs=None):
+ """Create a fake Qos specification.
+
+ :param Dictionary attrs:
+ A dictionary with all attributes
+ :return:
+ A FakeResource object with id, name, consumer, etc.
+ """
+ attrs = attrs or {}
+
+ # Set default attributes.
+ qos_info = {
+ "id": 'qos-id-' + uuid.uuid4().hex,
+ "name": 'qos-name-' + uuid.uuid4().hex,
+ "consumer": 'front-end',
+ "specs": {"foo": "bar", "iops": "9001"},
+ }
+
+ # Overwrite default attributes.
+ qos_info.update(attrs)
+
+ qos = fakes.FakeResource(info=copy.deepcopy(qos_info), loaded=True)
+ return qos
+
+
+def create_one_qos_association(attrs=None):
+ """Create a fake Qos specification association.
+
+ :param Dictionary attrs:
+ A dictionary with all attributes
+ :return:
+ A FakeResource object with id, name, association_type, etc.
+ """
+ attrs = attrs or {}
+
+ # Set default attributes.
+ qos_association_info = {
+ "id": 'type-id-' + uuid.uuid4().hex,
+ "name": 'type-name-' + uuid.uuid4().hex,
+ "association_type": 'volume_type',
+ }
+
+ # Overwrite default attributes.
+ qos_association_info.update(attrs)
+
+ qos_association = fakes.FakeResource(
+ info=copy.deepcopy(qos_association_info), loaded=True
+ )
+ return qos_association
+
+
+def create_qoses(attrs=None, count=2):
+ """Create multiple fake Qos specifications.
+
+ :param Dictionary attrs:
+ A dictionary with all attributes
+ :param int count:
+ The number of Qos specifications to fake
+ :return:
+ A list of FakeResource objects faking the Qos specifications
+ """
+ qoses = []
+ for i in range(0, count):
+ qos = create_one_qos(attrs)
+ qoses.append(qos)
+
+ return qoses
+
+
+def get_qoses(qoses=None, count=2):
+ """Get an iterable MagicMock object with a list of faked qoses.
+
+ If qoses list is provided, then initialize the Mock object with the
+ list. Otherwise create one.
+
+ :param List qoses:
+ A list of FakeResource objects faking qoses
+ :param Integer count:
+ The number of qoses to be faked
+ :return:
+ An iterable Mock object with side_effect set to a list of faked
+ qoses
+ """
+ if qoses is None:
+ qoses = create_qoses(count)
+
+ return mock.Mock(side_effect=qoses)
+
+
+def create_one_volume(attrs=None):
+ """Create a fake volume.
+
+ :param Dictionary attrs:
+ A dictionary with all attributes of volume
+ :return:
+ A FakeResource object with id, name, status, etc.
+ """
+ attrs = attrs or {}
+
+ # Set default attribute
+ volume_info = {
+ 'id': 'volume-id' + uuid.uuid4().hex,
+ 'display_name': 'volume-name' + uuid.uuid4().hex,
+ 'display_description': 'description' + uuid.uuid4().hex,
+ 'status': 'available',
+ 'size': 10,
+ 'volume_type': random.choice(['fake_lvmdriver-1', 'fake_lvmdriver-2']),
+ 'bootable': 'true',
+ 'metadata': {
+ 'key' + uuid.uuid4().hex: 'val' + uuid.uuid4().hex,
+ 'key' + uuid.uuid4().hex: 'val' + uuid.uuid4().hex,
+ 'key' + uuid.uuid4().hex: 'val' + uuid.uuid4().hex,
+ },
+ 'snapshot_id': 'snapshot-id-' + uuid.uuid4().hex,
+ 'availability_zone': 'zone' + uuid.uuid4().hex,
+ 'attachments': [
+ {
+ 'device': '/dev/' + uuid.uuid4().hex,
+ 'server_id': uuid.uuid4().hex,
+ },
+ ],
+ 'created_at': 'time-' + uuid.uuid4().hex,
+ }
+
+ # Overwrite default attributes if there are some attributes set
+ volume_info.update(attrs)
+
+ volume = fakes.FakeResource(None, volume_info, loaded=True)
+ return volume
+
+
+def create_volumes(attrs=None, count=2):
+ """Create multiple fake volumes.
+
+ :param Dictionary attrs:
+ A dictionary with all attributes of volume
+ :param Integer count:
+ The number of volumes to be faked
+ :return:
+ A list of FakeResource objects
+ """
+ volumes = []
+ for n in range(0, count):
+ volumes.append(create_one_volume(attrs))
+
+ return volumes
+
+
+def get_volumes(volumes=None, count=2):
+ """Get an iterable MagicMock object with a list of faked volumes.
+
+ If volumes list is provided, then initialize the Mock object with the
+ list. Otherwise create one.
+
+ :param List volumes:
+ A list of FakeResource objects faking volumes
+ :param Integer count:
+ The number of volumes to be faked
+ :return:
+ An iterable Mock object with side_effect set to a list of faked
+ volumes
+ """
+ if volumes is None:
+ volumes = create_volumes(count)
+
+ return mock.Mock(side_effect=volumes)
+
+
+def create_one_volume_type(attrs=None, methods=None):
+ """Create a fake volume type.
+
+ :param Dictionary attrs:
+ A dictionary with all attributes
+ :param Dictionary methods:
+ A dictionary with all methods
+ :return:
+ A FakeResource object with id, name, description, etc.
+ """
+ attrs = attrs or {}
+ methods = methods or {}
+
+ # Set default attributes.
+ volume_type_info = {
+ "id": 'type-id-' + uuid.uuid4().hex,
+ "name": 'type-name-' + uuid.uuid4().hex,
+ "description": 'type-description-' + uuid.uuid4().hex,
+ "extra_specs": {"foo": "bar"},
+ "is_public": True,
+ }
+
+ # Overwrite default attributes.
+ volume_type_info.update(attrs)
+
+ volume_type = fakes.FakeResource(
+ info=copy.deepcopy(volume_type_info), methods=methods, loaded=True
+ )
+ return volume_type
+
+
+def create_volume_types(attrs=None, count=2):
+ """Create multiple fake types.
+
+ :param Dictionary attrs:
+ A dictionary with all attributes
+ :param int count:
+ The number of types to fake
+ :return:
+ A list of FakeResource objects faking the types
+ """
+ volume_types = []
+ for i in range(0, count):
+ volume_type = create_one_volume_type(attrs)
+ volume_types.append(volume_type)
+
+ return volume_types
+
+
+def get_volume_types(volume_types=None, count=2):
+ """Get an iterable MagicMock object with a list of faked types.
+
+ If types list is provided, then initialize the Mock object with the
+ list. Otherwise create one.
+
+ :param List volume_types:
+ A list of FakeResource objects faking types
+ :param Integer count:
+ The number of types to be faked
+ :return:
+ An iterable Mock object with side_effect set to a list of faked
+ types
+ """
+ if volume_types is None:
+ volume_types = create_volume_types(count)
+
+ return mock.Mock(side_effect=volume_types)
+
+
+def create_one_encryption_volume_type(attrs=None):
+ """Create a fake encryption volume type.
+
+ :param Dictionary attrs:
+ A dictionary with all attributes
+ :return:
+ A FakeResource object with volume_type_id etc.
+ """
+ attrs = attrs or {}
+
+ # Set default attributes.
+ encryption_info = {
+ "volume_type_id": 'type-id-' + uuid.uuid4().hex,
+ 'provider': 'LuksEncryptor',
+ 'cipher': None,
+ 'key_size': None,
+ 'control_location': 'front-end',
+ }
+
+ # Overwrite default attributes.
+ encryption_info.update(attrs)
+
+ encryption_type = fakes.FakeResource(
+ info=copy.deepcopy(encryption_info), loaded=True
+ )
+ return encryption_type
+
+
+def create_one_snapshot(attrs=None):
+ """Create a fake snapshot.
+
+ :param Dictionary attrs:
+ A dictionary with all attributes
+ :return:
+ A FakeResource object with id, name, description, etc.
+ """
+ attrs = attrs or {}
+
+ # Set default attributes.
+ snapshot_info = {
+ "id": 'snapshot-id-' + uuid.uuid4().hex,
+ "display_name": 'snapshot-name-' + uuid.uuid4().hex,
+ "display_description": 'snapshot-description-' + uuid.uuid4().hex,
+ "size": 10,
+ "status": "available",
+ "metadata": {"foo": "bar"},
+ "created_at": "2015-06-03T18:49:19.000000",
+ "volume_id": 'volume-id-' + uuid.uuid4().hex,
+ }
+
+ # Overwrite default attributes.
+ snapshot_info.update(attrs)
+
+ snapshot_method = {'update': None}
+
+ snapshot = fakes.FakeResource(
+ info=copy.deepcopy(snapshot_info),
+ methods=copy.deepcopy(snapshot_method),
+ loaded=True,
+ )
+ return snapshot
+
+
+def create_snapshots(attrs=None, count=2):
+ """Create multiple fake snapshots.
+
+ :param Dictionary attrs:
+ A dictionary with all attributes
+ :param int count:
+ The number of snapshots to fake
+ :return:
+ A list of FakeResource objects faking the snapshots
+ """
+ snapshots = []
+ for i in range(0, count):
+ snapshot = create_one_snapshot(attrs)
+ snapshots.append(snapshot)
+
+ return snapshots
+
+
+def get_snapshots(snapshots=None, count=2):
+ """Get an iterable MagicMock object with a list of faked snapshots.
+
+ If snapshots list is provided, then initialize the Mock object with the
+ list. Otherwise create one.
+
+ :param List snapshots:
+ A list of FakeResource objects faking snapshots
+ :param Integer count:
+ The number of snapshots to be faked
+ :return:
+ An iterable Mock object with side_effect set to a list of faked
+ snapshots
+ """
+ if snapshots is None:
+ snapshots = create_snapshots(count)
+
+ return mock.Mock(side_effect=snapshots)
+
+
+def create_one_backup(attrs=None):
+ """Create a fake backup.
+
+ :param Dictionary attrs:
+ A dictionary with all attributes
+ :return:
+ A FakeResource object with id, name, volume_id, etc.
+ """
+ attrs = attrs or {}
+
+ # Set default attributes.
+ backup_info = {
+ "id": 'backup-id-' + uuid.uuid4().hex,
+ "name": 'backup-name-' + uuid.uuid4().hex,
+ "volume_id": 'volume-id-' + uuid.uuid4().hex,
+ "snapshot_id": 'snapshot-id' + uuid.uuid4().hex,
+ "description": 'description-' + uuid.uuid4().hex,
+ "object_count": None,
+ "container": 'container-' + uuid.uuid4().hex,
+ "size": random.randint(1, 20),
+ "status": "error",
+ "availability_zone": 'zone' + uuid.uuid4().hex,
+ "links": 'links-' + uuid.uuid4().hex,
+ }
+
+ # Overwrite default attributes.
+ backup_info.update(attrs)
+
+ backup = fakes.FakeResource(info=copy.deepcopy(backup_info), loaded=True)
+ return backup
+
+
+def create_backups(attrs=None, count=2):
+ """Create multiple fake backups.
+
+ :param Dictionary attrs:
+ A dictionary with all attributes
+ :param int count:
+ The number of backups to fake
+ :return:
+ A list of FakeResource objects faking the backups
+ """
+ backups = []
+ for i in range(0, count):
+ backup = create_one_backup(attrs)
+ backups.append(backup)
+
+ return backups
+
+
+def get_backups(backups=None, count=2):
+ """Get an iterable MagicMock object with a list of faked backups.
+
+ If backups list is provided, then initialize the Mock object with the
+ list. Otherwise create one.
+
+ :param List backups:
+ A list of FakeResource objects faking backups
+ :param Integer count:
+ The number of backups to be faked
+ :return:
+ An iterable Mock object with side_effect set to a list of faked
+ backups
+ """
+ if backups is None:
+ backups = create_backups(count)
+
+ return mock.Mock(side_effect=backups)
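For reference, a minimal usage sketch (not part of this patch) of the new module-level fake helpers, mirroring the calls that appear in the test hunks below; the attribute override values are illustrative assumptions only:

    from unittest import mock
    from openstackclient.tests.unit.volume.v1 import fakes as volume_fakes

    # Build one fake volume, overriding a default attribute.
    volume = volume_fakes.create_one_volume({'status': 'in-use'})

    # Build two fake QoS specs and wire them into a mocked manager; the
    # get_* helpers return a Mock whose side_effect yields the fakes in order.
    qos_specs = volume_fakes.create_qoses(count=2)
    qos_mock = mock.Mock()
    qos_mock.get = volume_fakes.get_qoses(qos_specs)

    assert qos_mock.get().id == qos_specs[0].id
    assert qos_mock.get().id == qos_specs[1].id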
diff --git a/openstackclient/tests/unit/volume/v1/test_qos_specs.py b/openstackclient/tests/unit/volume/v1/test_qos_specs.py
index d66a8558..f5b35143 100644
--- a/openstackclient/tests/unit/volume/v1/test_qos_specs.py
+++ b/openstackclient/tests/unit/volume/v1/test_qos_specs.py
@@ -28,7 +28,7 @@ from openstackclient.volume.v1 import qos_specs
class TestQos(volume_fakes.TestVolumev1):
def setUp(self):
- super(TestQos, self).setUp()
+ super().setUp()
self.qos_mock = self.app.client_manager.volume.qos_specs
self.qos_mock.reset_mock()
@@ -39,11 +39,11 @@ class TestQos(volume_fakes.TestVolumev1):
class TestQosAssociate(TestQos):
- volume_type = volume_fakes.FakeVolumeType.create_one_volume_type()
- qos_spec = volume_fakes.FakeQos.create_one_qos()
+ volume_type = volume_fakes.create_one_volume_type()
+ qos_spec = volume_fakes.create_one_qos()
def setUp(self):
- super(TestQosAssociate, self).setUp()
+ super().setUp()
self.qos_mock.get.return_value = self.qos_spec
self.types_mock.get.return_value = self.volume_type
@@ -80,8 +80,8 @@ class TestQosCreate(TestQos):
)
def setUp(self):
- super(TestQosCreate, self).setUp()
- self.new_qos_spec = volume_fakes.FakeQos.create_one_qos()
+ super().setUp()
+ self.new_qos_spec = volume_fakes.create_one_qos()
self.datalist = (
self.new_qos_spec.consumer,
self.new_qos_spec.id,
@@ -160,13 +160,13 @@ class TestQosCreate(TestQos):
class TestQosDelete(TestQos):
- qos_specs = volume_fakes.FakeQos.create_qoses(count=2)
+ qos_specs = volume_fakes.create_qoses(count=2)
def setUp(self):
- super(TestQosDelete, self).setUp()
+ super().setUp()
self.qos_mock.get = (
- volume_fakes.FakeQos.get_qoses(self.qos_specs))
+ volume_fakes.get_qoses(self.qos_specs))
# Get the command object to test
self.cmd = qos_specs.DeleteQos(self.app, None)
@@ -263,11 +263,11 @@ class TestQosDelete(TestQos):
class TestQosDisassociate(TestQos):
- volume_type = volume_fakes.FakeVolumeType.create_one_volume_type()
- qos_spec = volume_fakes.FakeQos.create_one_qos()
+ volume_type = volume_fakes.create_one_volume_type()
+ qos_spec = volume_fakes.create_one_qos()
def setUp(self):
- super(TestQosDisassociate, self).setUp()
+ super().setUp()
self.qos_mock.get.return_value = self.qos_spec
self.types_mock.get.return_value = self.volume_type
@@ -311,8 +311,8 @@ class TestQosDisassociate(TestQos):
class TestQosList(TestQos):
- qos_specs = volume_fakes.FakeQos.create_qoses(count=2)
- qos_association = volume_fakes.FakeQos.create_one_qos_association()
+ qos_specs = volume_fakes.create_qoses(count=2)
+ qos_association = volume_fakes.create_one_qos_association()
columns = (
'ID',
@@ -332,7 +332,7 @@ class TestQosList(TestQos):
))
def setUp(self):
- super(TestQosList, self).setUp()
+ super().setUp()
self.qos_mock.list.return_value = self.qos_specs
self.qos_mock.get_associations.return_value = [self.qos_association]
@@ -382,10 +382,10 @@ class TestQosList(TestQos):
class TestQosSet(TestQos):
- qos_spec = volume_fakes.FakeQos.create_one_qos()
+ qos_spec = volume_fakes.create_one_qos()
def setUp(self):
- super(TestQosSet, self).setUp()
+ super().setUp()
self.qos_mock.get.return_value = self.qos_spec
# Get the command object to test
@@ -414,11 +414,11 @@ class TestQosSet(TestQos):
class TestQosShow(TestQos):
- qos_spec = volume_fakes.FakeQos.create_one_qos()
- qos_association = volume_fakes.FakeQos.create_one_qos_association()
+ qos_spec = volume_fakes.create_one_qos()
+ qos_association = volume_fakes.create_one_qos_association()
def setUp(self):
- super(TestQosShow, self).setUp()
+ super().setUp()
self.qos_mock.get.return_value = self.qos_spec
self.qos_mock.get_associations.return_value = [self.qos_association]
# Get the command object to test
@@ -459,10 +459,10 @@ class TestQosShow(TestQos):
class TestQosUnset(TestQos):
- qos_spec = volume_fakes.FakeQos.create_one_qos()
+ qos_spec = volume_fakes.create_one_qos()
def setUp(self):
- super(TestQosUnset, self).setUp()
+ super().setUp()
self.qos_mock.get.return_value = self.qos_spec
# Get the command object to test
diff --git a/openstackclient/tests/unit/volume/v1/test_service.py b/openstackclient/tests/unit/volume/v1/test_service.py
index 82d21bfc..a199c913 100644
--- a/openstackclient/tests/unit/volume/v1/test_service.py
+++ b/openstackclient/tests/unit/volume/v1/test_service.py
@@ -14,14 +14,14 @@
from osc_lib import exceptions
-from openstackclient.tests.unit.volume.v1 import fakes as service_fakes
+from openstackclient.tests.unit.volume.v1 import fakes as volume_fakes
from openstackclient.volume.v1 import service
-class TestService(service_fakes.TestVolumev1):
+class TestService(volume_fakes.TestVolumev1):
def setUp(self):
- super(TestService, self).setUp()
+ super().setUp()
# Get a shortcut to the ServiceManager Mock
self.service_mock = self.app.client_manager.volume.services
@@ -31,10 +31,10 @@ class TestService(service_fakes.TestVolumev1):
class TestServiceList(TestService):
# The service to be listed
- services = service_fakes.FakeService.create_one_service()
+ services = volume_fakes.create_one_service()
def setUp(self):
- super(TestServiceList, self).setUp()
+ super().setUp()
self.service_mock.list.return_value = [self.services]
@@ -144,10 +144,10 @@ class TestServiceList(TestService):
class TestServiceSet(TestService):
- service = service_fakes.FakeService.create_one_service()
+ service = volume_fakes.create_one_service()
def setUp(self):
- super(TestServiceSet, self).setUp()
+ super().setUp()
self.service_mock.enable.return_value = self.service
self.service_mock.disable.return_value = self.service
diff --git a/openstackclient/tests/unit/volume/v1/test_transfer_request.py b/openstackclient/tests/unit/volume/v1/test_transfer_request.py
index 333bf526..97700fbb 100644
--- a/openstackclient/tests/unit/volume/v1/test_transfer_request.py
+++ b/openstackclient/tests/unit/volume/v1/test_transfer_request.py
@@ -18,14 +18,14 @@ from unittest.mock import call
from osc_lib import exceptions
from osc_lib import utils
-from openstackclient.tests.unit.volume.v1 import fakes as transfer_fakes
+from openstackclient.tests.unit.volume.v1 import fakes as volume_fakes
from openstackclient.volume.v1 import volume_transfer_request
-class TestTransfer(transfer_fakes.TestVolumev1):
+class TestTransfer(volume_fakes.TestVolumev1):
def setUp(self):
- super(TestTransfer, self).setUp()
+ super().setUp()
# Get a shortcut to the TransferManager Mock
self.transfer_mock = self.app.client_manager.volume.transfers
@@ -45,10 +45,9 @@ class TestTransferAccept(TestTransfer):
)
def setUp(self):
- super(TestTransferAccept, self).setUp()
+ super().setUp()
- self.volume_transfer = (
- transfer_fakes.FakeTransfer.create_one_transfer())
+ self.volume_transfer = volume_fakes.create_one_transfer()
self.data = (
self.volume_transfer.id,
self.volume_transfer.name,
@@ -103,7 +102,7 @@ class TestTransferAccept(TestTransfer):
class TestTransferCreate(TestTransfer):
- volume = transfer_fakes.FakeVolume.create_one_volume()
+ volume = volume_fakes.create_one_volume()
columns = (
'auth_key',
@@ -114,12 +113,14 @@ class TestTransferCreate(TestTransfer):
)
def setUp(self):
- super(TestTransferCreate, self).setUp()
-
- self.volume_transfer = transfer_fakes.FakeTransfer.create_one_transfer(
- attrs={'volume_id': self.volume.id,
- 'auth_key': 'key',
- 'created_at': 'time'}
+ super().setUp()
+
+ self.volume_transfer = volume_fakes.create_one_transfer(
+ attrs={
+ 'volume_id': self.volume.id,
+ 'auth_key': 'key',
+ 'created_at': 'time',
+ },
)
self.data = (
self.volume_transfer.auth_key,
@@ -173,13 +174,14 @@ class TestTransferCreate(TestTransfer):
class TestTransferDelete(TestTransfer):
- volume_transfers = transfer_fakes.FakeTransfer.create_transfers(count=2)
+ volume_transfers = volume_fakes.create_transfers(count=2)
def setUp(self):
- super(TestTransferDelete, self).setUp()
+ super().setUp()
- self.transfer_mock.get = (
- transfer_fakes.FakeTransfer.get_transfers(self.volume_transfers))
+ self.transfer_mock.get = volume_fakes.get_transfers(
+ self.volume_transfers,
+ )
self.transfer_mock.delete.return_value = None
# Get the command object to mock
@@ -252,10 +254,10 @@ class TestTransferDelete(TestTransfer):
class TestTransferList(TestTransfer):
# The Transfers to be listed
- volume_transfers = transfer_fakes.FakeTransfer.create_one_transfer()
+ volume_transfers = volume_fakes.create_one_transfer()
def setUp(self):
- super(TestTransferList, self).setUp()
+ super().setUp()
self.transfer_mock.list.return_value = [self.volume_transfers]
@@ -346,11 +348,10 @@ class TestTransferShow(TestTransfer):
)
def setUp(self):
- super(TestTransferShow, self).setUp()
+ super().setUp()
- self.volume_transfer = (
- transfer_fakes.FakeTransfer.create_one_transfer(
- attrs={'created_at': 'time'})
+ self.volume_transfer = volume_fakes.create_one_transfer(
+ attrs={'created_at': 'time'}
)
self.data = (
self.volume_transfer.created_at,
diff --git a/openstackclient/tests/unit/volume/v1/test_type.py b/openstackclient/tests/unit/volume/v1/test_type.py
index ca74c3e6..c8788249 100644
--- a/openstackclient/tests/unit/volume/v1/test_type.py
+++ b/openstackclient/tests/unit/volume/v1/test_type.py
@@ -27,7 +27,7 @@ from openstackclient.volume.v1 import volume_type
class TestType(volume_fakes.TestVolumev1):
def setUp(self):
- super(TestType, self).setUp()
+ super().setUp()
self.types_mock = self.app.client_manager.volume.volume_types
self.types_mock.reset_mock()
@@ -47,11 +47,11 @@ class TestTypeCreate(TestType):
)
def setUp(self):
- super(TestTypeCreate, self).setUp()
+ super().setUp()
- self.new_volume_type = \
- volume_fakes.FakeVolumeType.create_one_volume_type(
- methods={'set_keys': {'myprop': 'myvalue'}})
+ self.new_volume_type = volume_fakes.create_one_volume_type(
+ methods={'set_keys': {'myprop': 'myvalue'}},
+ )
self.data = (
self.new_volume_type.description,
self.new_volume_type.id,
@@ -87,12 +87,12 @@ class TestTypeCreate(TestType):
'key_size': '128',
'control_location': 'front-end',
}
- encryption_type = \
- volume_fakes.FakeVolumeType.create_one_encryption_volume_type(
- attrs=encryption_info)
- self.new_volume_type = \
- volume_fakes.FakeVolumeType.create_one_volume_type(
- attrs={'encryption': encryption_info})
+ encryption_type = volume_fakes.create_one_encryption_volume_type(
+ attrs=encryption_info,
+ )
+ self.new_volume_type = volume_fakes.create_one_volume_type(
+ attrs={'encryption': encryption_info},
+ )
self.types_mock.create.return_value = self.new_volume_type
self.encryption_types_mock.create.return_value = encryption_type
encryption_columns = (
@@ -145,13 +145,12 @@ class TestTypeCreate(TestType):
class TestTypeDelete(TestType):
- volume_types = volume_fakes.FakeVolumeType.create_volume_types(count=2)
+ volume_types = volume_fakes.create_volume_types(count=2)
def setUp(self):
- super(TestTypeDelete, self).setUp()
+ super().setUp()
- self.types_mock.get = volume_fakes.FakeVolumeType.get_volume_types(
- self.volume_types)
+ self.types_mock.get = volume_fakes.get_volume_types(self.volume_types)
self.types_mock.delete.return_value = None
# Get the command object to mock
@@ -221,7 +220,7 @@ class TestTypeDelete(TestType):
class TestTypeList(TestType):
- volume_types = volume_fakes.FakeVolumeType.create_volume_types()
+ volume_types = volume_fakes.create_volume_types()
columns = [
"ID",
@@ -252,7 +251,7 @@ class TestTypeList(TestType):
))
def setUp(self):
- super(TestTypeList, self).setUp()
+ super().setUp()
self.types_mock.list.return_value = self.volume_types
self.encryption_types_mock.create.return_value = None
@@ -288,9 +287,9 @@ class TestTypeList(TestType):
self.assertCountEqual(self.data_long, list(data))
def test_type_list_with_encryption(self):
- encryption_type = \
- volume_fakes.FakeVolumeType.create_one_encryption_volume_type(
- attrs={'volume_type_id': self.volume_types[0].id})
+ encryption_type = volume_fakes.create_one_encryption_volume_type(
+ attrs={'volume_type_id': self.volume_types[0].id},
+ )
encryption_info = {
'provider': 'LuksEncryptor',
'cipher': None,
@@ -335,11 +334,12 @@ class TestTypeList(TestType):
class TestTypeSet(TestType):
- volume_type = volume_fakes.FakeVolumeType.create_one_volume_type(
- methods={'set_keys': None})
+ volume_type = volume_fakes.create_one_volume_type(
+ methods={'set_keys': None},
+ )
def setUp(self):
- super(TestTypeSet, self).setUp()
+ super().setUp()
self.types_mock.get.return_value = self.volume_type
@@ -441,9 +441,9 @@ class TestTypeShow(TestType):
)
def setUp(self):
- super(TestTypeShow, self).setUp()
+ super().setUp()
- self.volume_type = volume_fakes.FakeVolumeType.create_one_volume_type()
+ self.volume_type = volume_fakes.create_one_volume_type()
self.data = (
self.volume_type.description,
self.volume_type.id,
@@ -474,16 +474,16 @@ class TestTypeShow(TestType):
self.assertCountEqual(self.data, data)
def test_type_show_with_encryption(self):
- encryption_type = \
- volume_fakes.FakeVolumeType.create_one_encryption_volume_type()
+ encryption_type = volume_fakes.create_one_encryption_volume_type()
encryption_info = {
'provider': 'LuksEncryptor',
'cipher': None,
'key_size': None,
'control_location': 'front-end',
}
- self.volume_type = volume_fakes.FakeVolumeType.create_one_volume_type(
- attrs={'encryption': encryption_info})
+ self.volume_type = volume_fakes.create_one_volume_type(
+ attrs={'encryption': encryption_info},
+ )
self.types_mock.get.return_value = self.volume_type
self.encryption_types_mock.get.return_value = encryption_type
encryption_columns = (
@@ -521,11 +521,12 @@ class TestTypeShow(TestType):
class TestTypeUnset(TestType):
- volume_type = volume_fakes.FakeVolumeType.create_one_volume_type(
- methods={'unset_keys': None})
+ volume_type = volume_fakes.create_one_volume_type(
+ methods={'unset_keys': None},
+ )
def setUp(self):
- super(TestTypeUnset, self).setUp()
+ super().setUp()
self.types_mock.get.return_value = self.volume_type
@@ -599,7 +600,7 @@ class TestTypeUnset(TestType):
class TestColumns(TestType):
def test_encryption_info_column_with_info(self):
- fake_volume_type = volume_fakes.FakeVolumeType.create_one_volume_type()
+ fake_volume_type = volume_fakes.create_one_volume_type()
type_id = fake_volume_type.id
encryption_info = {
@@ -615,7 +616,7 @@ class TestColumns(TestType):
self.assertEqual(encryption_info, col.machine_readable())
def test_encryption_info_column_without_info(self):
- fake_volume_type = volume_fakes.FakeVolumeType.create_one_volume_type()
+ fake_volume_type = volume_fakes.create_one_volume_type()
type_id = fake_volume_type.id
col = volume_type.EncryptionInfoColumn(type_id, {})
diff --git a/openstackclient/tests/unit/volume/v1/test_volume.py b/openstackclient/tests/unit/volume/v1/test_volume.py
index 584eca2a..9f16b398 100644
--- a/openstackclient/tests/unit/volume/v1/test_volume.py
+++ b/openstackclient/tests/unit/volume/v1/test_volume.py
@@ -31,7 +31,7 @@ from openstackclient.volume.v1 import volume
class TestVolume(volume_fakes.TestVolumev1):
def setUp(self):
- super(TestVolume, self).setUp()
+ super().setUp()
# Get a shortcut to the VolumeManager Mock
self.volumes_mock = self.app.client_manager.volume.volumes
@@ -50,11 +50,9 @@ class TestVolume(volume_fakes.TestVolumev1):
self.images_mock.reset_mock()
def setup_volumes_mock(self, count):
- volumes = volume_fakes.FakeVolume.create_volumes(count=count)
+ volumes = volume_fakes.create_volumes(count=count)
- self.volumes_mock.get = volume_fakes.FakeVolume.get_volumes(
- volumes,
- 0)
+ self.volumes_mock.get = volume_fakes.get_volumes(volumes, 0)
return volumes
@@ -79,8 +77,8 @@ class TestVolumeCreate(TestVolume):
)
def setUp(self):
- super(TestVolumeCreate, self).setUp()
- self.new_volume = volume_fakes.FakeVolume.create_one_volume()
+ super().setUp()
+ self.new_volume = volume_fakes.create_one_volume()
self.datalist = (
self.new_volume.attachments,
self.new_volume.availability_zone,
@@ -635,7 +633,7 @@ class TestVolumeCreate(TestVolume):
class TestVolumeDelete(TestVolume):
def setUp(self):
- super(TestVolumeDelete, self).setUp()
+ super().setUp()
self.volumes_mock.delete.return_value = None
@@ -725,7 +723,7 @@ class TestVolumeDelete(TestVolume):
class TestVolumeList(TestVolume):
- _volume = volume_fakes.FakeVolume.create_one_volume()
+ _volume = volume_fakes.create_one_volume()
columns = (
'ID',
'Name',
@@ -744,7 +742,7 @@ class TestVolumeList(TestVolume):
)
def setUp(self):
- super(TestVolumeList, self).setUp()
+ super().setUp()
self.volumes_mock.list.return_value = [self._volume]
@@ -921,10 +919,10 @@ class TestVolumeList(TestVolume):
class TestVolumeMigrate(TestVolume):
- _volume = volume_fakes.FakeVolume.create_one_volume()
+ _volume = volume_fakes.create_one_volume()
def setUp(self):
- super(TestVolumeMigrate, self).setUp()
+ super().setUp()
self.volumes_mock.get.return_value = self._volume
self.volumes_mock.migrate_volume.return_value = None
@@ -983,10 +981,10 @@ class TestVolumeMigrate(TestVolume):
class TestVolumeSet(TestVolume):
- _volume = volume_fakes.FakeVolume.create_one_volume()
+ _volume = volume_fakes.create_one_volume()
def setUp(self):
- super(TestVolumeSet, self).setUp()
+ super().setUp()
self.volumes_mock.get.return_value = self._volume
@@ -1243,8 +1241,8 @@ class TestVolumeShow(TestVolume):
)
def setUp(self):
- super(TestVolumeShow, self).setUp()
- self._volume = volume_fakes.FakeVolume.create_one_volume()
+ super().setUp()
+ self._volume = volume_fakes.create_one_volume()
self.datalist = (
self._volume.attachments,
self._volume.availability_zone,
@@ -1300,10 +1298,10 @@ class TestVolumeShow(TestVolume):
class TestVolumeUnset(TestVolume):
- _volume = volume_fakes.FakeVolume.create_one_volume()
+ _volume = volume_fakes.create_one_volume()
def setUp(self):
- super(TestVolumeUnset, self).setUp()
+ super().setUp()
self.volumes_mock.get.return_value = self._volume
@@ -1346,7 +1344,7 @@ class TestVolumeUnset(TestVolume):
class TestColumns(TestVolume):
def test_attachments_column_without_server_cache(self):
- _volume = volume_fakes.FakeVolume.create_one_volume()
+ _volume = volume_fakes.create_one_volume()
server_id = _volume.attachments[0]['server_id']
device = _volume.attachments[0]['device']
@@ -1356,7 +1354,7 @@ class TestColumns(TestVolume):
self.assertEqual(_volume.attachments, col.machine_readable())
def test_attachments_column_with_server_cache(self):
- _volume = volume_fakes.FakeVolume.create_one_volume()
+ _volume = volume_fakes.create_one_volume()
server_id = _volume.attachments[0]['server_id']
device = _volume.attachments[0]['device']
diff --git a/openstackclient/tests/unit/volume/v1/test_volume_backup.py b/openstackclient/tests/unit/volume/v1/test_volume_backup.py
index f25a5ffa..b705b4b9 100644
--- a/openstackclient/tests/unit/volume/v1/test_volume_backup.py
+++ b/openstackclient/tests/unit/volume/v1/test_volume_backup.py
@@ -25,7 +25,7 @@ from openstackclient.volume.v1 import volume_backup
class TestBackup(volume_fakes.TestVolumev1):
def setUp(self):
- super(TestBackup, self).setUp()
+ super().setUp()
self.backups_mock = self.app.client_manager.volume.backups
self.backups_mock.reset_mock()
@@ -39,7 +39,7 @@ class TestBackup(volume_fakes.TestVolumev1):
class TestBackupCreate(TestBackup):
- volume = volume_fakes.FakeVolume.create_one_volume()
+ volume = volume_fakes.create_one_volume()
columns = (
'availability_zone',
@@ -55,9 +55,10 @@ class TestBackupCreate(TestBackup):
)
def setUp(self):
- super(TestBackupCreate, self).setUp()
- self.new_backup = volume_fakes.FakeBackup.create_one_backup(
- attrs={'volume_id': self.volume.id})
+ super().setUp()
+ self.new_backup = volume_fakes.create_one_backup(
+ attrs={'volume_id': self.volume.id},
+ )
self.data = (
self.new_backup.availability_zone,
self.new_backup.container,
@@ -129,13 +130,12 @@ class TestBackupCreate(TestBackup):
class TestBackupDelete(TestBackup):
- backups = volume_fakes.FakeBackup.create_backups(count=2)
+ backups = volume_fakes.create_backups(count=2)
def setUp(self):
- super(TestBackupDelete, self).setUp()
+ super().setUp()
- self.backups_mock.get = (
- volume_fakes.FakeBackup.get_backups(self.backups))
+ self.backups_mock.get = volume_fakes.get_backups(self.backups)
self.backups_mock.delete.return_value = None
# Get the command object to mock
@@ -205,9 +205,11 @@ class TestBackupDelete(TestBackup):
class TestBackupList(TestBackup):
- volume = volume_fakes.FakeVolume.create_one_volume()
- backups = volume_fakes.FakeBackup.create_backups(
- attrs={'volume_id': volume.display_name}, count=3)
+ volume = volume_fakes.create_one_volume()
+ backups = volume_fakes.create_backups(
+ attrs={'volume_id': volume.display_name},
+ count=3,
+ )
columns = [
'ID',
@@ -245,7 +247,7 @@ class TestBackupList(TestBackup):
))
def setUp(self):
- super(TestBackupList, self).setUp()
+ super().setUp()
self.volumes_mock.list.return_value = [self.volume]
self.backups_mock.list.return_value = self.backups
@@ -314,34 +316,75 @@ class TestBackupList(TestBackup):
class TestBackupRestore(TestBackup):
- volume = volume_fakes.FakeVolume.create_one_volume()
- backup = volume_fakes.FakeBackup.create_one_backup(
- attrs={'volume_id': volume.id})
+ volume = volume_fakes.create_one_volume()
+ backup = volume_fakes.create_one_backup(
+ attrs={'volume_id': volume.id},
+ )
def setUp(self):
- super(TestBackupRestore, self).setUp()
+ super().setUp()
self.backups_mock.get.return_value = self.backup
self.volumes_mock.get.return_value = self.volume
- self.restores_mock.restore.return_value = None
+ self.restores_mock.restore.return_value = (
+ volume_fakes.create_one_volume(
+ {'id': self.volume['id']},
+ )
+ )
# Get the command object to mock
self.cmd = volume_backup.RestoreVolumeBackup(self.app, None)
def test_backup_restore(self):
arglist = [
self.backup.id,
- self.backup.volume_id
]
verifylist = [
("backup", self.backup.id),
- ("volume", self.backup.volume_id)
+ ("volume", None),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
result = self.cmd.take_action(parsed_args)
self.restores_mock.restore.assert_called_with(self.backup.id,
- self.backup.volume_id)
- self.assertIsNone(result)
+ None)
+ self.assertIsNotNone(result)
+
+ def test_backup_restore_with_existing_volume(self):
+ arglist = [
+ self.backup.id,
+ self.backup.volume_id,
+ ]
+ verifylist = [
+ ("backup", self.backup.id),
+ ("volume", self.backup.volume_id),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ result = self.cmd.take_action(parsed_args)
+ self.restores_mock.restore.assert_called_with(
+ self.backup.id, self.backup.volume_id,
+ )
+ self.assertIsNotNone(result)
+
+ def test_backup_restore_with_invalid_volume(self):
+ arglist = [
+ self.backup.id,
+ "unexist_volume",
+ ]
+ verifylist = [
+ ("backup", self.backup.id),
+ ("volume", "unexist_volume"),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+ with mock.patch.object(
+ utils, 'find_resource',
+ side_effect=exceptions.CommandError(),
+ ):
+ self.assertRaises(
+ exceptions.CommandError,
+ self.cmd.take_action,
+ parsed_args,
+ )
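The restore stub above returns a volume-like fake rather than None, so take_action() has something to hand back and the tests assert on a non-None result. A minimal sketch of that mocking arrangement, using only unittest.mock (the ids below are invented for illustration, not part of the patch):

    # Illustrative sketch: a Mock whose restore() yields a fake volume
    # behaves like the restores_mock configured in setUp() above.
    from unittest import mock

    restores_mock = mock.Mock()
    restored = {'id': 'volume-id-1234', 'status': 'available'}
    restores_mock.restore.return_value = restored

    # Called the way the command would call it, the mock returns the fake.
    assert restores_mock.restore('backup-id-5678', None) is restored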
class TestBackupShow(TestBackup):
@@ -360,8 +403,8 @@ class TestBackupShow(TestBackup):
)
def setUp(self):
- super(TestBackupShow, self).setUp()
- self.backup = volume_fakes.FakeBackup.create_one_backup()
+ super().setUp()
+ self.backup = volume_fakes.create_one_backup()
self.data = (
self.backup.availability_zone,
self.backup.container,
diff --git a/openstackclient/tests/unit/volume/v2/fakes.py b/openstackclient/tests/unit/volume/v2/fakes.py
index 96e381d3..a3ef142f 100644
--- a/openstackclient/tests/unit/volume/v2/fakes.py
+++ b/openstackclient/tests/unit/volume/v2/fakes.py
@@ -40,257 +40,7 @@ QUOTA = {
}
-class FakeTransfer(object):
- """Fake one or more Transfer."""
-
- @staticmethod
- def create_one_transfer(attrs=None):
- """Create a fake transfer.
-
- :param dict attrs:
- A dictionary with all attributes of Transfer Request
- :return:
- A FakeResource object with volume_id, name, id.
- """
- # Set default attribute
- transfer_info = {
- 'volume_id': 'volume-id-' + uuid.uuid4().hex,
- 'name': 'fake_transfer_name',
- 'id': 'id-' + uuid.uuid4().hex,
- 'links': 'links-' + uuid.uuid4().hex,
- }
-
- # Overwrite default attributes if there are some attributes set
- attrs = attrs or {}
-
- transfer_info.update(attrs)
-
- transfer = fakes.FakeResource(
- None,
- transfer_info,
- loaded=True)
-
- return transfer
-
- @staticmethod
- def create_transfers(attrs=None, count=2):
- """Create multiple fake transfers.
-
- :param dict attrs:
- A dictionary with all attributes of transfer
- :param Integer count:
- The number of transfers to be faked
- :return:
- A list of FakeResource objects
- """
- transfers = []
- for n in range(0, count):
- transfers.append(FakeTransfer.create_one_transfer(attrs))
-
- return transfers
-
- @staticmethod
- def get_transfers(transfers=None, count=2):
- """Get an iterable MagicMock object with a list of faked transfers.
-
- If transfers list is provided, then initialize the Mock object with the
- list. Otherwise create one.
-
- :param List transfers:
- A list of FakeResource objects faking transfers
- :param Integer count:
- The number of transfers to be faked
- :return
- An iterable Mock object with side_effect set to a list of faked
- transfers
- """
- if transfers is None:
- transfers = FakeTransfer.create_transfers(count)
-
- return mock.Mock(side_effect=transfers)
-
-
-class FakeTypeAccess(object):
- """Fake one or more volume type access."""
-
- @staticmethod
- def create_one_type_access(attrs=None):
- """Create a fake volume type access for project.
-
- :param dict attrs:
- A dictionary with all attributes
- :return:
- A FakeResource object, with Volume_type_ID and Project_ID.
- """
- if attrs is None:
- attrs = {}
-
- # Set default attributes.
- type_access_attrs = {
- 'volume_type_id': 'volume-type-id-' + uuid.uuid4().hex,
- 'project_id': 'project-id-' + uuid.uuid4().hex,
- }
-
- # Overwrite default attributes.
- type_access_attrs.update(attrs)
-
- type_access = fakes.FakeResource(
- None,
- type_access_attrs,
- loaded=True)
-
- return type_access
-
-
-class FakeService(object):
- """Fake one or more Services."""
-
- @staticmethod
- def create_one_service(attrs=None):
- """Create a fake service.
-
- :param dict attrs:
- A dictionary with all attributes of service
- :return:
- A FakeResource object with host, status, etc.
- """
- # Set default attribute
- service_info = {
- 'host': 'host_test',
- 'binary': 'cinder_test',
- 'status': 'enabled',
- 'disabled_reason': 'LongHoliday-GoldenWeek',
- 'zone': 'fake_zone',
- 'updated_at': 'fake_date',
- 'state': 'fake_state',
- }
-
- # Overwrite default attributes if there are some attributes set
- attrs = attrs or {}
-
- service_info.update(attrs)
-
- service = fakes.FakeResource(
- None,
- service_info,
- loaded=True)
-
- return service
-
- @staticmethod
- def create_services(attrs=None, count=2):
- """Create multiple fake services.
-
- :param dict attrs:
- A dictionary with all attributes of service
- :param Integer count:
- The number of services to be faked
- :return:
- A list of FakeResource objects
- """
- services = []
- for n in range(0, count):
- services.append(FakeService.create_one_service(attrs))
-
- return services
-
-
-class FakeCapability(object):
- """Fake capability."""
-
- @staticmethod
- def create_one_capability(attrs=None):
- """Create a fake volume backend capability.
-
- :param dict attrs:
- A dictionary with all attributes of the Capabilities.
- :return:
- A FakeResource object with capability name and attrs.
- """
- # Set default attribute
- capability_info = {
- "namespace": "OS::Storage::Capabilities::fake",
- "vendor_name": "OpenStack",
- "volume_backend_name": "lvmdriver-1",
- "pool_name": "pool",
- "driver_version": "2.0.0",
- "storage_protocol": "iSCSI",
- "display_name": "Capabilities of Cinder LVM driver",
- "description": "Blah, blah.",
- "visibility": "public",
- "replication_targets": [],
- "properties": {
- "compression": {
- "title": "Compression",
- "description": "Enables compression.",
- "type": "boolean"
- },
- "qos": {
- "title": "QoS",
- "description": "Enables QoS.",
- "type": "boolean"
- },
- "replication": {
- "title": "Replication",
- "description": "Enables replication.",
- "type": "boolean"
- },
- "thin_provisioning": {
- "title": "Thin Provisioning",
- "description": "Sets thin provisioning.",
- "type": "boolean"
- }
- }
- }
-
- # Overwrite default attributes if there are some attributes set
- capability_info.update(attrs or {})
-
- capability = fakes.FakeResource(
- None,
- capability_info,
- loaded=True)
-
- return capability
-
-
-class FakePool(object):
- """Fake Pools."""
-
- @staticmethod
- def create_one_pool(attrs=None):
- """Create a fake pool.
-
- :param dict attrs:
- A dictionary with all attributes of the pool
- :return:
- A FakeResource object with pool name and attrs.
- """
- # Set default attribute
- pool_info = {
- 'name': 'host@lvmdriver-1#lvmdriver-1',
- 'storage_protocol': 'iSCSI',
- 'thick_provisioning_support': False,
- 'thin_provisioning_support': True,
- 'total_volumes': 99,
- 'total_capacity_gb': 1000.00,
- 'allocated_capacity_gb': 100,
- 'max_over_subscription_ratio': 200.0,
- }
-
- # Overwrite default attributes if there are some attributes set
- pool_info.update(attrs or {})
-
- pool = fakes.FakeResource(
- None,
- pool_info,
- loaded=True)
-
- return pool
-
-
-class FakeVolumeClient(object):
-
+class FakeVolumeClient:
def __init__(self, **kwargs):
self.auth_token = kwargs['token']
self.management_url = kwargs['endpoint']
@@ -325,8 +75,9 @@ class FakeVolumeClient(object):
self.transfers = mock.Mock()
self.transfers.resource_class = fakes.FakeResource(None, {})
self.volume_encryption_types = mock.Mock()
- self.volume_encryption_types.resource_class = (
- fakes.FakeResource(None, {}))
+ self.volume_encryption_types.resource_class = fakes.FakeResource(
+ None, {}
+ )
self.volume_snapshots = mock.Mock()
self.volume_snapshots.resource_class = fakes.FakeResource(None, {})
self.volume_type_access = mock.Mock()
@@ -338,860 +89,1047 @@ class FakeVolumeClient(object):
class TestVolume(utils.TestCommand):
-
def setUp(self):
- super(TestVolume, self).setUp()
+ super().setUp()
self.app.client_manager.volume = FakeVolumeClient(
- endpoint=fakes.AUTH_URL,
- token=fakes.AUTH_TOKEN
+ endpoint=fakes.AUTH_URL, token=fakes.AUTH_TOKEN
)
self.app.client_manager.identity = identity_fakes.FakeIdentityv3Client(
- endpoint=fakes.AUTH_URL,
- token=fakes.AUTH_TOKEN
+ endpoint=fakes.AUTH_URL, token=fakes.AUTH_TOKEN
)
self.app.client_manager.image = image_fakes.FakeImagev2Client(
- endpoint=fakes.AUTH_URL,
- token=fakes.AUTH_TOKEN
+ endpoint=fakes.AUTH_URL, token=fakes.AUTH_TOKEN
)
-class FakeVolume(object):
- """Fake one or more volumes."""
-
- @staticmethod
- def create_one_volume(attrs=None):
- """Create a fake volume.
-
- :param dict attrs:
- A dictionary with all attributes of volume
- :return:
- A FakeResource object with id, name, status, etc.
- """
- attrs = attrs or {}
-
- # Set default attribute
- volume_info = {
- 'id': 'volume-id' + uuid.uuid4().hex,
- 'name': 'volume-name' + uuid.uuid4().hex,
- 'description': 'description' + uuid.uuid4().hex,
- 'status': random.choice(['available', 'in_use']),
- 'size': random.randint(1, 20),
- 'volume_type':
- random.choice(['fake_lvmdriver-1', 'fake_lvmdriver-2']),
- 'bootable':
- random.randint(0, 1),
- 'metadata': {
- 'key' + uuid.uuid4().hex: 'val' + uuid.uuid4().hex,
- 'key' + uuid.uuid4().hex: 'val' + uuid.uuid4().hex,
- 'key' + uuid.uuid4().hex: 'val' + uuid.uuid4().hex},
- 'snapshot_id': random.randint(1, 5),
- 'availability_zone': 'zone' + uuid.uuid4().hex,
- 'attachments': [{
- 'device': '/dev/' + uuid.uuid4().hex,
- 'server_id': uuid.uuid4().hex,
- }, ],
- }
+def create_one_transfer(attrs=None):
+ """Create a fake transfer.
- # Overwrite default attributes if there are some attributes set
- volume_info.update(attrs)
-
- volume = fakes.FakeResource(
- None,
- volume_info,
- loaded=True)
- return volume
-
- @staticmethod
- def create_volumes(attrs=None, count=2):
- """Create multiple fake volumes.
-
- :param dict attrs:
- A dictionary with all attributes of volume
- :param Integer count:
- The number of volumes to be faked
- :return:
- A list of FakeResource objects
- """
- volumes = []
- for n in range(0, count):
- volumes.append(FakeVolume.create_one_volume(attrs))
-
- return volumes
-
- @staticmethod
- def create_one_sdk_volume(attrs=None):
- """Create a fake volume.
-
- :param dict attrs:
- A dictionary with all attributes of volume
- :return:
- A FakeResource object with id, name, status, etc.
- """
- attrs = attrs or {}
-
- # Set default attribute
- volume_info = {
- 'id': 'volume-id' + uuid.uuid4().hex,
- 'name': 'volume-name' + uuid.uuid4().hex,
- 'description': 'description' + uuid.uuid4().hex,
- 'status': random.choice(['available', 'in_use']),
- 'size': random.randint(1, 20),
- 'volume_type':
- random.choice(['fake_lvmdriver-1', 'fake_lvmdriver-2']),
- 'bootable':
- random.choice(['true', 'false']),
- 'metadata': {
- 'key' + uuid.uuid4().hex: 'val' + uuid.uuid4().hex,
- 'key' + uuid.uuid4().hex: 'val' + uuid.uuid4().hex,
- 'key' + uuid.uuid4().hex: 'val' + uuid.uuid4().hex},
- 'snapshot_id': random.randint(1, 5),
- 'availability_zone': 'zone' + uuid.uuid4().hex,
- 'attachments': [{
- 'device': '/dev/' + uuid.uuid4().hex,
- 'server_id': uuid.uuid4().hex,
- }, ],
- }
+ :param dict attrs:
+ A dictionary with all attributes of Transfer Request
+ :return:
+ A FakeResource object with volume_id, name, id.
+ """
+ # Set default attribute
+ transfer_info = {
+ 'volume_id': 'volume-id-' + uuid.uuid4().hex,
+ 'name': 'fake_transfer_name',
+ 'id': 'id-' + uuid.uuid4().hex,
+ 'links': 'links-' + uuid.uuid4().hex,
+ }
- # Overwrite default attributes if there are some attributes set
- volume_info.update(attrs)
- return volume.Volume(**volume_info)
-
- @staticmethod
- def create_sdk_volumes(attrs=None, count=2):
- """Create multiple fake volumes.
-
- :param dict attrs:
- A dictionary with all attributes of volume
- :param Integer count:
- The number of volumes to be faked
- :return:
- A list of FakeResource objects
- """
- volumes = []
- for n in range(0, count):
- volumes.append(FakeVolume.create_one_sdk_volume(attrs))
-
- return volumes
-
- @staticmethod
- def get_volumes(volumes=None, count=2):
- """Get an iterable MagicMock object with a list of faked volumes.
-
- If volumes list is provided, then initialize the Mock object with the
- list. Otherwise create one.
-
- :param List volumes:
- A list of FakeResource objects faking volumes
- :param Integer count:
- The number of volumes to be faked
- :return
- An iterable Mock object with side_effect set to a list of faked
- volumes
- """
- if volumes is None:
- volumes = FakeVolume.create_volumes(count)
-
- return mock.Mock(side_effect=volumes)
-
- @staticmethod
- def get_volume_columns(volume=None):
- """Get the volume columns from a faked volume object.
-
- :param volume:
- A FakeResource objects faking volume
- :return
- A tuple which may include the following keys:
- ('id', 'name', 'description', 'status', 'size', 'volume_type',
- 'metadata', 'snapshot', 'availability_zone', 'attachments')
- """
- if volume is not None:
- return tuple(k for k in sorted(volume.keys()))
- return tuple([])
-
- @staticmethod
- def get_volume_data(volume=None):
- """Get the volume data from a faked volume object.
-
- :param volume:
- A FakeResource objects faking volume
- :return
- A tuple which may include the following values:
- ('ce26708d', 'fake_volume', 'fake description', 'available',
- 20, 'fake_lvmdriver-1', "Alpha='a', Beta='b', Gamma='g'",
- 1, 'nova', [{'device': '/dev/ice', 'server_id': '1233'}])
- """
- data_list = []
- if volume is not None:
- for x in sorted(volume.keys()):
- if x == 'tags':
- # The 'tags' should be format_list
- data_list.append(
- format_columns.ListColumn(volume.info.get(x)))
- else:
- data_list.append(volume.info.get(x))
- return tuple(data_list)
-
-
-class FakeAvailabilityZone(object):
- """Fake one or more volume availability zones (AZs)."""
-
- @staticmethod
- def create_one_availability_zone(attrs=None):
- """Create a fake AZ.
-
- :param dict attrs:
- A dictionary with all attributes
- :return:
- A FakeResource object with zoneName, zoneState, etc.
- """
- attrs = attrs or {}
-
- # Set default attributes.
- availability_zone = {
- 'zoneName': uuid.uuid4().hex,
- 'zoneState': {'available': True},
- }
+ # Overwrite default attributes if there are some attributes set
+ attrs = attrs or {}
- # Overwrite default attributes.
- availability_zone.update(attrs)
-
- availability_zone = fakes.FakeResource(
- info=copy.deepcopy(availability_zone),
- loaded=True)
- return availability_zone
-
- @staticmethod
- def create_availability_zones(attrs=None, count=2):
- """Create multiple fake AZs.
-
- :param dict attrs:
- A dictionary with all attributes
- :param int count:
- The number of AZs to fake
- :return:
- A list of FakeResource objects faking the AZs
- """
- availability_zones = []
- for i in range(0, count):
- availability_zone = \
- FakeAvailabilityZone.create_one_availability_zone(attrs)
- availability_zones.append(availability_zone)
-
- return availability_zones
-
-
-class FakeBackup(object):
- """Fake one or more backup."""
-
- @staticmethod
- def create_one_backup(attrs=None):
- """Create a fake backup.
-
- :param dict attrs:
- A dictionary with all attributes
- :return:
- A FakeResource object with id, name, volume_id, etc.
- """
- attrs = attrs or {}
-
- # Set default attributes.
- backup_info = {
- "id": 'backup-id-' + uuid.uuid4().hex,
- "name": 'backup-name-' + uuid.uuid4().hex,
- "volume_id": 'volume-id-' + uuid.uuid4().hex,
- "snapshot_id": 'snapshot-id' + uuid.uuid4().hex,
- "description": 'description-' + uuid.uuid4().hex,
- "object_count": None,
- "container": 'container-' + uuid.uuid4().hex,
- "size": random.randint(1, 20),
- "status": "error",
- "availability_zone": 'zone' + uuid.uuid4().hex,
- }
+ transfer_info.update(attrs)
- # Overwrite default attributes.
- backup_info.update(attrs)
-
- backup = fakes.FakeResource(
- info=copy.deepcopy(backup_info),
- loaded=True)
- return backup
-
- @staticmethod
- def create_backups(attrs=None, count=2):
- """Create multiple fake backups.
-
- :param dict attrs:
- A dictionary with all attributes
- :param int count:
- The number of backups to fake
- :return:
- A list of FakeResource objects faking the backups
- """
- backups = []
- for i in range(0, count):
- backup = FakeBackup.create_one_backup(attrs)
- backups.append(backup)
-
- return backups
-
- @staticmethod
- def get_backups(backups=None, count=2):
- """Get an iterable MagicMock object with a list of faked backups.
-
- If backups list is provided, then initialize the Mock object with the
- list. Otherwise create one.
-
- :param List backups:
- A list of FakeResource objects faking backups
- :param Integer count:
- The number of backups to be faked
- :return
- An iterable Mock object with side_effect set to a list of faked
- backups
- """
- if backups is None:
- backups = FakeBackup.create_backups(count)
-
- return mock.Mock(side_effect=backups)
-
- @staticmethod
- def create_backup_record():
- """Gets a fake backup record for a given backup.
-
- :return: An "exported" backup record.
- """
-
- return {
- 'backup_service': 'cinder.backup.drivers.swift.SwiftBackupDriver',
- 'backup_url': 'eyJzdGF0dXMiOiAiYXZh',
- }
-
- @staticmethod
- def import_backup_record():
- """Creates a fake backup record import response from a backup.
-
- :return: The fake backup object that was encoded.
- """
- return {
- 'backup': {
- 'id': 'backup.id',
- 'name': 'backup.name',
- 'links': [
- {'href': 'link1', 'rel': 'self'},
- {'href': 'link2', 'rel': 'bookmark'},
- ],
- },
- }
+ transfer = fakes.FakeResource(None, transfer_info, loaded=True)
+ return transfer
-class FakeConsistencyGroup(object):
- """Fake one or more consistency group."""
-
- @staticmethod
- def create_one_consistency_group(attrs=None):
- """Create a fake consistency group.
-
- :param dict attrs:
- A dictionary with all attributes
- :return:
- A FakeResource object with id, name, description, etc.
- """
- attrs = attrs or {}
-
- # Set default attributes.
- consistency_group_info = {
- "id": 'backup-id-' + uuid.uuid4().hex,
- "name": 'backup-name-' + uuid.uuid4().hex,
- "description": 'description-' + uuid.uuid4().hex,
- "status": "error",
- "availability_zone": 'zone' + uuid.uuid4().hex,
- "created_at": 'time-' + uuid.uuid4().hex,
- "volume_types": ['volume-type1'],
- }
-
- # Overwrite default attributes.
- consistency_group_info.update(attrs)
-
- consistency_group = fakes.FakeResource(
- info=copy.deepcopy(consistency_group_info),
- loaded=True)
- return consistency_group
-
- @staticmethod
- def create_consistency_groups(attrs=None, count=2):
- """Create multiple fake consistency groups.
-
- :param dict attrs:
- A dictionary with all attributes
- :param int count:
- The number of consistency groups to fake
- :return:
- A list of FakeResource objects faking the consistency groups
- """
- consistency_groups = []
- for i in range(0, count):
- consistency_group = (
- FakeConsistencyGroup.create_one_consistency_group(attrs))
- consistency_groups.append(consistency_group)
-
- return consistency_groups
-
- @staticmethod
- def get_consistency_groups(consistency_groups=None, count=2):
- """Note:
-
- Get an iterable MagicMock object with a list of faked
- consistency_groups.
-
- If consistency_groups list is provided, then initialize
- the Mock object with the list. Otherwise create one.
-
- :param List consistency_groups:
- A list of FakeResource objects faking consistency_groups
- :param Integer count:
- The number of consistency_groups to be faked
- :return
- An iterable Mock object with side_effect set to a list of faked
- consistency_groups
- """
- if consistency_groups is None:
- consistency_groups = (FakeConsistencyGroup.
- create_consistency_groups(count))
-
- return mock.Mock(side_effect=consistency_groups)
-
-
-class FakeConsistencyGroupSnapshot(object):
- """Fake one or more consistency group snapshot."""
-
- @staticmethod
- def create_one_consistency_group_snapshot(attrs=None):
- """Create a fake consistency group snapshot.
-
- :param dict attrs:
- A dictionary with all attributes
- :return:
- A FakeResource object with id, name, description, etc.
- """
- attrs = attrs or {}
-
- # Set default attributes.
- consistency_group_snapshot_info = {
- "id": 'id-' + uuid.uuid4().hex,
- "name": 'backup-name-' + uuid.uuid4().hex,
- "description": 'description-' + uuid.uuid4().hex,
- "status": "error",
- "consistencygroup_id": 'consistency-group-id' + uuid.uuid4().hex,
- "created_at": 'time-' + uuid.uuid4().hex,
- }
- # Overwrite default attributes.
- consistency_group_snapshot_info.update(attrs)
-
- consistency_group_snapshot = fakes.FakeResource(
- info=copy.deepcopy(consistency_group_snapshot_info),
- loaded=True)
- return consistency_group_snapshot
-
- @staticmethod
- def create_consistency_group_snapshots(attrs=None, count=2):
- """Create multiple fake consistency group snapshots.
-
- :param dict attrs:
- A dictionary with all attributes
- :param int count:
- The number of consistency group snapshots to fake
- :return:
- A list of FakeResource objects faking the
- consistency group snapshots
- """
- consistency_group_snapshots = []
- for i in range(0, count):
- consistency_group_snapshot = (
- FakeConsistencyGroupSnapshot.
- create_one_consistency_group_snapshot(attrs)
- )
- consistency_group_snapshots.append(consistency_group_snapshot)
-
- return consistency_group_snapshots
-
- @staticmethod
- def get_consistency_group_snapshots(snapshots=None, count=2):
- """Get an iterable MagicMock object with a list of faked cgsnapshots.
-
- If consistenct group snapshots list is provided, then initialize
- the Mock object with the list. Otherwise create one.
-
- :param List snapshots:
- A list of FakeResource objects faking consistency group snapshots
- :param Integer count:
- The number of consistency group snapshots to be faked
- :return
- An iterable Mock object with side_effect set to a list of faked
- consistency groups
- """
- if snapshots is None:
- snapshots = (FakeConsistencyGroupSnapshot.
- create_consistency_group_snapshots(count))
-
- return mock.Mock(side_effect=snapshots)
-
-
-class FakeExtension(object):
- """Fake one or more extension."""
-
- @staticmethod
- def create_one_extension(attrs=None):
- """Create a fake extension.
-
- :param dict attrs:
- A dictionary with all attributes
- :return:
- A FakeResource object with name, namespace, etc.
- """
- attrs = attrs or {}
-
- # Set default attributes.
- extension_info = {
- 'name': 'name-' + uuid.uuid4().hex,
- 'namespace': ('http://docs.openstack.org/'
- 'block-service/ext/scheduler-hints/api/v2'),
- 'description': 'description-' + uuid.uuid4().hex,
- 'updated': '2013-04-18T00:00:00+00:00',
- 'alias': 'OS-SCH-HNT',
- 'links': ('[{"href":'
- '"https://github.com/openstack/block-api", "type":'
- ' "text/html", "rel": "describedby"}]'),
- }
+def create_transfers(attrs=None, count=2):
+ """Create multiple fake transfers.
- # Overwrite default attributes.
- extension_info.update(attrs)
+ :param dict attrs:
+ A dictionary with all attributes of transfer
+ :param Integer count:
+ The number of transfers to be faked
+ :return:
+ A list of FakeResource objects
+ """
+ transfers = []
+ for n in range(0, count):
+ transfers.append(create_one_transfer(attrs))
- extension = fakes.FakeResource(
- info=copy.deepcopy(extension_info),
- loaded=True)
- return extension
+ return transfers
-class FakeQos(object):
- """Fake one or more Qos specification."""
+def get_transfers(transfers=None, count=2):
+ """Get an iterable MagicMock object with a list of faked transfers.
- @staticmethod
- def create_one_qos(attrs=None):
- """Create a fake Qos specification.
+ If transfers list is provided, then initialize the Mock object with the
+ list. Otherwise create one.
- :param dict attrs:
- A dictionary with all attributes
- :return:
- A FakeResource object with id, name, consumer, etc.
- """
- attrs = attrs or {}
-
- # Set default attributes.
- qos_info = {
- "id": 'qos-id-' + uuid.uuid4().hex,
- "name": 'qos-name-' + uuid.uuid4().hex,
- "consumer": 'front-end',
- "specs": {"foo": "bar", "iops": "9001"},
- }
-
- # Overwrite default attributes.
- qos_info.update(attrs)
-
- qos = fakes.FakeResource(
- info=copy.deepcopy(qos_info),
- loaded=True)
- return qos
-
- @staticmethod
- def create_one_qos_association(attrs=None):
- """Create a fake Qos specification association.
-
- :param dict attrs:
- A dictionary with all attributes
- :return:
- A FakeResource object with id, name, association_type, etc.
- """
- attrs = attrs or {}
-
- # Set default attributes.
- qos_association_info = {
- "id": 'type-id-' + uuid.uuid4().hex,
- "name": 'type-name-' + uuid.uuid4().hex,
- "association_type": 'volume_type',
- }
-
- # Overwrite default attributes.
- qos_association_info.update(attrs)
-
- qos_association = fakes.FakeResource(
- info=copy.deepcopy(qos_association_info),
- loaded=True)
- return qos_association
-
- @staticmethod
- def create_qoses(attrs=None, count=2):
- """Create multiple fake Qos specifications.
-
- :param dict attrs:
- A dictionary with all attributes
- :param int count:
- The number of Qos specifications to fake
- :return:
- A list of FakeResource objects faking the Qos specifications
- """
- qoses = []
- for i in range(0, count):
- qos = FakeQos.create_one_qos(attrs)
- qoses.append(qos)
-
- return qoses
-
- @staticmethod
- def get_qoses(qoses=None, count=2):
- """Get an iterable MagicMock object with a list of faked qoses.
-
- If qoses list is provided, then initialize the Mock object with the
- list. Otherwise create one.
-
- :param List qoses:
- A list of FakeResource objects faking qoses
- :param Integer count:
- The number of qoses to be faked
- :return
- An iterable Mock object with side_effect set to a list of faked
- qoses
- """
- if qoses is None:
- qoses = FakeQos.create_qoses(count)
-
- return mock.Mock(side_effect=qoses)
-
-
-class FakeSnapshot(object):
- """Fake one or more snapshot."""
-
- @staticmethod
- def create_one_snapshot(attrs=None):
- """Create a fake snapshot.
-
- :param dict attrs:
- A dictionary with all attributes
- :return:
- A FakeResource object with id, name, description, etc.
- """
- attrs = attrs or {}
-
- # Set default attributes.
- snapshot_info = {
- "id": 'snapshot-id-' + uuid.uuid4().hex,
- "name": 'snapshot-name-' + uuid.uuid4().hex,
- "description": 'snapshot-description-' + uuid.uuid4().hex,
- "size": 10,
- "status": "available",
- "metadata": {"foo": "bar"},
- "created_at": "2015-06-03T18:49:19.000000",
- "volume_id": 'vloume-id-' + uuid.uuid4().hex,
- }
-
- # Overwrite default attributes.
- snapshot_info.update(attrs)
-
- snapshot = fakes.FakeResource(
- info=copy.deepcopy(snapshot_info),
- loaded=True)
- return snapshot
-
- @staticmethod
- def create_snapshots(attrs=None, count=2):
- """Create multiple fake snapshots.
-
- :param dict attrs:
- A dictionary with all attributes
- :param int count:
- The number of snapshots to fake
- :return:
- A list of FakeResource objects faking the snapshots
- """
- snapshots = []
- for i in range(0, count):
- snapshot = FakeSnapshot.create_one_snapshot(attrs)
- snapshots.append(snapshot)
-
- return snapshots
-
- @staticmethod
- def get_snapshots(snapshots=None, count=2):
- """Get an iterable MagicMock object with a list of faked snapshots.
-
- If snapshots list is provided, then initialize the Mock object with the
- list. Otherwise create one.
-
- :param List snapshots:
- A list of FakeResource objects faking snapshots
- :param Integer count:
- The number of snapshots to be faked
- :return
- An iterable Mock object with side_effect set to a list of faked
- snapshots
- """
- if snapshots is None:
- snapshots = FakeSnapshot.create_snapshots(count)
-
- return mock.Mock(side_effect=snapshots)
-
-
-class FakeVolumeType(object):
- """Fake one or more volume type."""
-
- @staticmethod
- def create_one_volume_type(attrs=None, methods=None):
- """Create a fake volume type.
-
- :param dict attrs:
- A dictionary with all attributes
- :param dict methods:
- A dictionary with all methods
- :return:
- A FakeResource object with id, name, description, etc.
- """
- attrs = attrs or {}
- methods = methods or {}
-
- # Set default attributes.
- volume_type_info = {
- "id": 'type-id-' + uuid.uuid4().hex,
- "name": 'type-name-' + uuid.uuid4().hex,
- "description": 'type-description-' + uuid.uuid4().hex,
- "extra_specs": {"foo": "bar"},
- "is_public": True,
- }
-
- # Overwrite default attributes.
- volume_type_info.update(attrs)
-
- volume_type = fakes.FakeResource(
- info=copy.deepcopy(volume_type_info),
- methods=methods,
- loaded=True)
- return volume_type
-
- @staticmethod
- def create_volume_types(attrs=None, count=2):
- """Create multiple fake volume_types.
-
- :param dict attrs:
- A dictionary with all attributes
- :param int count:
- The number of types to fake
- :return:
- A list of FakeResource objects faking the types
- """
- volume_types = []
- for i in range(0, count):
- volume_type = FakeVolumeType.create_one_volume_type(attrs)
- volume_types.append(volume_type)
-
- return volume_types
-
- @staticmethod
- def get_volume_types(volume_types=None, count=2):
- """Get an iterable MagicMock object with a list of faked volume types.
-
- If volume_types list is provided, then initialize the Mock object with
- the list. Otherwise create one.
-
- :param List volume_types:
- A list of FakeResource objects faking volume types
- :param Integer count:
- The number of volume types to be faked
- :return
- An iterable Mock object with side_effect set to a list of faked
- volume types
- """
- if volume_types is None:
- volume_types = FakeVolumeType.create_volume_types(count)
-
- return mock.Mock(side_effect=volume_types)
-
- @staticmethod
- def create_one_encryption_volume_type(attrs=None):
- """Create a fake encryption volume type.
-
- :param dict attrs:
- A dictionary with all attributes
- :return:
- A FakeResource object with volume_type_id etc.
- """
- attrs = attrs or {}
-
- # Set default attributes.
- encryption_info = {
- "volume_type_id": 'type-id-' + uuid.uuid4().hex,
- 'provider': 'LuksEncryptor',
- 'cipher': None,
- 'key_size': None,
- 'control_location': 'front-end',
- }
-
- # Overwrite default attributes.
- encryption_info.update(attrs)
-
- encryption_type = fakes.FakeResource(
- info=copy.deepcopy(encryption_info),
- loaded=True)
- return encryption_type
-
-
-class FakeQuota(object):
- """Fake quota"""
-
- @staticmethod
- def create_one_vol_quota(attrs=None):
- """Create one quota"""
- attrs = attrs or {}
-
- quota_attrs = {
- 'id': 'project-id-' + uuid.uuid4().hex,
- 'backups': 100,
- 'backup_gigabytes': 100,
- 'gigabytes': 10,
- 'per_volume_gigabytes': 10,
- 'snapshots': 0,
- 'volumes': 10}
-
- quota_attrs.update(attrs)
-
- quota = fakes.FakeResource(
- info=copy.deepcopy(quota_attrs),
- loaded=True)
- quota.project_id = quota_attrs['id']
-
- return quota
-
- @staticmethod
- def create_one_default_vol_quota(attrs=None):
- """Create one quota"""
- attrs = attrs or {}
-
- quota_attrs = {
- 'id': 'project-id-' + uuid.uuid4().hex,
- 'backups': 100,
- 'backup_gigabytes': 100,
- 'gigabytes': 100,
- 'per_volume_gigabytes': 100,
- 'snapshots': 100,
- 'volumes': 100}
+ :param List transfers:
+ A list of FakeResource objects faking transfers
+ :param Integer count:
+ The number of transfers to be faked
+ :return
+ An iterable Mock object with side_effect set to a list of faked
+ transfers
+ """
+ if transfers is None:
+ transfers = create_transfers(count)
+
+ return mock.Mock(side_effect=transfers)
+
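The get_* helpers above build a Mock whose side_effect is the list of fakes, so each successive call hands back the next faked resource, mirroring a manager's get() being invoked once per id. A hedged sketch of how a test consumes such a mock, assuming it runs in the context of this fakes module (or with a volume_fakes. prefix from a test):

    # Each call to the mocked get() yields the next fake transfer in order.
    transfers = create_transfers(count=2)
    transfers_get = get_transfers(transfers)

    assert transfers_get('any-id') is transfers[0]
    assert transfers_get('other-id') is transfers[1]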
+
+def create_one_type_access(attrs=None):
+ """Create a fake volume type access for project.
+
+ :param dict attrs:
+ A dictionary with all attributes
+ :return:
+ A FakeResource object, with Volume_type_ID and Project_ID.
+ """
+ if attrs is None:
+ attrs = {}
+
+ # Set default attributes.
+ type_access_attrs = {
+ 'volume_type_id': 'volume-type-id-' + uuid.uuid4().hex,
+ 'project_id': 'project-id-' + uuid.uuid4().hex,
+ }
+
+ # Overwrite default attributes.
+ type_access_attrs.update(attrs)
+
+ type_access = fakes.FakeResource(None, type_access_attrs, loaded=True)
+
+ return type_access
+
+
+def create_one_service(attrs=None):
+ """Create a fake service.
+
+ :param dict attrs:
+ A dictionary with all attributes of service
+ :return:
+ A FakeResource object with host, status, etc.
+ """
+ # Set default attribute
+ service_info = {
+ 'host': 'host_test',
+ 'binary': 'cinder_test',
+ 'status': 'enabled',
+ 'disabled_reason': 'LongHoliday-GoldenWeek',
+ 'zone': 'fake_zone',
+ 'updated_at': 'fake_date',
+ 'state': 'fake_state',
+ }
+
+ # Overwrite default attributes if there are some attributes set
+ attrs = attrs or {}
+
+ service_info.update(attrs)
+
+ service = fakes.FakeResource(None, service_info, loaded=True)
+
+ return service
+
+
+def create_services(attrs=None, count=2):
+ """Create multiple fake services.
+
+ :param dict attrs:
+ A dictionary with all attributes of service
+ :param Integer count:
+ The number of services to be faked
+ :return:
+ A list of FakeResource objects
+ """
+ services = []
+ for n in range(0, count):
+ services.append(create_one_service(attrs))
+
+ return services
+
+
+def create_one_capability(attrs=None):
+ """Create a fake volume backend capability.
+
+ :param dict attrs:
+ A dictionary with all attributes of the Capabilities.
+ :return:
+ A FakeResource object with capability name and attrs.
+ """
+ # Set default attribute
+ capability_info = {
+ "namespace": "OS::Storage::Capabilities::fake",
+ "vendor_name": "OpenStack",
+ "volume_backend_name": "lvmdriver-1",
+ "pool_name": "pool",
+ "driver_version": "2.0.0",
+ "storage_protocol": "iSCSI",
+ "display_name": "Capabilities of Cinder LVM driver",
+ "description": "Blah, blah.",
+ "visibility": "public",
+ "replication_targets": [],
+ "properties": {
+ "compression": {
+ "title": "Compression",
+ "description": "Enables compression.",
+ "type": "boolean",
+ },
+ "qos": {
+ "title": "QoS",
+ "description": "Enables QoS.",
+ "type": "boolean",
+ },
+ "replication": {
+ "title": "Replication",
+ "description": "Enables replication.",
+ "type": "boolean",
+ },
+ "thin_provisioning": {
+ "title": "Thin Provisioning",
+ "description": "Sets thin provisioning.",
+ "type": "boolean",
+ },
+ },
+ }
+
+ # Overwrite default attributes if there are some attributes set
+ capability_info.update(attrs or {})
+
+ capability = fakes.FakeResource(None, capability_info, loaded=True)
+
+ return capability
+
+
+def create_one_pool(attrs=None):
+ """Create a fake pool.
+
+ :param dict attrs:
+ A dictionary with all attributes of the pool
+ :return:
+ A FakeResource object with pool name and attrs.
+ """
+ # Set default attribute
+ pool_info = {
+ 'name': 'host@lvmdriver-1#lvmdriver-1',
+ 'storage_protocol': 'iSCSI',
+ 'thick_provisioning_support': False,
+ 'thin_provisioning_support': True,
+ 'total_volumes': 99,
+ 'total_capacity_gb': 1000.00,
+ 'allocated_capacity_gb': 100,
+ 'max_over_subscription_ratio': 200.0,
+ }
+
+ # Overwrite default attributes if there are some attributes set
+ pool_info.update(attrs or {})
+
+ pool = fakes.FakeResource(None, pool_info, loaded=True)
+
+ return pool
+
+
+def create_one_volume(attrs=None):
+ """Create a fake volume.
+
+ :param dict attrs:
+ A dictionary with all attributes of volume
+ :return:
+ A FakeResource object with id, name, status, etc.
+ """
+ attrs = attrs or {}
+
+ # Set default attribute
+ volume_info = {
+ 'id': 'volume-id' + uuid.uuid4().hex,
+ 'name': 'volume-name' + uuid.uuid4().hex,
+ 'description': 'description' + uuid.uuid4().hex,
+ 'status': random.choice(['available', 'in_use']),
+ 'size': random.randint(1, 20),
+ 'volume_type': random.choice(['fake_lvmdriver-1', 'fake_lvmdriver-2']),
+ 'bootable': random.randint(0, 1),
+ 'metadata': {
+ 'key' + uuid.uuid4().hex: 'val' + uuid.uuid4().hex,
+ 'key' + uuid.uuid4().hex: 'val' + uuid.uuid4().hex,
+ 'key' + uuid.uuid4().hex: 'val' + uuid.uuid4().hex,
+ },
+ 'snapshot_id': random.randint(1, 5),
+ 'availability_zone': 'zone' + uuid.uuid4().hex,
+ 'attachments': [
+ {
+ 'device': '/dev/' + uuid.uuid4().hex,
+ 'server_id': uuid.uuid4().hex,
+ },
+ ],
+ }
+
+ # Overwrite default attributes if there are some attributes set
+ volume_info.update(attrs)
+
+ volume = fakes.FakeResource(None, volume_info, loaded=True)
+ return volume
+
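Defaults above are randomized per call; anything passed via attrs overrides them. A short usage sketch (the name and size are invented for illustration):

    # Pin only the fields a test cares about; the rest stay randomized.
    _volume = create_one_volume(attrs={'name': 'my-volume', 'size': 42})
    assert _volume.name == 'my-volume'
    assert _volume.size == 42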
+
+def create_volumes(attrs=None, count=2):
+ """Create multiple fake volumes.
+
+ :param dict attrs:
+ A dictionary with all attributes of volume
+ :param Integer count:
+ The number of volumes to be faked
+ :return:
+ A list of FakeResource objects
+ """
+ volumes = []
+ for n in range(0, count):
+ volumes.append(create_one_volume(attrs))
+
+ return volumes
+
+
+def create_one_sdk_volume(attrs=None):
+ """Create a fake volume.
+
+ :param dict attrs:
+ A dictionary with all attributes of volume
+ :return:
+ A FakeResource object with id, name, status, etc.
+ """
+ attrs = attrs or {}
+
+ # Set default attribute
+ volume_info = {
+ 'id': 'volume-id' + uuid.uuid4().hex,
+ 'name': 'volume-name' + uuid.uuid4().hex,
+ 'description': 'description' + uuid.uuid4().hex,
+ 'status': random.choice(['available', 'in_use']),
+ 'size': random.randint(1, 20),
+ 'volume_type': random.choice(['fake_lvmdriver-1', 'fake_lvmdriver-2']),
+ 'bootable': random.choice(['true', 'false']),
+ 'metadata': {
+ 'key' + uuid.uuid4().hex: 'val' + uuid.uuid4().hex,
+ 'key' + uuid.uuid4().hex: 'val' + uuid.uuid4().hex,
+ 'key' + uuid.uuid4().hex: 'val' + uuid.uuid4().hex,
+ },
+ 'snapshot_id': random.randint(1, 5),
+ 'availability_zone': 'zone' + uuid.uuid4().hex,
+ 'attachments': [
+ {
+ 'device': '/dev/' + uuid.uuid4().hex,
+ 'server_id': uuid.uuid4().hex,
+ },
+ ],
+ }
+
+ # Overwrite default attributes if there are some attributes set
+ volume_info.update(attrs)
+ return volume.Volume(**volume_info)
+
+
+def create_sdk_volumes(attrs=None, count=2):
+ """Create multiple fake volumes.
+
+ :param dict attrs:
+ A dictionary with all attributes of volume
+ :param Integer count:
+ The number of volumes to be faked
+ :return:
+ A list of FakeResource objects
+ """
+ volumes = []
+ for n in range(0, count):
+ volumes.append(create_one_sdk_volume(attrs))
+
+ return volumes
+
+
+def get_volumes(volumes=None, count=2):
+ """Get an iterable MagicMock object with a list of faked volumes.
+
+ If volumes list is provided, then initialize the Mock object with the
+ list. Otherwise create one.
+
+ :param List volumes:
+ A list of FakeResource objects faking volumes
+ :param Integer count:
+ The number of volumes to be faked
+ :return
+ An iterable Mock object with side_effect set to a list of faked
+ volumes
+ """
+ if volumes is None:
+ volumes = create_volumes(count)
+
+ return mock.Mock(side_effect=volumes)
+
+
+def get_volume_columns(volume=None):
+ """Get the volume columns from a faked volume object.
+
+ :param volume:
+        A FakeResource object faking a volume
+ :return
+ A tuple which may include the following keys:
+ ('id', 'name', 'description', 'status', 'size', 'volume_type',
+ 'metadata', 'snapshot', 'availability_zone', 'attachments')
+ """
+ if volume is not None:
+ return tuple(k for k in sorted(volume.keys()))
+ return tuple([])
+
+
+def get_volume_data(volume=None):
+ """Get the volume data from a faked volume object.
+
+ :param volume:
+        A FakeResource object faking a volume
+ :return
+ A tuple which may include the following values:
+ ('ce26708d', 'fake_volume', 'fake description', 'available',
+ 20, 'fake_lvmdriver-1', "Alpha='a', Beta='b', Gamma='g'",
+ 1, 'nova', [{'device': '/dev/ice', 'server_id': '1233'}])
+ """
+ data_list = []
+ if volume is not None:
+ for x in sorted(volume.keys()):
+ if x == 'tags':
+ # The 'tags' should be format_list
+ data_list.append(format_columns.ListColumn(volume.info.get(x)))
+ else:
+ data_list.append(volume.info.get(x))
+ return tuple(data_list)
+
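get_volume_columns() and get_volume_data() iterate the same sorted keys, so the column names and values line up index for index. A hedged sketch of the pairing, assuming a FakeResource fake that exposes keys() and info as these helpers expect:

    # Column headers and row values are produced in matching order.
    _volume = create_one_volume()
    columns = get_volume_columns(_volume)
    data = get_volume_data(_volume)
    assert len(columns) == len(data)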
+
+def create_one_availability_zone(attrs=None):
+ """Create a fake AZ.
+
+ :param dict attrs:
+ A dictionary with all attributes
+ :return:
+ A FakeResource object with zoneName, zoneState, etc.
+ """
+ attrs = attrs or {}
+
+ # Set default attributes.
+ availability_zone = {
+ 'zoneName': uuid.uuid4().hex,
+ 'zoneState': {'available': True},
+ }
+
+ # Overwrite default attributes.
+ availability_zone.update(attrs)
+
+ availability_zone = fakes.FakeResource(
+ info=copy.deepcopy(availability_zone), loaded=True
+ )
+ return availability_zone
+
+
+def create_availability_zones(attrs=None, count=2):
+ """Create multiple fake AZs.
+
+ :param dict attrs:
+ A dictionary with all attributes
+ :param int count:
+ The number of AZs to fake
+ :return:
+ A list of FakeResource objects faking the AZs
+ """
+ availability_zones = []
+ for i in range(0, count):
+ availability_zone = create_one_availability_zone(attrs)
+ availability_zones.append(availability_zone)
+
+ return availability_zones
+
+
+def create_one_backup(attrs=None):
+ """Create a fake backup.
+
+ :param dict attrs:
+ A dictionary with all attributes
+ :return:
+ A FakeResource object with id, name, volume_id, etc.
+ """
+ attrs = attrs or {}
+
+ # Set default attributes.
+ backup_info = {
+ "id": 'backup-id-' + uuid.uuid4().hex,
+ "name": 'backup-name-' + uuid.uuid4().hex,
+ "volume_id": 'volume-id-' + uuid.uuid4().hex,
+ "snapshot_id": 'snapshot-id' + uuid.uuid4().hex,
+ "description": 'description-' + uuid.uuid4().hex,
+ "object_count": None,
+ "container": 'container-' + uuid.uuid4().hex,
+ "size": random.randint(1, 20),
+ "status": "error",
+ "availability_zone": 'zone' + uuid.uuid4().hex,
+ }
+
+ # Overwrite default attributes.
+ backup_info.update(attrs)
+
+ backup = fakes.FakeResource(info=copy.deepcopy(backup_info), loaded=True)
+ return backup
+
+
+def create_backups(attrs=None, count=2):
+ """Create multiple fake backups.
+
+ :param dict attrs:
+ A dictionary with all attributes
+ :param int count:
+ The number of backups to fake
+ :return:
+ A list of FakeResource objects faking the backups
+ """
+ backups = []
+ for i in range(0, count):
+ backup = create_one_backup(attrs)
+ backups.append(backup)
+
+ return backups
+
+
+def get_backups(backups=None, count=2):
+ """Get an iterable MagicMock object with a list of faked backups.
+
+ If backups list is provided, then initialize the Mock object with the
+ list. Otherwise create one.
+
+ :param List backups:
+ A list of FakeResource objects faking backups
+ :param Integer count:
+ The number of backups to be faked
+ :return
+ An iterable Mock object with side_effect set to a list of faked
+ backups
+ """
+ if backups is None:
+ backups = create_backups(count)
+
+ return mock.Mock(side_effect=backups)
+
+
+def create_backup_record():
+ """Gets a fake backup record for a given backup.
+
+ :return: An "exported" backup record.
+ """
+
+ return {
+ 'backup_service': 'cinder.backup.drivers.swift.SwiftBackupDriver',
+ 'backup_url': 'eyJzdGF0dXMiOiAiYXZh',
+ }
+
+
+def import_backup_record():
+ """Creates a fake backup record import response from a backup.
+
+ :return: The fake backup object that was encoded.
+ """
+ return {
+ 'backup': {
+ 'id': 'backup.id',
+ 'name': 'backup.name',
+ 'links': [
+ {'href': 'link1', 'rel': 'self'},
+ {'href': 'link2', 'rel': 'bookmark'},
+ ],
+ },
+ }
+
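create_backup_record() and import_backup_record() model the two halves of a backup export/import round trip: the exported record (backup_service plus backup_url) and the response an import call would return. A hedged sketch of wiring them onto plain Mocks in a test; the export_record/import_record method names are assumptions here, not taken from this patch:

    # Illustrative only: feed the canned record and response into Mocks.
    from unittest import mock

    backups_mock = mock.Mock()
    backups_mock.export_record.return_value = create_backup_record()
    backups_mock.import_record.return_value = import_backup_record()

    record = backups_mock.export_record('backup-id')
    assert 'backup_url' in record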
+
+def create_one_consistency_group(attrs=None):
+ """Create a fake consistency group.
+
+ :param dict attrs:
+ A dictionary with all attributes
+ :return:
+ A FakeResource object with id, name, description, etc.
+ """
+ attrs = attrs or {}
+
+ # Set default attributes.
+ consistency_group_info = {
+ "id": 'backup-id-' + uuid.uuid4().hex,
+ "name": 'backup-name-' + uuid.uuid4().hex,
+ "description": 'description-' + uuid.uuid4().hex,
+ "status": "error",
+ "availability_zone": 'zone' + uuid.uuid4().hex,
+ "created_at": 'time-' + uuid.uuid4().hex,
+ "volume_types": ['volume-type1'],
+ }
+
+ # Overwrite default attributes.
+ consistency_group_info.update(attrs)
+
+ consistency_group = fakes.FakeResource(
+ info=copy.deepcopy(consistency_group_info), loaded=True
+ )
+ return consistency_group
+
+
+def create_consistency_groups(attrs=None, count=2):
+ """Create multiple fake consistency groups.
+
+ :param dict attrs:
+ A dictionary with all attributes
+ :param int count:
+ The number of consistency groups to fake
+ :return:
+ A list of FakeResource objects faking the consistency groups
+ """
+ consistency_groups = []
+ for i in range(0, count):
+ consistency_group = create_one_consistency_group(attrs)
+ consistency_groups.append(consistency_group)
+
+ return consistency_groups
+
+
+def get_consistency_groups(consistency_groups=None, count=2):
+ """Note:
+
+ Get an iterable MagicMock object with a list of faked
+ consistency_groups.
+
+ If consistency_groups list is provided, then initialize
+ the Mock object with the list. Otherwise create one.
+
+ :param List consistency_groups:
+ A list of FakeResource objects faking consistency_groups
+ :param Integer count:
+ The number of consistency_groups to be faked
+ :return
+ An iterable Mock object with side_effect set to a list of faked
+ consistency_groups
+ """
+ if consistency_groups is None:
+ consistency_groups = create_consistency_groups(count)
+
+ return mock.Mock(side_effect=consistency_groups)
+
+
+def create_one_consistency_group_snapshot(attrs=None):
+ """Create a fake consistency group snapshot.
+
+ :param dict attrs:
+ A dictionary with all attributes
+ :return:
+ A FakeResource object with id, name, description, etc.
+ """
+ attrs = attrs or {}
+
+ # Set default attributes.
+ consistency_group_snapshot_info = {
+ "id": 'id-' + uuid.uuid4().hex,
+ "name": 'backup-name-' + uuid.uuid4().hex,
+ "description": 'description-' + uuid.uuid4().hex,
+ "status": "error",
+ "consistencygroup_id": 'consistency-group-id' + uuid.uuid4().hex,
+ "created_at": 'time-' + uuid.uuid4().hex,
+ }
+
+ # Overwrite default attributes.
+ consistency_group_snapshot_info.update(attrs)
+
+ consistency_group_snapshot = fakes.FakeResource(
+ info=copy.deepcopy(consistency_group_snapshot_info), loaded=True
+ )
+ return consistency_group_snapshot
+
+
+def create_consistency_group_snapshots(attrs=None, count=2):
+ """Create multiple fake consistency group snapshots.
+
+ :param dict attrs:
+ A dictionary with all attributes
+ :param int count:
+ The number of consistency group snapshots to fake
+ :return:
+ A list of FakeResource objects faking the
+ consistency group snapshots
+ """
+ consistency_group_snapshots = []
+ for i in range(0, count):
+ consistency_group_snapshot = create_one_consistency_group_snapshot(
+ attrs,
+ )
+ consistency_group_snapshots.append(consistency_group_snapshot)
+
+ return consistency_group_snapshots
+
+
+def get_consistency_group_snapshots(snapshots=None, count=2):
+ """Get an iterable MagicMock object with a list of faked cgsnapshots.
+
+    If consistency group snapshots list is provided, then initialize
+ the Mock object with the list. Otherwise create one.
+
+ :param List snapshots:
+ A list of FakeResource objects faking consistency group snapshots
+ :param Integer count:
+ The number of consistency group snapshots to be faked
+ :return
+ An iterable Mock object with side_effect set to a list of faked
+ consistency groups
+ """
+ if snapshots is None:
+ snapshots = create_consistency_group_snapshots(count)
+
+ return mock.Mock(side_effect=snapshots)
+
+
+def create_one_extension(attrs=None):
+ """Create a fake extension.
+
+ :param dict attrs:
+ A dictionary with all attributes
+ :return:
+ A FakeResource object with name, namespace, etc.
+ """
+ attrs = attrs or {}
+
+ # Set default attributes.
+ extension_info = {
+ 'name': 'name-' + uuid.uuid4().hex,
+ 'namespace': (
+ 'http://docs.openstack.org/'
+ 'block-service/ext/scheduler-hints/api/v2'
+ ),
+ 'description': 'description-' + uuid.uuid4().hex,
+ 'updated': '2013-04-18T00:00:00+00:00',
+ 'alias': 'OS-SCH-HNT',
+ 'links': (
+ '[{"href":'
+ '"https://github.com/openstack/block-api", "type":'
+ ' "text/html", "rel": "describedby"}]'
+ ),
+ }
+
+ # Overwrite default attributes.
+ extension_info.update(attrs)
+
+ extension = fakes.FakeResource(
+ info=copy.deepcopy(extension_info), loaded=True
+ )
+ return extension
+
+
+def create_one_qos(attrs=None):
+ """Create a fake Qos specification.
+
+ :param dict attrs:
+ A dictionary with all attributes
+ :return:
+ A FakeResource object with id, name, consumer, etc.
+ """
+ attrs = attrs or {}
+
+ # Set default attributes.
+ qos_info = {
+ "id": 'qos-id-' + uuid.uuid4().hex,
+ "name": 'qos-name-' + uuid.uuid4().hex,
+ "consumer": 'front-end',
+ "specs": {"foo": "bar", "iops": "9001"},
+ }
+
+ # Overwrite default attributes.
+ qos_info.update(attrs)
+
+ qos = fakes.FakeResource(info=copy.deepcopy(qos_info), loaded=True)
+ return qos
+
+
+def create_one_qos_association(attrs=None):
+ """Create a fake Qos specification association.
+
+ :param dict attrs:
+ A dictionary with all attributes
+ :return:
+ A FakeResource object with id, name, association_type, etc.
+ """
+ attrs = attrs or {}
+
+ # Set default attributes.
+ qos_association_info = {
+ "id": 'type-id-' + uuid.uuid4().hex,
+ "name": 'type-name-' + uuid.uuid4().hex,
+ "association_type": 'volume_type',
+ }
+
+ # Overwrite default attributes.
+ qos_association_info.update(attrs)
+
+ qos_association = fakes.FakeResource(
+ info=copy.deepcopy(qos_association_info), loaded=True
+ )
+ return qos_association
+
+
+def create_qoses(attrs=None, count=2):
+ """Create multiple fake Qos specifications.
+
+ :param dict attrs:
+ A dictionary with all attributes
+ :param int count:
+ The number of Qos specifications to fake
+ :return:
+ A list of FakeResource objects faking the Qos specifications
+ """
+ qoses = []
+ for i in range(0, count):
+ qos = create_one_qos(attrs)
+ qoses.append(qos)
+
+ return qoses
+
+
+def get_qoses(qoses=None, count=2):
+ """Get an iterable MagicMock object with a list of faked qoses.
+
+ If qoses list is provided, then initialize the Mock object with the
+ list. Otherwise create one.
+
+ :param List qoses:
+ A list of FakeResource objects faking qoses
+ :param Integer count:
+ The number of qoses to be faked
+ :return:
+ An iterable Mock object with side_effect set to a list of faked
+ qoses
+ """
+ if qoses is None:
+ qoses = create_qoses(count)
+
+ return mock.Mock(side_effect=qoses)
+
+
+def create_one_snapshot(attrs=None):
+ """Create a fake snapshot.
+
+ :param dict attrs:
+ A dictionary with all attributes
+ :return:
+ A FakeResource object with id, name, description, etc.
+ """
+ attrs = attrs or {}
+
+ # Set default attributes.
+ snapshot_info = {
+ "id": 'snapshot-id-' + uuid.uuid4().hex,
+ "name": 'snapshot-name-' + uuid.uuid4().hex,
+ "description": 'snapshot-description-' + uuid.uuid4().hex,
+ "size": 10,
+ "status": "available",
+ "metadata": {"foo": "bar"},
+ "created_at": "2015-06-03T18:49:19.000000",
+ "volume_id": 'vloume-id-' + uuid.uuid4().hex,
+ }
+
+ # Overwrite default attributes.
+ snapshot_info.update(attrs)
+
+ snapshot = fakes.FakeResource(
+ info=copy.deepcopy(snapshot_info), loaded=True
+ )
+ return snapshot
+
+
+def create_snapshots(attrs=None, count=2):
+ """Create multiple fake snapshots.
+
+ :param dict attrs:
+ A dictionary with all attributes
+ :param int count:
+ The number of snapshots to fake
+ :return:
+ A list of FakeResource objects faking the snapshots
+ """
+ snapshots = []
+ for i in range(0, count):
+ snapshot = create_one_snapshot(attrs)
+ snapshots.append(snapshot)
+
+ return snapshots
+
+
+def get_snapshots(snapshots=None, count=2):
+ """Get an iterable MagicMock object with a list of faked snapshots.
+
+ If snapshots list is provided, then initialize the Mock object with the
+ list. Otherwise create one.
+
+ :param List snapshots:
+ A list of FakeResource objects faking snapshots
+ :param Integer count:
+ The number of snapshots to be faked
+ :return:
+ An iterable Mock object with side_effect set to a list of faked
+ snapshots
+ """
+ if snapshots is None:
+ snapshots = create_snapshots(count)
+
+ return mock.Mock(side_effect=snapshots)
+
+
+def create_one_volume_type(attrs=None, methods=None):
+ """Create a fake volume type.
+
+ :param dict attrs:
+ A dictionary with all attributes
+ :param dict methods:
+ A dictionary with all methods
+ :return:
+ A FakeResource object with id, name, description, etc.
+ """
+ attrs = attrs or {}
+ methods = methods or {}
+
+ # Set default attributes.
+ volume_type_info = {
+ "id": 'type-id-' + uuid.uuid4().hex,
+ "name": 'type-name-' + uuid.uuid4().hex,
+ "description": 'type-description-' + uuid.uuid4().hex,
+ "extra_specs": {"foo": "bar"},
+ "is_public": True,
+ }
+
+ # Overwrite default attributes.
+ volume_type_info.update(attrs)
+
+ volume_type = fakes.FakeResource(
+ info=copy.deepcopy(volume_type_info), methods=methods, loaded=True
+ )
+ return volume_type
+
+
+def create_volume_types(attrs=None, count=2):
+ """Create multiple fake volume_types.
+
+ :param dict attrs:
+ A dictionary with all attributes
+ :param int count:
+ The number of types to fake
+ :return:
+ A list of FakeResource objects faking the types
+ """
+ volume_types = []
+ for i in range(0, count):
+ volume_type = create_one_volume_type(attrs)
+ volume_types.append(volume_type)
+
+ return volume_types
+
+
+def get_volume_types(volume_types=None, count=2):
+ """Get an iterable MagicMock object with a list of faked volume types.
+
+ If volume_types list is provided, then initialize the Mock object with
+ the list. Otherwise create one.
+
+ :param List volume_types:
+ A list of FakeResource objects faking volume types
+ :param Integer count:
+ The number of volume types to be faked
+ :return:
+ An iterable Mock object with side_effect set to a list of faked
+ volume types
+ """
+ if volume_types is None:
+ volume_types = create_volume_types(count)
+
+ return mock.Mock(side_effect=volume_types)
+
+
+def create_one_encryption_volume_type(attrs=None):
+ """Create a fake encryption volume type.
+
+ :param dict attrs:
+ A dictionary with all attributes
+ :return:
+ A FakeResource object with volume_type_id etc.
+ """
+ attrs = attrs or {}
+
+ # Set default attributes.
+ encryption_info = {
+ "volume_type_id": 'type-id-' + uuid.uuid4().hex,
+ 'provider': 'LuksEncryptor',
+ 'cipher': None,
+ 'key_size': None,
+ 'control_location': 'front-end',
+ }
+
+ # Overwrite default attributes.
+ encryption_info.update(attrs)
+
+ encryption_type = fakes.FakeResource(
+ info=copy.deepcopy(encryption_info), loaded=True
+ )
+ return encryption_type
+
+
+def create_one_vol_quota(attrs=None):
+ """Create one quota"""
+ attrs = attrs or {}
+
+ quota_attrs = {
+ 'id': 'project-id-' + uuid.uuid4().hex,
+ 'backups': 100,
+ 'backup_gigabytes': 100,
+ 'gigabytes': 10,
+ 'per_volume_gigabytes': 10,
+ 'snapshots': 0,
+ 'volumes': 10,
+ }
+
+ quota_attrs.update(attrs)
+
+ quota = fakes.FakeResource(info=copy.deepcopy(quota_attrs), loaded=True)
+ quota.project_id = quota_attrs['id']
+
+ return quota
+
+
+def create_one_default_vol_quota(attrs=None):
+ """Create one quota"""
+ attrs = attrs or {}
+
+ quota_attrs = {
+ 'id': 'project-id-' + uuid.uuid4().hex,
+ 'backups': 100,
+ 'backup_gigabytes': 100,
+ 'gigabytes': 100,
+ 'per_volume_gigabytes': 100,
+ 'snapshots': 100,
+ 'volumes': 100,
+ }
+
+ quota_attrs.update(attrs)
+
+ quota = fakes.FakeResource(info=copy.deepcopy(quota_attrs), loaded=True)
+ quota.project_id = quota_attrs['id']
+
+ return quota
+
+
+def create_one_detailed_quota(attrs=None):
+ """Create one quota"""
+ attrs = attrs or {}
- quota_attrs.update(attrs)
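+ # The per-type keys (for example 'volumes_lvmdriver-1' and
+ # 'gigabytes___DEFAULT__') mirror the detailed quota output, where each
+ # resource reports a limit/in_use/reserved triple per volume type as well
+ # as globally.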
+ quota_attrs = {
+ 'volumes': {'limit': 3, 'in_use': 1, 'reserved': 0},
+ 'per_volume_gigabytes': {'limit': -1, 'in_use': 0, 'reserved': 0},
+ 'snapshots': {'limit': 10, 'in_use': 0, 'reserved': 0},
+ 'gigabytes': {'limit': 1000, 'in_use': 5, 'reserved': 0},
+ 'backups': {'limit': 10, 'in_use': 0, 'reserved': 0},
+ 'backup_gigabytes': {'limit': 1000, 'in_use': 0, 'reserved': 0},
+ 'volumes_lvmdriver-1': {'limit': -1, 'in_use': 1, 'reserved': 0},
+ 'gigabytes_lvmdriver-1': {'limit': -1, 'in_use': 5, 'reserved': 0},
+ 'snapshots_lvmdriver-1': {'limit': -1, 'in_use': 0, 'reserved': 0},
+ 'volumes___DEFAULT__': {'limit': -1, 'in_use': 0, 'reserved': 0},
+ 'gigabytes___DEFAULT__': {'limit': -1, 'in_use': 0, 'reserved': 0},
+ 'snapshots___DEFAULT__': {'limit': -1, 'in_use': 0, 'reserved': 0},
+ 'groups': {'limit': 10, 'in_use': 0, 'reserved': 0},
+ 'id': uuid.uuid4().hex,
+ }
+ quota_attrs.update(attrs)
- quota = fakes.FakeResource(
- info=copy.deepcopy(quota_attrs),
- loaded=True)
- quota.project_id = quota_attrs['id']
+ quota = fakes.FakeResource(info=copy.deepcopy(quota_attrs), loaded=True)
- return quota
+ return quota
class FakeLimits(object):
@@ -1208,37 +1146,39 @@ class FakeLimits(object):
'maxTotalVolumes': 10,
'totalVolumesUsed': 4,
'totalBackupsUsed': 0,
- 'totalGigabytesUsed': 35
+ 'totalGigabytesUsed': 35,
}
absolute_attrs = absolute_attrs or {}
self.absolute_limits_attrs.update(absolute_attrs)
- self.rate_limits_attrs = [{
- "uri": "*",
- "limit": [
- {
- "value": 10,
- "verb": "POST",
- "remaining": 2,
- "unit": "MINUTE",
- "next-available": "2011-12-15T22:42:45Z"
- },
- {
- "value": 10,
- "verb": "PUT",
- "remaining": 2,
- "unit": "MINUTE",
- "next-available": "2011-12-15T22:42:45Z"
- },
- {
- "value": 100,
- "verb": "DELETE",
- "remaining": 100,
- "unit": "MINUTE",
- "next-available": "2011-12-15T22:42:45Z"
- }
- ]
- }]
+ self.rate_limits_attrs = [
+ {
+ "uri": "*",
+ "limit": [
+ {
+ "value": 10,
+ "verb": "POST",
+ "remaining": 2,
+ "unit": "MINUTE",
+ "next-available": "2011-12-15T22:42:45Z",
+ },
+ {
+ "value": 10,
+ "verb": "PUT",
+ "remaining": 2,
+ "unit": "MINUTE",
+ "next-available": "2011-12-15T22:42:45Z",
+ },
+ {
+ "value": 100,
+ "verb": "DELETE",
+ "remaining": 100,
+ "unit": "MINUTE",
+ "next-available": "2011-12-15T22:42:45Z",
+ },
+ ],
+ }
+ ]
@property
def absolute(self):
@@ -1256,18 +1196,30 @@ class FakeLimits(object):
for group in self.rate_limits_attrs:
uri = group['uri']
for rate in group['limit']:
- yield FakeRateLimit(rate['verb'], uri, rate['value'],
- rate['remaining'], rate['unit'],
- rate['next-available'])
+ yield FakeRateLimit(
+ rate['verb'],
+ uri,
+ rate['value'],
+ rate['remaining'],
+ rate['unit'],
+ rate['next-available'],
+ )
def rate_limits(self):
reference_data = []
for group in self.rate_limits_attrs:
uri = group['uri']
for rate in group['limit']:
- reference_data.append((rate['verb'], uri, rate['value'],
- rate['remaining'], rate['unit'],
- rate['next-available']))
+ reference_data.append(
+ (
+ rate['verb'],
+ uri,
+ rate['value'],
+ rate['remaining'],
+ rate['unit'],
+ rate['next-available'],
+ )
+ )
return reference_data
@@ -1282,8 +1234,7 @@ class FakeAbsoluteLimit(object):
class FakeRateLimit(object):
"""Data model that represents a flattened view of a single rate limit."""
- def __init__(self, verb, uri, value, remain,
- unit, next_available):
+ def __init__(self, verb, uri, value, remain, unit, next_available):
self.verb = verb
self.uri = uri
self.value = value
diff --git a/openstackclient/tests/unit/volume/v2/test_backup_record.py b/openstackclient/tests/unit/volume/v2/test_backup_record.py
index 0e24174c..aa9a25a2 100644
--- a/openstackclient/tests/unit/volume/v2/test_backup_record.py
+++ b/openstackclient/tests/unit/volume/v2/test_backup_record.py
@@ -19,7 +19,7 @@ from openstackclient.volume.v2 import backup_record
class TestBackupRecord(volume_fakes.TestVolume):
def setUp(self):
- super(TestBackupRecord, self).setUp()
+ super().setUp()
self.backups_mock = self.app.client_manager.volume.backups
self.backups_mock.reset_mock()
@@ -27,12 +27,13 @@ class TestBackupRecord(volume_fakes.TestVolume):
class TestBackupRecordExport(TestBackupRecord):
- new_backup = volume_fakes.FakeBackup.create_one_backup(
- attrs={'volume_id': 'a54708a2-0388-4476-a909-09579f885c25'})
- new_record = volume_fakes.FakeBackup.create_backup_record()
+ new_backup = volume_fakes.create_one_backup(
+ attrs={'volume_id': 'a54708a2-0388-4476-a909-09579f885c25'},
+ )
+ new_record = volume_fakes.create_backup_record()
def setUp(self):
- super(TestBackupRecordExport, self).setUp()
+ super().setUp()
self.backups_mock.export_record.return_value = self.new_record
self.backups_mock.get.return_value = self.new_backup
@@ -81,12 +82,13 @@ class TestBackupRecordExport(TestBackupRecord):
class TestBackupRecordImport(TestBackupRecord):
- new_backup = volume_fakes.FakeBackup.create_one_backup(
- attrs={'volume_id': 'a54708a2-0388-4476-a909-09579f885c25'})
- new_import = volume_fakes.FakeBackup.import_backup_record()
+ new_backup = volume_fakes.create_one_backup(
+ attrs={'volume_id': 'a54708a2-0388-4476-a909-09579f885c25'},
+ )
+ new_import = volume_fakes.import_backup_record()
def setUp(self):
- super(TestBackupRecordImport, self).setUp()
+ super().setUp()
self.backups_mock.import_record.return_value = self.new_import
diff --git a/openstackclient/tests/unit/volume/v2/test_consistency_group.py b/openstackclient/tests/unit/volume/v2/test_consistency_group.py
index 434d0cf9..c5537ed8 100644
--- a/openstackclient/tests/unit/volume/v2/test_consistency_group.py
+++ b/openstackclient/tests/unit/volume/v2/test_consistency_group.py
@@ -26,7 +26,7 @@ from openstackclient.volume.v2 import consistency_group
class TestConsistencyGroup(volume_fakes.TestVolume):
def setUp(self):
- super(TestConsistencyGroup, self).setUp()
+ super().setUp()
# Get a shortcut to the TransferManager Mock
self.consistencygroups_mock = (
@@ -47,11 +47,10 @@ class TestConsistencyGroup(volume_fakes.TestVolume):
class TestConsistencyGroupAddVolume(TestConsistencyGroup):
- _consistency_group = (
- volume_fakes.FakeConsistencyGroup.create_one_consistency_group())
+ _consistency_group = volume_fakes.create_one_consistency_group()
def setUp(self):
- super(TestConsistencyGroupAddVolume, self).setUp()
+ super().setUp()
self.consistencygroups_mock.get.return_value = (
self._consistency_group)
@@ -60,7 +59,7 @@ class TestConsistencyGroupAddVolume(TestConsistencyGroup):
consistency_group.AddVolumeToConsistencyGroup(self.app, None)
def test_add_one_volume_to_consistency_group(self):
- volume = volume_fakes.FakeVolume.create_one_volume()
+ volume = volume_fakes.create_one_volume()
self.volumes_mock.get.return_value = volume
arglist = [
self._consistency_group.id,
@@ -85,8 +84,8 @@ class TestConsistencyGroupAddVolume(TestConsistencyGroup):
self.assertIsNone(result)
def test_add_multiple_volumes_to_consistency_group(self):
- volumes = volume_fakes.FakeVolume.create_volumes(count=2)
- self.volumes_mock.get = volume_fakes.FakeVolume.get_volumes(volumes)
+ volumes = volume_fakes.create_volumes(count=2)
+ self.volumes_mock.get = volume_fakes.get_volumes(volumes)
arglist = [
self._consistency_group.id,
volumes[0].id,
@@ -112,8 +111,9 @@ class TestConsistencyGroupAddVolume(TestConsistencyGroup):
@mock.patch.object(consistency_group.LOG, 'error')
def test_add_multiple_volumes_to_consistency_group_with_exception(
- self, mock_error):
- volume = volume_fakes.FakeVolume.create_one_volume()
+ self, mock_error,
+ ):
+ volume = volume_fakes.create_one_volume()
arglist = [
self._consistency_group.id,
volume.id,
@@ -148,13 +148,10 @@ class TestConsistencyGroupAddVolume(TestConsistencyGroup):
class TestConsistencyGroupCreate(TestConsistencyGroup):
- volume_type = volume_fakes.FakeVolumeType.create_one_volume_type()
- new_consistency_group = (
- volume_fakes.FakeConsistencyGroup.create_one_consistency_group())
+ volume_type = volume_fakes.create_one_volume_type()
+ new_consistency_group = volume_fakes.create_one_consistency_group()
consistency_group_snapshot = (
- volume_fakes.
- FakeConsistencyGroupSnapshot.
- create_one_consistency_group_snapshot()
+ volume_fakes.create_one_consistency_group_snapshot()
)
columns = (
@@ -177,7 +174,7 @@ class TestConsistencyGroupCreate(TestConsistencyGroup):
)
def setUp(self):
- super(TestConsistencyGroupCreate, self).setUp()
+ super().setUp()
self.consistencygroups_mock.create.return_value = (
self.new_consistency_group)
self.consistencygroups_mock.create_from_src.return_value = (
@@ -260,7 +257,7 @@ class TestConsistencyGroupCreate(TestConsistencyGroup):
self.new_consistency_group.name,
]
verifylist = [
- ('consistency_group_source', self.new_consistency_group.id),
+ ('source', self.new_consistency_group.id),
('description', self.new_consistency_group.description),
('name', self.new_consistency_group.name),
]
@@ -288,7 +285,7 @@ class TestConsistencyGroupCreate(TestConsistencyGroup):
self.new_consistency_group.name,
]
verifylist = [
- ('consistency_group_snapshot', self.consistency_group_snapshot.id),
+ ('snapshot', self.consistency_group_snapshot.id),
('description', self.new_consistency_group.description),
('name', self.new_consistency_group.name),
]
@@ -313,13 +310,14 @@ class TestConsistencyGroupCreate(TestConsistencyGroup):
class TestConsistencyGroupDelete(TestConsistencyGroup):
consistency_groups =\
- volume_fakes.FakeConsistencyGroup.create_consistency_groups(count=2)
+ volume_fakes.create_consistency_groups(count=2)
def setUp(self):
- super(TestConsistencyGroupDelete, self).setUp()
+ super().setUp()
- self.consistencygroups_mock.get = volume_fakes.FakeConsistencyGroup.\
- get_consistency_groups(self.consistency_groups)
+ self.consistencygroups_mock.get = volume_fakes.get_consistency_groups(
+ self.consistency_groups,
+ )
self.consistencygroups_mock.delete.return_value = None
# Get the command object to mock
@@ -409,8 +407,7 @@ class TestConsistencyGroupDelete(TestConsistencyGroup):
class TestConsistencyGroupList(TestConsistencyGroup):
- consistency_groups = (
- volume_fakes.FakeConsistencyGroup.create_consistency_groups(count=2))
+ consistency_groups = volume_fakes.create_consistency_groups(count=2)
columns = [
'ID',
@@ -444,7 +441,7 @@ class TestConsistencyGroupList(TestConsistencyGroup):
))
def setUp(self):
- super(TestConsistencyGroupList, self).setUp()
+ super().setUp()
self.consistencygroups_mock.list.return_value = self.consistency_groups
# Get the command to test
@@ -502,11 +499,10 @@ class TestConsistencyGroupList(TestConsistencyGroup):
class TestConsistencyGroupRemoveVolume(TestConsistencyGroup):
- _consistency_group = (
- volume_fakes.FakeConsistencyGroup.create_one_consistency_group())
+ _consistency_group = volume_fakes.create_one_consistency_group()
def setUp(self):
- super(TestConsistencyGroupRemoveVolume, self).setUp()
+ super().setUp()
self.consistencygroups_mock.get.return_value = (
self._consistency_group)
@@ -515,7 +511,7 @@ class TestConsistencyGroupRemoveVolume(TestConsistencyGroup):
consistency_group.RemoveVolumeFromConsistencyGroup(self.app, None)
def test_remove_one_volume_from_consistency_group(self):
- volume = volume_fakes.FakeVolume.create_one_volume()
+ volume = volume_fakes.create_one_volume()
self.volumes_mock.get.return_value = volume
arglist = [
self._consistency_group.id,
@@ -540,8 +536,8 @@ class TestConsistencyGroupRemoveVolume(TestConsistencyGroup):
self.assertIsNone(result)
def test_remove_multi_volumes_from_consistency_group(self):
- volumes = volume_fakes.FakeVolume.create_volumes(count=2)
- self.volumes_mock.get = volume_fakes.FakeVolume.get_volumes(volumes)
+ volumes = volume_fakes.create_volumes(count=2)
+ self.volumes_mock.get = volume_fakes.get_volumes(volumes)
arglist = [
self._consistency_group.id,
volumes[0].id,
@@ -567,8 +563,10 @@ class TestConsistencyGroupRemoveVolume(TestConsistencyGroup):
@mock.patch.object(consistency_group.LOG, 'error')
def test_remove_multiple_volumes_from_consistency_group_with_exception(
- self, mock_error):
- volume = volume_fakes.FakeVolume.create_one_volume()
+ self,
+ mock_error,
+ ):
+ volume = volume_fakes.create_one_volume()
arglist = [
self._consistency_group.id,
volume.id,
@@ -603,11 +601,10 @@ class TestConsistencyGroupRemoveVolume(TestConsistencyGroup):
class TestConsistencyGroupSet(TestConsistencyGroup):
- consistency_group = (
- volume_fakes.FakeConsistencyGroup.create_one_consistency_group())
+ consistency_group = volume_fakes.create_one_consistency_group()
def setUp(self):
- super(TestConsistencyGroupSet, self).setUp()
+ super().setUp()
self.consistencygroups_mock.get.return_value = (
self.consistency_group)
@@ -677,10 +674,9 @@ class TestConsistencyGroupShow(TestConsistencyGroup):
)
def setUp(self):
- super(TestConsistencyGroupShow, self).setUp()
+ super().setUp()
- self.consistency_group = (
- volume_fakes.FakeConsistencyGroup.create_one_consistency_group())
+ self.consistency_group = volume_fakes.create_one_consistency_group()
self.data = (
self.consistency_group.availability_zone,
self.consistency_group.created_at,
diff --git a/openstackclient/tests/unit/volume/v2/test_consistency_group_snapshot.py b/openstackclient/tests/unit/volume/v2/test_consistency_group_snapshot.py
index 2202b85b..e3c738c8 100644
--- a/openstackclient/tests/unit/volume/v2/test_consistency_group_snapshot.py
+++ b/openstackclient/tests/unit/volume/v2/test_consistency_group_snapshot.py
@@ -35,12 +35,9 @@ class TestConsistencyGroupSnapshot(volume_fakes.TestVolume):
class TestConsistencyGroupSnapshotCreate(TestConsistencyGroupSnapshot):
_consistency_group_snapshot = (
- volume_fakes.
- FakeConsistencyGroupSnapshot.
- create_one_consistency_group_snapshot()
+ volume_fakes.create_one_consistency_group_snapshot()
)
- consistency_group = (
- volume_fakes.FakeConsistencyGroup.create_one_consistency_group())
+ consistency_group = volume_fakes.create_one_consistency_group()
columns = (
'consistencygroup_id',
@@ -124,16 +121,16 @@ class TestConsistencyGroupSnapshotCreate(TestConsistencyGroupSnapshot):
class TestConsistencyGroupSnapshotDelete(TestConsistencyGroupSnapshot):
consistency_group_snapshots = (
- volume_fakes.FakeConsistencyGroupSnapshot.
- create_consistency_group_snapshots(count=2)
+ volume_fakes.create_consistency_group_snapshots(count=2)
)
def setUp(self):
super(TestConsistencyGroupSnapshotDelete, self).setUp()
self.cgsnapshots_mock.get = (
- volume_fakes.FakeConsistencyGroupSnapshot.
- get_consistency_group_snapshots(self.consistency_group_snapshots)
+ volume_fakes.get_consistency_group_snapshots(
+ self.consistency_group_snapshots
+ )
)
self.cgsnapshots_mock.delete.return_value = None
@@ -178,12 +175,9 @@ class TestConsistencyGroupSnapshotDelete(TestConsistencyGroupSnapshot):
class TestConsistencyGroupSnapshotList(TestConsistencyGroupSnapshot):
consistency_group_snapshots = (
- volume_fakes.FakeConsistencyGroupSnapshot.
- create_consistency_group_snapshots(count=2)
- )
- consistency_group = (
- volume_fakes.FakeConsistencyGroup.create_one_consistency_group()
+ volume_fakes.create_consistency_group_snapshots(count=2)
)
+ consistency_group = volume_fakes.create_one_consistency_group()
columns = [
'ID',
@@ -306,9 +300,7 @@ class TestConsistencyGroupSnapshotList(TestConsistencyGroupSnapshot):
class TestConsistencyGroupSnapshotShow(TestConsistencyGroupSnapshot):
_consistency_group_snapshot = (
- volume_fakes.
- FakeConsistencyGroupSnapshot.
- create_one_consistency_group_snapshot()
+ volume_fakes.create_one_consistency_group_snapshot()
)
columns = (
diff --git a/openstackclient/tests/unit/volume/v2/test_qos_specs.py b/openstackclient/tests/unit/volume/v2/test_qos_specs.py
index 41c18014..6f258dd5 100644
--- a/openstackclient/tests/unit/volume/v2/test_qos_specs.py
+++ b/openstackclient/tests/unit/volume/v2/test_qos_specs.py
@@ -39,8 +39,8 @@ class TestQos(volume_fakes.TestVolume):
class TestQosAssociate(TestQos):
- volume_type = volume_fakes.FakeVolumeType.create_one_volume_type()
- qos_spec = volume_fakes.FakeQos.create_one_qos()
+ volume_type = volume_fakes.create_one_volume_type()
+ qos_spec = volume_fakes.create_one_qos()
def setUp(self):
super(TestQosAssociate, self).setUp()
@@ -82,7 +82,7 @@ class TestQosCreate(TestQos):
def setUp(self):
super(TestQosCreate, self).setUp()
- self.new_qos_spec = volume_fakes.FakeQos.create_one_qos()
+ self.new_qos_spec = volume_fakes.create_one_qos()
self.qos_mock.create.return_value = self.new_qos_spec
self.data = (
@@ -164,13 +164,13 @@ class TestQosCreate(TestQos):
class TestQosDelete(TestQos):
- qos_specs = volume_fakes.FakeQos.create_qoses(count=2)
+ qos_specs = volume_fakes.create_qoses(count=2)
def setUp(self):
super(TestQosDelete, self).setUp()
self.qos_mock.get = (
- volume_fakes.FakeQos.get_qoses(self.qos_specs))
+ volume_fakes.get_qoses(self.qos_specs))
# Get the command object to test
self.cmd = qos_specs.DeleteQos(self.app, None)
@@ -255,8 +255,8 @@ class TestQosDelete(TestQos):
class TestQosDisassociate(TestQos):
- volume_type = volume_fakes.FakeVolumeType.create_one_volume_type()
- qos_spec = volume_fakes.FakeQos.create_one_qos()
+ volume_type = volume_fakes.create_one_volume_type()
+ qos_spec = volume_fakes.create_one_qos()
def setUp(self):
super(TestQosDisassociate, self).setUp()
@@ -303,8 +303,8 @@ class TestQosDisassociate(TestQos):
class TestQosList(TestQos):
- qos_specs = volume_fakes.FakeQos.create_qoses(count=2)
- qos_association = volume_fakes.FakeQos.create_one_qos_association()
+ qos_specs = volume_fakes.create_qoses(count=2)
+ qos_association = volume_fakes.create_one_qos_association()
columns = (
'ID',
@@ -374,7 +374,7 @@ class TestQosList(TestQos):
class TestQosSet(TestQos):
- qos_spec = volume_fakes.FakeQos.create_one_qos()
+ qos_spec = volume_fakes.create_one_qos()
def setUp(self):
super(TestQosSet, self).setUp()
@@ -406,8 +406,8 @@ class TestQosSet(TestQos):
class TestQosShow(TestQos):
- qos_spec = volume_fakes.FakeQos.create_one_qos()
- qos_association = volume_fakes.FakeQos.create_one_qos_association()
+ qos_spec = volume_fakes.create_one_qos()
+ qos_association = volume_fakes.create_one_qos_association()
columns = (
'associations',
@@ -454,7 +454,7 @@ class TestQosShow(TestQos):
class TestQosUnset(TestQos):
- qos_spec = volume_fakes.FakeQos.create_one_qos()
+ qos_spec = volume_fakes.create_one_qos()
def setUp(self):
super(TestQosUnset, self).setUp()
diff --git a/openstackclient/tests/unit/volume/v2/test_service.py b/openstackclient/tests/unit/volume/v2/test_service.py
index 3e9b2df9..e9e39f41 100644
--- a/openstackclient/tests/unit/volume/v2/test_service.py
+++ b/openstackclient/tests/unit/volume/v2/test_service.py
@@ -14,14 +14,14 @@
from osc_lib import exceptions
-from openstackclient.tests.unit.volume.v2 import fakes as service_fakes
+from openstackclient.tests.unit.volume.v2 import fakes as volume_fakes
from openstackclient.volume.v2 import service
-class TestService(service_fakes.TestVolume):
+class TestService(volume_fakes.TestVolume):
def setUp(self):
- super(TestService, self).setUp()
+ super().setUp()
# Get a shortcut to the ServiceManager Mock
self.service_mock = self.app.client_manager.volume.services
@@ -31,10 +31,10 @@ class TestService(service_fakes.TestVolume):
class TestServiceList(TestService):
# The service to be listed
- services = service_fakes.FakeService.create_one_service()
+ services = volume_fakes.create_one_service()
def setUp(self):
- super(TestServiceList, self).setUp()
+ super().setUp()
self.service_mock.list.return_value = [self.services]
@@ -144,10 +144,10 @@ class TestServiceList(TestService):
class TestServiceSet(TestService):
- service = service_fakes.FakeService.create_one_service()
+ service = volume_fakes.create_one_service()
def setUp(self):
- super(TestServiceSet, self).setUp()
+ super().setUp()
self.service_mock.enable.return_value = self.service
self.service_mock.disable.return_value = self.service
diff --git a/openstackclient/tests/unit/volume/v2/test_type.py b/openstackclient/tests/unit/volume/v2/test_type.py
index d94dc1e0..1cb46c45 100644
--- a/openstackclient/tests/unit/volume/v2/test_type.py
+++ b/openstackclient/tests/unit/volume/v2/test_type.py
@@ -28,7 +28,7 @@ from openstackclient.volume.v2 import volume_type
class TestType(volume_fakes.TestVolume):
def setUp(self):
- super(TestType, self).setUp()
+ super().setUp()
self.types_mock = self.app.client_manager.volume.volume_types
self.types_mock.reset_mock()
@@ -56,10 +56,9 @@ class TestTypeCreate(TestType):
)
def setUp(self):
- super(TestTypeCreate, self).setUp()
+ super().setUp()
- self.new_volume_type = \
- volume_fakes.FakeVolumeType.create_one_volume_type()
+ self.new_volume_type = volume_fakes.create_one_volume_type()
self.data = (
self.new_volume_type.description,
self.new_volume_type.id,
@@ -144,12 +143,12 @@ class TestTypeCreate(TestType):
'key_size': '128',
'control_location': 'front-end',
}
- encryption_type = \
- volume_fakes.FakeVolumeType.create_one_encryption_volume_type(
- attrs=encryption_info)
- self.new_volume_type = \
- volume_fakes.FakeVolumeType.create_one_volume_type(
- attrs={'encryption': encryption_info})
+ encryption_type = volume_fakes.create_one_encryption_volume_type(
+ attrs=encryption_info,
+ )
+ self.new_volume_type = volume_fakes.create_one_volume_type(
+ attrs={'encryption': encryption_info},
+ )
self.types_mock.create.return_value = self.new_volume_type
self.encryption_types_mock.create.return_value = encryption_type
encryption_columns = (
@@ -203,13 +202,14 @@ class TestTypeCreate(TestType):
class TestTypeDelete(TestType):
- volume_types = volume_fakes.FakeVolumeType.create_volume_types(count=2)
+ volume_types = volume_fakes.create_volume_types(count=2)
def setUp(self):
- super(TestTypeDelete, self).setUp()
+ super().setUp()
- self.types_mock.get = volume_fakes.FakeVolumeType.get_volume_types(
- self.volume_types)
+ self.types_mock.get = volume_fakes.get_volume_types(
+ self.volume_types,
+ )
self.types_mock.delete.return_value = None
# Get the command object to mock
@@ -278,7 +278,7 @@ class TestTypeDelete(TestType):
class TestTypeList(TestType):
- volume_types = volume_fakes.FakeVolumeType.create_volume_types()
+ volume_types = volume_fakes.create_volume_types()
columns = [
"ID",
@@ -312,7 +312,7 @@ class TestTypeList(TestType):
))
def setUp(self):
- super(TestTypeList, self).setUp()
+ super().setUp()
self.types_mock.list.return_value = self.volume_types
self.types_mock.default.return_value = self.volume_types[0]
@@ -388,9 +388,9 @@ class TestTypeList(TestType):
self.assertCountEqual(self.data_with_default_type, list(data))
def test_type_list_with_encryption(self):
- encryption_type = \
- volume_fakes.FakeVolumeType.create_one_encryption_volume_type(
- attrs={'volume_type_id': self.volume_types[0].id})
+ encryption_type = volume_fakes.create_one_encryption_volume_type(
+ attrs={'volume_type_id': self.volume_types[0].id},
+ )
encryption_info = {
'provider': 'LuksEncryptor',
'cipher': None,
@@ -436,11 +436,12 @@ class TestTypeList(TestType):
class TestTypeSet(TestType):
project = identity_fakes.FakeProject.create_one_project()
- volume_type = volume_fakes.FakeVolumeType.create_one_volume_type(
- methods={'set_keys': None})
+ volume_type = volume_fakes.create_one_volume_type(
+ methods={'set_keys': None},
+ )
def setUp(self):
- super(TestTypeSet, self).setUp()
+ super().setUp()
self.types_mock.get.return_value = self.volume_type
@@ -685,9 +686,9 @@ class TestTypeShow(TestType):
)
def setUp(self):
- super(TestTypeShow, self).setUp()
+ super().setUp()
- self.volume_type = volume_fakes.FakeVolumeType.create_one_volume_type()
+ self.volume_type = volume_fakes.create_one_volume_type()
self.data = (
None,
self.volume_type.description,
@@ -727,13 +728,20 @@ class TestTypeShow(TestType):
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
- private_type = volume_fakes.FakeVolumeType.create_one_volume_type(
- attrs={'is_public': False})
- type_access_list = volume_fakes.FakeTypeAccess.create_one_type_access()
- with mock.patch.object(self.types_mock, 'get',
- return_value=private_type):
- with mock.patch.object(self.types_access_mock, 'list',
- return_value=[type_access_list]):
+ private_type = volume_fakes.create_one_volume_type(
+ attrs={'is_public': False},
+ )
+ type_access_list = volume_fakes.create_one_type_access()
+ with mock.patch.object(
+ self.types_mock,
+ 'get',
+ return_value=private_type,
+ ):
+ with mock.patch.object(
+ self.types_access_mock,
+ 'list',
+ return_value=[type_access_list],
+ ):
columns, data = self.cmd.take_action(parsed_args)
self.types_mock.get.assert_called_once_with(
self.volume_type.id)
@@ -760,8 +768,9 @@ class TestTypeShow(TestType):
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
- private_type = volume_fakes.FakeVolumeType.create_one_volume_type(
- attrs={'is_public': False})
+ private_type = volume_fakes.create_one_volume_type(
+ attrs={'is_public': False},
+ )
with mock.patch.object(self.types_mock, 'get',
return_value=private_type):
with mock.patch.object(self.types_access_mock, 'list',
@@ -784,16 +793,16 @@ class TestTypeShow(TestType):
self.assertCountEqual(private_type_data, data)
def test_type_show_with_encryption(self):
- encryption_type = \
- volume_fakes.FakeVolumeType.create_one_encryption_volume_type()
+ encryption_type = volume_fakes.create_one_encryption_volume_type()
encryption_info = {
'provider': 'LuksEncryptor',
'cipher': None,
'key_size': None,
'control_location': 'front-end',
}
- self.volume_type = volume_fakes.FakeVolumeType.create_one_volume_type(
- attrs={'encryption': encryption_info})
+ self.volume_type = volume_fakes.create_one_volume_type(
+ attrs={'encryption': encryption_info},
+ )
self.types_mock.get.return_value = self.volume_type
self.encryption_types_mock.get.return_value = encryption_type
encryption_columns = (
@@ -834,11 +843,12 @@ class TestTypeShow(TestType):
class TestTypeUnset(TestType):
project = identity_fakes.FakeProject.create_one_project()
- volume_type = volume_fakes.FakeVolumeType.create_one_volume_type(
- methods={'unset_keys': None})
+ volume_type = volume_fakes.create_one_volume_type(
+ methods={'unset_keys': None},
+ )
def setUp(self):
- super(TestTypeUnset, self).setUp()
+ super().setUp()
self.types_mock.get.return_value = self.volume_type
@@ -936,7 +946,7 @@ class TestTypeUnset(TestType):
class TestColumns(TestType):
def test_encryption_info_column_with_info(self):
- fake_volume_type = volume_fakes.FakeVolumeType.create_one_volume_type()
+ fake_volume_type = volume_fakes.create_one_volume_type()
type_id = fake_volume_type.id
encryption_info = {
@@ -952,7 +962,7 @@ class TestColumns(TestType):
self.assertEqual(encryption_info, col.machine_readable())
def test_encryption_info_column_without_info(self):
- fake_volume_type = volume_fakes.FakeVolumeType.create_one_volume_type()
+ fake_volume_type = volume_fakes.create_one_volume_type()
type_id = fake_volume_type.id
col = volume_type.EncryptionInfoColumn(type_id, {})
diff --git a/openstackclient/tests/unit/volume/v2/test_volume.py b/openstackclient/tests/unit/volume/v2/test_volume.py
index ec82e674..c930002f 100644
--- a/openstackclient/tests/unit/volume/v2/test_volume.py
+++ b/openstackclient/tests/unit/volume/v2/test_volume.py
@@ -16,6 +16,7 @@ import argparse
from unittest import mock
from unittest.mock import call
+from cinderclient import api_versions
from osc_lib.cli import format_columns
from osc_lib import exceptions
from osc_lib import utils
@@ -30,7 +31,7 @@ from openstackclient.volume.v2 import volume
class TestVolume(volume_fakes.TestVolume):
def setUp(self):
- super(TestVolume, self).setUp()
+ super().setUp()
self.volumes_mock = self.app.client_manager.volume.volumes
self.volumes_mock.reset_mock()
@@ -47,6 +48,9 @@ class TestVolume(volume_fakes.TestVolume):
self.snapshots_mock = self.app.client_manager.volume.volume_snapshots
self.snapshots_mock.reset_mock()
+ self.backups_mock = self.app.client_manager.volume.backups
+ self.backups_mock.reset_mock()
+
self.types_mock = self.app.client_manager.volume.volume_types
self.types_mock.reset_mock()
@@ -55,11 +59,9 @@ class TestVolume(volume_fakes.TestVolume):
self.consistencygroups_mock.reset_mock()
def setup_volumes_mock(self, count):
- volumes = volume_fakes.FakeVolume.create_volumes(count=count)
+ volumes = volume_fakes.create_volumes(count=count)
- self.volumes_mock.get = volume_fakes.FakeVolume.get_volumes(
- volumes,
- 0)
+ self.volumes_mock.get = volume_fakes.get_volumes(volumes, 0)
return volumes
@@ -83,9 +85,9 @@ class TestVolumeCreate(TestVolume):
)
def setUp(self):
- super(TestVolumeCreate, self).setUp()
+ super().setUp()
- self.new_volume = volume_fakes.FakeVolume.create_one_volume()
+ self.new_volume = volume_fakes.create_one_volume()
self.volumes_mock.create.return_value = self.new_volume
self.datalist = (
@@ -108,11 +110,9 @@ class TestVolumeCreate(TestVolume):
def test_volume_create_min_options(self):
arglist = [
'--size', str(self.new_volume.size),
- self.new_volume.name,
]
verifylist = [
('size', self.new_volume.size),
- ('name', self.new_volume.name),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
@@ -124,7 +124,7 @@ class TestVolumeCreate(TestVolume):
self.volumes_mock.create.assert_called_with(
size=self.new_volume.size,
snapshot_id=None,
- name=self.new_volume.name,
+ name=None,
description=None,
volume_type=None,
availability_zone=None,
@@ -133,14 +133,14 @@ class TestVolumeCreate(TestVolume):
source_volid=None,
consistencygroup_id=None,
scheduler_hints=None,
+ backup_id=None,
)
self.assertEqual(self.columns, columns)
self.assertCountEqual(self.datalist, data)
def test_volume_create_options(self):
- consistency_group = (
- volume_fakes.FakeConsistencyGroup.create_one_consistency_group())
+ consistency_group = volume_fakes.create_one_consistency_group()
self.consistencygroups_mock.get.return_value = consistency_group
arglist = [
'--size', str(self.new_volume.size),
@@ -179,6 +179,7 @@ class TestVolumeCreate(TestVolume):
source_volid=None,
consistencygroup_id=consistency_group.id,
scheduler_hints={'k': 'v'},
+ backup_id=None,
)
self.assertEqual(self.columns, columns)
@@ -215,6 +216,7 @@ class TestVolumeCreate(TestVolume):
source_volid=None,
consistencygroup_id=None,
scheduler_hints=None,
+ backup_id=None,
)
self.assertEqual(self.columns, columns)
@@ -253,6 +255,7 @@ class TestVolumeCreate(TestVolume):
source_volid=None,
consistencygroup_id=None,
scheduler_hints=None,
+ backup_id=None,
)
self.assertEqual(self.columns, columns)
@@ -291,13 +294,14 @@ class TestVolumeCreate(TestVolume):
source_volid=None,
consistencygroup_id=None,
scheduler_hints=None,
+ backup_id=None,
)
self.assertEqual(self.columns, columns)
self.assertCountEqual(self.datalist, data)
def test_volume_create_with_snapshot(self):
- snapshot = volume_fakes.FakeSnapshot.create_one_snapshot()
+ snapshot = volume_fakes.create_one_snapshot()
self.new_volume.snapshot_id = snapshot.id
arglist = [
'--snapshot', self.new_volume.snapshot_id,
@@ -328,6 +332,104 @@ class TestVolumeCreate(TestVolume):
source_volid=None,
consistencygroup_id=None,
scheduler_hints=None,
+ backup_id=None,
+ )
+
+ self.assertEqual(self.columns, columns)
+ self.assertCountEqual(self.datalist, data)
+
+ def test_volume_create_with_backup(self):
+ backup = volume_fakes.create_one_backup()
+ self.new_volume.backup_id = backup.id
+ arglist = [
+ '--backup', self.new_volume.backup_id,
+ self.new_volume.name,
+ ]
+ verifylist = [
+ ('backup', self.new_volume.backup_id),
+ ('name', self.new_volume.name),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ self.backups_mock.get.return_value = backup
+
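+ # Creating a volume from a backup requires volume API microversion 3.47
+ # or later, so pin the mocked client's api_version to satisfy the check.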
+ self.app.client_manager.volume.api_version = \
+ api_versions.APIVersion('3.47')
+
+ # In base command class ShowOne in cliff, abstract method take_action()
+ # returns a two-part tuple with a tuple of column names and a tuple of
+ # data to be shown.
+ columns, data = self.cmd.take_action(parsed_args)
+
+ self.volumes_mock.create.assert_called_once_with(
+ size=backup.size,
+ snapshot_id=None,
+ name=self.new_volume.name,
+ description=None,
+ volume_type=None,
+ availability_zone=None,
+ metadata=None,
+ imageRef=None,
+ source_volid=None,
+ consistencygroup_id=None,
+ scheduler_hints=None,
+ backup_id=backup.id,
+ )
+
+ self.assertEqual(self.columns, columns)
+ self.assertCountEqual(self.datalist, data)
+
+ def test_volume_create_with_backup_pre_347(self):
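+ # Without microversion 3.47 the command must reject --backup and point
+ # the user at --os-volume-api-version.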
+ backup = volume_fakes.create_one_backup()
+ self.new_volume.backup_id = backup.id
+ arglist = [
+ '--backup', self.new_volume.backup_id,
+ self.new_volume.name,
+ ]
+ verifylist = [
+ ('backup', self.new_volume.backup_id),
+ ('name', self.new_volume.name),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ self.backups_mock.get.return_value = backup
+
+ exc = self.assertRaises(exceptions.CommandError, self.cmd.take_action,
+ parsed_args)
+ self.assertIn("--os-volume-api-version 3.47 or greater", str(exc))
+
+ def test_volume_create_with_source_volume(self):
+ source_vol = "source_vol"
+ arglist = [
+ '--source', self.new_volume.id,
+ source_vol,
+ ]
+ verifylist = [
+ ('source', self.new_volume.id),
+ ('name', source_vol),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ self.volumes_mock.get.return_value = self.new_volume
+
+ # In base command class ShowOne in cliff, abstract method take_action()
+ # returns a two-part tuple with a tuple of column names and a tuple of
+ # data to be shown.
+ columns, data = self.cmd.take_action(parsed_args)
+
+ self.volumes_mock.create.assert_called_once_with(
+ size=self.new_volume.size,
+ snapshot_id=None,
+ name=source_vol,
+ description=None,
+ volume_type=None,
+ availability_zone=None,
+ metadata=None,
+ imageRef=None,
+ source_volid=self.new_volume.id,
+ consistencygroup_id=None,
+ scheduler_hints=None,
+ backup_id=None,
)
self.assertEqual(self.columns, columns)
@@ -366,6 +468,7 @@ class TestVolumeCreate(TestVolume):
source_volid=None,
consistencygroup_id=None,
scheduler_hints=None,
+ backup_id=None,
)
self.assertEqual(self.columns, columns)
@@ -408,6 +511,7 @@ class TestVolumeCreate(TestVolume):
source_volid=None,
consistencygroup_id=None,
scheduler_hints=None,
+ backup_id=None,
)
self.assertEqual(self.columns, columns)
@@ -459,6 +563,7 @@ class TestVolumeCreate(TestVolume):
source_volid=None,
consistencygroup_id=None,
scheduler_hints=None,
+ backup_id=None,
)
self.assertEqual(2, mock_error.call_count)
@@ -504,7 +609,7 @@ class TestVolumeCreate(TestVolume):
class TestVolumeDelete(TestVolume):
def setUp(self):
- super(TestVolumeDelete, self).setUp()
+ super().setUp()
self.volumes_mock.delete.return_value = None
@@ -632,9 +737,9 @@ class TestVolumeList(TestVolume):
]
def setUp(self):
- super(TestVolumeList, self).setUp()
+ super().setUp()
- self.mock_volume = volume_fakes.FakeVolume.create_one_volume()
+ self.mock_volume = volume_fakes.create_one_volume()
self.volumes_mock.list.return_value = [self.mock_volume]
self.users_mock.get.return_value = self.user
@@ -1107,10 +1212,10 @@ class TestVolumeList(TestVolume):
class TestVolumeMigrate(TestVolume):
- _volume = volume_fakes.FakeVolume.create_one_volume()
+ _volume = volume_fakes.create_one_volume()
def setUp(self):
- super(TestVolumeMigrate, self).setUp()
+ super().setUp()
self.volumes_mock.get.return_value = self._volume
self.volumes_mock.migrate_volume.return_value = None
@@ -1173,12 +1278,12 @@ class TestVolumeMigrate(TestVolume):
class TestVolumeSet(TestVolume):
- volume_type = volume_fakes.FakeVolumeType.create_one_volume_type()
+ volume_type = volume_fakes.create_one_volume_type()
def setUp(self):
- super(TestVolumeSet, self).setUp()
+ super().setUp()
- self.new_volume = volume_fakes.FakeVolume.create_one_volume()
+ self.new_volume = volume_fakes.create_one_volume()
self.volumes_mock.get.return_value = self.new_volume
self.types_mock.get.return_value = self.volume_type
@@ -1427,9 +1532,9 @@ class TestVolumeSet(TestVolume):
class TestVolumeShow(TestVolume):
def setUp(self):
- super(TestVolumeShow, self).setUp()
+ super().setUp()
- self._volume = volume_fakes.FakeVolume.create_one_volume()
+ self._volume = volume_fakes.create_one_volume()
self.volumes_mock.get.return_value = self._volume
# Get the command object to test
self.cmd = volume.ShowVolume(self.app, None)
@@ -1447,20 +1552,21 @@ class TestVolumeShow(TestVolume):
self.volumes_mock.get.assert_called_with(self._volume.id)
self.assertEqual(
- volume_fakes.FakeVolume.get_volume_columns(self._volume),
- columns)
-
+ volume_fakes.get_volume_columns(self._volume),
+ columns,
+ )
self.assertCountEqual(
- volume_fakes.FakeVolume.get_volume_data(self._volume),
- data)
+ volume_fakes.get_volume_data(self._volume),
+ data,
+ )
class TestVolumeUnset(TestVolume):
def setUp(self):
- super(TestVolumeUnset, self).setUp()
+ super().setUp()
- self.new_volume = volume_fakes.FakeVolume.create_one_volume()
+ self.new_volume = volume_fakes.create_one_volume()
self.volumes_mock.get.return_value = self.new_volume
# Get the command object to set property
@@ -1538,7 +1644,7 @@ class TestVolumeUnset(TestVolume):
class TestColumns(TestVolume):
def test_attachments_column_without_server_cache(self):
- _volume = volume_fakes.FakeVolume.create_one_volume()
+ _volume = volume_fakes.create_one_volume()
server_id = _volume.attachments[0]['server_id']
device = _volume.attachments[0]['device']
@@ -1548,7 +1654,7 @@ class TestColumns(TestVolume):
self.assertEqual(_volume.attachments, col.machine_readable())
def test_attachments_column_with_server_cache(self):
- _volume = volume_fakes.FakeVolume.create_one_volume()
+ _volume = volume_fakes.create_one_volume()
server_id = _volume.attachments[0]['server_id']
device = _volume.attachments[0]['device']
diff --git a/openstackclient/tests/unit/volume/v2/test_volume_backend.py b/openstackclient/tests/unit/volume/v2/test_volume_backend.py
index d9ac2c96..6c64f645 100644
--- a/openstackclient/tests/unit/volume/v2/test_volume_backend.py
+++ b/openstackclient/tests/unit/volume/v2/test_volume_backend.py
@@ -20,10 +20,10 @@ class TestShowVolumeCapability(volume_fakes.TestVolume):
"""Test backend capability functionality."""
# The capability to be listed
- capability = volume_fakes.FakeCapability.create_one_capability()
+ capability = volume_fakes.create_one_capability()
def setUp(self):
- super(TestShowVolumeCapability, self).setUp()
+ super().setUp()
# Get a shortcut to the capability Mock
self.capability_mock = self.app.client_manager.volume.capabilities
@@ -77,10 +77,10 @@ class TestListVolumePool(volume_fakes.TestVolume):
"""Tests for volume backend pool listing."""
# The pool to be listed
- pools = volume_fakes.FakePool.create_one_pool()
+ pools = volume_fakes.create_one_pool()
def setUp(self):
- super(TestListVolumePool, self).setUp()
+ super().setUp()
self.pool_mock = self.app.client_manager.volume.pools
self.pool_mock.list.return_value = [self.pools]
diff --git a/openstackclient/tests/unit/volume/v2/test_volume_backup.py b/openstackclient/tests/unit/volume/v2/test_volume_backup.py
index 4b9212d0..7d00b8bf 100644
--- a/openstackclient/tests/unit/volume/v2/test_volume_backup.py
+++ b/openstackclient/tests/unit/volume/v2/test_volume_backup.py
@@ -26,7 +26,7 @@ from openstackclient.volume.v2 import volume_backup
class TestBackup(volume_fakes.TestVolume):
def setUp(self):
- super(TestBackup, self).setUp()
+ super().setUp()
self.backups_mock = self.app.client_manager.volume.backups
self.backups_mock.reset_mock()
@@ -40,9 +40,9 @@ class TestBackup(volume_fakes.TestVolume):
class TestBackupCreate(TestBackup):
- volume = volume_fakes.FakeVolume.create_one_volume()
- snapshot = volume_fakes.FakeSnapshot.create_one_snapshot()
- new_backup = volume_fakes.FakeBackup.create_one_backup(
+ volume = volume_fakes.create_one_volume()
+ snapshot = volume_fakes.create_one_snapshot()
+ new_backup = volume_fakes.create_one_backup(
attrs={'volume_id': volume.id, 'snapshot_id': snapshot.id})
columns = (
@@ -71,7 +71,7 @@ class TestBackupCreate(TestBackup):
)
def setUp(self):
- super(TestBackupCreate, self).setUp()
+ super().setUp()
self.volumes_mock.get.return_value = self.volume
self.snapshots_mock.get.return_value = self.snapshot
@@ -242,13 +242,13 @@ class TestBackupCreate(TestBackup):
class TestBackupDelete(TestBackup):
- backups = volume_fakes.FakeBackup.create_backups(count=2)
+ backups = volume_fakes.create_backups(count=2)
def setUp(self):
- super(TestBackupDelete, self).setUp()
+ super().setUp()
self.backups_mock.get = (
- volume_fakes.FakeBackup.get_backups(self.backups))
+ volume_fakes.get_backups(self.backups))
self.backups_mock.delete.return_value = None
# Get the command object to mock
@@ -334,8 +334,8 @@ class TestBackupDelete(TestBackup):
class TestBackupList(TestBackup):
- volume = volume_fakes.FakeVolume.create_one_volume()
- backups = volume_fakes.FakeBackup.create_backups(
+ volume = volume_fakes.create_one_volume()
+ backups = volume_fakes.create_backups(
attrs={'volume_id': volume.name}, count=3)
columns = (
@@ -374,7 +374,7 @@ class TestBackupList(TestBackup):
))
def setUp(self):
- super(TestBackupList, self).setUp()
+ super().setUp()
self.volumes_mock.list.return_value = [self.volume]
self.backups_mock.list.return_value = self.backups
@@ -456,46 +456,106 @@ class TestBackupList(TestBackup):
class TestBackupRestore(TestBackup):
- volume = volume_fakes.FakeVolume.create_one_volume()
- backup = volume_fakes.FakeBackup.create_one_backup(
- attrs={'volume_id': volume.id})
+ volume = volume_fakes.create_one_volume()
+ backup = volume_fakes.create_one_backup(
+ attrs={'volume_id': volume.id},
+ )
def setUp(self):
- super(TestBackupRestore, self).setUp()
+ super().setUp()
self.backups_mock.get.return_value = self.backup
self.volumes_mock.get.return_value = self.volume
self.restores_mock.restore.return_value = (
- volume_fakes.FakeVolume.create_one_volume(
- {'id': self.volume['id']}))
+ volume_fakes.create_one_volume(
+ {'id': self.volume['id']},
+ )
+ )
# Get the command object to mock
self.cmd = volume_backup.RestoreVolumeBackup(self.app, None)
def test_backup_restore(self):
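+ # Simulate the volume lookup failing so the backup is restored into a
+ # brand-new volume rather than an existing one.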
+ self.volumes_mock.get.side_effect = exceptions.CommandError()
+ self.volumes_mock.find.side_effect = exceptions.CommandError()
+ arglist = [
+ self.backup.id
+ ]
+ verifylist = [
+ ("backup", self.backup.id),
+ ("volume", None),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ result = self.cmd.take_action(parsed_args)
+ self.restores_mock.restore.assert_called_with(
+ self.backup.id, None, None,
+ )
+ self.assertIsNotNone(result)
+
+ def test_backup_restore_with_volume(self):
+ self.volumes_mock.get.side_effect = exceptions.CommandError()
+ self.volumes_mock.find.side_effect = exceptions.CommandError()
+ arglist = [
+ self.backup.id,
+ self.backup.volume_id,
+ ]
+ verifylist = [
+ ("backup", self.backup.id),
+ ("volume", self.backup.volume_id),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ result = self.cmd.take_action(parsed_args)
+ self.restores_mock.restore.assert_called_with(
+ self.backup.id, None, self.backup.volume_id,
+ )
+ self.assertIsNotNone(result)
+
+ def test_backup_restore_with_volume_force(self):
arglist = [
+ "--force",
self.backup.id,
- self.backup.volume_id
+ self.volume.name,
]
verifylist = [
+ ("force", True),
("backup", self.backup.id),
- ("volume", self.backup.volume_id)
+ ("volume", self.volume.name),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
result = self.cmd.take_action(parsed_args)
- self.restores_mock.restore.assert_called_with(self.backup.id,
- self.backup.volume_id)
+ self.restores_mock.restore.assert_called_with(
+ self.backup.id, self.volume.id, None,
+ )
self.assertIsNotNone(result)
+ def test_backup_restore_with_volume_existing(self):
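+ # Without --force, restoring on top of a volume that already exists is
+ # rejected with a CommandError.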
+ arglist = [
+ self.backup.id,
+ self.volume.name,
+ ]
+ verifylist = [
+ ("backup", self.backup.id),
+ ("volume", self.volume.name),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ self.assertRaises(
+ exceptions.CommandError,
+ self.cmd.take_action,
+ parsed_args,
+ )
+
class TestBackupSet(TestBackup):
- backup = volume_fakes.FakeBackup.create_one_backup(
+ backup = volume_fakes.create_one_backup(
attrs={'metadata': {'wow': 'cool'}},
)
def setUp(self):
- super(TestBackupSet, self).setUp()
+ super().setUp()
self.backups_mock.get.return_value = self.backup
@@ -724,7 +784,7 @@ class TestBackupSet(TestBackup):
class TestBackupUnset(TestBackup):
- backup = volume_fakes.FakeBackup.create_one_backup(
+ backup = volume_fakes.create_one_backup(
attrs={'metadata': {'foo': 'bar'}},
)
@@ -785,7 +845,7 @@ class TestBackupUnset(TestBackup):
class TestBackupShow(TestBackup):
- backup = volume_fakes.FakeBackup.create_one_backup()
+ backup = volume_fakes.create_one_backup()
columns = (
'availability_zone',
@@ -813,7 +873,7 @@ class TestBackupShow(TestBackup):
)
def setUp(self):
- super(TestBackupShow, self).setUp()
+ super().setUp()
self.backups_mock.get.return_value = self.backup
# Get the command object to test
diff --git a/openstackclient/tests/unit/volume/v2/test_volume_host.py b/openstackclient/tests/unit/volume/v2/test_volume_host.py
index b024329a..730085a3 100644
--- a/openstackclient/tests/unit/volume/v2/test_volume_host.py
+++ b/openstackclient/tests/unit/volume/v2/test_volume_host.py
@@ -12,14 +12,14 @@
# under the License.
#
-from openstackclient.tests.unit.volume.v2 import fakes as host_fakes
+from openstackclient.tests.unit.volume.v2 import fakes as volume_fakes
from openstackclient.volume.v2 import volume_host
-class TestVolumeHost(host_fakes.TestVolume):
+class TestVolumeHost(volume_fakes.TestVolume):
def setUp(self):
- super(TestVolumeHost, self).setUp()
+ super().setUp()
self.host_mock = self.app.client_manager.volume.services
self.host_mock.reset_mock()
@@ -27,10 +27,10 @@ class TestVolumeHost(host_fakes.TestVolume):
class TestVolumeHostSet(TestVolumeHost):
- service = host_fakes.FakeService.create_one_service()
+ service = volume_fakes.create_one_service()
def setUp(self):
- super(TestVolumeHostSet, self).setUp()
+ super().setUp()
self.host_mock.freeze_host.return_value = None
self.host_mock.thaw_host.return_value = None
@@ -89,10 +89,10 @@ class TestVolumeHostSet(TestVolumeHost):
class TestVolumeHostFailover(TestVolumeHost):
- service = host_fakes.FakeService.create_one_service()
+ service = volume_fakes.create_one_service()
def setUp(self):
- super(TestVolumeHostFailover, self).setUp()
+ super().setUp()
self.host_mock.failover_host.return_value = None
diff --git a/openstackclient/tests/unit/volume/v2/test_volume_snapshot.py b/openstackclient/tests/unit/volume/v2/test_volume_snapshot.py
index 33a5a98a..6cffcaac 100644
--- a/openstackclient/tests/unit/volume/v2/test_volume_snapshot.py
+++ b/openstackclient/tests/unit/volume/v2/test_volume_snapshot.py
@@ -54,8 +54,8 @@ class TestVolumeSnapshotCreate(TestVolumeSnapshot):
def setUp(self):
super().setUp()
- self.volume = volume_fakes.FakeVolume.create_one_volume()
- self.new_snapshot = volume_fakes.FakeSnapshot.create_one_snapshot(
+ self.volume = volume_fakes.create_one_volume()
+ self.new_snapshot = volume_fakes.create_one_snapshot(
attrs={'volume_id': self.volume.id})
self.data = (
@@ -179,13 +179,13 @@ class TestVolumeSnapshotCreate(TestVolumeSnapshot):
class TestVolumeSnapshotDelete(TestVolumeSnapshot):
- snapshots = volume_fakes.FakeSnapshot.create_snapshots(count=2)
+ snapshots = volume_fakes.create_snapshots(count=2)
def setUp(self):
super().setUp()
self.snapshots_mock.get = (
- volume_fakes.FakeSnapshot.get_snapshots(self.snapshots))
+ volume_fakes.get_snapshots(self.snapshots))
self.snapshots_mock.delete.return_value = None
# Get the command object to mock
@@ -273,9 +273,9 @@ class TestVolumeSnapshotDelete(TestVolumeSnapshot):
class TestVolumeSnapshotList(TestVolumeSnapshot):
- volume = volume_fakes.FakeVolume.create_one_volume()
+ volume = volume_fakes.create_one_volume()
project = project_fakes.FakeProject.create_one_project()
- snapshots = volume_fakes.FakeSnapshot.create_snapshots(
+ snapshots = volume_fakes.create_snapshots(
attrs={'volume_id': volume.name}, count=3)
columns = [
@@ -495,7 +495,7 @@ class TestVolumeSnapshotList(TestVolumeSnapshot):
class TestVolumeSnapshotSet(TestVolumeSnapshot):
- snapshot = volume_fakes.FakeSnapshot.create_one_snapshot()
+ snapshot = volume_fakes.create_one_snapshot()
def setUp(self):
super().setUp()
@@ -677,7 +677,7 @@ class TestVolumeSnapshotShow(TestVolumeSnapshot):
def setUp(self):
super().setUp()
- self.snapshot = volume_fakes.FakeSnapshot.create_one_snapshot()
+ self.snapshot = volume_fakes.create_one_snapshot()
self.data = (
self.snapshot.created_at,
@@ -712,7 +712,7 @@ class TestVolumeSnapshotShow(TestVolumeSnapshot):
class TestVolumeSnapshotUnset(TestVolumeSnapshot):
- snapshot = volume_fakes.FakeSnapshot.create_one_snapshot()
+ snapshot = volume_fakes.create_one_snapshot()
def setUp(self):
super().setUp()
diff --git a/openstackclient/tests/unit/volume/v2/test_volume_transfer_request.py b/openstackclient/tests/unit/volume/v2/test_volume_transfer_request.py
index 1a1f220f..c8c6fac9 100644
--- a/openstackclient/tests/unit/volume/v2/test_volume_transfer_request.py
+++ b/openstackclient/tests/unit/volume/v2/test_volume_transfer_request.py
@@ -20,14 +20,14 @@ from osc_lib import exceptions
from osc_lib import utils
from openstackclient.tests.unit import utils as test_utils
-from openstackclient.tests.unit.volume.v2 import fakes as transfer_fakes
+from openstackclient.tests.unit.volume.v2 import fakes as volume_fakes
from openstackclient.volume.v2 import volume_transfer_request
-class TestTransfer(transfer_fakes.TestVolume):
+class TestTransfer(volume_fakes.TestVolume):
def setUp(self):
- super(TestTransfer, self).setUp()
+ super().setUp()
# Get a shortcut to the TransferManager Mock
self.transfer_mock = self.app.client_manager.volume.transfers
@@ -47,10 +47,9 @@ class TestTransferAccept(TestTransfer):
)
def setUp(self):
- super(TestTransferAccept, self).setUp()
+ super().setUp()
- self.volume_transfer = (
- transfer_fakes.FakeTransfer.create_one_transfer())
+ self.volume_transfer = volume_fakes.create_one_transfer()
self.data = (
self.volume_transfer.id,
self.volume_transfer.name,
@@ -106,7 +105,7 @@ class TestTransferAccept(TestTransfer):
class TestTransferCreate(TestTransfer):
- volume = transfer_fakes.FakeVolume.create_one_volume()
+ volume = volume_fakes.create_one_volume()
columns = (
'auth_key',
@@ -117,12 +116,14 @@ class TestTransferCreate(TestTransfer):
)
def setUp(self):
- super(TestTransferCreate, self).setUp()
-
- self.volume_transfer = transfer_fakes.FakeTransfer.create_one_transfer(
- attrs={'volume_id': self.volume.id,
- 'auth_key': 'key',
- 'created_at': 'time'}
+ super().setUp()
+
+ self.volume_transfer = volume_fakes.create_one_transfer(
+ attrs={
+ 'volume_id': self.volume.id,
+ 'auth_key': 'key',
+ 'created_at': 'time',
+ },
)
self.data = (
self.volume_transfer.auth_key,
@@ -221,13 +222,14 @@ class TestTransferCreate(TestTransfer):
class TestTransferDelete(TestTransfer):
- volume_transfers = transfer_fakes.FakeTransfer.create_transfers(count=2)
+ volume_transfers = volume_fakes.create_transfers(count=2)
def setUp(self):
- super(TestTransferDelete, self).setUp()
+ super().setUp()
- self.transfer_mock.get = (
- transfer_fakes.FakeTransfer.get_transfers(self.volume_transfers))
+ self.transfer_mock.get = volume_fakes.get_transfers(
+ self.volume_transfers,
+ )
self.transfer_mock.delete.return_value = None
# Get the command object to mock
@@ -300,10 +302,10 @@ class TestTransferDelete(TestTransfer):
class TestTransferList(TestTransfer):
# The Transfers to be listed
- volume_transfers = transfer_fakes.FakeTransfer.create_one_transfer()
+ volume_transfers = volume_fakes.create_one_transfer()
def setUp(self):
- super(TestTransferList, self).setUp()
+ super().setUp()
self.transfer_mock.list.return_value = [self.volume_transfers]
@@ -394,11 +396,10 @@ class TestTransferShow(TestTransfer):
)
def setUp(self):
- super(TestTransferShow, self).setUp()
+ super().setUp()
- self.volume_transfer = (
- transfer_fakes.FakeTransfer.create_one_transfer(
- attrs={'created_at': 'time'})
+ self.volume_transfer = volume_fakes.create_one_transfer(
+ attrs={'created_at': 'time'},
)
self.data = (
self.volume_transfer.created_at,
diff --git a/openstackclient/tests/unit/volume/v3/fakes.py b/openstackclient/tests/unit/volume/v3/fakes.py
index 9040b2be..62383580 100644
--- a/openstackclient/tests/unit/volume/v3/fakes.py
+++ b/openstackclient/tests/unit/volume/v3/fakes.py
@@ -23,8 +23,7 @@ from openstackclient.tests.unit import utils
from openstackclient.tests.unit.volume.v2 import fakes as volume_v2_fakes
-class FakeVolumeClient(object):
-
+class FakeVolumeClient:
def __init__(self, **kwargs):
self.auth_token = kwargs['token']
self.management_url = kwargs['endpoint']
@@ -32,6 +31,8 @@ class FakeVolumeClient(object):
self.attachments = mock.Mock()
self.attachments.resource_class = fakes.FakeResource(None, {})
+ self.clusters = mock.Mock()
+ self.clusters.resource_class = fakes.FakeResource(None, {})
self.groups = mock.Mock()
self.groups.resource_class = fakes.FakeResource(None, {})
self.group_snapshots = mock.Mock()
@@ -40,24 +41,27 @@ class FakeVolumeClient(object):
self.group_types.resource_class = fakes.FakeResource(None, {})
self.messages = mock.Mock()
self.messages.resource_class = fakes.FakeResource(None, {})
+ self.resource_filters = mock.Mock()
+ self.resource_filters.resource_class = fakes.FakeResource(None, {})
self.volumes = mock.Mock()
self.volumes.resource_class = fakes.FakeResource(None, {})
self.volume_types = mock.Mock()
self.volume_types.resource_class = fakes.FakeResource(None, {})
+ self.services = mock.Mock()
+ self.services.resource_class = fakes.FakeResource(None, {})
+ self.workers = mock.Mock()
+ self.workers.resource_class = fakes.FakeResource(None, {})
class TestVolume(utils.TestCommand):
-
def setUp(self):
super().setUp()
self.app.client_manager.volume = FakeVolumeClient(
- endpoint=fakes.AUTH_URL,
- token=fakes.AUTH_TOKEN
+ endpoint=fakes.AUTH_URL, token=fakes.AUTH_TOKEN
)
self.app.client_manager.identity = identity_fakes.FakeIdentityv3Client(
- endpoint=fakes.AUTH_URL,
- token=fakes.AUTH_TOKEN
+ endpoint=fakes.AUTH_URL, token=fakes.AUTH_TOKEN
)
self.app.client_manager.compute = compute_fakes.FakeComputev2Client(
endpoint=fakes.AUTH_URL,
@@ -66,255 +70,309 @@ class TestVolume(utils.TestCommand):
# TODO(stephenfin): Check if the responses are actually the same
-FakeVolume = volume_v2_fakes.FakeVolume
-FakeVolumeType = volume_v2_fakes.FakeVolumeType
+create_one_volume = volume_v2_fakes.create_one_volume
+create_one_volume_type = volume_v2_fakes.create_one_volume_type
+
+
+def create_one_cluster(attrs=None):
+ """Create a fake service cluster.
+
+ :param attrs: A dictionary with all attributes of service cluster
+ :return: A FakeResource object with id, name, status, etc.
+ """
+ attrs = attrs or {}
+
+ # Set default attribute
+ cluster_info = {
+ 'name': f'cluster-{uuid.uuid4().hex}',
+ 'binary': f'binary-{uuid.uuid4().hex}',
+ 'state': random.choice(['up', 'down']),
+ 'status': random.choice(['enabled', 'disabled']),
+ 'disabled_reason': None,
+ 'num_hosts': random.randint(1, 64),
+ 'num_down_hosts': random.randint(1, 64),
+ 'last_heartbeat': '2015-09-16T09:28:52.000000',
+ 'created_at': '2015-09-16T09:28:52.000000',
+ 'updated_at': '2015-09-16T09:28:52.000000',
+ 'replication_status': None,
+ 'frozen': False,
+ 'active_backend_id': None,
+ }
+
+ # Overwrite default attributes if there are some attributes set
+ cluster_info.update(attrs)
+
+ return fakes.FakeResource(None, cluster_info, loaded=True)
+
+
+def create_clusters(attrs=None, count=2):
+ """Create multiple fake service clusters.
+
+ :param attrs: A dictionary with all attributes of service cluster
+ :param count: The number of service clusters to be faked
+ :return: A list of FakeResource objects
+ """
+ clusters = []
+ for n in range(0, count):
+ clusters.append(create_one_cluster(attrs))
+
+ return clusters
+
+def create_one_resource_filter(attrs=None):
+ """Create a fake resource filter.
-class FakeVolumeGroup:
- """Fake one or more volume groups."""
+ :param attrs: A dictionary with all attributes of resource filter
+ :return: A FakeResource object with id, name, status, etc.
+ """
+ attrs = attrs or {}
- @staticmethod
- def create_one_volume_group(attrs=None):
- """Create a fake group.
+ # Set default attribute
- :param attrs: A dictionary with all attributes of group
- :return: A FakeResource object with id, name, status, etc.
- """
- attrs = attrs or {}
+ resource_filter_info = {
+ 'filters': [
+ 'name',
+ 'status',
+ 'image_metadata',
+ 'bootable',
+ 'migration_status',
+ ],
+ 'resource': 'volume',
+ }
- group_type = attrs.pop('group_type', None) or uuid.uuid4().hex
- volume_types = attrs.pop('volume_types', None) or [uuid.uuid4().hex]
+ # Overwrite default attributes if there are some attributes set
+ resource_filter_info.update(attrs)
- # Set default attribute
- group_info = {
- 'id': uuid.uuid4().hex,
- 'status': random.choice([
+ return fakes.FakeResource(None, resource_filter_info, loaded=True)
+
+
+def create_resource_filters(attrs=None, count=2):
+ """Create multiple fake resource filters.
+
+ :param attrs: A dictionary with all attributes of resource filter
+ :param count: The number of resource filters to be faked
+ :return: A list of FakeResource objects
+ """
+ resource_filters = []
+ for n in range(0, count):
+ resource_filters.append(create_one_resource_filter(attrs))
+
+ return resource_filters
+
+
+def create_one_volume_group(attrs=None):
+ """Create a fake group.
+
+ :param attrs: A dictionary with all attributes of group
+ :return: A FakeResource object with id, name, status, etc.
+ """
+ attrs = attrs or {}
+
+ group_type = attrs.pop('group_type', None) or uuid.uuid4().hex
+ volume_types = attrs.pop('volume_types', None) or [uuid.uuid4().hex]
+
+ # Set default attribute
+ group_info = {
+ 'id': uuid.uuid4().hex,
+ 'status': random.choice(
+ [
'available',
- ]),
- 'availability_zone': f'az-{uuid.uuid4().hex}',
- 'created_at': '2015-09-16T09:28:52.000000',
- 'name': 'first_group',
- 'description': f'description-{uuid.uuid4().hex}',
- 'group_type': group_type,
- 'volume_types': volume_types,
- 'volumes': [f'volume-{uuid.uuid4().hex}'],
- 'group_snapshot_id': None,
- 'source_group_id': None,
- 'project_id': f'project-{uuid.uuid4().hex}',
- }
-
- # Overwrite default attributes if there are some attributes set
- group_info.update(attrs)
-
- group = fakes.FakeResource(
- None,
- group_info,
- loaded=True)
- return group
-
- @staticmethod
- def create_volume_groups(attrs=None, count=2):
- """Create multiple fake groups.
-
- :param attrs: A dictionary with all attributes of group
- :param count: The number of groups to be faked
- :return: A list of FakeResource objects
- """
- groups = []
- for n in range(0, count):
- groups.append(FakeVolumeGroup.create_one_volume_group(attrs))
-
- return groups
-
-
-class FakeVolumeGroupSnapshot:
- """Fake one or more volume group snapshots."""
-
- @staticmethod
- def create_one_volume_group_snapshot(attrs=None, methods=None):
- """Create a fake group snapshot.
-
- :param attrs: A dictionary with all attributes
- :param methods: A dictionary with all methods
- :return: A FakeResource object with id, name, description, etc.
- """
- attrs = attrs or {}
-
- # Set default attribute
- group_snapshot_info = {
- 'id': uuid.uuid4().hex,
- 'name': f'group-snapshot-{uuid.uuid4().hex}',
- 'description': f'description-{uuid.uuid4().hex}',
- 'status': random.choice(['available']),
- 'group_id': uuid.uuid4().hex,
- 'group_type_id': uuid.uuid4().hex,
- 'project_id': uuid.uuid4().hex,
- }
-
- # Overwrite default attributes if there are some attributes set
- group_snapshot_info.update(attrs)
-
- group_snapshot = fakes.FakeResource(
- None,
- group_snapshot_info,
- methods=methods,
- loaded=True)
- return group_snapshot
-
- @staticmethod
- def create_volume_group_snapshots(attrs=None, count=2):
- """Create multiple fake group snapshots.
-
- :param attrs: A dictionary with all attributes of group snapshot
- :param count: The number of group snapshots to be faked
- :return: A list of FakeResource objects
- """
- group_snapshots = []
- for n in range(0, count):
- group_snapshots.append(
- FakeVolumeGroupSnapshot.create_one_volume_group_snapshot(attrs)
- )
-
- return group_snapshots
-
-
-class FakeVolumeGroupType:
- """Fake one or more volume group types."""
-
- @staticmethod
- def create_one_volume_group_type(attrs=None, methods=None):
- """Create a fake group type.
-
- :param attrs: A dictionary with all attributes of group type
- :param methods: A dictionary with all methods
- :return: A FakeResource object with id, name, description, etc.
- """
- attrs = attrs or {}
-
- # Set default attribute
- group_type_info = {
- 'id': uuid.uuid4().hex,
- 'name': f'group-type-{uuid.uuid4().hex}',
- 'description': f'description-{uuid.uuid4().hex}',
- 'is_public': random.choice([True, False]),
- 'group_specs': {},
- }
-
- # Overwrite default attributes if there are some attributes set
- group_type_info.update(attrs)
-
- group_type = fakes.FakeResource(
- None,
- group_type_info,
- methods=methods,
- loaded=True)
- return group_type
-
- @staticmethod
- def create_volume_group_types(attrs=None, count=2):
- """Create multiple fake group types.
-
- :param attrs: A dictionary with all attributes of group type
- :param count: The number of group types to be faked
- :return: A list of FakeResource objects
- """
- group_types = []
- for n in range(0, count):
- group_types.append(
- FakeVolumeGroupType.create_one_volume_group_type(attrs)
- )
-
- return group_types
-
-
-class FakeVolumeMessage:
- """Fake one or more volume messages."""
-
- @staticmethod
- def create_one_volume_message(attrs=None):
- """Create a fake message.
-
- :param attrs: A dictionary with all attributes of message
- :return: A FakeResource object with id, name, status, etc.
- """
- attrs = attrs or {}
-
- # Set default attribute
- message_info = {
- 'created_at': '2016-02-11T11:17:37.000000',
- 'event_id': f'VOLUME_{random.randint(1, 999999):06d}',
- 'guaranteed_until': '2016-02-11T11:17:37.000000',
- 'id': uuid.uuid4().hex,
- 'message_level': 'ERROR',
- 'request_id': f'req-{uuid.uuid4().hex}',
- 'resource_type': 'VOLUME',
- 'resource_uuid': uuid.uuid4().hex,
- 'user_message': f'message-{uuid.uuid4().hex}',
- }
-
- # Overwrite default attributes if there are some attributes set
- message_info.update(attrs)
-
- message = fakes.FakeResource(
- None,
- message_info,
- loaded=True)
- return message
-
- @staticmethod
- def create_volume_messages(attrs=None, count=2):
- """Create multiple fake messages.
-
- :param attrs: A dictionary with all attributes of message
- :param count: The number of messages to be faked
- :return: A list of FakeResource objects
- """
- messages = []
- for n in range(0, count):
- messages.append(FakeVolumeMessage.create_one_volume_message(attrs))
-
- return messages
-
- @staticmethod
- def get_volume_messages(messages=None, count=2):
- """Get an iterable MagicMock object with a list of faked messages.
-
- If messages list is provided, then initialize the Mock object with the
- list. Otherwise create one.
-
- :param messages: A list of FakeResource objects faking messages
- :param count: The number of messages to be faked
- :return An iterable Mock object with side_effect set to a list of faked
- messages
- """
- if messages is None:
- messages = FakeVolumeMessage.create_messages(count)
-
- return mock.Mock(side_effect=messages)
-
-
-class FakeVolumeAttachment:
- """Fake one or more volume attachments."""
-
- @staticmethod
- def create_one_volume_attachment(attrs=None):
- """Create a fake volume attachment.
-
- :param attrs: A dictionary with all attributes of volume attachment
- :return: A FakeResource object with id, status, etc.
- """
- attrs = attrs or {}
-
- attachment_id = uuid.uuid4().hex
- volume_id = attrs.pop('volume_id', None) or uuid.uuid4().hex
- server_id = attrs.pop('instance', None) or uuid.uuid4().hex
-
- # Set default attribute
- attachment_info = {
- 'id': attachment_id,
- 'volume_id': volume_id,
- 'instance': server_id,
- 'status': random.choice([
+ ]
+ ),
+ 'availability_zone': f'az-{uuid.uuid4().hex}',
+ 'created_at': '2015-09-16T09:28:52.000000',
+ 'name': 'first_group',
+ 'description': f'description-{uuid.uuid4().hex}',
+ 'group_type': group_type,
+ 'volume_types': volume_types,
+ 'volumes': [f'volume-{uuid.uuid4().hex}'],
+ 'group_snapshot_id': None,
+ 'source_group_id': None,
+ 'project_id': f'project-{uuid.uuid4().hex}',
+ }
+
+ # Overwrite default attributes if there are some attributes set
+ group_info.update(attrs)
+
+ group = fakes.FakeResource(None, group_info, loaded=True)
+ return group
+
+
+def create_volume_groups(attrs=None, count=2):
+ """Create multiple fake groups.
+
+ :param attrs: A dictionary with all attributes of group
+ :param count: The number of groups to be faked
+ :return: A list of FakeResource objects
+ """
+ groups = []
+ for n in range(0, count):
+ groups.append(create_one_volume_group(attrs))
+
+ return groups
+
+
+def create_one_volume_group_snapshot(attrs=None, methods=None):
+ """Create a fake group snapshot.
+
+ :param attrs: A dictionary with all attributes
+ :param methods: A dictionary with all methods
+ :return: A FakeResource object with id, name, description, etc.
+ """
+ attrs = attrs or {}
+
+ # Set default attribute
+ group_snapshot_info = {
+ 'id': uuid.uuid4().hex,
+ 'name': f'group-snapshot-{uuid.uuid4().hex}',
+ 'description': f'description-{uuid.uuid4().hex}',
+ 'status': random.choice(['available']),
+ 'group_id': uuid.uuid4().hex,
+ 'group_type_id': uuid.uuid4().hex,
+ 'project_id': uuid.uuid4().hex,
+ }
+
+ # Overwrite default attributes if there are some attributes set
+ group_snapshot_info.update(attrs)
+
+ group_snapshot = fakes.FakeResource(
+ None, group_snapshot_info, methods=methods, loaded=True
+ )
+ return group_snapshot
+
+
+def create_volume_group_snapshots(attrs=None, count=2):
+ """Create multiple fake group snapshots.
+
+ :param attrs: A dictionary with all attributes of group snapshot
+ :param count: The number of group snapshots to be faked
+ :return: A list of FakeResource objects
+ """
+ group_snapshots = []
+ for n in range(0, count):
+ group_snapshots.append(create_one_volume_group_snapshot(attrs))
+
+ return group_snapshots
+
+
+def create_one_volume_group_type(attrs=None, methods=None):
+ """Create a fake group type.
+
+ :param attrs: A dictionary with all attributes of group type
+ :param methods: A dictionary with all methods
+ :return: A FakeResource object with id, name, description, etc.
+ """
+ attrs = attrs or {}
+
+ # Set default attribute
+ group_type_info = {
+ 'id': uuid.uuid4().hex,
+ 'name': f'group-type-{uuid.uuid4().hex}',
+ 'description': f'description-{uuid.uuid4().hex}',
+ 'is_public': random.choice([True, False]),
+ 'group_specs': {},
+ }
+
+ # Overwrite default attributes if there are some attributes set
+ group_type_info.update(attrs)
+
+ group_type = fakes.FakeResource(
+ None, group_type_info, methods=methods, loaded=True
+ )
+ return group_type
+
+
+def create_volume_group_types(attrs=None, count=2):
+ """Create multiple fake group types.
+
+ :param attrs: A dictionary with all attributes of group type
+ :param count: The number of group types to be faked
+ :return: A list of FakeResource objects
+ """
+ group_types = []
+ for n in range(0, count):
+ group_types.append(create_one_volume_group_type(attrs))
+
+ return group_types
+
+
+def create_one_volume_message(attrs=None):
+ """Create a fake message.
+
+ :param attrs: A dictionary with all attributes of message
+ :return: A FakeResource object with id, name, status, etc.
+ """
+ attrs = attrs or {}
+
+ # Set default attribute
+ message_info = {
+ 'created_at': '2016-02-11T11:17:37.000000',
+ 'event_id': f'VOLUME_{random.randint(1, 999999):06d}',
+ 'guaranteed_until': '2016-02-11T11:17:37.000000',
+ 'id': uuid.uuid4().hex,
+ 'message_level': 'ERROR',
+ 'request_id': f'req-{uuid.uuid4().hex}',
+ 'resource_type': 'VOLUME',
+ 'resource_uuid': uuid.uuid4().hex,
+ 'user_message': f'message-{uuid.uuid4().hex}',
+ }
+
+ # Overwrite default attributes if there are some attributes set
+ message_info.update(attrs)
+
+ return fakes.FakeResource(None, message_info, loaded=True)
+
+
+def create_volume_messages(attrs=None, count=2):
+ """Create multiple fake messages.
+
+ :param attrs: A dictionary with all attributes of message
+ :param count: The number of messages to be faked
+ :return: A list of FakeResource objects
+ """
+ messages = []
+ for n in range(0, count):
+ messages.append(create_one_volume_message(attrs))
+
+ return messages
+
+
+def get_volume_messages(messages=None, count=2):
+ """Get an iterable MagicMock object with a list of faked messages.
+
+ If messages list is provided, then initialize the Mock object with the
+ list. Otherwise create one.
+
+ :param messages: A list of FakeResource objects faking messages
+ :param count: The number of messages to be faked
+    :return: An iterable Mock object with side_effect set to a list of faked
+ messages
+ """
+ if messages is None:
+ messages = create_volume_messages(count)
+
+ return mock.Mock(side_effect=messages)
+
+
+def create_one_volume_attachment(attrs=None):
+ """Create a fake volume attachment.
+
+ :param attrs: A dictionary with all attributes of volume attachment
+ :return: A FakeResource object with id, status, etc.
+ """
+ attrs = attrs or {}
+
+ attachment_id = uuid.uuid4().hex
+ volume_id = attrs.pop('volume_id', None) or uuid.uuid4().hex
+ server_id = attrs.pop('instance', None) or uuid.uuid4().hex
+
+ # Set default attribute
+ attachment_info = {
+ 'id': attachment_id,
+ 'volume_id': volume_id,
+ 'instance': server_id,
+ 'status': random.choice(
+ [
'attached',
'attaching',
'detached',
@@ -322,68 +380,148 @@ class FakeVolumeAttachment:
'error_attaching',
'error_detaching',
'deleted',
- ]),
- 'attach_mode': random.choice(['ro', 'rw']),
- 'attached_at': '2015-09-16T09:28:52.000000',
- 'detached_at': None,
- 'connection_info': {
- 'access_mode': 'rw',
- 'attachment_id': attachment_id,
- 'auth_method': 'CHAP',
- 'auth_password': 'AcUZ8PpxLHwzypMC',
- 'auth_username': '7j3EZQWT3rbE6pcSGKvK',
- 'cacheable': False,
- 'driver_volume_type': 'iscsi',
- 'encrypted': False,
- 'qos_specs': None,
- 'target_discovered': False,
- 'target_iqn':
- f'iqn.2010-10.org.openstack:volume-{attachment_id}',
- 'target_lun': '1',
- 'target_portal': '192.168.122.170:3260',
- 'volume_id': volume_id,
- },
- }
-
- # Overwrite default attributes if there are some attributes set
- attachment_info.update(attrs)
-
- attachment = fakes.FakeResource(
- None,
- attachment_info,
- loaded=True)
- return attachment
-
- @staticmethod
- def create_volume_attachments(attrs=None, count=2):
- """Create multiple fake volume attachments.
-
- :param attrs: A dictionary with all attributes of volume attachment
- :param count: The number of volume attachments to be faked
- :return: A list of FakeResource objects
- """
- attachments = []
-
- for n in range(0, count):
- attachments.append(
- FakeVolumeAttachment.create_one_volume_attachment(attrs))
-
- return attachments
-
- @staticmethod
- def get_volume_attachments(attachments=None, count=2):
- """Get an iterable MagicMock object with a list of faked volumes.
-
- If attachments list is provided, then initialize the Mock object with
- the list. Otherwise create one.
-
- :param attachments: A list of FakeResource objects faking volume
- attachments
- :param count: The number of volume attachments to be faked
- :return An iterable Mock object with side_effect set to a list of faked
- volume attachments
- """
- if attachments is None:
- attachments = FakeVolumeAttachment.create_volume_attachments(count)
-
- return mock.Mock(side_effect=attachments)
+ ]
+ ),
+ 'attach_mode': random.choice(['ro', 'rw']),
+ 'attached_at': '2015-09-16T09:28:52.000000',
+ 'detached_at': None,
+ 'connection_info': {
+ 'access_mode': 'rw',
+ 'attachment_id': attachment_id,
+ 'auth_method': 'CHAP',
+ 'auth_password': 'AcUZ8PpxLHwzypMC',
+ 'auth_username': '7j3EZQWT3rbE6pcSGKvK',
+ 'cacheable': False,
+ 'driver_volume_type': 'iscsi',
+ 'encrypted': False,
+ 'qos_specs': None,
+ 'target_discovered': False,
+ 'target_iqn': f'iqn.2010-10.org.openstack:volume-{attachment_id}',
+ 'target_lun': '1',
+ 'target_portal': '192.168.122.170:3260',
+ 'volume_id': volume_id,
+ },
+ }
+
+ # Overwrite default attributes if there are some attributes set
+ attachment_info.update(attrs)
+
+ return fakes.FakeResource(None, attachment_info, loaded=True)
+
+
+def create_volume_attachments(attrs=None, count=2):
+ """Create multiple fake volume attachments.
+
+ :param attrs: A dictionary with all attributes of volume attachment
+ :param count: The number of volume attachments to be faked
+ :return: A list of FakeResource objects
+ """
+ attachments = []
+
+ for n in range(0, count):
+ attachments.append(create_one_volume_attachment(attrs))
+
+ return attachments
+
+
+def get_volume_attachments(attachments=None, count=2):
+    """Get an iterable MagicMock object with a list of faked attachments.
+
+ If attachments list is provided, then initialize the Mock object with
+ the list. Otherwise create one.
+
+ :param attachments: A list of FakeResource objects faking volume
+ attachments
+ :param count: The number of volume attachments to be faked
+    :return: An iterable Mock object with side_effect set to a list of faked
+ volume attachments
+ """
+ if attachments is None:
+ attachments = create_volume_attachments(count)
+
+ return mock.Mock(side_effect=attachments)
+
+
+def create_service_log_level_entry(attrs=None):
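+    """Create a fake service log level entry.
+
+    :param attrs: A dictionary with all attributes of the log level entry
+    :return: A FakeResource object with host, binary, prefix and level
+    """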
+ service_log_level_info = {
+ 'host': 'host_test',
+ 'binary': 'cinder-api',
+ 'prefix': 'cinder.api.common',
+ 'level': 'DEBUG',
+ }
+ # Overwrite default attributes if there are some attributes set
+ attrs = attrs or {}
+
+ service_log_level_info.update(attrs)
+
+ service_log_level = fakes.FakeResource(
+ None, service_log_level_info, loaded=True)
+ return service_log_level
+
+
+def create_cleanup_records():
+ """Create fake service cleanup records.
+
+ :return: A list of FakeResource objects
+ """
+ cleaning_records = []
+ unavailable_records = []
+ cleaning_work_info = {
+ 'id': 1,
+ 'host': 'devstack@fakedriver-1',
+ 'binary': 'cinder-volume',
+ 'cluster_name': 'fake_cluster',
+ }
+ unavailable_work_info = {
+ 'id': 2,
+ 'host': 'devstack@fakedriver-2',
+ 'binary': 'cinder-scheduler',
+ 'cluster_name': 'new_cluster',
+ }
+ cleaning_records.append(cleaning_work_info)
+ unavailable_records.append(unavailable_work_info)
+
+ cleaning = [fakes.FakeResource(
+ None, obj, loaded=True) for obj in cleaning_records]
+ unavailable = [fakes.FakeResource(
+ None, obj, loaded=True) for obj in unavailable_records]
+
+ return cleaning, unavailable
+
+
+def create_one_manage_record(attrs=None, snapshot=False):
+ manage_dict = {
+ 'reference': {'source-name': 'fake-volume'},
+ 'size': '1',
+ 'safe_to_manage': False,
+ 'reason_not_safe': 'already managed',
+ 'cinder_id': 'fake-volume',
+ 'extra_info': None,
+ }
+ if snapshot:
+ manage_dict['source_reference'] = {'source-name': 'fake-source'}
+
+ # Overwrite default attributes if there are some attributes set
+ attrs = attrs or {}
+
+ manage_dict.update(attrs)
+ manage_record = fakes.FakeResource(None, manage_dict, loaded=True)
+ return manage_record
+
+
+def create_volume_manage_list_records(count=2):
+ volume_manage_list = []
+ for i in range(count):
+ volume_manage_list.append(
+ create_one_manage_record({'size': str(i + 1)}))
+
+ return volume_manage_list
+
+
+def create_snapshot_manage_list_records(count=2):
+ snapshot_manage_list = []
+ for i in range(count):
+ snapshot_manage_list.append(
+ create_one_manage_record({'size': str(i + 1)}, snapshot=True))
+
+ return snapshot_manage_list
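For orientation, a minimal, hypothetical sketch of how the module-level helpers added above are consumed; the helper names and attribute access come from the diff, while the snippet itself is illustrative only:

    # hypothetical usage sketch; only the helper names are taken from the diff
    from openstackclient.tests.unit.volume.v3 import fakes as volume_fakes

    cluster = volume_fakes.create_one_cluster(attrs={'name': 'cluster@lvm'})
    clusters = volume_fakes.create_clusters(count=3)
    group = volume_fakes.create_one_volume_group()

    assert cluster.name == 'cluster@lvm'  # attrs override the generated defaults
    assert len(clusters) == 3             # count controls the list length
    assert group.volume_types             # unspecified fields get fake defaults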
diff --git a/openstackclient/tests/unit/volume/v3/test_block_storage_cleanup.py b/openstackclient/tests/unit/volume/v3/test_block_storage_cleanup.py
new file mode 100644
index 00000000..b48ce2f9
--- /dev/null
+++ b/openstackclient/tests/unit/volume/v3/test_block_storage_cleanup.py
@@ -0,0 +1,178 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import uuid
+
+from cinderclient import api_versions
+from osc_lib import exceptions
+
+from openstackclient.tests.unit.volume.v3 import fakes as volume_fakes
+from openstackclient.volume.v3 import block_storage_cleanup
+
+
+class TestBlockStorage(volume_fakes.TestVolume):
+
+ def setUp(self):
+ super().setUp()
+
+ # Get a shortcut to the BlockStorageWorkerManager Mock
+ self.worker_mock = self.app.client_manager.volume.workers
+ self.worker_mock.reset_mock()
+
+
+class TestBlockStorageCleanup(TestBlockStorage):
+
+ cleaning, unavailable = volume_fakes.create_cleanup_records()
+
+ def setUp(self):
+ super().setUp()
+
+ self.worker_mock.clean.return_value = (self.cleaning, self.unavailable)
+
+ # Get the command object to test
+ self.cmd = \
+ block_storage_cleanup.BlockStorageCleanup(self.app, None)
+
+ def test_cleanup(self):
+ self.app.client_manager.volume.api_version = \
+ api_versions.APIVersion('3.24')
+
+ arglist = [
+ ]
+ verifylist = [
+ ('cluster', None),
+ ('host', None),
+ ('binary', None),
+ ('is_up', None),
+ ('disabled', None),
+ ('resource_id', None),
+ ('resource_type', None),
+ ('service_id', None),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ expected_columns = ('ID', 'Cluster Name', 'Host', 'Binary', 'Status')
+ cleaning_data = tuple(
+ (
+ obj.id,
+ obj.cluster_name,
+ obj.host,
+ obj.binary,
+ 'Cleaning'
+ ) for obj in self.cleaning
+ )
+ unavailable_data = tuple(
+ (
+ obj.id,
+ obj.cluster_name,
+ obj.host,
+ obj.binary,
+ 'Unavailable'
+ ) for obj in self.unavailable
+ )
+ expected_data = cleaning_data + unavailable_data
+ columns, data = self.cmd.take_action(parsed_args)
+
+ self.assertEqual(expected_columns, columns)
+ self.assertEqual(expected_data, tuple(data))
+
+ # checking if proper call was made to cleanup resources
+ # Since we ignore all parameters with None value, we don't
+ # have any arguments passed to the API
+ self.worker_mock.clean.assert_called_once_with()
+
+ def test_block_storage_cleanup_pre_324(self):
+ arglist = [
+ ]
+ verifylist = [
+ ('cluster', None),
+ ('host', None),
+ ('binary', None),
+ ('is_up', None),
+ ('disabled', None),
+ ('resource_id', None),
+ ('resource_type', None),
+ ('service_id', None),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+ exc = self.assertRaises(exceptions.CommandError, self.cmd.take_action,
+ parsed_args)
+ self.assertIn(
+ '--os-volume-api-version 3.24 or greater is required', str(exc))
+
+ def test_cleanup_with_args(self):
+ self.app.client_manager.volume.api_version = \
+ api_versions.APIVersion('3.24')
+
+ fake_cluster = 'fake-cluster'
+ fake_host = 'fake-host'
+ fake_binary = 'fake-service'
+ fake_resource_id = str(uuid.uuid4())
+ fake_resource_type = 'Volume'
+ fake_service_id = 1
+ arglist = [
+ '--cluster', fake_cluster,
+ '--host', fake_host,
+ '--binary', fake_binary,
+ '--down',
+ '--enabled',
+ '--resource-id', fake_resource_id,
+ '--resource-type', fake_resource_type,
+ '--service-id', str(fake_service_id),
+ ]
+ verifylist = [
+ ('cluster', fake_cluster),
+ ('host', fake_host),
+ ('binary', fake_binary),
+ ('is_up', False),
+ ('disabled', False),
+ ('resource_id', fake_resource_id),
+ ('resource_type', fake_resource_type),
+ ('service_id', fake_service_id),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ expected_columns = ('ID', 'Cluster Name', 'Host', 'Binary', 'Status')
+ cleaning_data = tuple(
+ (
+ obj.id,
+ obj.cluster_name,
+ obj.host,
+ obj.binary,
+ 'Cleaning'
+ ) for obj in self.cleaning
+ )
+ unavailable_data = tuple(
+ (
+ obj.id,
+ obj.cluster_name,
+ obj.host,
+ obj.binary,
+ 'Unavailable'
+ ) for obj in self.unavailable
+ )
+ expected_data = cleaning_data + unavailable_data
+ columns, data = self.cmd.take_action(parsed_args)
+
+ self.assertEqual(expected_columns, columns)
+ self.assertEqual(expected_data, tuple(data))
+
+ # checking if proper call was made to cleanup resources
+ self.worker_mock.clean.assert_called_once_with(
+ cluster_name=fake_cluster,
+ host=fake_host,
+ binary=fake_binary,
+ is_up=False,
+ disabled=False,
+ resource_id=fake_resource_id,
+ resource_type=fake_resource_type,
+ service_id=fake_service_id)
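Every new test module above gates its command on a Block Storage microversion and expects CommandError otherwise; a rough sketch of that guard, with a helper name of our own choosing (the error wording is taken from the assertions):

    from cinderclient import api_versions
    from osc_lib import exceptions

    def _require_volume_api(volume_client, minimum):
        # hypothetical helper; the real commands inline this check
        if volume_client.api_version < api_versions.APIVersion(minimum):
            raise exceptions.CommandError(
                '--os-volume-api-version %s or greater is required to '
                'support this command' % minimum)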
diff --git a/openstackclient/tests/unit/volume/v3/test_block_storage_cluster.py b/openstackclient/tests/unit/volume/v3/test_block_storage_cluster.py
new file mode 100644
index 00000000..fdfd1100
--- /dev/null
+++ b/openstackclient/tests/unit/volume/v3/test_block_storage_cluster.py
@@ -0,0 +1,434 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from cinderclient import api_versions
+from osc_lib import exceptions
+
+from openstackclient.tests.unit.volume.v3 import fakes as volume_fakes
+from openstackclient.volume.v3 import block_storage_cluster
+
+
+class TestBlockStorageCluster(volume_fakes.TestVolume):
+
+ def setUp(self):
+ super().setUp()
+
+ # Get a shortcut to the BlockStorageClusterManager Mock
+ self.cluster_mock = self.app.client_manager.volume.clusters
+ self.cluster_mock.reset_mock()
+
+
+class TestBlockStorageClusterList(TestBlockStorageCluster):
+
+ # The cluster to be listed
+ fake_clusters = volume_fakes.create_clusters()
+
+ def setUp(self):
+ super().setUp()
+
+ self.cluster_mock.list.return_value = self.fake_clusters
+
+ # Get the command object to test
+ self.cmd = \
+ block_storage_cluster.ListBlockStorageCluster(self.app, None)
+
+ def test_cluster_list(self):
+ self.app.client_manager.volume.api_version = \
+ api_versions.APIVersion('3.7')
+
+ arglist = [
+ ]
+ verifylist = [
+ ('cluster', None),
+ ('binary', None),
+ ('is_up', None),
+ ('is_disabled', None),
+ ('num_hosts', None),
+ ('num_down_hosts', None),
+ ('long', False),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ expected_columns = ('Name', 'Binary', 'State', 'Status')
+ expected_data = tuple(
+ (
+ cluster.name,
+ cluster.binary,
+ cluster.state,
+ cluster.status,
+ ) for cluster in self.fake_clusters
+ )
+ columns, data = self.cmd.take_action(parsed_args)
+
+ self.assertEqual(expected_columns, columns)
+ self.assertEqual(expected_data, tuple(data))
+
+ # checking if proper call was made to list clusters
+ self.cluster_mock.list.assert_called_with(
+ name=None,
+ binary=None,
+ is_up=None,
+ disabled=None,
+ num_hosts=None,
+ num_down_hosts=None,
+ detailed=False,
+ )
+
+ def test_cluster_list_with_full_options(self):
+ self.app.client_manager.volume.api_version = \
+ api_versions.APIVersion('3.7')
+
+ arglist = [
+ '--cluster', 'foo',
+ '--binary', 'bar',
+ '--up',
+ '--disabled',
+ '--num-hosts', '5',
+ '--num-down-hosts', '0',
+ '--long',
+ ]
+ verifylist = [
+ ('cluster', 'foo'),
+ ('binary', 'bar'),
+ ('is_up', True),
+ ('is_disabled', True),
+ ('num_hosts', 5),
+ ('num_down_hosts', 0),
+ ('long', True),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ expected_columns = (
+ 'Name',
+ 'Binary',
+ 'State',
+ 'Status',
+ 'Num Hosts',
+ 'Num Down Hosts',
+ 'Last Heartbeat',
+ 'Disabled Reason',
+ 'Created At',
+ 'Updated At',
+ )
+ expected_data = tuple(
+ (
+ cluster.name,
+ cluster.binary,
+ cluster.state,
+ cluster.status,
+ cluster.num_hosts,
+ cluster.num_down_hosts,
+ cluster.last_heartbeat,
+ cluster.disabled_reason,
+ cluster.created_at,
+ cluster.updated_at,
+ ) for cluster in self.fake_clusters
+ )
+ columns, data = self.cmd.take_action(parsed_args)
+
+ self.assertEqual(expected_columns, columns)
+ self.assertEqual(expected_data, tuple(data))
+
+ # checking if proper call was made to list clusters
+ self.cluster_mock.list.assert_called_with(
+ name='foo',
+ binary='bar',
+ is_up=True,
+ disabled=True,
+ num_hosts=5,
+ num_down_hosts=0,
+ detailed=True,
+ )
+
+ def test_cluster_list_pre_v37(self):
+ self.app.client_manager.volume.api_version = \
+ api_versions.APIVersion('3.6')
+
+ arglist = [
+ ]
+ verifylist = [
+ ('cluster', None),
+ ('binary', None),
+ ('is_up', None),
+ ('is_disabled', None),
+ ('num_hosts', None),
+ ('num_down_hosts', None),
+ ('long', False),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ exc = self.assertRaises(
+ exceptions.CommandError,
+ self.cmd.take_action,
+ parsed_args)
+ self.assertIn(
+ '--os-volume-api-version 3.7 or greater is required', str(exc))
+
+
+class TestBlockStorageClusterSet(TestBlockStorageCluster):
+
+ cluster = volume_fakes.create_one_cluster()
+ columns = (
+ 'Name',
+ 'Binary',
+ 'State',
+ 'Status',
+ 'Disabled Reason',
+ 'Hosts',
+ 'Down Hosts',
+ 'Last Heartbeat',
+ 'Created At',
+ 'Updated At',
+ 'Replication Status',
+ 'Frozen',
+ 'Active Backend ID',
+ )
+ data = (
+ cluster.name,
+ cluster.binary,
+ cluster.state,
+ cluster.status,
+ cluster.disabled_reason,
+ cluster.num_hosts,
+ cluster.num_down_hosts,
+ cluster.last_heartbeat,
+ cluster.created_at,
+ cluster.updated_at,
+ cluster.replication_status,
+ cluster.frozen,
+ cluster.active_backend_id,
+ )
+
+ def setUp(self):
+ super().setUp()
+
+ self.cluster_mock.update.return_value = self.cluster
+
+ self.cmd = \
+ block_storage_cluster.SetBlockStorageCluster(self.app, None)
+
+ def test_cluster_set(self):
+ self.app.client_manager.volume.api_version = \
+ api_versions.APIVersion('3.7')
+
+ arglist = [
+ '--enable',
+ self.cluster.name,
+ ]
+ verifylist = [
+ ('cluster', self.cluster.name),
+ ('binary', 'cinder-volume'),
+ ('disabled', False),
+ ('disabled_reason', None),
+ ]
+
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ columns, data = self.cmd.take_action(parsed_args)
+ self.assertEqual(self.columns, columns)
+ self.assertEqual(self.data, tuple(data))
+
+ self.cluster_mock.update.assert_called_once_with(
+ self.cluster.name,
+ 'cinder-volume',
+ disabled=False,
+ disabled_reason=None,
+ )
+
+ def test_cluster_set_disable_with_reason(self):
+ self.app.client_manager.volume.api_version = \
+ api_versions.APIVersion('3.7')
+
+ arglist = [
+ '--binary', self.cluster.binary,
+ '--disable',
+ '--disable-reason', 'foo',
+ self.cluster.name,
+ ]
+ verifylist = [
+ ('cluster', self.cluster.name),
+ ('binary', self.cluster.binary),
+ ('disabled', True),
+ ('disabled_reason', 'foo'),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ columns, data = self.cmd.take_action(parsed_args)
+ self.assertEqual(self.columns, columns)
+ self.assertEqual(self.data, tuple(data))
+
+ self.cluster_mock.update.assert_called_once_with(
+ self.cluster.name,
+ self.cluster.binary,
+ disabled=True,
+ disabled_reason='foo',
+ )
+
+ def test_cluster_set_only_with_disable_reason(self):
+ self.app.client_manager.volume.api_version = \
+ api_versions.APIVersion('3.7')
+
+ arglist = [
+ '--disable-reason', 'foo',
+ self.cluster.name,
+ ]
+ verifylist = [
+ ('cluster', self.cluster.name),
+ ('binary', 'cinder-volume'),
+ ('disabled', None),
+ ('disabled_reason', 'foo'),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ exc = self.assertRaises(
+ exceptions.CommandError,
+ self.cmd.take_action,
+ parsed_args)
+ self.assertIn(
+ "Cannot specify --disable-reason without --disable", str(exc))
+
+ def test_cluster_set_enable_with_disable_reason(self):
+ self.app.client_manager.volume.api_version = \
+ api_versions.APIVersion('3.7')
+
+ arglist = [
+ '--enable',
+ '--disable-reason', 'foo',
+ self.cluster.name,
+ ]
+ verifylist = [
+ ('cluster', self.cluster.name),
+ ('binary', 'cinder-volume'),
+ ('disabled', False),
+ ('disabled_reason', 'foo'),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ exc = self.assertRaises(
+ exceptions.CommandError,
+ self.cmd.take_action,
+ parsed_args)
+ self.assertIn(
+ "Cannot specify --disable-reason without --disable", str(exc))
+
+ def test_cluster_set_pre_v37(self):
+ self.app.client_manager.volume.api_version = \
+ api_versions.APIVersion('3.6')
+
+ arglist = [
+ '--enable',
+ self.cluster.name,
+ ]
+ verifylist = [
+ ('cluster', self.cluster.name),
+ ('binary', 'cinder-volume'),
+ ('disabled', False),
+ ('disabled_reason', None),
+ ]
+
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ exc = self.assertRaises(
+ exceptions.CommandError,
+ self.cmd.take_action,
+ parsed_args)
+ self.assertIn(
+ '--os-volume-api-version 3.7 or greater is required', str(exc))
+
+
+class TestBlockStorageClusterShow(TestBlockStorageCluster):
+
+ cluster = volume_fakes.create_one_cluster()
+ columns = (
+ 'Name',
+ 'Binary',
+ 'State',
+ 'Status',
+ 'Disabled Reason',
+ 'Hosts',
+ 'Down Hosts',
+ 'Last Heartbeat',
+ 'Created At',
+ 'Updated At',
+ 'Replication Status',
+ 'Frozen',
+ 'Active Backend ID',
+ )
+ data = (
+ cluster.name,
+ cluster.binary,
+ cluster.state,
+ cluster.status,
+ cluster.disabled_reason,
+ cluster.num_hosts,
+ cluster.num_down_hosts,
+ cluster.last_heartbeat,
+ cluster.created_at,
+ cluster.updated_at,
+ cluster.replication_status,
+ cluster.frozen,
+ cluster.active_backend_id,
+ )
+
+ def setUp(self):
+ super().setUp()
+
+ self.cluster_mock.show.return_value = self.cluster
+
+ self.cmd = \
+ block_storage_cluster.ShowBlockStorageCluster(self.app, None)
+
+ def test_cluster_show(self):
+ self.app.client_manager.volume.api_version = \
+ api_versions.APIVersion('3.7')
+
+ arglist = [
+ '--binary', self.cluster.binary,
+ self.cluster.name,
+ ]
+ verifylist = [
+ ('cluster', self.cluster.name),
+ ('binary', self.cluster.binary),
+ ]
+
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ columns, data = self.cmd.take_action(parsed_args)
+ self.assertEqual(self.columns, columns)
+ self.assertEqual(self.data, tuple(data))
+
+ self.cluster_mock.show.assert_called_once_with(
+ self.cluster.name,
+ binary=self.cluster.binary,
+ )
+
+ def test_cluster_show_pre_v37(self):
+ self.app.client_manager.volume.api_version = \
+ api_versions.APIVersion('3.6')
+
+ arglist = [
+ '--binary', self.cluster.binary,
+ self.cluster.name,
+ ]
+ verifylist = [
+ ('cluster', self.cluster.name),
+ ('binary', self.cluster.binary),
+ ]
+
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ exc = self.assertRaises(
+ exceptions.CommandError,
+ self.cmd.take_action,
+ parsed_args)
+ self.assertIn(
+ '--os-volume-api-version 3.7 or greater is required', str(exc))
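The two failure cases in TestBlockStorageClusterSet pin down how --disable-reason is validated; a hedged sketch of that check, where the parsed_args attribute names come from the verifylist entries and the surrounding take_action structure is assumed:

    from osc_lib import exceptions

    def _validate_disable_reason(parsed_args):
        # assumed helper; rejects a reason unless the cluster is being disabled
        if parsed_args.disabled_reason and not parsed_args.disabled:
            raise exceptions.CommandError(
                'Cannot specify --disable-reason without --disable')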
diff --git a/openstackclient/tests/unit/volume/v3/test_block_storage_log_level.py b/openstackclient/tests/unit/volume/v3/test_block_storage_log_level.py
new file mode 100644
index 00000000..35ea6274
--- /dev/null
+++ b/openstackclient/tests/unit/volume/v3/test_block_storage_log_level.py
@@ -0,0 +1,233 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+from cinderclient import api_versions
+import ddt
+from osc_lib import exceptions
+
+from openstackclient.tests.unit import utils as tests_utils
+from openstackclient.tests.unit.volume.v3 import fakes as volume_fakes
+from openstackclient.volume.v3 import block_storage_log_level as service
+
+
+class TestService(volume_fakes.TestVolume):
+
+ def setUp(self):
+ super().setUp()
+
+ # Get a shortcut to the ServiceManager Mock
+ self.service_mock = self.app.client_manager.volume.services
+ self.service_mock.reset_mock()
+
+
+class TestBlockStorageLogLevelList(TestService):
+
+ service_log = volume_fakes.create_service_log_level_entry()
+
+ def setUp(self):
+ super().setUp()
+
+ self.service_mock.get_log_levels.return_value = [self.service_log]
+
+ # Get the command object to test
+ self.cmd = service.BlockStorageLogLevelList(self.app, None)
+
+ def test_block_storage_log_level_list(self):
+ self.app.client_manager.volume.api_version = \
+ api_versions.APIVersion('3.32')
+ arglist = [
+ '--host', self.service_log.host,
+ '--service', self.service_log.binary,
+ '--log-prefix', self.service_log.prefix,
+ ]
+ verifylist = [
+ ('host', self.service_log.host),
+ ('service', self.service_log.binary),
+ ('log_prefix', self.service_log.prefix),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ columns, data = self.cmd.take_action(parsed_args)
+
+ expected_columns = [
+ 'Binary',
+ 'Host',
+ 'Prefix',
+ 'Level',
+ ]
+
+ # confirming if all expected columns are present in the result.
+ self.assertEqual(expected_columns, columns)
+
+ datalist = ((
+ self.service_log.binary,
+ self.service_log.host,
+ self.service_log.prefix,
+ self.service_log.level,
+ ), )
+
+ # confirming if all expected values are present in the result.
+ self.assertEqual(datalist, tuple(data))
+
+ # checking if proper call was made to get log level of services
+ self.service_mock.get_log_levels.assert_called_with(
+ server=self.service_log.host,
+ binary=self.service_log.binary,
+ prefix=self.service_log.prefix,
+ )
+
+ def test_block_storage_log_level_list_pre_332(self):
+ arglist = [
+ '--host', self.service_log.host,
+ '--service', 'cinder-api',
+ '--log-prefix', 'cinder_test.api.common',
+ ]
+ verifylist = [
+ ('host', self.service_log.host),
+ ('service', 'cinder-api'),
+ ('log_prefix', 'cinder_test.api.common'),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ exc = self.assertRaises(exceptions.CommandError, self.cmd.take_action,
+ parsed_args)
+ self.assertIn(
+ '--os-volume-api-version 3.32 or greater is required', str(exc))
+
+ def test_block_storage_log_level_list_invalid_service_name(self):
+ self.app.client_manager.volume.api_version = \
+ api_versions.APIVersion('3.32')
+ arglist = [
+ '--host', self.service_log.host,
+ '--service', 'nova-api',
+ '--log-prefix', 'cinder_test.api.common',
+ ]
+ verifylist = [
+ ('host', self.service_log.host),
+ ('service', 'nova-api'),
+ ('log_prefix', 'cinder_test.api.common'),
+ ]
+
+ self.assertRaises(tests_utils.ParserException, self.check_parser,
+ self.cmd, arglist, verifylist)
+
+
+@ddt.ddt
+class TestBlockStorageLogLevelSet(TestService):
+
+ service_log = volume_fakes.create_service_log_level_entry()
+
+ def setUp(self):
+ super().setUp()
+
+ # Get the command object to test
+ self.cmd = service.BlockStorageLogLevelSet(self.app, None)
+
+ def test_block_storage_log_level_set(self):
+ self.app.client_manager.volume.api_version = \
+ api_versions.APIVersion('3.32')
+ arglist = [
+ 'ERROR',
+ '--host', self.service_log.host,
+ '--service', self.service_log.binary,
+ '--log-prefix', self.service_log.prefix,
+ ]
+ verifylist = [
+ ('level', 'ERROR'),
+ ('host', self.service_log.host),
+ ('service', self.service_log.binary),
+ ('log_prefix', self.service_log.prefix),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ self.cmd.take_action(parsed_args)
+
+ # checking if proper call was made to set log level of services
+ self.service_mock.set_log_levels.assert_called_with(
+ level='ERROR',
+ server=self.service_log.host,
+ binary=self.service_log.binary,
+ prefix=self.service_log.prefix,
+ )
+
+ def test_block_storage_log_level_set_pre_332(self):
+ arglist = [
+ 'ERROR',
+ '--host', self.service_log.host,
+ '--service', 'cinder-api',
+ '--log-prefix', 'cinder_test.api.common',
+ ]
+ verifylist = [
+ ('level', 'ERROR'),
+ ('host', self.service_log.host),
+ ('service', 'cinder-api'),
+ ('log_prefix', 'cinder_test.api.common'),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ exc = self.assertRaises(exceptions.CommandError, self.cmd.take_action,
+ parsed_args)
+ self.assertIn(
+ '--os-volume-api-version 3.32 or greater is required', str(exc))
+
+ def test_block_storage_log_level_set_invalid_service_name(self):
+ self.app.client_manager.volume.api_version = \
+ api_versions.APIVersion('3.32')
+ arglist = [
+ 'ERROR',
+ '--host', self.service_log.host,
+ '--service', 'nova-api',
+ '--log-prefix', 'cinder.api.common',
+ ]
+ verifylist = [
+ ('level', 'ERROR'),
+ ('host', self.service_log.host),
+ ('service', 'nova-api'),
+ ('log_prefix', 'cinder.api.common'),
+ ]
+
+ self.assertRaises(tests_utils.ParserException, self.check_parser,
+ self.cmd, arglist, verifylist)
+
+ @ddt.data('WARNING', 'info', 'Error', 'debuG', 'fake-log-level')
+ def test_block_storage_log_level_set_log_level(self, log_level):
+ self.app.client_manager.volume.api_version = \
+ api_versions.APIVersion('3.32')
+ arglist = [
+ log_level,
+ '--host', self.service_log.host,
+ '--service', 'cinder-api',
+ '--log-prefix', 'cinder.api.common',
+ ]
+ verifylist = [
+ ('level', log_level.upper()),
+ ('host', self.service_log.host),
+ ('service', 'cinder-api'),
+ ('log_prefix', 'cinder.api.common'),
+ ]
+
+ if log_level == 'fake-log-level':
+ self.assertRaises(tests_utils.ParserException, self.check_parser,
+ self.cmd, arglist, verifylist)
+ else:
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ self.cmd.take_action(parsed_args)
+
+ # checking if proper call was made to set log level of services
+ self.service_mock.set_log_levels.assert_called_with(
+ level=log_level.upper(),
+ server=self.service_log.host,
+ binary=self.service_log.binary,
+ prefix=self.service_log.prefix)
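The @ddt.data decorator used above expands one test method into several at class-definition time, one per value; a small standalone illustration of the mechanism (class and method names are ours, unrelated to cinder):

    import unittest

    import ddt

    @ddt.ddt
    class LogLevelExample(unittest.TestCase):
        @ddt.data('WARNING', 'info', 'debuG')
        def test_upper(self, level):
            # ddt generates one test_upper_* method per supplied value
            self.assertIn(level.upper(), ('WARNING', 'INFO', 'DEBUG'))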
diff --git a/openstackclient/tests/unit/volume/v3/test_block_storage_manage.py b/openstackclient/tests/unit/volume/v3/test_block_storage_manage.py
new file mode 100644
index 00000000..afd0fd35
--- /dev/null
+++ b/openstackclient/tests/unit/volume/v3/test_block_storage_manage.py
@@ -0,0 +1,411 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+from cinderclient import api_versions
+from osc_lib import exceptions
+
+from openstackclient.tests.unit import utils as tests_utils
+from openstackclient.tests.unit.volume.v2 import fakes as v2_volume_fakes
+from openstackclient.tests.unit.volume.v3 import fakes as volume_fakes
+from openstackclient.volume.v3 import block_storage_manage
+
+
+class TestBlockStorageManage(v2_volume_fakes.TestVolume):
+
+ def setUp(self):
+ super().setUp()
+
+ self.volumes_mock = self.app.client_manager.volume.volumes
+ self.volumes_mock.reset_mock()
+ self.snapshots_mock = self.app.client_manager.volume.volume_snapshots
+ self.snapshots_mock.reset_mock()
+
+
+class TestBlockStorageVolumeManage(TestBlockStorageManage):
+
+ volume_manage_list = volume_fakes.create_volume_manage_list_records()
+
+ def setUp(self):
+ super().setUp()
+
+ self.volumes_mock.list_manageable.return_value = (
+ self.volume_manage_list)
+
+ # Get the command object to test
+ self.cmd = block_storage_manage.BlockStorageManageVolumes(
+ self.app, None)
+
+ def test_block_storage_volume_manage_list(self):
+ self.app.client_manager.volume.api_version = \
+ api_versions.APIVersion('3.8')
+ host = 'fake_host'
+ arglist = [
+ host,
+ ]
+ verifylist = [
+ ('host', host),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ columns, data = self.cmd.take_action(parsed_args)
+
+ expected_columns = [
+ 'reference',
+ 'size',
+ 'safe_to_manage',
+ 'reason_not_safe',
+ 'cinder_id',
+ 'extra_info',
+ ]
+
+ # confirming if all expected columns are present in the result.
+ self.assertEqual(expected_columns, columns)
+
+ datalist = []
+ for volume_record in self.volume_manage_list:
+ manage_details = (
+ volume_record.reference,
+ volume_record.size,
+ volume_record.safe_to_manage,
+ volume_record.reason_not_safe,
+ volume_record.cinder_id,
+ volume_record.extra_info,
+ )
+ datalist.append(manage_details)
+ datalist = tuple(datalist)
+
+ # confirming if all expected values are present in the result.
+ self.assertEqual(datalist, tuple(data))
+
+ # checking if proper call was made to get volume manageable list
+ self.volumes_mock.list_manageable.assert_called_with(
+ host=parsed_args.host,
+ detailed=parsed_args.detailed,
+ marker=parsed_args.marker,
+ limit=parsed_args.limit,
+ offset=parsed_args.offset,
+ sort=parsed_args.sort,
+ cluster=parsed_args.cluster,
+ )
+
+ def test_block_storage_volume_manage_pre_38(self):
+ host = 'fake_host'
+ arglist = [
+ host,
+ ]
+ verifylist = [
+ ('host', host),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ exc = self.assertRaises(exceptions.CommandError, self.cmd.take_action,
+ parsed_args)
+ self.assertIn(
+ '--os-volume-api-version 3.8 or greater is required', str(exc))
+
+ def test_block_storage_volume_manage_pre_317(self):
+ self.app.client_manager.volume.api_version = \
+ api_versions.APIVersion('3.16')
+ cluster = 'fake_cluster'
+ arglist = [
+ '--cluster', cluster,
+ ]
+ verifylist = [
+ ('cluster', cluster),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ exc = self.assertRaises(exceptions.CommandError, self.cmd.take_action,
+ parsed_args)
+ self.assertIn(
+ '--os-volume-api-version 3.17 or greater is required', str(exc))
+ self.assertIn('--cluster', str(exc))
+
+ def test_block_storage_volume_manage_host_and_cluster(self):
+ self.app.client_manager.volume.api_version = \
+ api_versions.APIVersion('3.17')
+ host = 'fake_host'
+ cluster = 'fake_cluster'
+ arglist = [
+ host,
+ '--cluster', cluster,
+ ]
+ verifylist = [
+ ('host', host),
+ ('cluster', cluster),
+ ]
+ exc = self.assertRaises(tests_utils.ParserException,
+ self.check_parser, self.cmd,
+ arglist, verifylist)
+ self.assertIn(
+ 'argument --cluster: not allowed with argument <host>', str(exc))
+
+ def test_block_storage_volume_manage_list_all_args(self):
+ self.app.client_manager.volume.api_version = \
+ api_versions.APIVersion('3.8')
+ host = 'fake_host'
+ detailed = True
+ marker = 'fake_marker'
+ limit = '5'
+ offset = '3'
+ sort = 'size:asc'
+ arglist = [
+ host,
+ '--detailed', str(detailed),
+ '--marker', marker,
+ '--limit', limit,
+ '--offset', offset,
+ '--sort', sort,
+ ]
+ verifylist = [
+ ('host', host),
+ ('detailed', str(detailed)),
+ ('marker', marker),
+ ('limit', limit),
+ ('offset', offset),
+ ('sort', sort),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ columns, data = self.cmd.take_action(parsed_args)
+
+ expected_columns = [
+ 'reference',
+ 'size',
+ 'safe_to_manage',
+ 'reason_not_safe',
+ 'cinder_id',
+ 'extra_info',
+ ]
+
+ # confirming if all expected columns are present in the result.
+ self.assertEqual(expected_columns, columns)
+
+ datalist = []
+ for volume_record in self.volume_manage_list:
+ manage_details = (
+ volume_record.reference,
+ volume_record.size,
+ volume_record.safe_to_manage,
+ volume_record.reason_not_safe,
+ volume_record.cinder_id,
+ volume_record.extra_info,
+ )
+ datalist.append(manage_details)
+ datalist = tuple(datalist)
+
+ # confirming if all expected values are present in the result.
+ self.assertEqual(datalist, tuple(data))
+
+ # checking if proper call was made to get volume manageable list
+ self.volumes_mock.list_manageable.assert_called_with(
+ host=host,
+ detailed=detailed,
+ marker=marker,
+ limit=limit,
+ offset=offset,
+ sort=sort,
+ cluster=parsed_args.cluster,
+ )
+
+
+class TestBlockStorageSnapshotManage(TestBlockStorageManage):
+
+ snapshot_manage_list = volume_fakes.create_snapshot_manage_list_records()
+
+ def setUp(self):
+ super().setUp()
+
+ self.snapshots_mock.list_manageable.return_value = (
+ self.snapshot_manage_list)
+
+ # Get the command object to test
+ self.cmd = block_storage_manage.BlockStorageManageSnapshots(
+ self.app, None)
+
+ def test_block_storage_snapshot_manage_list(self):
+ self.app.client_manager.volume.api_version = \
+ api_versions.APIVersion('3.8')
+ host = 'fake_host'
+ arglist = [
+ host,
+ ]
+ verifylist = [
+ ('host', host),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ columns, data = self.cmd.take_action(parsed_args)
+
+ expected_columns = [
+ 'reference',
+ 'size',
+ 'safe_to_manage',
+ 'source_reference',
+ 'reason_not_safe',
+ 'cinder_id',
+ 'extra_info',
+ ]
+
+ # confirming if all expected columns are present in the result.
+ self.assertEqual(expected_columns, columns)
+
+ datalist = []
+ for snapshot_record in self.snapshot_manage_list:
+ manage_details = (
+ snapshot_record.reference,
+ snapshot_record.size,
+ snapshot_record.safe_to_manage,
+ snapshot_record.source_reference,
+ snapshot_record.reason_not_safe,
+ snapshot_record.cinder_id,
+ snapshot_record.extra_info,
+ )
+ datalist.append(manage_details)
+ datalist = tuple(datalist)
+
+ # confirming if all expected values are present in the result.
+ self.assertEqual(datalist, tuple(data))
+
+ # checking if proper call was made to get snapshot manageable list
+ self.snapshots_mock.list_manageable.assert_called_with(
+ host=parsed_args.host,
+ detailed=parsed_args.detailed,
+ marker=parsed_args.marker,
+ limit=parsed_args.limit,
+ offset=parsed_args.offset,
+ sort=parsed_args.sort,
+ cluster=parsed_args.cluster,
+ )
+
+ def test_block_storage_volume_manage_pre_38(self):
+ host = 'fake_host'
+ arglist = [
+ host,
+ ]
+ verifylist = [
+ ('host', host),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ exc = self.assertRaises(exceptions.CommandError, self.cmd.take_action,
+ parsed_args)
+ self.assertIn(
+ '--os-volume-api-version 3.8 or greater is required', str(exc))
+
+ def test_block_storage_volume_manage_pre_317(self):
+ self.app.client_manager.volume.api_version = \
+ api_versions.APIVersion('3.16')
+ cluster = 'fake_cluster'
+ arglist = [
+ '--cluster', cluster,
+ ]
+ verifylist = [
+ ('cluster', cluster),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ exc = self.assertRaises(exceptions.CommandError, self.cmd.take_action,
+ parsed_args)
+ self.assertIn(
+ '--os-volume-api-version 3.17 or greater is required', str(exc))
+ self.assertIn('--cluster', str(exc))
+
+ def test_block_storage_snapshot_manage_host_and_cluster(self):
+ self.app.client_manager.volume.api_version = \
+ api_versions.APIVersion('3.17')
+ host = 'fake_host'
+ cluster = 'fake_cluster'
+ arglist = [
+ host,
+ '--cluster', cluster,
+ ]
+ verifylist = [
+ ('host', host),
+ ('cluster', cluster),
+ ]
+ exc = self.assertRaises(tests_utils.ParserException,
+ self.check_parser, self.cmd,
+ arglist, verifylist)
+ self.assertIn(
+ 'argument --cluster: not allowed with argument <host>', str(exc))
+
+ def test_block_storage_snapshot_manage_list_all_args(self):
+ self.app.client_manager.volume.api_version = \
+ api_versions.APIVersion('3.8')
+ host = 'fake_host'
+ detailed = True
+ marker = 'fake_marker'
+ limit = '5'
+ offset = '3'
+ sort = 'size:asc'
+ arglist = [
+ host,
+ '--detailed', str(detailed),
+ '--marker', marker,
+ '--limit', limit,
+ '--offset', offset,
+ '--sort', sort,
+ ]
+ verifylist = [
+ ('host', host),
+ ('detailed', str(detailed)),
+ ('marker', marker),
+ ('limit', limit),
+ ('offset', offset),
+ ('sort', sort),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ columns, data = self.cmd.take_action(parsed_args)
+
+ expected_columns = [
+ 'reference',
+ 'size',
+ 'safe_to_manage',
+ 'source_reference',
+ 'reason_not_safe',
+ 'cinder_id',
+ 'extra_info',
+ ]
+
+ # Confirm that all expected columns are present in the result.
+ self.assertEqual(expected_columns, columns)
+
+ datalist = []
+ for snapshot_record in self.snapshot_manage_list:
+ manage_details = (
+ snapshot_record.reference,
+ snapshot_record.size,
+ snapshot_record.safe_to_manage,
+ snapshot_record.source_reference,
+ snapshot_record.reason_not_safe,
+ snapshot_record.cinder_id,
+ snapshot_record.extra_info,
+ )
+ datalist.append(manage_details)
+ datalist = tuple(datalist)
+
+ # Confirm that all expected values are present in the result.
+ self.assertEqual(datalist, tuple(data))
+
+ # Check that the proper call was made to get the snapshot manageable list
+ self.snapshots_mock.list_manageable.assert_called_with(
+ host=host,
+ detailed=detailed,
+ marker=marker,
+ limit=limit,
+ offset=offset,
+ sort=sort,
+ cluster=parsed_args.cluster,
+ )
diff --git a/openstackclient/tests/unit/volume/v3/test_block_storage_resource_filter.py b/openstackclient/tests/unit/volume/v3/test_block_storage_resource_filter.py
new file mode 100644
index 00000000..086339ff
--- /dev/null
+++ b/openstackclient/tests/unit/volume/v3/test_block_storage_resource_filter.py
@@ -0,0 +1,142 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from cinderclient import api_versions
+from osc_lib import exceptions
+
+from openstackclient.tests.unit.volume.v3 import fakes as volume_fakes
+from openstackclient.volume.v3 import block_storage_resource_filter
+
+
+class TestBlockStorageResourceFilter(volume_fakes.TestVolume):
+
+ def setUp(self):
+ super().setUp()
+
+ # Get a shortcut to the ResourceFilterManager Mock
+ self.resource_filter_mock = \
+ self.app.client_manager.volume.resource_filters
+ self.resource_filter_mock.reset_mock()
+
+
+class TestBlockStorageResourceFilterList(TestBlockStorageResourceFilter):
+
+ # The resource filters to be listed
+ fake_resource_filters = volume_fakes.create_resource_filters()
+
+ def setUp(self):
+ super().setUp()
+
+ self.resource_filter_mock.list.return_value = \
+ self.fake_resource_filters
+
+ # Get the command object to test
+ self.cmd = block_storage_resource_filter\
+ .ListBlockStorageResourceFilter(self.app, None)
+
+ def test_resource_filter_list(self):
+ self.app.client_manager.volume.api_version = \
+ api_versions.APIVersion('3.33')
+
+ arglist = []
+ verifylist = []
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ expected_columns = ('Resource', 'Filters')
+ expected_data = tuple(
+ (
+ resource_filter.resource,
+ resource_filter.filters,
+ ) for resource_filter in self.fake_resource_filters
+ )
+ columns, data = self.cmd.take_action(parsed_args)
+
+ self.assertEqual(expected_columns, columns)
+ self.assertEqual(expected_data, tuple(data))
+
+ # Check that the proper call was made to list resource filters
+ self.resource_filter_mock.list.assert_called_with()
+
+ def test_resource_filter_list_pre_v333(self):
+ self.app.client_manager.volume.api_version = \
+ api_versions.APIVersion('3.32')
+
+ arglist = []
+ verifylist = []
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ exc = self.assertRaises(
+ exceptions.CommandError,
+ self.cmd.take_action,
+ parsed_args)
+ self.assertIn(
+ '--os-volume-api-version 3.33 or greater is required', str(exc))
+
+
+class TestBlockStorageResourceFilterShow(TestBlockStorageResourceFilter):
+
+ # The resource filter to be shown
+ fake_resource_filter = volume_fakes.create_one_resource_filter()
+
+ def setUp(self):
+ super().setUp()
+
+ self.resource_filter_mock.list.return_value = \
+ iter([self.fake_resource_filter])
+
+ # Get the command object to test
+ self.cmd = block_storage_resource_filter\
+ .ShowBlockStorageResourceFilter(self.app, None)
+
+ def test_resource_filter_show(self):
+ self.app.client_manager.volume.api_version = \
+ api_versions.APIVersion('3.33')
+
+ arglist = [
+ self.fake_resource_filter.resource,
+ ]
+ verifylist = [
+ ('resource', self.fake_resource_filter.resource),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ expected_columns = ('filters', 'resource')
+ expected_data = (
+ self.fake_resource_filter.filters,
+ self.fake_resource_filter.resource,
+ )
+ columns, data = self.cmd.take_action(parsed_args)
+
+ self.assertEqual(expected_columns, columns)
+ self.assertEqual(expected_data, data)
+
+ # Check that the proper call was made to retrieve the resource filter
+ self.resource_filter_mock.list.assert_called_with(resource='volume')
+
+ def test_resource_filter_show_pre_v333(self):
+ self.app.client_manager.volume.api_version = \
+ api_versions.APIVersion('3.32')
+
+ arglist = [
+ self.fake_resource_filter.resource,
+ ]
+ verifylist = [
+ ('resource', self.fake_resource_filter.resource),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ exc = self.assertRaises(
+ exceptions.CommandError,
+ self.cmd.take_action,
+ parsed_args)
+ self.assertIn(
+ '--os-volume-api-version 3.33 or greater is required', str(exc))
diff --git a/openstackclient/tests/unit/volume/v3/test_volume.py b/openstackclient/tests/unit/volume/v3/test_volume.py
new file mode 100644
index 00000000..ed72bfa1
--- /dev/null
+++ b/openstackclient/tests/unit/volume/v3/test_volume.py
@@ -0,0 +1,179 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+import copy
+from unittest import mock
+
+from cinderclient import api_versions
+from osc_lib.cli import format_columns
+from osc_lib import exceptions
+from osc_lib import utils
+
+from openstackclient.tests.unit.volume.v2 import fakes as volume_fakes
+from openstackclient.volume.v3 import volume
+
+
+class TestVolumeSummary(volume_fakes.TestVolume):
+
+ columns = [
+ 'Total Count',
+ 'Total Size',
+ ]
+
+ def setUp(self):
+ super().setUp()
+
+ self.volumes_mock = self.app.client_manager.volume.volumes
+ self.volumes_mock.reset_mock()
+ self.mock_vol_1 = volume_fakes.create_one_volume()
+ self.mock_vol_2 = volume_fakes.create_one_volume()
+ self.return_dict = {
+ 'volume-summary': {
+ 'total_count': 2,
+ 'total_size': self.mock_vol_1.size + self.mock_vol_2.size}}
+ self.volumes_mock.summary.return_value = self.return_dict
+
+ # Get the command object to test
+ self.cmd = volume.VolumeSummary(self.app, None)
+
+ def test_volume_summary(self):
+ self.app.client_manager.volume.api_version = \
+ api_versions.APIVersion('3.12')
+ arglist = [
+ '--all-projects',
+ ]
+ verifylist = [
+ ('all_projects', True),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ columns, data = self.cmd.take_action(parsed_args)
+
+ self.volumes_mock.summary.assert_called_once_with(
+ all_tenants=True,
+ )
+
+ self.assertEqual(self.columns, columns)
+
+ datalist = (
+ 2,
+ self.mock_vol_1.size + self.mock_vol_2.size)
+ self.assertCountEqual(datalist, tuple(data))
+
+ def test_volume_summary_pre_312(self):
+ arglist = [
+ '--all-projects',
+ ]
+ verifylist = [
+ ('all_projects', True),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ exc = self.assertRaises(
+ exceptions.CommandError,
+ self.cmd.take_action,
+ parsed_args)
+ self.assertIn(
+ '--os-volume-api-version 3.12 or greater is required',
+ str(exc))
+
+ def test_volume_summary_with_metadata(self):
+ self.app.client_manager.volume.api_version = \
+ api_versions.APIVersion('3.36')
+
+ combine_meta = {**self.mock_vol_1.metadata, **self.mock_vol_2.metadata}
+ meta_dict = copy.deepcopy(self.return_dict)
+ meta_dict['volume-summary']['metadata'] = combine_meta
+ self.volumes_mock.summary.return_value = meta_dict
+
+ new_cols = copy.deepcopy(self.columns)
+ new_cols.extend(['Metadata'])
+
+ arglist = [
+ '--all-projects',
+ ]
+ verifylist = [
+ ('all_projects', True),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ columns, data = self.cmd.take_action(parsed_args)
+
+ self.volumes_mock.summary.assert_called_once_with(
+ all_tenants=True,
+ )
+
+ self.assertEqual(new_cols, columns)
+
+ datalist = (
+ 2,
+ self.mock_vol_1.size + self.mock_vol_2.size,
+ format_columns.DictColumn(combine_meta))
+ self.assertCountEqual(datalist, tuple(data))
+
+
+class TestVolumeRevertToSnapshot(volume_fakes.TestVolume):
+
+ def setUp(self):
+ super().setUp()
+
+ self.volumes_mock = self.app.client_manager.volume.volumes
+ self.volumes_mock.reset_mock()
+ self.snapshots_mock = self.app.client_manager.volume.volume_snapshots
+ self.snapshots_mock.reset_mock()
+ self.mock_volume = volume_fakes.create_one_volume()
+ self.mock_snapshot = volume_fakes.create_one_snapshot(
+ attrs={'volume_id': self.mock_volume.id})
+
+ # Get the command object to test
+ self.cmd = volume.VolumeRevertToSnapshot(self.app, None)
+
+ def test_volume_revert_to_snapshot_pre_340(self):
+ arglist = [
+ self.mock_snapshot.id,
+ ]
+ verifylist = [
+ ('snapshot', self.mock_snapshot.id),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ exc = self.assertRaises(
+ exceptions.CommandError,
+ self.cmd.take_action,
+ parsed_args)
+ self.assertIn(
+ '--os-volume-api-version 3.40 or greater is required',
+ str(exc))
+
+ def test_volume_revert_to_snapshot(self):
+ self.app.client_manager.volume.api_version = \
+ api_versions.APIVersion('3.40')
+ arglist = [
+ self.mock_snapshot.id,
+ ]
+ verifylist = [
+ ('snapshot', self.mock_snapshot.id),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
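+ # find_resource is patched to return the snapshot first, then the volume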
+ find_mock_result = [self.mock_snapshot, self.mock_volume]
+ with mock.patch.object(utils, 'find_resource',
+ side_effect=find_mock_result) as find_mock:
+ self.cmd.take_action(parsed_args)
+
+ self.volumes_mock.revert_to_snapshot.assert_called_once_with(
+ volume=self.mock_volume,
+ snapshot=self.mock_snapshot,
+ )
+ self.assertEqual(2, find_mock.call_count)
diff --git a/openstackclient/tests/unit/volume/v3/test_volume_attachment.py b/openstackclient/tests/unit/volume/v3/test_volume_attachment.py
index 09f698e7..c0bf5ae7 100644
--- a/openstackclient/tests/unit/volume/v3/test_volume_attachment.py
+++ b/openstackclient/tests/unit/volume/v3/test_volume_attachment.py
@@ -41,11 +41,11 @@ class TestVolumeAttachment(volume_fakes.TestVolume):
class TestVolumeAttachmentCreate(TestVolumeAttachment):
- volume = volume_fakes.FakeVolume.create_one_volume()
+ volume = volume_fakes.create_one_volume()
server = compute_fakes.FakeServer.create_one_server()
- volume_attachment = \
- volume_fakes.FakeVolumeAttachment.create_one_volume_attachment(
- attrs={'instance': server.id, 'volume_id': volume.id})
+ volume_attachment = volume_fakes.create_one_volume_attachment(
+ attrs={'instance': server.id, 'volume_id': volume.id},
+ )
columns = (
'ID',
@@ -73,8 +73,9 @@ class TestVolumeAttachmentCreate(TestVolumeAttachment):
self.volumes_mock.get.return_value = self.volume
self.servers_mock.get.return_value = self.server
+ # VolumeAttachmentManager.create returns a dict
self.volume_attachments_mock.create.return_value = \
- self.volume_attachment
+ self.volume_attachment.to_dict()
self.cmd = volume_attachment.CreateVolumeAttachment(self.app, None)
@@ -237,8 +238,7 @@ class TestVolumeAttachmentCreate(TestVolumeAttachment):
class TestVolumeAttachmentDelete(TestVolumeAttachment):
- volume_attachment = \
- volume_fakes.FakeVolumeAttachment.create_one_volume_attachment()
+ volume_attachment = volume_fakes.create_one_volume_attachment()
def setUp(self):
super().setUp()
@@ -289,8 +289,7 @@ class TestVolumeAttachmentDelete(TestVolumeAttachment):
class TestVolumeAttachmentSet(TestVolumeAttachment):
- volume_attachment = \
- volume_fakes.FakeVolumeAttachment.create_one_volume_attachment()
+ volume_attachment = volume_fakes.create_one_volume_attachment()
columns = (
'ID',
@@ -390,8 +389,7 @@ class TestVolumeAttachmentSet(TestVolumeAttachment):
class TestVolumeAttachmentComplete(TestVolumeAttachment):
- volume_attachment = \
- volume_fakes.FakeVolumeAttachment.create_one_volume_attachment()
+ volume_attachment = volume_fakes.create_one_volume_attachment()
def setUp(self):
super().setUp()
@@ -443,8 +441,7 @@ class TestVolumeAttachmentComplete(TestVolumeAttachment):
class TestVolumeAttachmentList(TestVolumeAttachment):
project = identity_fakes.FakeProject.create_one_project()
- volume_attachments = \
- volume_fakes.FakeVolumeAttachment.create_volume_attachments()
+ volume_attachments = volume_fakes.create_volume_attachments()
columns = (
'ID',
diff --git a/openstackclient/tests/unit/volume/v3/test_volume_group.py b/openstackclient/tests/unit/volume/v3/test_volume_group.py
index 13ef38d2..78717de8 100644
--- a/openstackclient/tests/unit/volume/v3/test_volume_group.py
+++ b/openstackclient/tests/unit/volume/v3/test_volume_group.py
@@ -10,9 +10,12 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
from cinderclient import api_versions
from osc_lib import exceptions
+from openstackclient.tests.unit import utils as tests_utils
from openstackclient.tests.unit.volume.v3 import fakes as volume_fakes
from openstackclient.volume.v3 import volume_group
@@ -32,18 +35,23 @@ class TestVolumeGroup(volume_fakes.TestVolume):
self.volume_types_mock = self.app.client_manager.volume.volume_types
self.volume_types_mock.reset_mock()
+ self.volume_group_snapshots_mock = \
+ self.app.client_manager.volume.group_snapshots
+ self.volume_group_snapshots_mock.reset_mock()
+
class TestVolumeGroupCreate(TestVolumeGroup):
- fake_volume_type = volume_fakes.FakeVolumeType.create_one_volume_type()
- fake_volume_group_type = \
- volume_fakes.FakeVolumeGroupType.create_one_volume_group_type()
- fake_volume_group = volume_fakes.FakeVolumeGroup.create_one_volume_group(
+ fake_volume_type = volume_fakes.create_one_volume_type()
+ fake_volume_group_type = volume_fakes.create_one_volume_group_type()
+ fake_volume_group = volume_fakes.create_one_volume_group(
attrs={
'group_type': fake_volume_group_type.id,
'volume_types': [fake_volume_type.id],
},
)
+ fake_volume_group_snapshot = \
+ volume_fakes.create_one_volume_group_snapshot()
columns = (
'ID',
@@ -80,6 +88,10 @@ class TestVolumeGroupCreate(TestVolumeGroup):
self.fake_volume_group_type
self.volume_groups_mock.create.return_value = self.fake_volume_group
self.volume_groups_mock.get.return_value = self.fake_volume_group
+ self.volume_groups_mock.create_from_src.return_value = \
+ self.fake_volume_group
+ self.volume_group_snapshots_mock.get.return_value = \
+ self.fake_volume_group_snapshot
self.cmd = volume_group.CreateVolumeGroup(self.app, None)
@@ -88,8 +100,8 @@ class TestVolumeGroupCreate(TestVolumeGroup):
api_versions.APIVersion('3.13')
arglist = [
- self.fake_volume_group_type.id,
- self.fake_volume_type.id,
+ '--volume-group-type', self.fake_volume_group_type.id,
+ '--volume-type', self.fake_volume_type.id,
]
verifylist = [
('volume_group_type', self.fake_volume_group_type.id),
@@ -116,13 +128,75 @@ class TestVolumeGroupCreate(TestVolumeGroup):
self.assertEqual(self.columns, columns)
self.assertCountEqual(self.data, data)
- def test_volume_group_create_with_options(self):
+ def test_volume_group_create__legacy(self):
self.app.client_manager.volume.api_version = \
api_versions.APIVersion('3.13')
arglist = [
self.fake_volume_group_type.id,
self.fake_volume_type.id,
+ ]
+ verifylist = [
+ ('volume_group_type_legacy', self.fake_volume_group_type.id),
+ ('volume_types_legacy', [self.fake_volume_type.id]),
+ ('name', None),
+ ('description', None),
+ ('availability_zone', None),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ with mock.patch.object(self.cmd.log, 'warning') as mock_warning:
+ columns, data = self.cmd.take_action(parsed_args)
+
+ self.volume_group_types_mock.get.assert_called_once_with(
+ self.fake_volume_group_type.id)
+ self.volume_types_mock.get.assert_called_once_with(
+ self.fake_volume_type.id)
+ self.volume_groups_mock.create.assert_called_once_with(
+ self.fake_volume_group_type.id,
+ self.fake_volume_type.id,
+ None,
+ None,
+ availability_zone=None,
+ )
+ self.assertEqual(self.columns, columns)
+ self.assertCountEqual(self.data, data)
+ mock_warning.assert_called_once()
+ self.assertIn(
+ 'Passing volume group type and volume types as positional ',
+ str(mock_warning.call_args[0][0]),
+ )
+
+ def test_volume_group_create_no_volume_type(self):
+ self.app.client_manager.volume.api_version = \
+ api_versions.APIVersion('3.13')
+
+ arglist = [
+ '--volume-group-type', self.fake_volume_group_type.id,
+ ]
+ verifylist = [
+ ('volume_group_type', self.fake_volume_group_type.id),
+ ('name', None),
+ ('description', None),
+ ('availability_zone', None),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ exc = self.assertRaises(
+ exceptions.CommandError,
+ self.cmd.take_action,
+ parsed_args)
+ self.assertIn(
+ '--volume-types is a required argument when creating ',
+ str(exc))
+
+ def test_volume_group_create_with_options(self):
+ self.app.client_manager.volume.api_version = \
+ api_versions.APIVersion('3.13')
+
+ arglist = [
+ '--volume-group-type', self.fake_volume_group_type.id,
+ '--volume-type', self.fake_volume_type.id,
'--name', 'foo',
'--description', 'hello, world',
'--availability-zone', 'bar',
@@ -157,8 +231,8 @@ class TestVolumeGroupCreate(TestVolumeGroup):
api_versions.APIVersion('3.12')
arglist = [
- self.fake_volume_group_type.id,
- self.fake_volume_type.id,
+ '--volume-group-type', self.fake_volume_group_type.id,
+ '--volume-type', self.fake_volume_type.id,
]
verifylist = [
('volume_group_type', self.fake_volume_group_type.id),
@@ -177,11 +251,105 @@ class TestVolumeGroupCreate(TestVolumeGroup):
'--os-volume-api-version 3.13 or greater is required',
str(exc))
+ def test_volume_group_create_from_source_group(self):
+ self.app.client_manager.volume.api_version = \
+ api_versions.APIVersion('3.14')
+
+ arglist = [
+ '--source-group', self.fake_volume_group.id,
+ ]
+ verifylist = [
+ ('source_group', self.fake_volume_group.id),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ columns, data = self.cmd.take_action(parsed_args)
+
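+ # Two group lookups are expected: one to resolve --source-group and
+ # one to refresh the newly created group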
+ self.volume_groups_mock.get.assert_has_calls(
+ [mock.call(self.fake_volume_group.id),
+ mock.call(self.fake_volume_group.id)])
+ self.volume_groups_mock.create_from_src.assert_called_once_with(
+ None,
+ self.fake_volume_group.id,
+ None,
+ None,
+ )
+ self.assertEqual(self.columns, columns)
+ self.assertCountEqual(self.data, data)
+
+ def test_volume_group_create_from_group_snapshot(self):
+ self.app.client_manager.volume.api_version = \
+ api_versions.APIVersion('3.14')
+
+ arglist = [
+ '--group-snapshot', self.fake_volume_group_snapshot.id,
+ ]
+ verifylist = [
+ ('group_snapshot', self.fake_volume_group_snapshot.id),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ columns, data = self.cmd.take_action(parsed_args)
+
+ self.volume_group_snapshots_mock.get.assert_called_once_with(
+ self.fake_volume_group_snapshot.id)
+ self.volume_groups_mock.get.assert_called_once_with(
+ self.fake_volume_group.id)
+ self.volume_groups_mock.create_from_src.assert_called_once_with(
+ self.fake_volume_group_snapshot.id,
+ None,
+ None,
+ None,
+ )
+ self.assertEqual(self.columns, columns)
+ self.assertCountEqual(self.data, data)
+
+ def test_volume_group_create_from_src_pre_v314(self):
+ self.app.client_manager.volume.api_version = \
+ api_versions.APIVersion('3.13')
+
+ arglist = [
+ '--source-group', self.fake_volume_group.id,
+ ]
+ verifylist = [
+ ('source_group', self.fake_volume_group.id),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ exc = self.assertRaises(
+ exceptions.CommandError,
+ self.cmd.take_action,
+ parsed_args)
+ self.assertIn(
+ '--os-volume-api-version 3.14 or greater is required',
+ str(exc))
+
+ def test_volume_group_create_from_src_source_group_group_snapshot(self):
+ self.app.client_manager.volume.api_version = \
+ api_versions.APIVersion('3.14')
+
+ arglist = [
+ '--source-group', self.fake_volume_group.id,
+ '--group-snapshot', self.fake_volume_group_snapshot.id,
+ ]
+ verifylist = [
+ ('source_group', self.fake_volume_group.id),
+ ('group_snapshot', self.fake_volume_group_snapshot.id),
+ ]
+
+ exc = self.assertRaises(tests_utils.ParserException,
+ self.check_parser,
+ self.cmd,
+ arglist,
+ verifylist)
+ self.assertIn(
+ '--group-snapshot: not allowed with argument --source-group',
+ str(exc))
+
class TestVolumeGroupDelete(TestVolumeGroup):
- fake_volume_group = \
- volume_fakes.FakeVolumeGroup.create_one_volume_group()
+ fake_volume_group = volume_fakes.create_one_volume_group()
def setUp(self):
super().setUp()
@@ -236,8 +404,7 @@ class TestVolumeGroupDelete(TestVolumeGroup):
class TestVolumeGroupSet(TestVolumeGroup):
- fake_volume_group = \
- volume_fakes.FakeVolumeGroup.create_one_volume_group()
+ fake_volume_group = volume_fakes.create_one_volume_group()
columns = (
'ID',
@@ -368,8 +535,7 @@ class TestVolumeGroupSet(TestVolumeGroup):
class TestVolumeGroupList(TestVolumeGroup):
- fake_volume_groups = \
- volume_fakes.FakeVolumeGroup.create_volume_groups()
+ fake_volume_groups = volume_fakes.create_volume_groups()
columns = (
'ID',
@@ -436,8 +602,7 @@ class TestVolumeGroupList(TestVolumeGroup):
class TestVolumeGroupFailover(TestVolumeGroup):
- fake_volume_group = \
- volume_fakes.FakeVolumeGroup.create_one_volume_group()
+ fake_volume_group = volume_fakes.create_one_volume_group()
def setUp(self):
super().setUp()
diff --git a/openstackclient/tests/unit/volume/v3/test_volume_group_snapshot.py b/openstackclient/tests/unit/volume/v3/test_volume_group_snapshot.py
index 509d9f08..2a5a30f0 100644
--- a/openstackclient/tests/unit/volume/v3/test_volume_group_snapshot.py
+++ b/openstackclient/tests/unit/volume/v3/test_volume_group_snapshot.py
@@ -32,9 +32,9 @@ class TestVolumeGroupSnapshot(volume_fakes.TestVolume):
class TestVolumeGroupSnapshotCreate(TestVolumeGroupSnapshot):
- fake_volume_group = volume_fakes.FakeVolumeGroup.create_one_volume_group()
+ fake_volume_group = volume_fakes.create_one_volume_group()
fake_volume_group_snapshot = \
- volume_fakes.FakeVolumeGroupSnapshot.create_one_volume_group_snapshot()
+ volume_fakes.create_one_volume_group_snapshot()
columns = (
'ID',
@@ -141,7 +141,7 @@ class TestVolumeGroupSnapshotCreate(TestVolumeGroupSnapshot):
class TestVolumeGroupSnapshotDelete(TestVolumeGroupSnapshot):
fake_volume_group_snapshot = \
- volume_fakes.FakeVolumeGroupSnapshot.create_one_volume_group_snapshot()
+ volume_fakes.create_one_volume_group_snapshot()
def setUp(self):
super().setUp()
@@ -195,8 +195,7 @@ class TestVolumeGroupSnapshotDelete(TestVolumeGroupSnapshot):
class TestVolumeGroupSnapshotList(TestVolumeGroupSnapshot):
- fake_volume_group_snapshots = \
- volume_fakes.FakeVolumeGroupSnapshot.create_volume_group_snapshots()
+ fake_volume_group_snapshots = volume_fakes.create_volume_group_snapshots()
columns = (
'ID',
diff --git a/openstackclient/tests/unit/volume/v3/test_volume_group_type.py b/openstackclient/tests/unit/volume/v3/test_volume_group_type.py
index 7e758a2c..34b4e501 100644
--- a/openstackclient/tests/unit/volume/v3/test_volume_group_type.py
+++ b/openstackclient/tests/unit/volume/v3/test_volume_group_type.py
@@ -34,8 +34,7 @@ class TestVolumeGroupTypeCreate(TestVolumeGroupType):
maxDiff = 2000
- fake_volume_group_type = \
- volume_fakes.FakeVolumeGroupType.create_one_volume_group_type()
+ fake_volume_group_type = volume_fakes.create_one_volume_group_type()
columns = (
'ID',
@@ -133,8 +132,7 @@ class TestVolumeGroupTypeCreate(TestVolumeGroupType):
class TestVolumeGroupTypeDelete(TestVolumeGroupType):
- fake_volume_group_type = \
- volume_fakes.FakeVolumeGroupType.create_one_volume_group_type()
+ fake_volume_group_type = volume_fakes.create_one_volume_group_type()
def setUp(self):
super().setUp()
@@ -187,13 +185,13 @@ class TestVolumeGroupTypeDelete(TestVolumeGroupType):
class TestVolumeGroupTypeSet(TestVolumeGroupType):
- fake_volume_group_type = \
- volume_fakes.FakeVolumeGroupType.create_one_volume_group_type(
- methods={
- 'get_keys': {'foo': 'bar'},
- 'set_keys': None,
- 'unset_keys': None,
- })
+ fake_volume_group_type = volume_fakes.create_one_volume_group_type(
+ methods={
+ 'get_keys': {'foo': 'bar'},
+ 'set_keys': None,
+ 'unset_keys': None,
+ },
+ )
columns = (
'ID',
@@ -316,9 +314,9 @@ class TestVolumeGroupTypeSet(TestVolumeGroupType):
class TestVolumeGroupTypeUnset(TestVolumeGroupType):
- fake_volume_group_type = \
- volume_fakes.FakeVolumeGroupType.create_one_volume_group_type(
- methods={'unset_keys': None})
+ fake_volume_group_type = volume_fakes.create_one_volume_group_type(
+ methods={'unset_keys': None},
+ )
columns = (
'ID',
@@ -393,8 +391,7 @@ class TestVolumeGroupTypeUnset(TestVolumeGroupType):
class TestVolumeGroupTypeList(TestVolumeGroupType):
- fake_volume_group_types = \
- volume_fakes.FakeVolumeGroupType.create_volume_group_types()
+ fake_volume_group_types = volume_fakes.create_volume_group_types()
columns = (
'ID',
diff --git a/openstackclient/tests/unit/volume/v3/test_volume_message.py b/openstackclient/tests/unit/volume/v3/test_volume_message.py
index 8cabc0c3..45b0747e 100644
--- a/openstackclient/tests/unit/volume/v3/test_volume_message.py
+++ b/openstackclient/tests/unit/volume/v3/test_volume_message.py
@@ -34,15 +34,14 @@ class TestVolumeMessage(volume_fakes.TestVolume):
class TestVolumeMessageDelete(TestVolumeMessage):
- fake_messages = volume_fakes.FakeVolumeMessage.create_volume_messages(
- count=2)
+ fake_messages = volume_fakes.create_volume_messages(count=2)
def setUp(self):
super().setUp()
- self.volume_messages_mock.get = \
- volume_fakes.FakeVolumeMessage.get_volume_messages(
- self.fake_messages)
+ self.volume_messages_mock.get = volume_fakes.get_volume_messages(
+ self.fake_messages,
+ )
self.volume_messages_mock.delete.return_value = None
# Get the command object to mock
@@ -139,8 +138,7 @@ class TestVolumeMessageDelete(TestVolumeMessage):
class TestVolumeMessageList(TestVolumeMessage):
fake_project = identity_fakes.FakeProject.create_one_project()
- fake_messages = volume_fakes.FakeVolumeMessage.create_volume_messages(
- count=3)
+ fake_messages = volume_fakes.create_volume_messages(count=3)
columns = (
'ID',
@@ -253,7 +251,7 @@ class TestVolumeMessageList(TestVolumeMessage):
class TestVolumeMessageShow(TestVolumeMessage):
- fake_message = volume_fakes.FakeVolumeMessage.create_one_volume_message()
+ fake_message = volume_fakes.create_one_volume_message()
columns = (
'created_at',
diff --git a/openstackclient/volume/v1/volume_backup.py b/openstackclient/volume/v1/volume_backup.py
index 1a83a3c0..790cb463 100644
--- a/openstackclient/volume/v1/volume_backup.py
+++ b/openstackclient/volume/v1/volume_backup.py
@@ -231,18 +231,23 @@ class RestoreVolumeBackup(command.Command):
parser.add_argument(
'volume',
metavar='<volume>',
- help=_('Volume to restore to (name or ID)')
+ nargs='?',
+ help=_('Volume to restore to (name or ID) (default to None)')
)
return parser
def take_action(self, parsed_args):
volume_client = self.app.client_manager.volume
- backup = utils.find_resource(volume_client.backups,
- parsed_args.backup)
- destination_volume = utils.find_resource(volume_client.volumes,
- parsed_args.volume)
- return volume_client.restores.restore(backup.id,
- destination_volume.id)
+ backup = utils.find_resource(
+ volume_client.backups, parsed_args.backup,
+ )
+ volume_id = None
+ if parsed_args.volume is not None:
+ volume_id = utils.find_resource(
+ volume_client.volumes,
+ parsed_args.volume,
+ ).id
+ return volume_client.restores.restore(backup.id, volume_id)
class ShowVolumeBackup(command.ShowOne):
diff --git a/openstackclient/volume/v1/volume_type.py b/openstackclient/volume/v1/volume_type.py
index 4f015d13..c584943e 100644
--- a/openstackclient/volume/v1/volume_type.py
+++ b/openstackclient/volume/v1/volume_type.py
@@ -411,7 +411,7 @@ class UnsetVolumeType(command.Command):
"--encryption-type",
action="store_true",
help=_("Remove the encryption type for this volume type "
- "(admin oly)"),
+ "(admin only)"),
)
return parser
diff --git a/openstackclient/volume/v2/consistency_group.py b/openstackclient/volume/v2/consistency_group.py
index c50a1b5b..77da6f64 100644
--- a/openstackclient/volume/v2/consistency_group.py
+++ b/openstackclient/volume/v2/consistency_group.py
@@ -14,6 +14,7 @@
"""Volume v2 consistency group action implementations"""
+import argparse
import logging
from osc_lib.cli import format_columns
@@ -90,35 +91,51 @@ class CreateConsistencyGroup(command.ShowOne):
"name",
metavar="<name>",
nargs="?",
- help=_("Name of new consistency group (default to None)")
+ help=_("Name of new consistency group (default to None)"),
)
exclusive_group = parser.add_mutually_exclusive_group(required=True)
exclusive_group.add_argument(
"--volume-type",
metavar="<volume-type>",
- help=_("Volume type of this consistency group (name or ID)")
+ help=_("Volume type of this consistency group (name or ID)"),
)
exclusive_group.add_argument(
+ "--source",
+ metavar="<consistency-group>",
+ help=_("Existing consistency group (name or ID)"),
+ )
+ # NOTE(stephenfin): Legacy alias
+ exclusive_group.add_argument(
"--consistency-group-source",
metavar="<consistency-group>",
- help=_("Existing consistency group (name or ID)")
+ dest='source',
+ help=argparse.SUPPRESS,
+ )
+ exclusive_group.add_argument(
+ "--snapshot",
+ metavar="<consistency-group-snapshot>",
+ help=_("Existing consistency group snapshot (name or ID)"),
)
+ # NOTE(stephenfin): Legacy alias
exclusive_group.add_argument(
"--consistency-group-snapshot",
metavar="<consistency-group-snapshot>",
- help=_("Existing consistency group snapshot (name or ID)")
+ dest='snapshot',
+ help=argparse.SUPPRESS,
)
parser.add_argument(
"--description",
metavar="<description>",
- help=_("Description of this consistency group")
+ help=_("Description of this consistency group"),
)
parser.add_argument(
"--availability-zone",
metavar="<availability-zone>",
- help=_("Availability zone for this consistency group "
- "(not available if creating consistency group "
- "from source)"),
+ help=_(
+ "Availability zone for this consistency group "
+ "(not available if creating consistency group "
+ "from source)"
+ ),
)
return parser
@@ -142,21 +159,23 @@ class CreateConsistencyGroup(command.ShowOne):
consistency_group_id = None
consistency_group_snapshot = None
- if parsed_args.consistency_group_source:
+ if parsed_args.source:
consistency_group_id = utils.find_resource(
volume_client.consistencygroups,
- parsed_args.consistency_group_source).id
- elif parsed_args.consistency_group_snapshot:
+ parsed_args.source,
+ ).id
+ elif parsed_args.snapshot:
consistency_group_snapshot = utils.find_resource(
volume_client.cgsnapshots,
- parsed_args.consistency_group_snapshot).id
+ parsed_args.snapshot,
+ ).id
consistency_group = (
volume_client.consistencygroups.create_from_src(
consistency_group_snapshot,
consistency_group_id,
name=parsed_args.name,
- description=parsed_args.description
+ description=parsed_args.description,
)
)
diff --git a/openstackclient/volume/v2/volume.py b/openstackclient/volume/v2/volume.py
index 6d14b360..7905e097 100644
--- a/openstackclient/volume/v2/volume.py
+++ b/openstackclient/volume/v2/volume.py
@@ -71,10 +71,10 @@ def _check_size_arg(args):
volume is not specified.
"""
- if ((args.snapshot or args.source)
+ if ((args.snapshot or args.source or args.backup)
is None and args.size is None):
- msg = _("--size is a required option if snapshot "
- "or source volume is not specified.")
+ msg = _("--size is a required option if snapshot, backup "
+ "or source volume are not specified.")
raise exceptions.CommandError(msg)
@@ -82,18 +82,19 @@ class CreateVolume(command.ShowOne):
_description = _("Create new volume")
def get_parser(self, prog_name):
- parser = super(CreateVolume, self).get_parser(prog_name)
+ parser = super().get_parser(prog_name)
parser.add_argument(
"name",
metavar="<name>",
+ nargs="?",
help=_("Volume name"),
)
parser.add_argument(
"--size",
metavar="<size>",
type=int,
- help=_("Volume size in GB (Required unless --snapshot or "
- "--source is specified)"),
+ help=_("Volume size in GB (required unless --snapshot, "
+ "--source or --backup is specified)"),
)
parser.add_argument(
"--type",
@@ -117,6 +118,12 @@ class CreateVolume(command.ShowOne):
help=_("Volume to clone (name or ID)"),
)
source_group.add_argument(
+ "--backup",
+ metavar="<backup>",
+ help=_("Restore backup to a volume (name or ID) "
+ "(supported by --os-volume-api-version 3.47 or later)"),
+ )
+ source_group.add_argument(
"--source-replicated",
metavar="<replicated-volume>",
help=argparse.SUPPRESS,
@@ -176,14 +183,28 @@ class CreateVolume(command.ShowOne):
def take_action(self, parsed_args):
_check_size_arg(parsed_args)
+ # size is validated in the above call to
+ # _check_size_arg where we check that size
+ # should be passed if we are not creating a
+ # volume from snapshot, backup or source volume
+ size = parsed_args.size
+
volume_client = self.app.client_manager.volume
image_client = self.app.client_manager.image
+ if parsed_args.backup and not (
+ volume_client.api_version.matches('3.47')):
+ msg = _("--os-volume-api-version 3.47 or greater is required "
+ "to create a volume from backup.")
+ raise exceptions.CommandError(msg)
+
source_volume = None
if parsed_args.source:
- source_volume = utils.find_resource(
+ source_volume_obj = utils.find_resource(
volume_client.volumes,
- parsed_args.source).id
+ parsed_args.source)
+ source_volume = source_volume_obj.id
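+ # Ensure the new volume is at least as large as the source volume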
+ size = max(size or 0, source_volume_obj.size)
consistency_group = None
if parsed_args.consistency_group:
@@ -196,8 +217,6 @@ class CreateVolume(command.ShowOne):
image = image_client.find_image(parsed_args.image,
ignore_missing=False).id
- size = parsed_args.size
-
snapshot = None
if parsed_args.snapshot:
snapshot_obj = utils.find_resource(
@@ -212,6 +231,15 @@ class CreateVolume(command.ShowOne):
# snapshot size.
size = max(size or 0, snapshot_obj.size)
+ backup = None
+ if parsed_args.backup:
+ backup_obj = utils.find_resource(
+ volume_client.backups,
+ parsed_args.backup)
+ backup = backup_obj.id
+ # As above
+ size = max(size or 0, backup_obj.size)
+
volume = volume_client.volumes.create(
size=size,
snapshot_id=snapshot,
@@ -224,6 +252,7 @@ class CreateVolume(command.ShowOne):
source_volid=source_volume,
consistencygroup_id=consistency_group,
scheduler_hints=parsed_args.hint,
+ backup_id=backup,
)
if parsed_args.bootable or parsed_args.non_bootable:
diff --git a/openstackclient/volume/v2/volume_backup.py b/openstackclient/volume/v2/volume_backup.py
index 96b22a68..d96b28e9 100644
--- a/openstackclient/volume/v2/volume_backup.py
+++ b/openstackclient/volume/v2/volume_backup.py
@@ -310,9 +310,17 @@ class ListVolumeBackup(command.Lister):
filter_volume_id = None
if parsed_args.volume:
- filter_volume_id = utils.find_resource(
- volume_client.volumes, parsed_args.volume,
- ).id
+ try:
+ filter_volume_id = utils.find_resource(
+ volume_client.volumes, parsed_args.volume,
+ ).id
+ except exceptions.CommandError:
+ # Volume with that ID does not exist, but search for backups
+ # for that volume nevertheless
+ LOG.debug("No volume with ID %s existing, continuing to "
+ "search for backups for that volume ID",
+ parsed_args.volume)
+ filter_volume_id = parsed_args.volume
marker_backup_id = None
if parsed_args.marker:
@@ -355,18 +363,50 @@ class RestoreVolumeBackup(command.ShowOne):
parser.add_argument(
"volume",
metavar="<volume>",
- help=_("Volume to restore to (name or ID)")
+ nargs="?",
+ help=_(
+ "Volume to restore to "
+ "(name or ID for existing volume, name only for new volume) "
+ "(default to None)"
+ )
+ )
+ parser.add_argument(
+ "--force",
+ action="store_true",
+ help=_(
+ "Restore the backup to an existing volume "
+ "(default to False)"
+ )
)
return parser
def take_action(self, parsed_args):
volume_client = self.app.client_manager.volume
+
backup = utils.find_resource(volume_client.backups, parsed_args.backup)
- destination_volume = utils.find_resource(volume_client.volumes,
- parsed_args.volume)
- backup = volume_client.restores.restore(backup.id,
- destination_volume.id)
- return zip(*sorted(backup._info.items()))
+
+ volume_name = None
+ volume_id = None
+ try:
+ volume_id = utils.find_resource(
+ volume_client.volumes,
+ parsed_args.volume,
+ ).id
+ except Exception:
+ volume_name = parsed_args.volume
+ else:
+ # If we didn't fail, the volume must already exist. We only allow
+ # this to work if the user forced things
+ if not parsed_args.force:
+ msg = _(
+ "Volume '%s' already exists; if you want to restore the "
+ "backup to it you need to specify the '--force' option"
+ ) % parsed_args.volume
+ raise exceptions.CommandError(msg)
+
+ return volume_client.restores.restore(
+ backup.id, volume_id, volume_name,
+ )
class SetVolumeBackup(command.Command):
diff --git a/openstackclient/volume/v2/volume_snapshot.py b/openstackclient/volume/v2/volume_snapshot.py
index 656f59d4..53d8d27f 100644
--- a/openstackclient/volume/v2/volume_snapshot.py
+++ b/openstackclient/volume/v2/volume_snapshot.py
@@ -98,7 +98,7 @@ class CreateVolumeSnapshot(command.ShowOne):
"--remote-source",
metavar="<key=value>",
action=parseractions.KeyValueAction,
- help=_("The attribute(s) of the exsiting remote volume snapshot "
+ help=_("The attribute(s) of the existing remote volume snapshot "
"(admin required) (repeat option to specify multiple "
"attributes) e.g.: '--remote-source source-name=test_name "
"--remote-source source-id=test_id'"),
diff --git a/openstackclient/volume/v3/block_storage_cleanup.py b/openstackclient/volume/v3/block_storage_cleanup.py
new file mode 100644
index 00000000..f99b8217
--- /dev/null
+++ b/openstackclient/volume/v3/block_storage_cleanup.py
@@ -0,0 +1,146 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from cinderclient import api_versions
+from osc_lib.command import command
+from osc_lib import exceptions
+
+from openstackclient.i18n import _
+
+
+def _format_cleanup_response(cleaning, unavailable):
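+ # Merge the 'cleaning' and 'unavailable' service lists into a single
+ # table, labelling each row with its cleanup status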
+ column_headers = (
+ 'ID',
+ 'Cluster Name',
+ 'Host',
+ 'Binary',
+ 'Status',
+ )
+ combined_data = []
+ for obj in cleaning:
+ details = (obj.id, obj.cluster_name, obj.host, obj.binary, 'Cleaning')
+ combined_data.append(details)
+
+ for obj in unavailable:
+ details = (obj.id, obj.cluster_name, obj.host, obj.binary,
+ 'Unavailable')
+ combined_data.append(details)
+
+ return (column_headers, combined_data)
+
+
+class BlockStorageCleanup(command.Lister):
+ """Do block storage cleanup.
+
+ This command requires ``--os-volume-api-version`` 3.24 or greater.
+ """
+
+ def get_parser(self, prog_name):
+ parser = super().get_parser(prog_name)
+ parser.add_argument(
+ '--cluster',
+ metavar='<cluster>',
+ help=_('Name of block storage cluster in which cleanup needs '
+ 'to be performed (name only)')
+ )
+ parser.add_argument(
+ "--host",
+ metavar="<host>",
+ default=None,
+ help=_("Host where the service resides. (name only)")
+ )
+ parser.add_argument(
+ '--binary',
+ metavar='<binary>',
+ default=None,
+ help=_("Name of the service binary.")
+ )
+ service_up_parser = parser.add_mutually_exclusive_group()
+ service_up_parser.add_argument(
+ '--up',
+ dest='is_up',
+ action='store_true',
+ default=None,
+ help=_(
+ 'Filter by up status. If this is set, services need to be up.'
+ )
+ )
+ service_up_parser.add_argument(
+ '--down',
+ dest='is_up',
+ action='store_false',
+ help=_(
+ 'Filter by down status. If this is set, services need to be '
+ 'down.'
+ )
+ )
+ service_disabled_parser = parser.add_mutually_exclusive_group()
+ service_disabled_parser.add_argument(
+ '--disabled',
+ dest='disabled',
+ action='store_true',
+ default=None,
+ help=_('Filter by disabled status.')
+ )
+ service_disabled_parser.add_argument(
+ '--enabled',
+ dest='disabled',
+ action='store_false',
+ help=_('Filter by enabled status.')
+ )
+ parser.add_argument(
+ '--resource-id',
+ metavar='<resource-id>',
+ default=None,
+ help=_('UUID of a resource to clean up.')
+ )
+ parser.add_argument(
+ '--resource-type',
+ metavar='<Volume|Snapshot>',
+ choices=('Volume', 'Snapshot'),
+ help=_('Type of resource to clean up.')
+ )
+ parser.add_argument(
+ '--service-id',
+ type=int,
+ default=None,
+ help=_(
+ 'The service ID field from the DB, not the UUID of the '
+ 'service.'
+ )
+ )
+ return parser
+
+ def take_action(self, parsed_args):
+ volume_client = self.app.client_manager.volume
+
+ if volume_client.api_version < api_versions.APIVersion('3.24'):
+ msg = _(
+ "--os-volume-api-version 3.24 or greater is required to "
+ "support the 'block storage cleanup' command"
+ )
+ raise exceptions.CommandError(msg)
+
+ filters = {
+ 'cluster_name': parsed_args.cluster,
+ 'host': parsed_args.host,
+ 'binary': parsed_args.binary,
+ 'is_up': parsed_args.is_up,
+ 'disabled': parsed_args.disabled,
+ 'resource_id': parsed_args.resource_id,
+ 'resource_type': parsed_args.resource_type,
+ 'service_id': parsed_args.service_id
+ }
+
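+ # Drop unset filters so only explicitly requested criteria are sent to the API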
+ filters = {k: v for k, v in filters.items() if v is not None}
+ cleaning, unavailable = volume_client.workers.clean(**filters)
+ return _format_cleanup_response(cleaning, unavailable)
diff --git a/openstackclient/volume/v3/block_storage_cluster.py b/openstackclient/volume/v3/block_storage_cluster.py
new file mode 100644
index 00000000..34b25efc
--- /dev/null
+++ b/openstackclient/volume/v3/block_storage_cluster.py
@@ -0,0 +1,281 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from cinderclient import api_versions
+from osc_lib.command import command
+from osc_lib import exceptions
+from osc_lib import utils
+
+from openstackclient.i18n import _
+
+
+def _format_cluster(cluster, detailed=False):
+ columns = (
+ 'name',
+ 'binary',
+ 'state',
+ 'status',
+ )
+ column_headers = (
+ 'Name',
+ 'Binary',
+ 'State',
+ 'Status',
+ )
+
+ if detailed:
+ columns += (
+ 'disabled_reason',
+ 'num_hosts',
+ 'num_down_hosts',
+ 'last_heartbeat',
+ 'created_at',
+ 'updated_at',
+ # optional columns, depending on whether replication is enabled
+ 'replication_status',
+ 'frozen',
+ 'active_backend_id',
+ )
+ column_headers += (
+ 'Disabled Reason',
+ 'Hosts',
+ 'Down Hosts',
+ 'Last Heartbeat',
+ 'Created At',
+ 'Updated At',
+ # optional columns, depending on whether replication is enabled
+ 'Replication Status',
+ 'Frozen',
+ 'Active Backend ID',
+ )
+
+ return (
+ column_headers,
+ utils.get_item_properties(
+ cluster,
+ columns,
+ ),
+ )
+
+
+class ListBlockStorageCluster(command.Lister):
+ """List block storage clusters.
+
+ This command requires ``--os-volume-api-version`` 3.7 or greater.
+ """
+
+ def get_parser(self, prog_name):
+ parser = super().get_parser(prog_name)
+ parser.add_argument(
+ '--cluster', metavar='<name>', default=None,
+ help=_(
+ 'Filter by cluster name; when no backend is given, all '
+ 'clustered services from the same cluster are listed.'
+ ),
+ )
+ parser.add_argument(
+ '--binary',
+ metavar='<binary>',
+ help=_('Cluster binary.'),
+ )
+ parser.add_argument(
+ '--up',
+ action='store_true',
+ dest='is_up',
+ default=None,
+ help=_('Filter by up status.'),
+ )
+ parser.add_argument(
+ '--down',
+ action='store_false',
+ dest='is_up',
+ help=_('Filter by down status.'),
+ )
+ parser.add_argument(
+ '--disabled',
+ action='store_true',
+ dest='is_disabled',
+ default=None,
+ help=_('Filter by disabled status.'),
+ )
+ parser.add_argument(
+ '--enabled',
+ action='store_false',
+ dest='is_disabled',
+ help=_('Filter by enabled status.'),
+ )
+ parser.add_argument(
+ '--num-hosts',
+ metavar='<hosts>',
+ type=int,
+ default=None,
+ help=_('Filter by number of hosts in the cluster.'),
+ )
+ parser.add_argument(
+ '--num-down-hosts',
+ metavar='<hosts>',
+ type=int,
+ default=None,
+ help=_('Filter by number of hosts that are down.'),
+ )
+ parser.add_argument(
+ '--long',
+ action='store_true',
+ default=False,
+ help=_("List additional fields in output")
+ )
+ return parser
+
+ def take_action(self, parsed_args):
+ volume_client = self.app.client_manager.volume
+
+ if volume_client.api_version < api_versions.APIVersion('3.7'):
+ msg = _(
+ "--os-volume-api-version 3.7 or greater is required to "
+ "support the 'block storage cluster list' command"
+ )
+ raise exceptions.CommandError(msg)
+
+ columns = ('Name', 'Binary', 'State', 'Status')
+ if parsed_args.long:
+ columns += (
+ 'Num Hosts',
+ 'Num Down Hosts',
+ 'Last Heartbeat',
+ 'Disabled Reason',
+ 'Created At',
+ 'Updated At',
+ )
+
+ data = volume_client.clusters.list(
+ name=parsed_args.cluster,
+ binary=parsed_args.binary,
+ is_up=parsed_args.is_up,
+ disabled=parsed_args.is_disabled,
+ num_hosts=parsed_args.num_hosts,
+ num_down_hosts=parsed_args.num_down_hosts,
+ detailed=parsed_args.long,
+ )
+
+ return (
+ columns,
+ (utils.get_item_properties(s, columns) for s in data),
+ )
+
+
+class SetBlockStorageCluster(command.Command):
+ """Set block storage cluster properties.
+
+ This command requires ``--os-volume-api-version`` 3.7 or greater.
+ """
+
+ def get_parser(self, prog_name):
+ parser = super().get_parser(prog_name)
+ parser.add_argument(
+ 'cluster',
+ metavar='<cluster>',
+ help=_('Name of block storage cluster to update (name only)')
+ )
+ parser.add_argument(
+ '--binary',
+ metavar='<binary>',
+ default='cinder-volume',
+ help=_(
+ "Name of binary to filter by; defaults to 'cinder-volume' "
+ "(optional)"
+ )
+ )
+ enabled_group = parser.add_mutually_exclusive_group()
+ enabled_group.add_argument(
+ '--enable',
+ action='store_false',
+ dest='disabled',
+ default=None,
+ help=_('Enable cluster')
+ )
+ enabled_group.add_argument(
+ '--disable',
+ action='store_true',
+ dest='disabled',
+ help=_('Disable cluster')
+ )
+ parser.add_argument(
+ '--disable-reason',
+ metavar='<reason>',
+ dest='disabled_reason',
+ help=_(
+ 'Reason for disabling the cluster '
+ '(should be used with --disable option)'
+ )
+ )
+ return parser
+
+ def take_action(self, parsed_args):
+ volume_client = self.app.client_manager.volume
+
+ if volume_client.api_version < api_versions.APIVersion('3.7'):
+ msg = _(
+ "--os-volume-api-version 3.7 or greater is required to "
+ "support the 'block storage cluster set' command"
+ )
+ raise exceptions.CommandError(msg)
+
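+ # A disable reason only makes sense together with --disable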
+ if parsed_args.disabled_reason and not parsed_args.disabled:
+ msg = _("Cannot specify --disable-reason without --disable")
+ raise exceptions.CommandError(msg)
+
+ cluster = volume_client.clusters.update(
+ parsed_args.cluster,
+ parsed_args.binary,
+ disabled=parsed_args.disabled,
+ disabled_reason=parsed_args.disabled_reason,
+ )
+
+ return _format_cluster(cluster, detailed=True)
+
+
+class ShowBlockStorageCluster(command.ShowOne):
+ """Show detailed information for a block storage cluster.
+
+ This command requires ``--os-volume-api-version`` 3.7 or greater.
+ """
+
+ def get_parser(self, prog_name):
+ parser = super().get_parser(prog_name)
+ parser.add_argument(
+ 'cluster',
+ metavar='<cluster>',
+ help=_('Name of block storage cluster.'),
+ )
+ parser.add_argument(
+ '--binary',
+ metavar='<binary>',
+ help=_('Service binary.'),
+ )
+ return parser
+
+ def take_action(self, parsed_args):
+ volume_client = self.app.client_manager.volume
+
+ if volume_client.api_version < api_versions.APIVersion('3.7'):
+ msg = _(
+ "--os-volume-api-version 3.7 or greater is required to "
+ "support the 'block storage cluster show' command"
+ )
+ raise exceptions.CommandError(msg)
+
+ cluster = volume_client.clusters.show(
+ parsed_args.cluster,
+ binary=parsed_args.binary,
+ )
+
+ return _format_cluster(cluster, detailed=True)
diff --git a/openstackclient/volume/v3/block_storage_log_level.py b/openstackclient/volume/v3/block_storage_log_level.py
new file mode 100644
index 00000000..d5286cdd
--- /dev/null
+++ b/openstackclient/volume/v3/block_storage_log_level.py
@@ -0,0 +1,147 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+"""Block Storage Service action implementations"""
+
+from cinderclient import api_versions
+from osc_lib.command import command
+from osc_lib import exceptions
+from osc_lib import utils
+
+from openstackclient.i18n import _
+
+
+class BlockStorageLogLevelList(command.Lister):
+ """List log levels of block storage service.
+
+ Supported by --os-volume-api-version 3.32 or greater.
+ """
+
+ def get_parser(self, prog_name):
+ parser = super().get_parser(prog_name)
+ parser.add_argument(
+ "--host",
+ metavar="<host>",
+ default="",
+ help=_("List block storage service log level of specified host "
+ "(name only)")
+ )
+ parser.add_argument(
+ "--service",
+ metavar="<service>",
+ default="",
+ choices=(
+ '',
+ '*',
+ 'cinder-api',
+ 'cinder-volume',
+ 'cinder-scheduler',
+ 'cinder-backup'),
+ help=_("List block storage service log level of the specified "
+ "service (name only)")
+ )
+ parser.add_argument(
+ "--log-prefix",
+ metavar="<log-prefix>",
+ default="",
+ help="Prefix for the log, e.g. 'sqlalchemy'"
+ )
+ return parser
+
+ def take_action(self, parsed_args):
+ service_client = self.app.client_manager.volume
+ columns = [
+ "Binary",
+ "Host",
+ "Prefix",
+ "Level",
+ ]
+
+ if service_client.api_version < api_versions.APIVersion('3.32'):
+ msg = _(
+ "--os-volume-api-version 3.32 or greater is required to "
+ "support the 'block storage log level list' command"
+ )
+ raise exceptions.CommandError(msg)
+
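+ # host, service and log-prefix default to '' and are passed through as-is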
+ data = service_client.services.get_log_levels(
+ binary=parsed_args.service,
+ server=parsed_args.host,
+ prefix=parsed_args.log_prefix)
+
+ return (columns,
+ (utils.get_item_properties(
+ s, columns,
+ ) for s in data))
+
+
+class BlockStorageLogLevelSet(command.Command):
+ """Set log level of block storage service
+
+ Supported by --os-volume-api-version 3.32 or greater.
+ """
+
+ def get_parser(self, prog_name):
+ parser = super().get_parser(prog_name)
+ parser.add_argument(
+ "level",
+ metavar="<log-level>",
+ choices=('INFO', 'WARNING', 'ERROR', 'DEBUG'),
+ type=str.upper,
+ help=_("Desired log level.")
+ )
+ parser.add_argument(
+ "--host",
+ metavar="<host>",
+ default="",
+ help=_("Set block storage service log level of specified host "
+ "(name only)")
+ )
+ parser.add_argument(
+ "--service",
+ metavar="<service>",
+ default="",
+ choices=(
+ '',
+ '*',
+ 'cinder-api',
+ 'cinder-volume',
+ 'cinder-scheduler',
+ 'cinder-backup'),
+ help=_("Set block storage service log level of specified service "
+ "(name only)")
+ )
+ parser.add_argument(
+ "--log-prefix",
+ metavar="<log-prefix>",
+ default="",
+ help="Prefix for the log, e.g. 'sqlalchemy'"
+ )
+ return parser
+
+ def take_action(self, parsed_args):
+ service_client = self.app.client_manager.volume
+
+ if service_client.api_version < api_versions.APIVersion('3.32'):
+ msg = _(
+ "--os-volume-api-version 3.32 or greater is required to "
+ "support the 'block storage log level set' command"
+ )
+ raise exceptions.CommandError(msg)
+
+ service_client.services.set_log_levels(
+ level=parsed_args.level,
+ binary=parsed_args.service,
+ server=parsed_args.host,
+ prefix=parsed_args.log_prefix)
diff --git a/openstackclient/volume/v3/block_storage_manage.py b/openstackclient/volume/v3/block_storage_manage.py
new file mode 100644
index 00000000..9015f44d
--- /dev/null
+++ b/openstackclient/volume/v3/block_storage_manage.py
@@ -0,0 +1,258 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+"""Block Storage Volume/Snapshot Management implementations"""
+
+from cinderclient import api_versions
+from osc_lib.command import command
+from osc_lib import exceptions
+from osc_lib import utils
+from oslo_utils import strutils
+
+from openstackclient.i18n import _
+
+
+SORT_MANAGEABLE_KEY_VALUES = ('size', 'reference')
+
+
+class BlockStorageManageVolumes(command.Lister):
+ """List manageable volumes.
+
+ Supported by --os-volume-api-version 3.8 or greater.
+ """
+
+ def get_parser(self, prog_name):
+ parser = super().get_parser(prog_name)
+ host_group = parser.add_mutually_exclusive_group()
+ host_group.add_argument(
+ "host",
+ metavar="<host>",
+ nargs='?',
+ help=_('Cinder host on which to list manageable volumes. '
+ 'Takes the form: host@backend-name#pool')
+ )
+ host_group.add_argument(
+ "--cluster",
+ metavar="<cluster>",
+ help=_('Cinder cluster on which to list manageable volumes. '
+ 'Takes the form: cluster@backend-name#pool. '
+ '(supported by --os-volume-api-version 3.17 or later)')
+ )
+ parser.add_argument(
+ '--detailed',
+ metavar='<detailed>',
+ default=True,
+ help=_('Returns detailed information (Default=True).')
+ )
+ parser.add_argument(
+ '--marker',
+ metavar='<marker>',
+ default=None,
+            help=_('Begin returning volumes that appear later in the volume '
+                   'list than that represented by this reference. This '
+                   'reference should be JSON-like. Default=None.')
+ )
+ parser.add_argument(
+ '--limit',
+ metavar='<limit>',
+ default=None,
+ help=_('Maximum number of volumes to return. Default=None.')
+ )
+ parser.add_argument(
+ '--offset',
+ metavar='<offset>',
+ default=None,
+ help=_('Number of volumes to skip after marker. Default=None.')
+ )
+ parser.add_argument(
+ '--sort',
+ metavar='<key>[:<direction>]',
+ default=None,
+ help=(_('Comma-separated list of sort keys and directions in the '
+ 'form of <key>[:<asc|desc>]. '
+ 'Valid keys: %s. '
+ 'Default=None.') % ', '.join(SORT_MANAGEABLE_KEY_VALUES))
+ )
+ return parser
+
+ def take_action(self, parsed_args):
+ volume_client = self.app.client_manager.volume
+
+ if parsed_args.host is None and parsed_args.cluster is None:
+ msg = _(
+ "Either <host> or '--cluster <cluster>' needs to be provided "
+ "to run the 'block storage volume manageable list' command"
+ )
+ raise exceptions.CommandError(msg)
+
+ if volume_client.api_version < api_versions.APIVersion('3.8'):
+ msg = _(
+ "--os-volume-api-version 3.8 or greater is required to "
+ "support the 'block storage volume manageable list' command"
+ )
+ raise exceptions.CommandError(msg)
+
+ if parsed_args.cluster:
+ if volume_client.api_version < api_versions.APIVersion('3.17'):
+ msg = _(
+ "--os-volume-api-version 3.17 or greater is required to "
+ "support the '--cluster' option"
+ )
+ raise exceptions.CommandError(msg)
+
+ detailed = strutils.bool_from_string(parsed_args.detailed)
+ cluster = getattr(parsed_args, 'cluster', None)
+
+ columns = [
+ 'reference',
+ 'size',
+ 'safe_to_manage',
+ ]
+ if detailed:
+ columns.extend([
+ 'reason_not_safe',
+ 'cinder_id',
+ 'extra_info',
+ ])
+
+ data = volume_client.volumes.list_manageable(
+ host=parsed_args.host,
+ detailed=detailed,
+ marker=parsed_args.marker,
+ limit=parsed_args.limit,
+ offset=parsed_args.offset,
+ sort=parsed_args.sort,
+ cluster=cluster)
+
+ return (columns,
+ (utils.get_item_properties(
+ s, columns,
+ ) for s in data))
+
+
+class BlockStorageManageSnapshots(command.Lister):
+ """List manageable snapshots.
+
+ Supported by --os-volume-api-version 3.8 or greater.
+ """
+
+ def get_parser(self, prog_name):
+ parser = super().get_parser(prog_name)
+ host_group = parser.add_mutually_exclusive_group()
+ host_group.add_argument(
+ "host",
+ metavar="<host>",
+ nargs='?',
+ help=_('Cinder host on which to list manageable snapshots. '
+ 'Takes the form: host@backend-name#pool')
+ )
+ host_group.add_argument(
+ "--cluster",
+ metavar="<cluster>",
+ help=_('Cinder cluster on which to list manageable snapshots. '
+ 'Takes the form: cluster@backend-name#pool. '
+ '(supported by --os-volume-api-version 3.17 or later)')
+ )
+ parser.add_argument(
+ '--detailed',
+ metavar='<detailed>',
+ default=True,
+ help=_('Returns detailed information (Default=True).')
+ )
+ parser.add_argument(
+ '--marker',
+ metavar='<marker>',
+ default=None,
+            help=_('Begin returning snapshots that appear later in the '
+                   'snapshot list than that represented by this reference. '
+                   'This reference should be JSON-like. Default=None.')
+ )
+ parser.add_argument(
+ '--limit',
+ metavar='<limit>',
+ default=None,
+ help=_('Maximum number of snapshots to return. Default=None.')
+ )
+ parser.add_argument(
+ '--offset',
+ metavar='<offset>',
+ default=None,
+ help=_('Number of snapshots to skip after marker. Default=None.')
+ )
+ parser.add_argument(
+ '--sort',
+ metavar='<key>[:<direction>]',
+ default=None,
+ help=(_('Comma-separated list of sort keys and directions in the '
+ 'form of <key>[:<asc|desc>]. '
+ 'Valid keys: %s. '
+ 'Default=None.') % ', '.join(SORT_MANAGEABLE_KEY_VALUES))
+ )
+ return parser
+
+ def take_action(self, parsed_args):
+ volume_client = self.app.client_manager.volume
+
+ if parsed_args.host is None and parsed_args.cluster is None:
+ msg = _(
+ "Either <host> or '--cluster <cluster>' needs to be provided "
+ "to run the 'block storage volume snapshot manageable list' "
+ "command"
+ )
+ raise exceptions.CommandError(msg)
+
+ if volume_client.api_version < api_versions.APIVersion('3.8'):
+ msg = _(
+ "--os-volume-api-version 3.8 or greater is required to "
+ "support the 'block storage volume snapshot manageable list' "
+ "command"
+ )
+ raise exceptions.CommandError(msg)
+
+ if parsed_args.cluster:
+ if volume_client.api_version < api_versions.APIVersion('3.17'):
+ msg = _(
+ "--os-volume-api-version 3.17 or greater is required to "
+ "support the '--cluster' option"
+ )
+ raise exceptions.CommandError(msg)
+
+ detailed = strutils.bool_from_string(parsed_args.detailed)
+ cluster = getattr(parsed_args, 'cluster', None)
+
+ columns = [
+ 'reference',
+ 'size',
+ 'safe_to_manage',
+ 'source_reference',
+ ]
+ if detailed:
+ columns.extend([
+ 'reason_not_safe',
+ 'cinder_id',
+ 'extra_info',
+ ])
+
+ data = volume_client.volume_snapshots.list_manageable(
+ host=parsed_args.host,
+ detailed=detailed,
+ marker=parsed_args.marker,
+ limit=parsed_args.limit,
+ offset=parsed_args.offset,
+ sort=parsed_args.sort,
+ cluster=cluster)
+
+ return (columns,
+ (utils.get_item_properties(
+ s, columns,
+ ) for s in data))
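Both manageable-list commands above accept --detailed as a free-form string and normalise it with oslo.utils rather than using a store_true flag, so spellings such as 'yes' or '0' work on the command line. A small sketch of the behaviour being relied on (the sample values are illustrative):

from oslo_utils import strutils

# Case-insensitive truthy spellings ('1', 't', 'true', 'on', 'y', 'yes').
assert strutils.bool_from_string('Yes') is True
# Recognised falsy spellings ('0', 'f', 'false', 'off', 'n', 'no').
assert strutils.bool_from_string('0') is False
# Anything unrecognised falls back to the default (False) unless
# strict=True is passed, in which case a ValueError is raised instead.
assert strutils.bool_from_string('maybe') is False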
diff --git a/openstackclient/volume/v3/block_storage_resource_filter.py b/openstackclient/volume/v3/block_storage_resource_filter.py
new file mode 100644
index 00000000..4bcacf90
--- /dev/null
+++ b/openstackclient/volume/v3/block_storage_resource_filter.py
@@ -0,0 +1,83 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Volume V3 Resource Filters implementations"""
+
+from cinderclient import api_versions
+from osc_lib.command import command
+from osc_lib import exceptions
+from osc_lib import utils
+
+from openstackclient.i18n import _
+
+
+class ListBlockStorageResourceFilter(command.Lister):
+ _description = _('List block storage resource filters')
+
+ def take_action(self, parsed_args):
+ volume_client = self.app.client_manager.volume
+
+ if volume_client.api_version < api_versions.APIVersion('3.33'):
+ msg = _(
+ "--os-volume-api-version 3.33 or greater is required to "
+ "support the 'block storage resource filter list' command"
+ )
+ raise exceptions.CommandError(msg)
+
+ column_headers = (
+ 'Resource',
+ 'Filters',
+ )
+
+ data = volume_client.resource_filters.list()
+
+ return (
+ column_headers,
+ (utils.get_item_properties(s, column_headers) for s in data)
+ )
+
+
+class ShowBlockStorageResourceFilter(command.ShowOne):
+ _description = _('Show filters for a block storage resource type')
+
+ def get_parser(self, prog_name):
+ parser = super().get_parser(prog_name)
+ parser.add_argument(
+ 'resource',
+ metavar='<resource>',
+ help=_('Resource to show filters for (name).')
+ )
+
+ return parser
+
+ def take_action(self, parsed_args):
+ volume_client = self.app.client_manager.volume
+
+ if volume_client.api_version < api_versions.APIVersion('3.33'):
+ msg = _(
+ "--os-volume-api-version 3.33 or greater is required to "
+ "support the 'block storage resource filter show' command"
+ )
+ raise exceptions.CommandError(msg)
+
+ data = volume_client.resource_filters.list(
+ resource=parsed_args.resource
+ )
+ if not data:
+            msg = _(
+                "No resource filter with a name of '%s' "
+                "exists."
+            ) % parsed_args.resource
+ raise exceptions.CommandError(msg)
+ resource_filter = next(data)
+
+ return zip(*sorted(resource_filter._info.items()))
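ShowBlockStorageResourceFilter ends with the usual ShowOne idiom, zip(*sorted(...)), to turn the resource filter's _info mapping into parallel column and value tuples. Roughly, with a purely hypothetical payload:

# Hypothetical _info payload for the 'volume' resource.
info = {'resource': 'volume', 'filters': ['name', 'status', 'bootable']}

columns, values = zip(*sorted(info.items()))
# columns -> ('filters', 'resource')
# values  -> (['name', 'status', 'bootable'], 'volume')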
diff --git a/openstackclient/volume/v3/volume.py b/openstackclient/volume/v3/volume.py
new file mode 100644
index 00000000..4b159688
--- /dev/null
+++ b/openstackclient/volume/v3/volume.py
@@ -0,0 +1,114 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+"""Volume V3 Volume action implementations"""
+
+import logging
+
+from cinderclient import api_versions
+from osc_lib.cli import format_columns
+from osc_lib.command import command
+from osc_lib import exceptions
+from osc_lib import utils
+
+from openstackclient.i18n import _
+
+
+LOG = logging.getLogger(__name__)
+
+
+class VolumeSummary(command.ShowOne):
+ _description = _("Show a summary of all volumes in this deployment.")
+
+ def get_parser(self, prog_name):
+ parser = super().get_parser(prog_name)
+ parser.add_argument(
+ '--all-projects',
+ action='store_true',
+ default=False,
+ help=_('Include all projects (admin only)'),
+ )
+ return parser
+
+ def take_action(self, parsed_args):
+
+ volume_client = self.app.client_manager.volume
+
+ if volume_client.api_version < api_versions.APIVersion('3.12'):
+ msg = _(
+ "--os-volume-api-version 3.12 or greater is required to "
+ "support the 'volume summary' command"
+ )
+ raise exceptions.CommandError(msg)
+
+ columns = [
+ 'total_count',
+ 'total_size',
+ ]
+ column_headers = [
+ 'Total Count',
+ 'Total Size',
+ ]
+ if volume_client.api_version.matches('3.36'):
+ columns.append('metadata')
+ column_headers.append('Metadata')
+
+ # set value of 'all_tenants' when using project option
+ all_projects = parsed_args.all_projects
+
+ vol_summary = volume_client.volumes.summary(
+ all_tenants=all_projects,
+ )
+
+ return (
+ column_headers,
+ utils.get_dict_properties(
+ vol_summary['volume-summary'],
+ columns,
+ formatters={'metadata': format_columns.DictColumn},
+ ),
+ )
+
+
+class VolumeRevertToSnapshot(command.Command):
+ _description = _("Revert a volume to a snapshot.")
+
+ def get_parser(self, prog_name):
+ parser = super().get_parser(prog_name)
+ parser.add_argument(
+ 'snapshot',
+ metavar="<snapshot>",
+ help=_('Name or ID of the snapshot to restore. The snapshot must '
+ 'be the most recent one known to cinder.'),
+ )
+ return parser
+
+ def take_action(self, parsed_args):
+
+ volume_client = self.app.client_manager.volume
+
+ if volume_client.api_version < api_versions.APIVersion('3.40'):
+ msg = _(
+ "--os-volume-api-version 3.40 or greater is required to "
+ "support the 'volume revert snapshot' command"
+ )
+ raise exceptions.CommandError(msg)
+
+ snapshot = utils.find_resource(
+ volume_client.volume_snapshots, parsed_args.snapshot)
+ volume = utils.find_resource(
+ volume_client.volumes, snapshot.volume_id)
+
+ volume_client.volumes.revert_to_snapshot(
+ volume=volume, snapshot=snapshot)
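VolumeSummary above hands the 'volume-summary' dict straight to osc_lib's get_dict_properties. A short sketch under an assumed payload shape (the keys simply mirror the columns selected in the command):

from osc_lib.cli import format_columns
from osc_lib import utils

# Assumed shape of the summary payload returned by the volumes API.
summary = {'total_count': 2, 'total_size': 20, 'metadata': {'tier': 'gold'}}

row = utils.get_dict_properties(
    summary,
    ['total_count', 'total_size', 'metadata'],
    formatters={'metadata': format_columns.DictColumn},
)
# row is (2, 20, <DictColumn wrapping {'tier': 'gold'}>)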
diff --git a/openstackclient/volume/v3/volume_attachment.py b/openstackclient/volume/v3/volume_attachment.py
index 39a9c37f..57a6da73 100644
--- a/openstackclient/volume/v3/volume_attachment.py
+++ b/openstackclient/volume/v3/volume_attachment.py
@@ -51,18 +51,27 @@ def _format_attachment(attachment):
'Properties',
)
- # TODO(stephenfin): Improve output with the nested connection_info
- # field - cinderclient printed two things but that's equally ugly
- return (
- column_headers,
- utils.get_item_properties(
+ # VolumeAttachmentManager.create returns a dict while everything else
+ # returns a VolumeAttachment object
+ if isinstance(attachment, dict):
+ data = []
+ for column in columns:
+ if column == 'connection_info':
+ data.append(format_columns.DictColumn(attachment[column]))
+ continue
+ data.append(attachment[column])
+ else:
+ data = utils.get_item_properties(
attachment,
columns,
formatters={
'connection_info': format_columns.DictColumn,
},
- ),
- )
+ )
+
+ # TODO(stephenfin): Improve output with the nested connection_info
+ # field - cinderclient printed two things but that's equally ugly
+ return (column_headers, data)
class CreateVolumeAttachment(command.ShowOne):
@@ -73,7 +82,7 @@ class CreateVolumeAttachment(command.ShowOne):
the volume to the server at the hypervisor level. As a result, it should
typically only be used for troubleshooting issues with an existing server
in combination with other tooling. For all other use cases, the 'server
- volume add' command should be preferred.
+ add volume' command should be preferred.
"""
def get_parser(self, prog_name):
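The _format_attachment change above exists because VolumeAttachmentManager.create returns a plain dict while the other attachment calls return resource objects, and only the latter can go through get_item_properties. A condensed sketch of the two paths (FakeAttachment and the literal response dict are illustrative only):

from osc_lib.cli import format_columns
from osc_lib import utils

columns = ('id', 'connection_info')


class FakeAttachment:
    # Stand-in for a cinderclient VolumeAttachment resource object.
    id = 'attach-1'
    connection_info = {'driver_volume_type': 'iscsi'}


# Object path: attribute lookup via get_item_properties with a formatter.
object_row = utils.get_item_properties(
    FakeAttachment(), columns,
    formatters={'connection_info': format_columns.DictColumn})

# Dict path: index directly, wrapping connection_info by hand.
response = {'id': 'attach-1', 'connection_info': {'target_lun': 1}}
dict_row = [
    format_columns.DictColumn(response[c]) if c == 'connection_info'
    else response[c]
    for c in columns
]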
diff --git a/openstackclient/volume/v3/volume_group.py b/openstackclient/volume/v3/volume_group.py
index db4e9a94..242ffcd4 100644
--- a/openstackclient/volume/v3/volume_group.py
+++ b/openstackclient/volume/v3/volume_group.py
@@ -10,7 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import logging
+import argparse
from cinderclient import api_versions
from osc_lib.command import command
@@ -19,8 +19,6 @@ from osc_lib import utils
from openstackclient.i18n import _
-LOG = logging.getLogger(__name__)
-
def _format_group(group):
columns = (
@@ -82,17 +80,72 @@ class CreateVolumeGroup(command.ShowOne):
def get_parser(self, prog_name):
parser = super().get_parser(prog_name)
- parser.add_argument(
- 'volume_group_type',
+ # This is a bit complicated. We accept two patterns: a legacy pattern
+ #
+ # volume group create \
+ # <volume-group-type> <volume-type> [<volume-type>...]
+ #
+ # and the modern approach
+ #
+ # volume group create \
+ # --volume-group-type <volume-group-type>
+ # --volume-type <volume-type>
+ # [--volume-type <volume-type> ...]
+ #
+ # Because argparse doesn't properly support nested exclusive groups, we
+ # use two groups: one to ensure users don't pass <volume-group-type> as
+ # both a positional and an option argument and another to ensure users
+ # don't pass <volume-type> this way. It's a bit weird but it catches
+ # everything we care about.
+ source_parser = parser.add_mutually_exclusive_group()
+ # we use a different name purely so we can issue a deprecation warning
+ source_parser.add_argument(
+ 'volume_group_type_legacy',
metavar='<volume_group_type>',
- help=_('Name or ID of volume group type to use.'),
+ nargs='?',
+ help=argparse.SUPPRESS,
)
- parser.add_argument(
- 'volume_types',
+ volume_types_parser = parser.add_mutually_exclusive_group()
+ # We need to use a separate dest
+ # https://github.com/python/cpython/issues/101990
+ volume_types_parser.add_argument(
+ 'volume_types_legacy',
+ metavar='<volume_type>',
+ nargs='*',
+ default=[],
+ help=argparse.SUPPRESS,
+ )
+ source_parser.add_argument(
+ '--volume-group-type',
+ metavar='<volume_group_type>',
+ help=_('Volume group type to use (name or ID)'),
+ )
+ volume_types_parser.add_argument(
+ '--volume-type',
metavar='<volume_type>',
- nargs='+',
+ dest='volume_types',
+ action='append',
default=[],
- help=_('Name or ID of volume type(s) to use.'),
+ help=_(
+ 'Volume type(s) to use (name or ID) '
+ '(required with --volume-group-type)'
+ ),
+ )
+ source_parser.add_argument(
+ '--source-group',
+ metavar='<source-group>',
+ help=_(
+ 'Existing volume group to use (name or ID) '
+ '(supported by --os-volume-api-version 3.14 or later)'
+ ),
+ )
+ source_parser.add_argument(
+ '--group-snapshot',
+ metavar='<group-snapshot>',
+ help=_(
+ 'Existing group snapshot to use (name or ID) '
+ '(supported by --os-volume-api-version 3.14 or later)'
+ ),
)
parser.add_argument(
'--name',
@@ -107,44 +160,105 @@ class CreateVolumeGroup(command.ShowOne):
parser.add_argument(
'--availability-zone',
metavar='<availability-zone>',
- help=_('Availability zone for volume group.'),
+ help=_(
+ 'Availability zone for volume group. '
+ '(not available if creating group from source)'
+ ),
)
return parser
def take_action(self, parsed_args):
volume_client = self.app.client_manager.volume
- if volume_client.api_version < api_versions.APIVersion('3.13'):
+ if parsed_args.volume_group_type_legacy:
msg = _(
- "--os-volume-api-version 3.13 or greater is required to "
- "support the 'volume group create' command"
+ "Passing volume group type and volume types as positional "
+ "arguments is deprecated. Use the --volume-group-type and "
+ "--volume-type option arguments instead."
)
- raise exceptions.CommandError(msg)
+ self.log.warning(msg)
- volume_group_type = utils.find_resource(
- volume_client.group_types,
- parsed_args.volume_group_type,
- )
+ volume_group_type = parsed_args.volume_group_type or \
+ parsed_args.volume_group_type_legacy
+ volume_types = parsed_args.volume_types[:]
+ volume_types.extend(parsed_args.volume_types_legacy)
- volume_types = []
- for volume_type in parsed_args.volume_types:
- volume_types.append(
- utils.find_resource(
- volume_client.volume_types,
- volume_type,
+ if volume_group_type:
+ if volume_client.api_version < api_versions.APIVersion('3.13'):
+ msg = _(
+ "--os-volume-api-version 3.13 or greater is required to "
+ "support the 'volume group create' command"
)
- )
+ raise exceptions.CommandError(msg)
+ if not volume_types:
+                msg = _(
+                    "--volume-type is a required argument when creating "
+                    "a group from a group type."
+                )
+ raise exceptions.CommandError(msg)
- group = volume_client.groups.create(
- volume_group_type.id,
- ','.join(x.id for x in volume_types),
- parsed_args.name,
- parsed_args.description,
- availability_zone=parsed_args.availability_zone)
+ volume_group_type_id = utils.find_resource(
+ volume_client.group_types,
+ volume_group_type,
+ ).id
+ volume_types_ids = []
+ for volume_type in volume_types:
+ volume_types_ids.append(
+ utils.find_resource(
+ volume_client.volume_types,
+ volume_type,
+ ).id
+ )
- group = volume_client.groups.get(group.id)
+ group = volume_client.groups.create(
+ volume_group_type_id,
+ ','.join(volume_types_ids),
+ parsed_args.name,
+ parsed_args.description,
+ availability_zone=parsed_args.availability_zone,
+ )
- return _format_group(group)
+ group = volume_client.groups.get(group.id)
+ return _format_group(group)
+
+ else:
+ if volume_client.api_version < api_versions.APIVersion('3.14'):
+ msg = _(
+ "--os-volume-api-version 3.14 or greater is required to "
+ "support the 'volume group create "
+ "[--source-group|--group-snapshot]' command"
+ )
+ raise exceptions.CommandError(msg)
+ if (parsed_args.source_group is None and
+ parsed_args.group_snapshot is None):
+ msg = _(
+ "Either --source-group <source_group> or "
+ "'--group-snapshot <group_snapshot>' needs to be "
+ "provided to run the 'volume group create "
+ "[--source-group|--group-snapshot]' command"
+ )
+ raise exceptions.CommandError(msg)
+ if parsed_args.availability_zone:
+ msg = _("'--availability-zone' option will not work "
+ "if creating group from source.")
+ self.log.warning(msg)
+
+ source_group = None
+ if parsed_args.source_group:
+ source_group = utils.find_resource(volume_client.groups,
+ parsed_args.source_group)
+ group_snapshot = None
+ if parsed_args.group_snapshot:
+ group_snapshot = utils.find_resource(
+ volume_client.group_snapshots,
+ parsed_args.group_snapshot)
+ group = volume_client.groups.create_from_src(
+ group_snapshot.id if group_snapshot else None,
+ source_group.id if source_group else None,
+ parsed_args.name,
+ parsed_args.description)
+ group = volume_client.groups.get(group.id)
+ return _format_group(group)
class DeleteVolumeGroup(command.Command):