summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rwxr-xr-xbin/nova-all4
-rwxr-xr-xbin/nova-api-os-volume46
-rwxr-xr-xbin/nova-manage28
-rwxr-xr-xbin/nova-volume50
-rwxr-xr-xbin/nova-volume-usage-audit81
-rw-r--r--doc/api_samples/all_extensions/extensions-get-resp.json8
-rw-r--r--doc/api_samples/all_extensions/extensions-get-resp.xml3
-rw-r--r--doc/source/conf.py6
-rw-r--r--doc/source/devref/architecture.rst7
-rw-r--r--doc/source/devref/index.rst2
-rw-r--r--doc/source/devref/rpc.rst10
-rw-r--r--doc/source/devref/volume.rst52
-rw-r--r--doc/source/devref/xensmvolume.rst88
-rw-r--r--doc/source/man/nova-api-os-volume.rst49
-rw-r--r--doc/source/man/nova-rootwrap.rst4
-rw-r--r--doc/source/man/nova-volume-usage-audit.rst61
-rw-r--r--doc/source/man/nova-volume.rst54
-rw-r--r--etc/nova/rootwrap.d/volume.filters36
-rw-r--r--nova/api/openstack/compute/contrib/volumes.py7
-rw-r--r--nova/api/openstack/compute/contrib/volumetypes.py225
-rw-r--r--nova/api/openstack/volume/__init__.py68
-rw-r--r--nova/api/openstack/volume/contrib/__init__.py39
-rw-r--r--nova/api/openstack/volume/contrib/admin_actions.py129
-rw-r--r--nova/api/openstack/volume/contrib/image_create.py31
-rw-r--r--nova/api/openstack/volume/contrib/types_extra_specs.py149
-rw-r--r--nova/api/openstack/volume/contrib/types_manage.py91
-rw-r--r--nova/api/openstack/volume/contrib/volume_actions.py131
-rw-r--r--nova/api/openstack/volume/extensions.py34
-rw-r--r--nova/api/openstack/volume/snapshots.py185
-rw-r--r--nova/api/openstack/volume/types.py80
-rw-r--r--nova/api/openstack/volume/versions.py83
-rw-r--r--nova/api/openstack/volume/views/__init__.py16
-rw-r--r--nova/api/openstack/volume/views/types.py34
-rw-r--r--nova/api/openstack/volume/views/versions.py36
-rw-r--r--nova/api/openstack/volume/volumes.py364
-rw-r--r--nova/flags.py20
-rw-r--r--nova/network/manager.py2
-rw-r--r--nova/scheduler/driver.py20
-rw-r--r--nova/service.py9
-rw-r--r--nova/tests/api/ec2/test_cinder_cloud.py14
-rw-r--r--nova/tests/api/ec2/test_cloud.py576
-rw-r--r--nova/tests/api/ec2/test_ec2_validate.py1
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_snapshots.py16
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_volume_types.py224
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_volume_types_extra_specs.py198
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_volumes.py15
-rw-r--r--nova/tests/api/openstack/compute/test_extensions.py1
-rw-r--r--nova/tests/api/openstack/fakes.py1
-rw-r--r--nova/tests/api/openstack/volume/__init__.py19
-rw-r--r--nova/tests/api/openstack/volume/contrib/__init__.py19
-rw-r--r--nova/tests/api/openstack/volume/contrib/test_admin_actions.py184
-rw-r--r--nova/tests/api/openstack/volume/contrib/test_types_extra_specs.py226
-rw-r--r--nova/tests/api/openstack/volume/contrib/test_types_manage.py122
-rw-r--r--nova/tests/api/openstack/volume/contrib/test_volume_actions.py162
-rw-r--r--nova/tests/api/openstack/volume/extensions/__init__.py15
-rw-r--r--nova/tests/api/openstack/volume/extensions/foxinsocks.py94
-rw-r--r--nova/tests/api/openstack/volume/test_extensions.py155
-rw-r--r--nova/tests/api/openstack/volume/test_router.py112
-rw-r--r--nova/tests/api/openstack/volume/test_snapshots.py299
-rw-r--r--nova/tests/api/openstack/volume/test_types.py194
-rw-r--r--nova/tests/api/openstack/volume/test_volumes.py602
-rw-r--r--nova/tests/compute/test_compute.py60
-rw-r--r--nova/tests/fake_flags.py2
-rw-r--r--nova/tests/fake_volume.py14
-rw-r--r--nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.json.tpl8
-rw-r--r--nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.xml.tpl3
-rw-r--r--nova/tests/integrated/integrated_helpers.py3
-rw-r--r--nova/tests/integrated/test_volumes.py181
-rw-r--r--nova/tests/scheduler/test_scheduler.py70
-rw-r--r--nova/tests/test_cinder.py3
-rw-r--r--nova/tests/test_iscsi.py121
-rw-r--r--nova/tests/test_libvirt.py1
-rw-r--r--nova/tests/test_netapp.py1380
-rw-r--r--nova/tests/test_netapp_nfs.py234
-rw-r--r--nova/tests/test_nexenta.py278
-rw-r--r--nova/tests/test_nfs.py569
-rw-r--r--nova/tests/test_plugin_api_extensions.py1
-rw-r--r--nova/tests/test_quota.py23
-rw-r--r--nova/tests/test_rbd.py161
-rw-r--r--nova/tests/test_solidfire.py208
-rw-r--r--nova/tests/test_storwize_svc.py1376
-rw-r--r--nova/tests/test_volume.py931
-rw-r--r--nova/tests/test_volume_types.py167
-rw-r--r--nova/tests/test_volume_types_extra_specs.py130
-rw-r--r--nova/tests/test_volume_utils.py91
-rw-r--r--nova/tests/test_xenapi.py2
-rw-r--r--nova/tests/test_xensm.py140
-rw-r--r--nova/tests/volume/test_HpSanISCSIDriver.py209
-rw-r--r--nova/volume/api.py511
-rw-r--r--nova/volume/manager.py452
-rw-r--r--nova/volume/netapp.py1291
-rw-r--r--nova/volume/netapp_nfs.py267
-rw-r--r--nova/volume/nexenta/__init__.py33
-rw-r--r--nova/volume/nexenta/jsonrpc.py84
-rw-r--r--nova/volume/nexenta/volume.py282
-rw-r--r--nova/volume/nfs.py293
-rw-r--r--nova/volume/san.py651
-rw-r--r--nova/volume/solidfire.py424
-rw-r--r--nova/volume/storwize_svc.py1233
-rw-r--r--nova/volume/utils.py84
-rw-r--r--nova/volume/volume_types.py125
-rw-r--r--nova/volume/xensm.py250
-rw-r--r--setup.py3
103 files changed, 110 insertions, 17830 deletions
diff --git a/bin/nova-all b/bin/nova-all
index 1d7b2dfaca..2bbc27c821 100755
--- a/bin/nova-all
+++ b/bin/nova-all
@@ -70,8 +70,8 @@ if __name__ == '__main__':
except (Exception, SystemExit):
LOG.exception(_('Failed to load %s') % mod.__name__)
- for binary in ['nova-compute', 'nova-volume',
- 'nova-network', 'nova-scheduler', 'nova-cert']:
+ for binary in ['nova-compute', 'nova-network', 'nova-scheduler',
+ 'nova-cert']:
try:
launcher.launch_server(service.Service.create(binary=binary))
except (Exception, SystemExit):
diff --git a/bin/nova-api-os-volume b/bin/nova-api-os-volume
deleted file mode 100755
index 7c368fce93..0000000000
--- a/bin/nova-api-os-volume
+++ /dev/null
@@ -1,46 +0,0 @@
-#!/usr/bin/env python
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Starter script for Nova OS API."""
-
-import eventlet
-eventlet.monkey_patch(os=False)
-
-import os
-import sys
-
-
-possible_topdir = os.path.normpath(os.path.join(os.path.abspath(
- sys.argv[0]), os.pardir, os.pardir))
-if os.path.exists(os.path.join(possible_topdir, "nova", "__init__.py")):
- sys.path.insert(0, possible_topdir)
-
-
-from nova import flags
-from nova.openstack.common import log as logging
-from nova import service
-from nova import utils
-
-if __name__ == '__main__':
- flags.parse_args(sys.argv)
- logging.setup("nova")
- utils.monkey_patch()
- server = service.WSGIService('osapi_volume')
- service.serve(server, workers=server.workers)
- service.wait()
diff --git a/bin/nova-manage b/bin/nova-manage
index c68ff73c6d..9194ff4931 100755
--- a/bin/nova-manage
+++ b/bin/nova-manage
@@ -777,33 +777,6 @@ class VersionCommands(object):
self.list()
-class VolumeCommands(object):
- """Methods for dealing with a cloud in an odd state"""
-
- @args('--volume', dest='volume_id', metavar='<volume id>',
- help='Volume ID')
- def reattach(self, volume_id):
- """Re-attach a volume that has previously been attached
- to an instance. Typically called after a compute host
- has been rebooted."""
-
- if 'cinder' in FLAGS.volume_api_class:
- print(_("\"nova-manage volume reattach\" only valid "
- "when using nova-volume service"))
- sys.exit(1)
-
- ctxt = context.get_admin_context()
- volume = db.volume_get(ctxt, param2id(volume_id))
-
- if not volume['instance_id']:
- print _("volume is not attached to an instance")
- return
- instance = db.instance_get(ctxt, volume['instance_id'])
- rpcapi = compute_rpcapi.ComputeAPI()
- rpcapi.attach_volume(ctxt, instance, volume['id'],
- volume['mountpoint'])
-
-
class InstanceTypeCommands(object):
"""Class for managing instance types / flavors."""
@@ -1207,7 +1180,6 @@ CATEGORIES = [
('sm', StorageManagerCommands),
('version', VersionCommands),
('vm', VmCommands),
- ('volume', VolumeCommands),
('vpn', VpnCommands),
]
diff --git a/bin/nova-volume b/bin/nova-volume
deleted file mode 100755
index 602b332b7c..0000000000
--- a/bin/nova-volume
+++ /dev/null
@@ -1,50 +0,0 @@
-#!/usr/bin/env python
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""Starter script for Nova Volume."""
-
-import eventlet
-eventlet.monkey_patch()
-
-import os
-import sys
-
-# If ../nova/__init__.py exists, add ../ to Python search path, so that
-# it will override what happens to be installed in /usr/(local/)lib/python...
-possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
- os.pardir,
- os.pardir))
-if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
- sys.path.insert(0, possible_topdir)
-
-
-from nova import flags
-from nova.openstack.common import log as logging
-from nova import service
-from nova import utils
-
-if __name__ == '__main__':
- flags.parse_args(sys.argv)
- FLAGS = flags.FLAGS
- logging.setup("nova")
- utils.monkey_patch()
- server = service.Service.create(binary='nova-volume',
- topic=FLAGS.volume_topic)
- service.serve(server)
- service.wait()
diff --git a/bin/nova-volume-usage-audit b/bin/nova-volume-usage-audit
deleted file mode 100755
index 2f7f54a412..0000000000
--- a/bin/nova-volume-usage-audit
+++ /dev/null
@@ -1,81 +0,0 @@
-#!/usr/bin/env python
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright (c) 2011 OpenStack, LLC.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""Cron script to generate usage notifications for volumes existing during
- the audit period.
-
- Together with the notifications generated by volumes
- create/delete/resize, over that time period, this allows an external
- system consuming usage notification feeds to calculate volume usage
- for each tenant.
-
- Time periods are specified as 'hour', 'month', 'day' or 'year'
-
- hour = previous hour. If run at 9:07am, will generate usage for 8-9am.
- month = previous month. If the script is run April 1, it will generate
- usages for March 1 through March 31.
- day = previous day. if run on July 4th, it generates usages for July 3rd.
- year = previous year. If run on Jan 1, it generates usages for
- Jan 1 through Dec 31 of the previous year.
-"""
-
-import datetime
-import gettext
-import os
-import sys
-import time
-import traceback
-
-# If ../nova/__init__.py exists, add ../ to Python search path, so that
-# it will override what happens to be installed in /usr/(local/)lib/python...
-POSSIBLE_TOPDIR = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
- os.pardir,
- os.pardir))
-if os.path.exists(os.path.join(POSSIBLE_TOPDIR, 'nova', '__init__.py')):
- sys.path.insert(0, POSSIBLE_TOPDIR)
-
-gettext.install('nova', unicode=1)
-from nova import context
-from nova import db
-from nova import exception
-from nova import flags
-from nova.openstack.common import log as logging
-from nova.openstack.common import rpc
-from nova import utils
-from nova.volume import utils as volume_utils
-
-FLAGS = flags.FLAGS
-
-if __name__ == '__main__':
- admin_context = context.get_admin_context()
- flags.FLAGS(sys.argv)
- logging.setup("nova")
- begin, end = utils.last_completed_audit_period()
- print _("Starting volume usage audit")
- print _("Creating usages for %s until %s") % (str(begin), str(end))
- volumes = db.volume_get_active_by_window(admin_context,
- begin,
- end)
- print _("Found %d volumes") % len(volumes)
- for volume_ref in volumes:
- try:
- volume_utils.notify_usage_exists(
- admin_context, volume_ref)
- except Exception, e:
- print traceback.format_exc(e)
- print _("Volume usage audit completed")
diff --git a/doc/api_samples/all_extensions/extensions-get-resp.json b/doc/api_samples/all_extensions/extensions-get-resp.json
index e459340d57..f4dfa06838 100644
--- a/doc/api_samples/all_extensions/extensions-get-resp.json
+++ b/doc/api_samples/all_extensions/extensions-get-resp.json
@@ -329,14 +329,6 @@
"updated": "2011-08-17T00:00:00+00:00"
},
{
- "alias": "os-volume-types",
- "description": "Volume types support",
- "links": [],
- "name": "VolumeTypes",
- "namespace": "http://docs.openstack.org/compute/ext/volume_types/api/v1.1",
- "updated": "2011-08-24T00:00:00+00:00"
- },
- {
"alias": "os-volumes",
"description": "Volumes support",
"links": [],
diff --git a/doc/api_samples/all_extensions/extensions-get-resp.xml b/doc/api_samples/all_extensions/extensions-get-resp.xml
index 0ecb97c6f4..de18e5a2b6 100644
--- a/doc/api_samples/all_extensions/extensions-get-resp.xml
+++ b/doc/api_samples/all_extensions/extensions-get-resp.xml
@@ -137,9 +137,6 @@
<extension alias="os-virtual-interfaces" updated="2011-08-17T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/virtual_interfaces/api/v1.1" name="VirtualInterfaces">
<description>Virtual interface support</description>
</extension>
- <extension alias="os-volume-types" updated="2011-08-24T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/volume_types/api/v1.1" name="VolumeTypes">
- <description>Volume types support</description>
- </extension>
<extension alias="os-volumes" updated="2011-03-25T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/volumes/api/v1.1" name="Volumes">
<description>Volumes support</description>
</extension>
diff --git a/doc/source/conf.py b/doc/source/conf.py
index 7f77cc5d31..b52bcad0d3 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -127,8 +127,6 @@ man_pages = [
[u'OpenStack'], 1),
('man/nova-api-os-compute', 'nova-api-os-compute',
u'Cloud controller fabric', [u'OpenStack'], 1),
- ('man/nova-api-os-volume', 'nova-api-os-volume',
- u'Cloud controller fabric', [u'OpenStack'], 1),
('man/nova-api', 'nova-api', u'Cloud controller fabric',
[u'OpenStack'], 1),
('man/nova-cert', 'nova-cert', u'Cloud controller fabric',
@@ -155,10 +153,6 @@ man_pages = [
[u'OpenStack'], 1),
('man/nova-scheduler', 'nova-scheduler', u'Cloud controller fabric',
[u'OpenStack'], 1),
- ('man/nova-volume-usage-audit', 'nova-volume-usage-audit', u'Cloud controller fabric',
- [u'OpenStack'], 1),
- ('man/nova-volume', 'nova-volume', u'Cloud controller fabric',
- [u'OpenStack'], 1),
('man/nova-xvpvncproxy', 'nova-xvpvncproxy', u'Cloud controller fabric',
[u'OpenStack'], 1)
]
diff --git a/doc/source/devref/architecture.rst b/doc/source/devref/architecture.rst
index db5947a3e8..3c4a5ca48b 100644
--- a/doc/source/devref/architecture.rst
+++ b/doc/source/devref/architecture.rst
@@ -33,8 +33,8 @@ Below you will find a helpful explanation of the different components.
[ Auth Manager ] ---
| \- ( DB )
|
- | [ scheduler ] - [ volume ] - ( iSCSI )
- | /
+ |
+ |
[ Web Dashboard ] -> [ api ] -- < AMQP > ------ [ network ] - ( Flat/Vlan )
| \
< HTTP > [ scheduler ] - [ compute ] - ( libvirt/xen )
@@ -46,7 +46,6 @@ Below you will find a helpful explanation of the different components.
* api: component that receives http requests, converts commands and communicates with other components via the queue or http (in the case of objectstore)
* Auth Manager: component responsible for users/projects/and roles. Can backend to DB or LDAP. This is not a separate binary, but rather a python class that is used by most components in the system.
* objectstore: http server that replicates s3 api and allows storage and retrieval of images
-* scheduler: decides which host gets each vm and volume
-* volume: manages dynamically attachable block devices.
+* scheduler: decides which host gets each vm
* network: manages ip forwarding, bridges, and vlans
* compute: manages communication with hypervisor and virtual machines.
diff --git a/doc/source/devref/index.rst b/doc/source/devref/index.rst
index a04bc83caa..239848c62c 100644
--- a/doc/source/devref/index.rst
+++ b/doc/source/devref/index.rst
@@ -67,8 +67,6 @@ Module Reference
services
database
- volume
- xensmvolume
compute
network
api
diff --git a/doc/source/devref/rpc.rst b/doc/source/devref/rpc.rst
index 8ed901a6b1..9d97c308ad 100644
--- a/doc/source/devref/rpc.rst
+++ b/doc/source/devref/rpc.rst
@@ -30,12 +30,12 @@ Nova uses direct, fanout, and topic-based exchanges. The architecture looks like
..
-Nova implements RPC (both request+response, and one-way, respectively nicknamed 'rpc.call' and 'rpc.cast') over AMQP by providing an adapter class which take cares of marshaling and unmarshaling of messages into function calls. Each Nova service (for example Compute, Volume, etc.) create two queues at the initialization time, one which accepts messages with routing keys 'NODE-TYPE.NODE-ID' (for example compute.hostname) and another, which accepts messages with routing keys as generic 'NODE-TYPE' (for example compute). The former is used specifically when Nova-API needs to redirect commands to a specific node like 'euca-terminate instance'. In this case, only the compute node whose host's hypervisor is running the virtual machine can kill the instance. The API acts as a consumer when RPC calls are request/response, otherwise is acts as publisher only.
+Nova implements RPC (both request+response, and one-way, respectively nicknamed 'rpc.call' and 'rpc.cast') over AMQP by providing an adapter class which take cares of marshaling and unmarshaling of messages into function calls. Each Nova service (for example Compute, Scheduler, etc.) create two queues at the initialization time, one which accepts messages with routing keys 'NODE-TYPE.NODE-ID' (for example compute.hostname) and another, which accepts messages with routing keys as generic 'NODE-TYPE' (for example compute). The former is used specifically when Nova-API needs to redirect commands to a specific node like 'euca-terminate instance'. In this case, only the compute node whose host's hypervisor is running the virtual machine can kill the instance. The API acts as a consumer when RPC calls are request/response, otherwise is acts as publisher only.
Nova RPC Mappings
-----------------
-The figure below shows the internals of a message broker node (referred to as a RabbitMQ node in the diagrams) when a single instance is deployed and shared in an OpenStack cloud. Every Nova component connects to the message broker and, depending on its personality (for example a compute node or a network node), may use the queue either as an Invoker (such as API or Scheduler) or a Worker (such as Compute, Volume or Network). Invokers and Workers do not actually exist in the Nova object model, but we are going to use them as an abstraction for sake of clarity. An Invoker is a component that sends messages in the queuing system via two operations: 1) rpc.call and ii) rpc.cast; a Worker is a component that receives messages from the queuing system and reply accordingly to rcp.call operations.
+The figure below shows the internals of a message broker node (referred to as a RabbitMQ node in the diagrams) when a single instance is deployed and shared in an OpenStack cloud. Every Nova component connects to the message broker and, depending on its personality (for example a compute node or a network node), may use the queue either as an Invoker (such as API or Scheduler) or a Worker (such as Compute or Network). Invokers and Workers do not actually exist in the Nova object model, but we are going to use them as an abstraction for sake of clarity. An Invoker is a component that sends messages in the queuing system via two operations: 1) rpc.call and ii) rpc.cast; a Worker is a component that receives messages from the queuing system and reply accordingly to rcp.call operations.
Figure 2 shows the following internal elements:
@@ -97,10 +97,8 @@ The figure below shows the status of a RabbitMQ node after Nova components' boot
2. compute
3. network.phantom (phantom is hostname)
4. network
- 5. volume.phantom (phantom is hostname)
- 6. volume
- 7. scheduler.phantom (phantom is hostname)
- 8. scheduler
+ 5. scheduler.phantom (phantom is hostname)
+ 6. scheduler
.. image:: /images/rpc/state.png
:width: 60%
diff --git a/doc/source/devref/volume.rst b/doc/source/devref/volume.rst
deleted file mode 100644
index c9e0387ffe..0000000000
--- a/doc/source/devref/volume.rst
+++ /dev/null
@@ -1,52 +0,0 @@
-..
- Copyright 2010-2011 United States Government as represented by the
- Administrator of the National Aeronautics and Space Administration.
- All Rights Reserved.
-
- Licensed under the Apache License, Version 2.0 (the "License"); you may
- not use this file except in compliance with the License. You may obtain
- a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- License for the specific language governing permissions and limitations
- under the License.
-
-Storage Volumes, Disks
-======================
-
-.. todo:: rework after iSCSI merge (see 'Old Docs') (todd or vish)
-
-
-The :mod:`nova.volume.manager` Module
--------------------------------------
-
-.. automodule:: nova.volume.manager
- :noindex:
- :members:
- :undoc-members:
- :show-inheritance:
-
-The :mod:`nova.volume.driver` Module
--------------------------------------
-
-.. automodule:: nova.volume.driver
- :noindex:
- :members:
- :undoc-members:
- :show-inheritance:
-
-Tests
------
-
-The :mod:`volume_unittest` Module
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. automodule:: nova.tests.volume_unittest
- :noindex:
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/devref/xensmvolume.rst b/doc/source/devref/xensmvolume.rst
deleted file mode 100644
index 883cdd9310..0000000000
--- a/doc/source/devref/xensmvolume.rst
+++ /dev/null
@@ -1,88 +0,0 @@
-Xen Storage Manager Volume Driver
-=================================
-
-The Xen Storage Manager (xensm) driver for Nova-Volume is based on XenAPI Storage Manager. This will not only provide basic storage functionality (like volume creation, and destruction) on a number of different storage back-ends, such as Netapp, NFS, etc. but it will also enable the capability of using more sophisticated storage back-ends for operations like cloning/snapshotting etc. To have an idea of the benefits of using XenAPI SM to provide back-end storage services, the list below shows some of the storage plugins already supported in XenServer/XCP:
-
-- NFS VHD: SR plugin which stores disks as VHD files on a remote NFS filesystem
-- Local VHD on LVM: SR plugin which represents disks as VHD disks on Logical Volumes within a locally-attached Volume Group
-- HBA LUN-per-VDI driver: SR plugin which represents LUNs as VDIs sourced by hardware HBA adapters, e.g. hardware-based iSCSI or FC support
-- NetApp: SR driver for mapping of LUNs to VDIs on a NETAPP server, providing use of fast snapshot and clone features on the filer
-- LVHD over FC: SR plugin which represents disks as VHDs on Logical Volumes within a Volume Group created on an HBA LUN, e.g. hardware-based iSCSI or FC support
-- iSCSI: Base ISCSI SR driver, provides a LUN-per-VDI. Does not support creation of VDIs but accesses existing LUNs on a target.
-- LVHD over iSCSI: SR plugin which represents disks as Logical Volumes within a Volume Group created on an iSCSI LUN
-- EqualLogic: SR driver for mapping of LUNs to VDIs on an EQUALLOGIC array group, providing use of fast snapshot and clone features on the array
-
-Glossary
-=========
-
- XenServer: Commercial, supported product from Citrix
-
- Xen Cloud Platform (XCP): Open-source equivalent of XenServer (and the development project for the toolstack). Everything said about XenServer below applies equally to XCP
-
- XenAPI: The management API exposed by XenServer and XCP
-
- xapi: The primary daemon on XenServer and Xen Cloud Platform; the one that exposes the XenAPI
-
-
-Design
-=======
-
-Definitions
------------
-
-Backend: A term for a particular storage backend. This could be iSCSI, NFS, Netapp etc.
-Backend-config: All the parameters required to connect to a specific backend. For e.g. For NFS, this would be the server, path, etc.
-Flavor: This term is equivalent to volume "types". A user friendly term to specify some notion of quality of service. For example, "gold" might mean that the volumes will use a backend where backups are possible.
-
-A flavor can be associated with multiple backends. The volume scheduler, with the help of the driver, will decide which backend will be used to create a volume of a particular flavor. Currently, the driver uses a simple "first-fit" policy, where the first backend that can successfully create this volume is the one that is used.
-
-Operation
-----------
-
-Using the nova-manage command detailed in the implementation, an admin can add flavors and backends.
-
-One or more nova-volume service instances will be deployed per availability zone. When an instance is started, it will create storage repositories (SRs) to connect to the backends available within that zone. All nova-volume instances within a zone can see all the available backends. These instances are completely symmetric and hence should be able to service any create_volume request within the zone.
-
-
-Commands
-=========
-
-A category called "sm" has been added to nova-manage in the class StorageManagerCommands.
-
-The following actions will be added:
-
-- flavor_list
-- flavor_create
-- flavor_delete
-- backend_list
-- backend_add
-- backend_remove
-
-Usage:
-------
-
-nova-manage sm flavor_create <label> <description>
-
-nova-manage sm flavor_delete<label>
-
-nova-manage sm backend_add <flavor label> <SR type> [config connection parameters]
-
-Note: SR type and config connection parameters are in keeping with the Xen Command Line Interface. http://support.citrix.com/article/CTX124887
-
-nova-manage sm backend_delete <backend-id>
-
-Examples:
----------
-
-nova-manage sm flavor_create gold "Not all that glitters"
-
-nova-manage sm flavor_delete gold
-
-nova-manage sm backend_add gold nfs name_label=toybox-renuka server=myserver serverpath=/local/scratch/myname
-
-nova-manage sm backend_remove 1
-
-API Changes
-===========
-
-No API changes have been introduced so far. The existing euca-create-volume and euca-delete-volume commands (or equivalent OpenStack API commands) should be used.
diff --git a/doc/source/man/nova-api-os-volume.rst b/doc/source/man/nova-api-os-volume.rst
deleted file mode 100644
index 2d3e009f16..0000000000
--- a/doc/source/man/nova-api-os-volume.rst
+++ /dev/null
@@ -1,49 +0,0 @@
-===================
-nova-api-os-volume
-===================
-
--------------------------------------------
-Server for the Nova OpenStack Volume APIs
--------------------------------------------
-
-:Author: openstack@lists.launchpad.net
-:Date: 2012-09-27
-:Copyright: OpenStack LLC
-:Version: 2012.1
-:Manual section: 1
-:Manual group: cloud computing
-
-SYNOPSIS
-========
-
- nova-api-os-volume [options]
-
-DESCRIPTION
-===========
-
-nova-api-os-volume is a server daemon that serves the Nova OpenStack API
-
-OPTIONS
-=======
-
- **General options**
-
-FILES
-========
-
-* /etc/nova/nova.conf
-* /etc/nova/api-paste.ini
-* /etc/nova/policy.json
-* /etc/nova/rootwrap.conf
-* /etc/nova/rootwrap.d/
-
-SEE ALSO
-========
-
-* `OpenStack Nova <http://nova.openstack.org>`__
-* `OpenStack Nova <http://nova.openstack.org>`__
-
-BUGS
-====
-
-* Nova is sourced in Launchpad so you can view current bugs at `OpenStack Nova <http://nova.openstack.org>`__
diff --git a/doc/source/man/nova-rootwrap.rst b/doc/source/man/nova-rootwrap.rst
index e69af588a1..8bc089a359 100644
--- a/doc/source/man/nova-rootwrap.rst
+++ b/doc/source/man/nova-rootwrap.rst
@@ -30,8 +30,8 @@ You also need to let the nova user run nova-rootwrap as root in sudoers:
nova ALL = (root) NOPASSWD: /usr/bin/nova-rootwrap /etc/nova/rootwrap.conf *
To make allowed commands node-specific, your packaging should only
-install {compute,network,volume}.filters respectively on compute, network
-and volume nodes (i.e. nova-api nodes should not have any of those files
+install {compute,network}.filters respectively on compute and network
+nodes (i.e. nova-api nodes should not have any of those files
installed).
diff --git a/doc/source/man/nova-volume-usage-audit.rst b/doc/source/man/nova-volume-usage-audit.rst
deleted file mode 100644
index 628536a797..0000000000
--- a/doc/source/man/nova-volume-usage-audit.rst
+++ /dev/null
@@ -1,61 +0,0 @@
-=======================
-nova-volume-usage-audit
-=======================
-
--------------------------------------------
-Generate Usage Notifications for Volumes
--------------------------------------------
-
-:Author: openstack@lists.launchpad.net
-:Date: 2012-09-27
-:Copyright: OpenStack LLC
-:Version: 2012.1
-:Manual section: 1
-:Manual group: cloud computing
-
-SYNOPSIS
-========
-
- nova-volume-usage-audit [options]
-
-DESCRIPTION
-===========
-
-Cron script to generate usage notifications for volumes existing during
-the audit period.
-
-Together with the notifications generated by volumes
-create/delete/resize, over that time period, this allows an external
-system consuming usage notification feeds to calculate volume usage
-for each tenant.
-
-Time periods are specified as 'hour', 'month', 'day' or 'year'
-
-hour = previous hour. If run at 9:07am, will generate usage for 8-9am.
-month = previous month. If the script is run April 1, it will generate
- usages for March 1 through March 31.
-day = previous day. if run on July 4th, it generates usages for July 3rd.
-year = previous year. If run on Jan 1, it generates usages for
- Jan 1 through Dec 31 of the previous year.
-
-
-OPTIONS
-=======
-
- **General options**
-
-FILES
-========
-
-* /etc/nova/nova.conf
-
-SEE ALSO
-========
-
-* `OpenStack Nova <http://nova.openstack.org>`__
-* `OpenStack Nova <http://nova.openstack.org>`__
-
-BUGS
-====
-
-* Nova is sourced in Launchpad so you can view current bugs at `OpenStack Nova <http://nova.openstack.org>`__
diff --git a/doc/source/man/nova-volume.rst b/doc/source/man/nova-volume.rst
deleted file mode 100644
index 4c695c841d..0000000000
--- a/doc/source/man/nova-volume.rst
+++ /dev/null
@@ -1,54 +0,0 @@
-===========
-nova-volume
-===========
-
--------------------
-Nova Volume Server
--------------------
-
-:Author: openstack@lists.launchpad.net
-:Date: 2012-09-27
-:Copyright: OpenStack LLC
-:Version: 2012.1
-:Manual section: 1
-:Manual group: cloud computing
-
-SYNOPSIS
-========
-
- nova-volume [options]
-
-DESCRIPTION
-===========
-
-nova-volume manages creating, attaching, detaching, and persistent storage.
-
-Persistent storage volumes keep their state independent of instances. You can
-attach to an instance, terminate the instance, spawn a new instance (even
-one from a different image) and re-attach the volume with the same data
-intact.
-
-
-OPTIONS
-=======
-
- **General options**
-
-FILES
-========
-
-* /etc/nova/nova.conf
-* /etc/nova/policy.json
-* /etc/nova/rootwrap.conf
-* /etc/nova/rootwrap.d/
-
-SEE ALSO
-========
-
-* `OpenStack Nova <http://nova.openstack.org>`__
-* `OpenStack Nova <http://nova.openstack.org>`__
-
-BUGS
-====
-
-* Nova is sourced in Launchpad so you can view current bugs at `OpenStack Nova <http://nova.openstack.org>`__
diff --git a/etc/nova/rootwrap.d/volume.filters b/etc/nova/rootwrap.d/volume.filters
deleted file mode 100644
index 574fef7c25..0000000000
--- a/etc/nova/rootwrap.d/volume.filters
+++ /dev/null
@@ -1,36 +0,0 @@
-# nova-rootwrap command filters for volume nodes
-# This file should be owned by (and only-writeable by) the root user
-
-[Filters]
-# nova/volume/iscsi.py: iscsi_helper '--op' ...
-ietadm: CommandFilter, /usr/sbin/ietadm, root
-tgtadm: CommandFilter, /usr/sbin/tgtadm, root
-tgt-admin: CommandFilter, /usr/sbin/tgt-admin, root
-
-# nova/volume/driver.py: 'vgs', '--noheadings', '-o', 'name'
-vgs: CommandFilter, /sbin/vgs, root
-
-# nova/volume/driver.py: 'lvcreate', '-L', sizestr, '-n', volume_name,..
-# nova/volume/driver.py: 'lvcreate', '-L', ...
-lvcreate: CommandFilter, /sbin/lvcreate, root
-
-# nova/volume/driver.py: 'dd', 'if=%s' % srcstr, 'of=%s' % deststr,...
-dd: CommandFilter, /bin/dd, root
-
-# nova/volume/driver.py: 'lvremove', '-f', %s/%s % ...
-lvremove: CommandFilter, /sbin/lvremove, root
-
-# nova/volume/driver.py: 'lvdisplay', '--noheading', '-C', '-o', 'Attr',..
-lvdisplay: CommandFilter, /sbin/lvdisplay, root
-
-# nova/volume/driver.py: 'iscsiadm', '-m', 'discovery', '-t',...
-# nova/volume/driver.py: 'iscsiadm', '-m', 'node', '-T', ...
-iscsiadm: CommandFilter, /sbin/iscsiadm, root
-iscsiadm_usr: CommandFilter, /usr/bin/iscsiadm, root
-
-# nova/volume/driver.py
-dmsetup: CommandFilter, /sbin/dmsetup, root
-dmsetup_usr: CommandFilter, /usr/sbin/dmsetup, root
-
-#nova/volume/.py: utils.temporary_chown(path, 0), ...
-chown: CommandFilter, /bin/chown, root
diff --git a/nova/api/openstack/compute/contrib/volumes.py b/nova/api/openstack/compute/contrib/volumes.py
index 9940e3050e..6eaa51079c 100644
--- a/nova/api/openstack/compute/contrib/volumes.py
+++ b/nova/api/openstack/compute/contrib/volumes.py
@@ -29,7 +29,6 @@ from nova import flags
from nova.openstack.common import log as logging
from nova import utils
from nova import volume
-from nova.volume import volume_types
LOG = logging.getLogger(__name__)
@@ -227,12 +226,6 @@ class VolumeController(wsgi.Controller):
vol = body['volume']
vol_type = vol.get('volume_type', None)
- if vol_type:
- try:
- vol_type = volume_types.get_volume_type_by_name(context,
- vol_type)
- except exception.NotFound:
- raise exc.HTTPNotFound()
metadata = vol.get('metadata', None)
diff --git a/nova/api/openstack/compute/contrib/volumetypes.py b/nova/api/openstack/compute/contrib/volumetypes.py
deleted file mode 100644
index 036e3ff427..0000000000
--- a/nova/api/openstack/compute/contrib/volumetypes.py
+++ /dev/null
@@ -1,225 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright (c) 2011 Zadara Storage Inc.
-# Copyright (c) 2011 OpenStack LLC.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-""" The volume type & volume types extra specs extension"""
-
-from webob import exc
-
-from nova.api.openstack import extensions
-from nova.api.openstack import wsgi
-from nova.api.openstack import xmlutil
-from nova import db
-from nova import exception
-from nova.volume import volume_types
-
-
-authorize = extensions.extension_authorizer('compute', 'volumetypes')
-
-
-def make_voltype(elem):
- elem.set('id')
- elem.set('name')
- extra_specs = xmlutil.make_flat_dict('extra_specs', selector='extra_specs')
- elem.append(extra_specs)
-
-
-class VolumeTypeTemplate(xmlutil.TemplateBuilder):
- def construct(self):
- root = xmlutil.TemplateElement('volume_type', selector='volume_type')
- make_voltype(root)
- return xmlutil.MasterTemplate(root, 1)
-
-
-class VolumeTypesTemplate(xmlutil.TemplateBuilder):
- def construct(self):
- root = xmlutil.TemplateElement('volume_types')
- elem = xmlutil.SubTemplateElement(root, 'volume_type',
- selector='volume_types')
- make_voltype(elem)
- return xmlutil.MasterTemplate(root, 1)
-
-
-class VolumeTypesController(wsgi.Controller):
- """ The volume types API controller for the OpenStack API """
-
- @wsgi.serializers(xml=VolumeTypesTemplate)
- def index(self, req):
- """ Returns the list of volume types """
- context = req.environ['nova.context']
- authorize(context)
- return {'volume_types': volume_types.get_all_types(context).values()}
-
- @wsgi.serializers(xml=VolumeTypeTemplate)
- def create(self, req, body):
- """Creates a new volume type."""
- context = req.environ['nova.context']
- authorize(context)
-
- if not self.is_valid_body(body, 'volume_type'):
- raise exc.HTTPUnprocessableEntity()
-
- vol_type = body['volume_type']
- name = vol_type.get('name', None)
- specs = vol_type.get('extra_specs', {})
-
- if name is None or name == "":
- raise exc.HTTPUnprocessableEntity()
-
- try:
- volume_types.create(context, name, specs)
- vol_type = volume_types.get_volume_type_by_name(context, name)
- except exception.NotFound:
- raise exc.HTTPNotFound()
-
- return {'volume_type': vol_type}
-
- @wsgi.serializers(xml=VolumeTypeTemplate)
- def show(self, req, id):
- """ Return a single volume type item """
- context = req.environ['nova.context']
- authorize(context)
-
- try:
- vol_type = volume_types.get_volume_type(context, id)
- except exception.NotFound:
- raise exc.HTTPNotFound()
-
- return {'volume_type': vol_type}
-
- def delete(self, req, id):
- """ Deletes an existing volume type """
- context = req.environ['nova.context']
- authorize(context)
-
- try:
- vol_type = volume_types.get_volume_type(context, id)
- volume_types.destroy(context, vol_type['name'])
- except exception.NotFound:
- raise exc.HTTPNotFound()
-
-
-class VolumeTypeExtraSpecsTemplate(xmlutil.TemplateBuilder):
- def construct(self):
- root = xmlutil.make_flat_dict('extra_specs', selector='extra_specs')
- return xmlutil.MasterTemplate(root, 1)
-
-
-class VolumeTypeExtraSpecTemplate(xmlutil.TemplateBuilder):
- def construct(self):
- tagname = xmlutil.Selector('key')
-
- def extraspec_sel(obj, do_raise=False):
- # Have to extract the key and value for later use...
- key, value = obj.items()[0]
- return dict(key=key, value=value)
-
- root = xmlutil.TemplateElement(tagname, selector=extraspec_sel)
- root.text = 'value'
- return xmlutil.MasterTemplate(root, 1)
-
-
-class VolumeTypeExtraSpecsController(object):
- """ The volume type extra specs API controller for the OpenStack API """
-
- def _get_extra_specs(self, context, vol_type_id):
- extra_specs = db.volume_type_extra_specs_get(context, vol_type_id)
- specs_dict = {}
- for key, value in extra_specs.iteritems():
- specs_dict[key] = value
- return dict(extra_specs=specs_dict)
-
- def _check_body(self, body):
- if body is None or body == "":
- expl = _('No Request Body')
- raise exc.HTTPBadRequest(explanation=expl)
-
- @wsgi.serializers(xml=VolumeTypeExtraSpecsTemplate)
- def index(self, req, vol_type_id):
- """ Returns the list of extra specs for a given volume type """
- context = req.environ['nova.context']
- authorize(context)
- return self._get_extra_specs(context, vol_type_id)
-
- @wsgi.serializers(xml=VolumeTypeExtraSpecsTemplate)
- def create(self, req, vol_type_id, body):
- context = req.environ['nova.context']
- authorize(context)
- self._check_body(body)
- specs = body.get('extra_specs')
- db.volume_type_extra_specs_update_or_create(context,
- vol_type_id,
- specs)
- return body
-
- @wsgi.serializers(xml=VolumeTypeExtraSpecTemplate)
- def update(self, req, vol_type_id, id, body):
- context = req.environ['nova.context']
- authorize(context)
- self._check_body(body)
- if not id in body:
- expl = _('Request body and URI mismatch')
- raise exc.HTTPBadRequest(explanation=expl)
- if len(body) > 1:
- expl = _('Request body contains too many items')
- raise exc.HTTPBadRequest(explanation=expl)
- db.volume_type_extra_specs_update_or_create(context,
- vol_type_id,
- body)
- return body
-
- @wsgi.serializers(xml=VolumeTypeExtraSpecTemplate)
- def show(self, req, vol_type_id, id):
- """ Return a single extra spec item """
- context = req.environ['nova.context']
- authorize(context)
- specs = self._get_extra_specs(context, vol_type_id)
- if id in specs['extra_specs']:
- return {id: specs['extra_specs'][id]}
- else:
- raise exc.HTTPNotFound()
-
- def delete(self, req, vol_type_id, id):
- """ Deletes an existing extra spec """
- context = req.environ['nova.context']
- authorize(context)
- db.volume_type_extra_specs_delete(context, vol_type_id, id)
-
-
-class Volumetypes(extensions.ExtensionDescriptor):
- """Volume types support"""
-
- name = "VolumeTypes"
- alias = "os-volume-types"
- namespace = "http://docs.openstack.org/compute/ext/volume_types/api/v1.1"
- updated = "2011-08-24T00:00:00+00:00"
-
- def get_resources(self):
- resources = []
-
- res = extensions.ResourceExtension(
- 'os-volume-types',
- VolumeTypesController())
- resources.append(res)
-
- res = extensions.ResourceExtension('extra_specs',
- VolumeTypeExtraSpecsController(),
- parent=dict(
- member_name='vol_type',
- collection_name='os-volume-types'))
- resources.append(res)
-
- return resources
diff --git a/nova/api/openstack/volume/__init__.py b/nova/api/openstack/volume/__init__.py
deleted file mode 100644
index 3aca5bebf1..0000000000
--- a/nova/api/openstack/volume/__init__.py
+++ /dev/null
@@ -1,68 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-WSGI middleware for OpenStack Volume API.
-"""
-
-import nova.api.openstack
-from nova.api.openstack.volume import extensions
-from nova.api.openstack.volume import snapshots
-from nova.api.openstack.volume import types
-from nova.api.openstack.volume import versions
-from nova.api.openstack.volume import volumes
-from nova.openstack.common import log as logging
-
-
-LOG = logging.getLogger(__name__)
-
-
-class APIRouter(nova.api.openstack.APIRouter):
- """
- Routes requests on the OpenStack API to the appropriate controller
- and method.
- """
- ExtensionManager = extensions.ExtensionManager
-
- def _setup_routes(self, mapper, ext_mgr, init_only):
- if init_only is None or 'versions' in init_only:
- self.resources['versions'] = versions.create_resource()
- mapper.connect("versions", "/",
- controller=self.resources['versions'],
- action='show')
-
- mapper.redirect("", "/")
-
- if init_only is None or 'volumes' in init_only:
- self.resources['volumes'] = volumes.create_resource(ext_mgr)
- mapper.resource("volume", "volumes",
- controller=self.resources['volumes'],
- collection={'detail': 'GET'},
- member={'action': 'POST'})
-
- if init_only is None or 'types' in init_only:
- self.resources['types'] = types.create_resource()
- mapper.resource("type", "types",
- controller=self.resources['types'])
-
- if init_only is None or 'snapshots' in init_only:
- self.resources['snapshots'] = snapshots.create_resource(ext_mgr)
- mapper.resource("snapshot", "snapshots",
- controller=self.resources['snapshots'],
- collection={'detail': 'GET'},
- member={'action': 'POST'})
diff --git a/nova/api/openstack/volume/contrib/__init__.py b/nova/api/openstack/volume/contrib/__init__.py
deleted file mode 100644
index 8e01d88d0b..0000000000
--- a/nova/api/openstack/volume/contrib/__init__.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2011 Justin Santa Barbara
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""Contrib contains extensions that are shipped with nova.
-
-It can't be called 'extensions' because that causes namespacing problems.
-
-"""
-
-from nova.api.openstack import extensions
-from nova import flags
-from nova.openstack.common import log as logging
-
-
-FLAGS = flags.FLAGS
-LOG = logging.getLogger(__name__)
-
-
-def standard_extensions(ext_mgr):
- extensions.load_standard_extensions(ext_mgr, LOG, __path__, __package__)
-
-
-def select_extensions(ext_mgr):
- extensions.load_standard_extensions(ext_mgr, LOG, __path__, __package__,
- FLAGS.osapi_volume_ext_list)
diff --git a/nova/api/openstack/volume/contrib/admin_actions.py b/nova/api/openstack/volume/contrib/admin_actions.py
deleted file mode 100644
index 7e93283f75..0000000000
--- a/nova/api/openstack/volume/contrib/admin_actions.py
+++ /dev/null
@@ -1,129 +0,0 @@
-# Copyright 2012 OpenStack, LLC.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import webob
-from webob import exc
-
-from nova.api.openstack import extensions
-from nova.api.openstack import wsgi
-from nova import db
-from nova import exception
-from nova.openstack.common import log as logging
-from nova import volume
-
-
-LOG = logging.getLogger(__name__)
-
-
-class AdminController(wsgi.Controller):
- """Abstract base class for AdminControllers."""
-
- collection = None # api collection to extend
-
- # FIXME(clayg): this will be hard to keep up-to-date
- # Concrete classes can expand or over-ride
- valid_status = set([
- 'creating',
- 'available',
- 'deleting',
- 'error',
- 'error_deleting',
- ])
-
- def __init__(self, *args, **kwargs):
- super(AdminController, self).__init__(*args, **kwargs)
- # singular name of the resource
- self.resource_name = self.collection.rstrip('s')
- self.volume_api = volume.API()
-
- def _update(self, *args, **kwargs):
- raise NotImplementedError()
-
- def _validate_status(self, status):
- if status not in self.valid_status:
- raise exc.HTTPBadRequest("Must specify a valid status")
-
- def authorize(self, context, action_name):
- # e.g. "snapshot_admin_actions:reset_status"
- action = '%s_admin_actions:%s' % (self.resource_name, action_name)
- extensions.extension_authorizer('volume', action)(context)
-
- @wsgi.action('os-reset_status')
- def _reset_status(self, req, id, body):
- """Reset status on the resource."""
- context = req.environ['nova.context']
- self.authorize(context, 'reset_status')
- try:
- new_status = body['os-reset_status']['status']
- except (TypeError, KeyError):
- raise exc.HTTPBadRequest("Must specify 'status'")
- self._validate_status(new_status)
- msg = _("Updating status of %(resource)s '%(id)s' to '%(status)s'")
- LOG.debug(msg, {'resource': self.resource_name, 'id': id,
- 'status': new_status})
- try:
- self._update(context, id, {'status': new_status})
- except exception.NotFound, e:
- raise exc.HTTPNotFound(e)
- return webob.Response(status_int=202)
-
-
-class VolumeAdminController(AdminController):
- """AdminController for Volumes."""
-
- collection = 'volumes'
- valid_status = AdminController.valid_status.union(
- set(['attaching', 'in-use', 'detaching']))
-
- def _update(self, *args, **kwargs):
- db.volume_update(*args, **kwargs)
-
- @wsgi.action('os-force_delete')
- def _force_delete(self, req, id, body):
- """Delete a resource, bypassing the check that it must be available."""
- context = req.environ['nova.context']
- self.authorize(context, 'force_delete')
- try:
- volume = self.volume_api.get(context, id)
- except exception.NotFound:
- raise exc.HTTPNotFound()
- self.volume_api.delete(context, volume, force=True)
- return webob.Response(status_int=202)
-
-
-class SnapshotAdminController(AdminController):
- """AdminController for Snapshots."""
-
- collection = 'snapshots'
-
- def _update(self, *args, **kwargs):
- db.snapshot_update(*args, **kwargs)
-
-
-class Admin_actions(extensions.ExtensionDescriptor):
- """Enable admin actions."""
-
- name = "AdminActions"
- alias = "os-admin-actions"
- namespace = "http://docs.openstack.org/volume/ext/admin-actions/api/v1.1"
- updated = "2012-08-25T00:00:00+00:00"
-
- def get_controller_extensions(self):
- exts = []
- for class_ in (VolumeAdminController, SnapshotAdminController):
- controller = class_()
- extension = extensions.ControllerExtension(
- self, class_.collection, controller)
- exts.append(extension)
- return exts
diff --git a/nova/api/openstack/volume/contrib/image_create.py b/nova/api/openstack/volume/contrib/image_create.py
deleted file mode 100644
index 8406897998..0000000000
--- a/nova/api/openstack/volume/contrib/image_create.py
+++ /dev/null
@@ -1,31 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright (c) 2012 NTT.
-# Copyright (c) 2012 OpenStack, LLC.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""The Create Volume from Image extension."""
-
-
-from nova.api.openstack import extensions
-
-
-class Image_create(extensions.ExtensionDescriptor):
- """Allow creating a volume from an image in the Create Volume v1 API"""
-
- name = "CreateVolumeExtension"
- alias = "os-image-create"
- namespace = "http://docs.openstack.org/volume/ext/image-create/api/v1"
- updated = "2012-08-13T00:00:00+00:00"
diff --git a/nova/api/openstack/volume/contrib/types_extra_specs.py b/nova/api/openstack/volume/contrib/types_extra_specs.py
deleted file mode 100644
index 2e993ad8a1..0000000000
--- a/nova/api/openstack/volume/contrib/types_extra_specs.py
+++ /dev/null
@@ -1,149 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright (c) 2011 Zadara Storage Inc.
-# Copyright (c) 2011 OpenStack LLC.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""The volume types extra specs extension"""
-
-import webob
-
-from nova.api.openstack import extensions
-from nova.api.openstack import wsgi
-from nova.api.openstack import xmlutil
-from nova import db
-from nova import exception
-from nova.volume import volume_types
-
-
-authorize = extensions.extension_authorizer('volume', 'types_extra_specs')
-
-
-class VolumeTypeExtraSpecsTemplate(xmlutil.TemplateBuilder):
- def construct(self):
- root = xmlutil.make_flat_dict('extra_specs', selector='extra_specs')
- return xmlutil.MasterTemplate(root, 1)
-
-
-class VolumeTypeExtraSpecTemplate(xmlutil.TemplateBuilder):
- def construct(self):
- tagname = xmlutil.Selector('key')
-
- def extraspec_sel(obj, do_raise=False):
- # Have to extract the key and value for later use...
- key, value = obj.items()[0]
- return dict(key=key, value=value)
-
- root = xmlutil.TemplateElement(tagname, selector=extraspec_sel)
- root.text = 'value'
- return xmlutil.MasterTemplate(root, 1)
-
-
-class VolumeTypeExtraSpecsController(wsgi.Controller):
- """ The volume type extra specs API controller for the OpenStack API """
-
- def _get_extra_specs(self, context, type_id):
- extra_specs = db.volume_type_extra_specs_get(context, type_id)
- specs_dict = {}
- for key, value in extra_specs.iteritems():
- specs_dict[key] = value
- return dict(extra_specs=specs_dict)
-
- def _check_type(self, context, type_id):
- try:
- volume_types.get_volume_type(context, type_id)
- except exception.NotFound as ex:
- raise webob.exc.HTTPNotFound(explanation=unicode(ex))
-
- @wsgi.serializers(xml=VolumeTypeExtraSpecsTemplate)
- def index(self, req, type_id):
- """ Returns the list of extra specs for a given volume type """
- context = req.environ['nova.context']
- authorize(context)
- self._check_type(context, type_id)
- return self._get_extra_specs(context, type_id)
-
- @wsgi.serializers(xml=VolumeTypeExtraSpecsTemplate)
- def create(self, req, type_id, body=None):
- context = req.environ['nova.context']
- authorize(context)
-
- if not self.is_valid_body(body, 'extra_specs'):
- raise webob.exc.HTTPUnprocessableEntity()
-
- self._check_type(context, type_id)
-
- specs = body['extra_specs']
- db.volume_type_extra_specs_update_or_create(context,
- type_id,
- specs)
- return body
-
- @wsgi.serializers(xml=VolumeTypeExtraSpecTemplate)
- def update(self, req, type_id, id, body=None):
- context = req.environ['nova.context']
- authorize(context)
- if not body:
- raise webob.exc.HTTPUnprocessableEntity()
- self._check_type(context, type_id)
- if not id in body:
- expl = _('Request body and URI mismatch')
- raise webob.exc.HTTPBadRequest(explanation=expl)
- if len(body) > 1:
- expl = _('Request body contains too many items')
- raise webob.exc.HTTPBadRequest(explanation=expl)
- db.volume_type_extra_specs_update_or_create(context,
- type_id,
- body)
- return body
-
- @wsgi.serializers(xml=VolumeTypeExtraSpecTemplate)
- def show(self, req, type_id, id):
- """Return a single extra spec item."""
- context = req.environ['nova.context']
- authorize(context)
- self._check_type(context, type_id)
- specs = self._get_extra_specs(context, type_id)
- if id in specs['extra_specs']:
- return {id: specs['extra_specs'][id]}
- else:
- raise webob.exc.HTTPNotFound()
-
- def delete(self, req, type_id, id):
- """ Deletes an existing extra spec """
- context = req.environ['nova.context']
- self._check_type(context, type_id)
- authorize(context)
- db.volume_type_extra_specs_delete(context, type_id, id)
- return webob.Response(status_int=202)
-
-
-class Types_extra_specs(extensions.ExtensionDescriptor):
- """Types extra specs support"""
-
- name = "TypesExtraSpecs"
- alias = "os-types-extra-specs"
- namespace = "http://docs.openstack.org/volume/ext/types-extra-specs/api/v1"
- updated = "2011-08-24T00:00:00+00:00"
-
- def get_resources(self):
- resources = []
- res = extensions.ResourceExtension('extra_specs',
- VolumeTypeExtraSpecsController(),
- parent=dict(
- member_name='type',
- collection_name='types'))
- resources.append(res)
-
- return resources
diff --git a/nova/api/openstack/volume/contrib/types_manage.py b/nova/api/openstack/volume/contrib/types_manage.py
deleted file mode 100644
index e68093ce8f..0000000000
--- a/nova/api/openstack/volume/contrib/types_manage.py
+++ /dev/null
@@ -1,91 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright (c) 2011 Zadara Storage Inc.
-# Copyright (c) 2011 OpenStack LLC.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""The volume types manage extension."""
-
-import webob
-
-from nova.api.openstack import extensions
-from nova.api.openstack.volume import types
-from nova.api.openstack.volume.views import types as views_types
-from nova.api.openstack import wsgi
-from nova import exception
-from nova.volume import volume_types
-
-
-authorize = extensions.extension_authorizer('volume', 'types_manage')
-
-
-class VolumeTypesManageController(wsgi.Controller):
- """ The volume types API controller for the OpenStack API """
-
- _view_builder_class = views_types.ViewBuilder
-
- @wsgi.action("create")
- @wsgi.serializers(xml=types.VolumeTypeTemplate)
- def _create(self, req, body):
- """Creates a new volume type."""
- context = req.environ['nova.context']
- authorize(context)
-
- if not self.is_valid_body(body, 'volume_type'):
- raise webob.exc.HTTPUnprocessableEntity()
-
- vol_type = body['volume_type']
- name = vol_type.get('name', None)
- specs = vol_type.get('extra_specs', {})
-
- if name is None or name == "":
- raise webob.exc.HTTPUnprocessableEntity()
-
- try:
- volume_types.create(context, name, specs)
- vol_type = volume_types.get_volume_type_by_name(context, name)
- except exception.VolumeTypeExists as err:
- raise webob.exc.HTTPConflict(explanation=str(err))
- except exception.NotFound:
- raise webob.exc.HTTPNotFound()
-
- return self._view_builder.show(req, vol_type)
-
- @wsgi.action("delete")
- def _delete(self, req, id):
- """ Deletes an existing volume type """
- context = req.environ['nova.context']
- authorize(context)
-
- try:
- vol_type = volume_types.get_volume_type(context, id)
- volume_types.destroy(context, vol_type['name'])
- except exception.NotFound:
- raise webob.exc.HTTPNotFound()
-
- return webob.Response(status_int=202)
-
-
-class Types_manage(extensions.ExtensionDescriptor):
- """Types manage support"""
-
- name = "TypesManage"
- alias = "os-types-manage"
- namespace = "http://docs.openstack.org/volume/ext/types-manage/api/v1"
- updated = "2011-08-24T00:00:00+00:00"
-
- def get_controller_extensions(self):
- controller = VolumeTypesManageController()
- extension = extensions.ControllerExtension(self, 'types', controller)
- return [extension]
diff --git a/nova/api/openstack/volume/contrib/volume_actions.py b/nova/api/openstack/volume/contrib/volume_actions.py
deleted file mode 100644
index 8a453bfb11..0000000000
--- a/nova/api/openstack/volume/contrib/volume_actions.py
+++ /dev/null
@@ -1,131 +0,0 @@
-# Copyright 2012 OpenStack, LLC.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import webob
-from xml.dom import minidom
-
-from nova.api.openstack import extensions
-from nova.api.openstack import wsgi
-from nova.api.openstack import xmlutil
-from nova import exception
-from nova import flags
-from nova.openstack.common import log as logging
-from nova.openstack.common.rpc import common as rpc_common
-from nova import volume
-
-
-FLAGS = flags.FLAGS
-LOG = logging.getLogger(__name__)
-
-
-def authorize(context, action_name):
- action = 'volume_actions:%s' % action_name
- extensions.extension_authorizer('volume', action)(context)
-
-
-class VolumeToImageSerializer(xmlutil.TemplateBuilder):
- def construct(self):
- root = xmlutil.TemplateElement('os-volume_upload_image',
- selector='os-volume_upload_image')
- root.set('id')
- root.set('updated_at')
- root.set('status')
- root.set('display_description')
- root.set('size')
- root.set('volume_type')
- root.set('image_id')
- root.set('container_format')
- root.set('disk_format')
- root.set('image_name')
- return xmlutil.MasterTemplate(root, 1)
-
-
-class VolumeToImageDeserializer(wsgi.XMLDeserializer):
- """Deserializer to handle xml-formatted requests"""
- def default(self, string):
- dom = minidom.parseString(string)
- action_node = dom.childNodes[0]
- action_name = action_node.tagName
-
- action_data = {}
- attributes = ["force", "image_name", "container_format", "disk_format"]
- for attr in attributes:
- if action_node.hasAttribute(attr):
- action_data[attr] = action_node.getAttribute(attr)
- if 'force' in action_data and action_data['force'] == 'True':
- action_data['force'] = True
- return {'body': {action_name: action_data}}
-
-
-class VolumeActionsController(wsgi.Controller):
- def __init__(self, *args, **kwargs):
- super(VolumeActionsController, self).__init__(*args, **kwargs)
- self.volume_api = volume.API()
-
- @wsgi.response(202)
- @wsgi.action('os-volume_upload_image')
- @wsgi.serializers(xml=VolumeToImageSerializer)
- @wsgi.deserializers(xml=VolumeToImageDeserializer)
- def _volume_upload_image(self, req, id, body):
- """Uploads the specified volume to image service."""
- context = req.environ['nova.context']
- try:
- params = body['os-volume_upload_image']
- except (TypeError, KeyError):
- msg = _("Invalid request body")
- raise webob.exc.HTTPBadRequest(explanation=msg)
-
- if not params.get("image_name"):
- msg = _("No image_name was specified in request.")
- raise webob.exc.HTTPBadRequest(explanation=msg)
-
- force = params.get('force', False)
- try:
- volume = self.volume_api.get(context, id)
- except exception.VolumeNotFound, error:
- raise webob.exc.HTTPNotFound(explanation=unicode(error))
- authorize(context, "upload_image")
- image_metadata = {"container_format": params.get("container_format",
- "bare"),
- "disk_format": params.get("disk_format", "raw"),
- "name": params["image_name"]}
- try:
- response = self.volume_api.copy_volume_to_image(context,
- volume,
- image_metadata,
- force)
- except exception.InvalidVolume, error:
- raise webob.exc.HTTPBadRequest(explanation=unicode(error))
- except ValueError, error:
- raise webob.exc.HTTPBadRequest(explanation=unicode(error))
- except rpc_common.RemoteError as error:
- msg = "%(err_type)s: %(err_msg)s" % {'err_type': error.exc_type,
- 'err_msg': error.value}
- raise webob.exc.HTTPBadRequest(explanation=msg)
- return {'os-volume_upload_image': response}
-
-
-class Volume_actions(extensions.ExtensionDescriptor):
- """Enable volume actions
- """
-
- name = "VolumeActions"
- alias = "os-volume-actions"
- namespace = "http://docs.openstack.org/volume/ext/volume-actions/api/v1.1"
- updated = "2012-05-31T00:00:00+00:00"
-
- def get_controller_extensions(self):
- controller = VolumeActionsController()
- extension = extensions.ControllerExtension(self, 'volumes', controller)
- return [extension]
diff --git a/nova/api/openstack/volume/extensions.py b/nova/api/openstack/volume/extensions.py
deleted file mode 100644
index b21b9d1454..0000000000
--- a/nova/api/openstack/volume/extensions.py
+++ /dev/null
@@ -1,34 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2011 OpenStack LLC.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from nova.api.openstack import extensions as base_extensions
-from nova import flags
-from nova.openstack.common import log as logging
-
-
-LOG = logging.getLogger(__name__)
-FLAGS = flags.FLAGS
-
-
-class ExtensionManager(base_extensions.ExtensionManager):
- def __init__(self):
- LOG.audit(_('Initializing extension manager.'))
- self.cls_list = FLAGS.osapi_volume_extension
- self.extensions = {}
- self.plugins = []
- self.sorted_ext_list = []
- self._load_extensions()
diff --git a/nova/api/openstack/volume/snapshots.py b/nova/api/openstack/volume/snapshots.py
deleted file mode 100644
index 74c5f75e6a..0000000000
--- a/nova/api/openstack/volume/snapshots.py
+++ /dev/null
@@ -1,185 +0,0 @@
-# Copyright 2011 Justin Santa Barbara
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""The volumes snapshots api."""
-
-import webob
-from webob import exc
-
-from nova.api.openstack import common
-from nova.api.openstack import wsgi
-from nova.api.openstack import xmlutil
-from nova import exception
-from nova import flags
-from nova.openstack.common import log as logging
-from nova import utils
-from nova import volume
-
-
-LOG = logging.getLogger(__name__)
-
-
-FLAGS = flags.FLAGS
-
-
-def _translate_snapshot_detail_view(context, vol):
- """Maps keys for snapshots details view."""
-
- d = _translate_snapshot_summary_view(context, vol)
-
- # NOTE(gagupta): No additional data / lookups at the moment
- return d
-
-
-def _translate_snapshot_summary_view(context, vol):
- """Maps keys for snapshots summary view."""
- d = {}
-
- # TODO(bcwaldon): remove str cast once we use uuids
- d['id'] = str(vol['id'])
- d['volume_id'] = str(vol['volume_id'])
- d['status'] = vol['status']
- # NOTE(gagupta): We map volume_size as the snapshot size
- d['size'] = vol['volume_size']
- d['created_at'] = vol['created_at']
- d['display_name'] = vol['display_name']
- d['display_description'] = vol['display_description']
- return d
-
-
-def make_snapshot(elem):
- elem.set('id')
- elem.set('status')
- elem.set('size')
- elem.set('created_at')
- elem.set('display_name')
- elem.set('display_description')
- elem.set('volume_id')
-
-
-class SnapshotTemplate(xmlutil.TemplateBuilder):
- def construct(self):
- root = xmlutil.TemplateElement('snapshot', selector='snapshot')
- make_snapshot(root)
- return xmlutil.MasterTemplate(root, 1)
-
-
-class SnapshotsTemplate(xmlutil.TemplateBuilder):
- def construct(self):
- root = xmlutil.TemplateElement('snapshots')
- elem = xmlutil.SubTemplateElement(root, 'snapshot',
- selector='snapshots')
- make_snapshot(elem)
- return xmlutil.MasterTemplate(root, 1)
-
-
-class SnapshotsController(wsgi.Controller):
- """The Volumes API controller for the OpenStack API."""
-
- def __init__(self, ext_mgr=None):
- self.volume_api = volume.API()
- self.ext_mgr = ext_mgr
- super(SnapshotsController, self).__init__()
-
- @wsgi.serializers(xml=SnapshotTemplate)
- def show(self, req, id):
- """Return data about the given snapshot."""
- context = req.environ['nova.context']
-
- try:
- vol = self.volume_api.get_snapshot(context, id)
- except exception.NotFound:
- raise exc.HTTPNotFound()
-
- return {'snapshot': _translate_snapshot_detail_view(context, vol)}
-
- def delete(self, req, id):
- """Delete a snapshot."""
- context = req.environ['nova.context']
-
- LOG.audit(_("Delete snapshot with id: %s"), id, context=context)
-
- try:
- snapshot = self.volume_api.get_snapshot(context, id)
- self.volume_api.delete_snapshot(context, snapshot)
- except exception.NotFound:
- raise exc.HTTPNotFound()
- return webob.Response(status_int=202)
-
- @wsgi.serializers(xml=SnapshotsTemplate)
- def index(self, req):
- """Returns a summary list of snapshots."""
- return self._items(req, entity_maker=_translate_snapshot_summary_view)
-
- @wsgi.serializers(xml=SnapshotsTemplate)
- def detail(self, req):
- """Returns a detailed list of snapshots."""
- return self._items(req, entity_maker=_translate_snapshot_detail_view)
-
- def _items(self, req, entity_maker):
- """Returns a list of snapshots, transformed through entity_maker."""
- context = req.environ['nova.context']
-
- search_opts = {}
- search_opts.update(req.GET)
-
- snapshots = self.volume_api.get_all_snapshots(context,
- search_opts=search_opts)
- limited_list = common.limited(snapshots, req)
- res = [entity_maker(context, snapshot) for snapshot in limited_list]
- return {'snapshots': res}
-
- @wsgi.serializers(xml=SnapshotTemplate)
- def create(self, req, body):
- """Creates a new snapshot."""
- context = req.environ['nova.context']
-
- if not self.is_valid_body(body, 'snapshot'):
- raise exc.HTTPUnprocessableEntity()
-
- snapshot = body['snapshot']
- volume_id = snapshot['volume_id']
-
- try:
- volume = self.volume_api.get(context, volume_id)
- except exception.VolumeNotFound as err:
- raise exc.HTTPNotFound(explanation=unicode(err))
-
- force = snapshot.get('force', False)
- msg = _("Create snapshot from volume %s")
- LOG.audit(msg, volume_id, context=context)
-
- if not utils.is_valid_boolstr(force):
- msg = _("Invalid value '%s' for force. ") % force
- raise exception.InvalidParameterValue(err=msg)
-
- if utils.bool_from_str(force):
- new_snapshot = self.volume_api.create_snapshot_force(context,
- volume,
- snapshot.get('display_name'),
- snapshot.get('display_description'))
- else:
- new_snapshot = self.volume_api.create_snapshot(context,
- volume,
- snapshot.get('display_name'),
- snapshot.get('display_description'))
-
- retval = _translate_snapshot_detail_view(context, new_snapshot)
-
- return {'snapshot': retval}
-
-
-def create_resource(ext_mgr):
- return wsgi.Resource(SnapshotsController(ext_mgr))
diff --git a/nova/api/openstack/volume/types.py b/nova/api/openstack/volume/types.py
deleted file mode 100644
index a39d2a1f65..0000000000
--- a/nova/api/openstack/volume/types.py
+++ /dev/null
@@ -1,80 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright (c) 2011 Zadara Storage Inc.
-# Copyright (c) 2011 OpenStack LLC.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-""" The volume type & volume types extra specs extension"""
-
-from webob import exc
-
-from nova.api.openstack.volume.views import types as views_types
-from nova.api.openstack import wsgi
-from nova.api.openstack import xmlutil
-from nova import exception
-from nova.volume import volume_types
-
-
-def make_voltype(elem):
- elem.set('id')
- elem.set('name')
- extra_specs = xmlutil.make_flat_dict('extra_specs', selector='extra_specs')
- elem.append(extra_specs)
-
-
-class VolumeTypeTemplate(xmlutil.TemplateBuilder):
- def construct(self):
- root = xmlutil.TemplateElement('volume_type', selector='volume_type')
- make_voltype(root)
- return xmlutil.MasterTemplate(root, 1)
-
-
-class VolumeTypesTemplate(xmlutil.TemplateBuilder):
- def construct(self):
- root = xmlutil.TemplateElement('volume_types')
- elem = xmlutil.SubTemplateElement(root, 'volume_type',
- selector='volume_types')
- make_voltype(elem)
- return xmlutil.MasterTemplate(root, 1)
-
-
-class VolumeTypesController(wsgi.Controller):
- """ The volume types API controller for the OpenStack API """
-
- _view_builder_class = views_types.ViewBuilder
-
- @wsgi.serializers(xml=VolumeTypesTemplate)
- def index(self, req):
- """ Returns the list of volume types """
- context = req.environ['nova.context']
- vol_types = volume_types.get_all_types(context).values()
- return self._view_builder.index(req, vol_types)
-
- @wsgi.serializers(xml=VolumeTypeTemplate)
- def show(self, req, id):
- """ Return a single volume type item """
- context = req.environ['nova.context']
-
- try:
- vol_type = volume_types.get_volume_type(context, id)
- except exception.NotFound:
- raise exc.HTTPNotFound()
-
- # TODO(bcwaldon): remove str cast once we use uuids
- vol_type['id'] = str(vol_type['id'])
- return self._view_builder.show(req, vol_type)
-
-
-def create_resource():
- return wsgi.Resource(VolumeTypesController())
diff --git a/nova/api/openstack/volume/versions.py b/nova/api/openstack/volume/versions.py
deleted file mode 100644
index 68d34b1f95..0000000000
--- a/nova/api/openstack/volume/versions.py
+++ /dev/null
@@ -1,83 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2010 OpenStack LLC.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-from nova.api.openstack.compute import versions
-from nova.api.openstack.volume.views import versions as views_versions
-from nova.api.openstack import wsgi
-
-
-VERSIONS = {
- "v1.0": {
- "id": "v1.0",
- "status": "CURRENT",
- "updated": "2012-01-04T11:33:21Z",
- "links": [
- {
- "rel": "describedby",
- "type": "application/pdf",
- "href": "http://jorgew.github.com/block-storage-api/"
- "content/os-block-storage-1.0.pdf",
- },
- {
- "rel": "describedby",
- "type": "application/vnd.sun.wadl+xml",
- #(anthony) FIXME
- "href": "http://docs.rackspacecloud.com/"
- "servers/api/v1.1/application.wadl",
- },
- ],
- "media-types": [
- {
- "base": "application/xml",
- "type": "application/vnd.openstack.volume+xml;version=1",
- },
- {
- "base": "application/json",
- "type": "application/vnd.openstack.volume+json;version=1",
- }
- ],
- }
-}
-
-
-class Versions(versions.Versions):
- @wsgi.serializers(xml=versions.VersionsTemplate,
- atom=versions.VersionsAtomSerializer)
- def index(self, req):
- """Return all versions."""
- builder = views_versions.get_view_builder(req)
- return builder.build_versions(VERSIONS)
-
- @wsgi.serializers(xml=versions.ChoicesTemplate)
- @wsgi.response(300)
- def multi(self, req):
- """Return multiple choices."""
- builder = views_versions.get_view_builder(req)
- return builder.build_choices(VERSIONS, req)
-
-
-class VolumeVersionV1(object):
- @wsgi.serializers(xml=versions.VersionTemplate,
- atom=versions.VersionAtomSerializer)
- def show(self, req):
- builder = views_versions.get_view_builder(req)
- return builder.build_version(VERSIONS['v1.0'])
-
-
-def create_resource():
- return wsgi.Resource(VolumeVersionV1())
diff --git a/nova/api/openstack/volume/views/__init__.py b/nova/api/openstack/volume/views/__init__.py
deleted file mode 100644
index d65c689a83..0000000000
--- a/nova/api/openstack/volume/views/__init__.py
+++ /dev/null
@@ -1,16 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2011 OpenStack LLC.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
diff --git a/nova/api/openstack/volume/views/types.py b/nova/api/openstack/volume/views/types.py
deleted file mode 100644
index 8274a0c6f5..0000000000
--- a/nova/api/openstack/volume/views/types.py
+++ /dev/null
@@ -1,34 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2012 Red Hat, Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from nova.api.openstack import common
-
-
-class ViewBuilder(common.ViewBuilder):
-
- def show(self, request, volume_type, brief=False):
- """Trim away extraneous volume type attributes."""
- trimmed = dict(id=volume_type.get('id'),
- name=volume_type.get('name'),
- extra_specs=volume_type.get('extra_specs'))
- return trimmed if brief else dict(volume_type=trimmed)
-
- def index(self, request, volume_types):
- """Index over trimmed volume types"""
- volume_types_list = [self.show(request, volume_type, True)
- for volume_type in volume_types]
- return dict(volume_types=volume_types_list)
diff --git a/nova/api/openstack/volume/views/versions.py b/nova/api/openstack/volume/views/versions.py
deleted file mode 100644
index 2e659af6a8..0000000000
--- a/nova/api/openstack/volume/views/versions.py
+++ /dev/null
@@ -1,36 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2010-2011 OpenStack LLC.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import os
-
-from nova.api.openstack.compute.views import versions as compute_views
-
-
-def get_view_builder(req):
- base_url = req.application_url
- return ViewBuilder(base_url)
-
-
-class ViewBuilder(compute_views.ViewBuilder):
- def generate_href(self, path=None):
- """Create an url that refers to a specific version_number."""
- version_number = 'v1'
- if path:
- path = path.strip('/')
- return os.path.join(self.base_url, version_number, path)
- else:
- return os.path.join(self.base_url, version_number) + '/'
diff --git a/nova/api/openstack/volume/volumes.py b/nova/api/openstack/volume/volumes.py
deleted file mode 100644
index e13f040367..0000000000
--- a/nova/api/openstack/volume/volumes.py
+++ /dev/null
@@ -1,364 +0,0 @@
-# Copyright 2011 Justin Santa Barbara
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""The volumes api."""
-
-import webob
-from webob import exc
-from xml.dom import minidom
-
-from nova.api.openstack import common
-from nova.api.openstack import wsgi
-from nova.api.openstack import xmlutil
-from nova import exception
-from nova import flags
-from nova.openstack.common import log as logging
-from nova import utils
-from nova import volume
-from nova.volume import volume_types
-
-
-LOG = logging.getLogger(__name__)
-
-
-FLAGS = flags.FLAGS
-
-
-def _translate_attachment_detail_view(_context, vol):
- """Maps keys for attachment details view."""
-
- d = _translate_attachment_summary_view(_context, vol)
-
- # No additional data / lookups at the moment
-
- return d
-
-
-def _translate_attachment_summary_view(_context, vol):
- """Maps keys for attachment summary view."""
- d = {}
-
- volume_id = vol['id']
-
- # NOTE(justinsb): We use the volume id as the id of the attachment object
- d['id'] = volume_id
-
- d['volume_id'] = volume_id
- d['server_id'] = vol['instance_uuid']
- if vol.get('mountpoint'):
- d['device'] = vol['mountpoint']
-
- return d
-
-
-def _translate_volume_detail_view(context, vol, image_id=None):
- """Maps keys for volumes details view."""
-
- d = _translate_volume_summary_view(context, vol, image_id)
-
- # No additional data / lookups at the moment
-
- return d
-
-
-def _translate_volume_summary_view(context, vol, image_id=None):
- """Maps keys for volumes summary view."""
- d = {}
-
- d['id'] = vol['id']
- d['status'] = vol['status']
- d['size'] = vol['size']
- d['availability_zone'] = vol['availability_zone']
- d['created_at'] = vol['created_at']
-
- d['attachments'] = []
- if vol['attach_status'] == 'attached':
- attachment = _translate_attachment_detail_view(context, vol)
- d['attachments'].append(attachment)
-
- d['display_name'] = vol['display_name']
- d['display_description'] = vol['display_description']
-
- if vol['volume_type_id'] and vol.get('volume_type'):
- d['volume_type'] = vol['volume_type']['name']
- else:
- # TODO(bcwaldon): remove str cast once we use uuids
- d['volume_type'] = str(vol['volume_type_id'])
-
- d['snapshot_id'] = vol['snapshot_id']
-
- if image_id:
- d['image_id'] = image_id
-
- LOG.audit(_("vol=%s"), vol, context=context)
-
- if vol.get('volume_metadata'):
- metadata = vol.get('volume_metadata')
- d['metadata'] = dict((item['key'], item['value']) for item in metadata)
- else:
- d['metadata'] = {}
-
- return d
-
-
-def make_attachment(elem):
- elem.set('id')
- elem.set('server_id')
- elem.set('volume_id')
- elem.set('device')
-
-
-def make_volume(elem):
- elem.set('id')
- elem.set('status')
- elem.set('size')
- elem.set('availability_zone')
- elem.set('created_at')
- elem.set('display_name')
- elem.set('display_description')
- elem.set('volume_type')
- elem.set('snapshot_id')
-
- attachments = xmlutil.SubTemplateElement(elem, 'attachments')
- attachment = xmlutil.SubTemplateElement(attachments, 'attachment',
- selector='attachments')
- make_attachment(attachment)
-
- # Attach metadata node
- elem.append(common.MetadataTemplate())
-
-
-class VolumeTemplate(xmlutil.TemplateBuilder):
- def construct(self):
- root = xmlutil.TemplateElement('volume', selector='volume')
- make_volume(root)
- return xmlutil.MasterTemplate(root, 1)
-
-
-class VolumesTemplate(xmlutil.TemplateBuilder):
- def construct(self):
- root = xmlutil.TemplateElement('volumes')
- elem = xmlutil.SubTemplateElement(root, 'volume', selector='volumes')
- make_volume(elem)
- return xmlutil.MasterTemplate(root, 1)
-
-
-class CommonDeserializer(wsgi.MetadataXMLDeserializer):
- """Common deserializer to handle xml-formatted volume requests.
-
- Handles standard volume attributes as well as the optional metadata
- attribute
- """
-
- metadata_deserializer = common.MetadataXMLDeserializer()
-
- def _extract_volume(self, node):
- """Marshal the volume attribute of a parsed request."""
- volume = {}
- volume_node = self.find_first_child_named(node, 'volume')
-
- attributes = ['display_name', 'display_description', 'size',
- 'volume_type', 'availability_zone']
- for attr in attributes:
- if volume_node.getAttribute(attr):
- volume[attr] = volume_node.getAttribute(attr)
-
- metadata_node = self.find_first_child_named(volume_node, 'metadata')
- if metadata_node is not None:
- volume['metadata'] = self.extract_metadata(metadata_node)
-
- return volume
-
-
-class CreateDeserializer(CommonDeserializer):
- """Deserializer to handle xml-formatted create volume requests.
-
- Handles standard volume attributes as well as the optional metadata
- attribute
- """
-
- def default(self, string):
- """Deserialize an xml-formatted volume create request."""
- dom = minidom.parseString(string)
- volume = self._extract_volume(dom)
- return {'body': {'volume': volume}}
-
-
-class VolumeController(wsgi.Controller):
- """The Volumes API controller for the OpenStack API."""
-
- def __init__(self, ext_mgr):
- self.volume_api = volume.API()
- self.ext_mgr = ext_mgr
- super(VolumeController, self).__init__()
-
- @wsgi.serializers(xml=VolumeTemplate)
- def show(self, req, id):
- """Return data about the given volume."""
- context = req.environ['nova.context']
-
- try:
- vol = self.volume_api.get(context, id)
- except exception.NotFound:
- raise exc.HTTPNotFound()
-
- return {'volume': _translate_volume_detail_view(context, vol)}
-
- def delete(self, req, id):
- """Delete a volume."""
- context = req.environ['nova.context']
-
- LOG.audit(_("Delete volume with id: %s"), id, context=context)
-
- try:
- volume = self.volume_api.get(context, id)
- self.volume_api.delete(context, volume)
- except exception.NotFound:
- raise exc.HTTPNotFound()
- return webob.Response(status_int=202)
-
- @wsgi.serializers(xml=VolumesTemplate)
- def index(self, req):
- """Returns a summary list of volumes."""
- return self._items(req, entity_maker=_translate_volume_summary_view)
-
- @wsgi.serializers(xml=VolumesTemplate)
- def detail(self, req):
- """Returns a detailed list of volumes."""
- return self._items(req, entity_maker=_translate_volume_detail_view)
-
- def _items(self, req, entity_maker):
- """Returns a list of volumes, transformed through entity_maker."""
-
- search_opts = {}
- search_opts.update(req.GET)
-
- context = req.environ['nova.context']
- remove_invalid_options(context,
- search_opts, self._get_volume_search_options())
-
- volumes = self.volume_api.get_all(context, search_opts=search_opts)
- limited_list = common.limited(volumes, req)
- res = [entity_maker(context, vol) for vol in limited_list]
- return {'volumes': res}
-
- def _image_uuid_from_href(self, image_href):
- # If the image href was generated by nova api, strip image_href
- # down to an id.
- try:
- image_uuid = image_href.split('/').pop()
- except (TypeError, AttributeError):
- msg = _("Invalid imageRef provided.")
- raise exc.HTTPBadRequest(explanation=msg)
-
- if not utils.is_uuid_like(image_uuid):
- msg = _("Invalid imageRef provided.")
- raise exc.HTTPBadRequest(explanation=msg)
-
- return image_uuid
-
- @wsgi.serializers(xml=VolumeTemplate)
- @wsgi.deserializers(xml=CreateDeserializer)
- def create(self, req, body):
- """Creates a new volume."""
- if not self.is_valid_body(body, 'volume'):
- msg = _("Invalid request body. 'volume' not found")
- raise exc.HTTPUnprocessableEntity(explanation=msg)
-
- context = req.environ['nova.context']
- volume = body['volume']
-
- kwargs = {}
-
- req_volume_type = volume.get('volume_type', None)
- if req_volume_type:
- try:
- kwargs['volume_type'] = volume_types.get_volume_type_by_name(
- context, req_volume_type)
- except exception.NotFound:
- raise exc.HTTPNotFound()
-
- kwargs['metadata'] = volume.get('metadata', None)
-
- snapshot_id = volume.get('snapshot_id')
- if snapshot_id is not None:
- kwargs['snapshot'] = self.volume_api.get_snapshot(context,
- snapshot_id)
- else:
- kwargs['snapshot'] = None
-
- size = volume.get('size', None)
- if size is None and kwargs['snapshot'] is not None:
- size = kwargs['snapshot']['volume_size']
-
- if size is None:
- msg = _("Invalid request body. 'size' not found")
- raise exc.HTTPUnprocessableEntity(explanation=msg)
-
- LOG.audit(_("Create volume of %s GB"), size, context=context)
-
- image_href = None
- image_uuid = None
- if self.ext_mgr.is_loaded('os-image-create'):
- image_href = volume.get('imageRef')
- if snapshot_id and image_href:
- msg = _("Snapshot and image cannot be specified together.")
- raise exc.HTTPBadRequest(explanation=msg)
- if image_href:
- image_uuid = self._image_uuid_from_href(image_href)
- kwargs['image_id'] = image_uuid
-
- kwargs['availability_zone'] = volume.get('availability_zone', None)
-
- new_volume = self.volume_api.create(context,
- size,
- volume.get('display_name'),
- volume.get('display_description'),
- **kwargs)
-
- # TODO(vish): Instance should be None at db layer instead of
- # trying to lazy load, but for now we turn it into
- # a dict to avoid an error.
- retval = _translate_volume_detail_view(context, dict(new_volume),
- image_uuid)
-
- result = {'volume': retval}
-
- location = '%s/%s' % (req.url, new_volume['id'])
-
- return wsgi.ResponseObject(result, headers=dict(location=location))
-
- def _get_volume_search_options(self):
- """Return volume search options allowed by non-admin."""
- return ('name', 'status')
-
-
-def create_resource(ext_mgr):
- return wsgi.Resource(VolumeController(ext_mgr))
-
-
-def remove_invalid_options(context, search_options, allowed_search_options):
- """Remove search options that are not valid for non-admin API/context."""
- if context.is_admin:
- # Allow all options
- return
- # Otherwise, strip out all unknown options
- unknown_options = [opt for opt in search_options
- if opt not in allowed_search_options]
- bad_options = ", ".join(unknown_options)
- log_msg = _("Removing options '%(bad_options)s' from query") % locals()
- LOG.debug(log_msg)
- for opt in unknown_options:
- search_options.pop(opt, None)
diff --git a/nova/flags.py b/nova/flags.py
index 3927e8f889..03e607c027 100644
--- a/nova/flags.py
+++ b/nova/flags.py
@@ -159,9 +159,6 @@ global_opts = [
cfg.StrOpt('scheduler_topic',
default='scheduler',
help='the topic scheduler nodes listen on'),
- cfg.StrOpt('volume_topic',
- default='volume',
- help='the topic volume nodes listen on'),
cfg.StrOpt('network_topic',
default='network',
help='the topic network nodes listen on'),
@@ -169,7 +166,7 @@ global_opts = [
default=True,
help='whether to rate limit the api'),
cfg.ListOpt('enabled_apis',
- default=['ec2', 'osapi_compute', 'osapi_volume', 'metadata'],
+ default=['ec2', 'osapi_compute', 'metadata'],
help='a list of APIs to enable by default'),
cfg.StrOpt('ec2_host',
default='$my_ip',
@@ -197,16 +194,6 @@ global_opts = [
'nova.api.openstack.compute.contrib.standard_extensions'
],
help='osapi compute extension to load'),
- cfg.ListOpt('osapi_volume_ext_list',
- default=[],
- help='Specify list of extensions to load when using osapi_'
- 'volume_extension option with nova.api.openstack.'
- 'volume.contrib.select_extensions'),
- cfg.MultiStrOpt('osapi_volume_extension',
- default=[
- 'nova.api.openstack.volume.contrib.standard_extensions'
- ],
- help='osapi volume extension to load'),
cfg.StrOpt('osapi_path',
default='/v1.1/',
help='the path prefix used to call the openstack api server'),
@@ -281,9 +268,6 @@ global_opts = [
cfg.StrOpt('network_manager',
default='nova.network.manager.VlanManager',
help='full class name for the Manager for network'),
- cfg.StrOpt('volume_manager',
- default='nova.volume.manager.VolumeManager',
- help='full class name for the Manager for volume'),
cfg.StrOpt('scheduler_manager',
default='nova.scheduler.manager.SchedulerManager',
help='full class name for the Manager for scheduler'),
@@ -382,7 +366,7 @@ global_opts = [
default='nova.network.api.API',
help='The full class name of the network API class to use'),
cfg.StrOpt('volume_api_class',
- default='nova.volume.api.API',
+ default='nova.volume.cinder.API',
help='The full class name of the volume API class to use'),
cfg.StrOpt('security_group_handler',
default='nova.network.sg.NullSecurityGroupHandler',
diff --git a/nova/network/manager.py b/nova/network/manager.py
index a0a74bcfed..268c582381 100644
--- a/nova/network/manager.py
+++ b/nova/network/manager.py
@@ -65,11 +65,9 @@ from nova.network import rpcapi as network_rpcapi
from nova.openstack.common import cfg
from nova.openstack.common import excutils
from nova.openstack.common import importutils
-from nova.openstack.common import jsonutils
from nova.openstack.common import lockutils
from nova.openstack.common import log as logging
from nova.openstack.common.notifier import api as notifier
-from nova.openstack.common import rpc
from nova.openstack.common import timeutils
import nova.policy
from nova import quota
diff --git a/nova/scheduler/driver.py b/nova/scheduler/driver.py
index 90e1a004b4..cba1ed9359 100644
--- a/nova/scheduler/driver.py
+++ b/nova/scheduler/driver.py
@@ -87,20 +87,6 @@ def handle_schedule_error(context, ex, instance_uuid, request_spec):
'scheduler.run_instance', notifier.ERROR, payload)
-def cast_to_volume_host(context, host, method, **kwargs):
- """Cast request to a volume host queue"""
-
- volume_id = kwargs.get('volume_id', None)
- if volume_id is not None:
- now = timeutils.utcnow()
- db.volume_update(context, volume_id,
- {'host': host, 'scheduled_at': now})
- rpc.cast(context,
- rpc.queue_get_for(context, FLAGS.volume_topic, host),
- {"method": method, "args": kwargs})
- LOG.debug(_("Casted '%(method)s' to volume '%(host)s'") % locals())
-
-
def instance_update_db(context, instance_uuid):
'''Clear the host and set the scheduled_at field of an Instance.
@@ -127,13 +113,11 @@ def cast_to_compute_host(context, host, method, **kwargs):
def cast_to_host(context, topic, host, method, **kwargs):
"""Generic cast to host"""
- topic_mapping = {
- FLAGS.compute_topic: cast_to_compute_host,
- FLAGS.volume_topic: cast_to_volume_host}
+ topic_mapping = {FLAGS.compute_topic: cast_to_compute_host}
func = topic_mapping.get(topic)
if func:
- func(context, host, method, **kwargs)
+ cast_to_compute_host(context, host, method, **kwargs)
else:
rpc.cast(context,
rpc.queue_get_for(context, topic, host),
diff --git a/nova/service.py b/nova/service.py
index caff1672a1..5a15322884 100644
--- a/nova/service.py
+++ b/nova/service.py
@@ -88,15 +88,6 @@ service_opts = [
cfg.IntOpt('metadata_workers',
default=None,
help='Number of workers for metadata service'),
- cfg.StrOpt('osapi_volume_listen',
- default="0.0.0.0",
- help='IP address for OpenStack Volume API to listen'),
- cfg.IntOpt('osapi_volume_listen_port',
- default=8776,
- help='port for os volume api to listen'),
- cfg.IntOpt('osapi_volume_workers',
- default=None,
- help='Number of workers for OpenStack Volume API service'),
]
FLAGS = flags.FLAGS
diff --git a/nova/tests/api/ec2/test_cinder_cloud.py b/nova/tests/api/ec2/test_cinder_cloud.py
index 389f71b7b7..49ee9c1520 100644
--- a/nova/tests/api/ec2/test_cinder_cloud.py
+++ b/nova/tests/api/ec2/test_cinder_cloud.py
@@ -18,7 +18,6 @@
# under the License.
import copy
-import shutil
import tempfile
from nova.api.ec2 import cloud
@@ -85,8 +84,7 @@ class CinderCloudTestCase(test.TestCase):
super(CinderCloudTestCase, self).setUp()
vol_tmpdir = tempfile.mkdtemp()
self.flags(compute_driver='nova.virt.fake.FakeDriver',
- volume_api_class='nova.tests.fake_volume.API',
- volumes_dir=vol_tmpdir)
+ volume_api_class='nova.tests.fake_volume.API')
def fake_show(meh, context, id):
return {'id': id,
@@ -123,7 +121,6 @@ class CinderCloudTestCase(test.TestCase):
self.compute = self.start_service('compute')
self.scheduler = self.start_service('scheduler')
self.network = self.start_service('network')
- self.volume = self.start_service('volume')
self.user_id = 'fake'
self.project_id = 'fake'
@@ -143,10 +140,6 @@ class CinderCloudTestCase(test.TestCase):
'76fa36fc-c930-4bf3-8c8a-ea2a2420deb6')
def tearDown(self):
- try:
- shutil.rmtree(FLAGS.volumes_dir)
- except OSError, e:
- pass
self.volume_api.reset_fake_api(self.context)
super(CinderCloudTestCase, self).tearDown()
fake.FakeImageService_reset()
@@ -312,7 +305,7 @@ class CinderCloudTestCase(test.TestCase):
kwargs = {'name': 'bdmtest-volume',
'description': 'bdm test volume description',
'status': 'available',
- 'host': self.volume.host,
+ 'host': 'fake',
'size': 1,
'attach_status': 'detached',
'volume_id': values['id']}
@@ -642,13 +635,12 @@ class CinderCloudTestCase(test.TestCase):
kwargs = {'name': 'test-volume',
'description': 'test volume description',
'status': 'available',
- 'host': self.volume.host,
+ 'host': 'fake',
'size': 1,
'attach_status': 'detached'}
if volume_id:
kwargs['volume_id'] = volume_id
return self.volume_api.create_with_kwargs(self.context, **kwargs)
- #return db.volume_create(self.context, kwargs)
def _assert_volume_attached(self, vol, instance_uuid, mountpoint):
self.assertEqual(vol['instance_uuid'], instance_uuid)
diff --git a/nova/tests/api/ec2/test_cloud.py b/nova/tests/api/ec2/test_cloud.py
index d86897dc16..1abde10697 100644
--- a/nova/tests/api/ec2/test_cloud.py
+++ b/nova/tests/api/ec2/test_cloud.py
@@ -22,7 +22,6 @@ import copy
import datetime
import functools
import os
-import shutil
import string
import tempfile
@@ -46,7 +45,7 @@ from nova.tests import fake_network
from nova.tests.image import fake
from nova import utils
from nova.virt import fake as fake_virt
-from nova.volume import iscsi
+from nova import volume
LOG = logging.getLogger(__name__)
@@ -97,10 +96,8 @@ class CloudTestCase(test.TestCase):
super(CloudTestCase, self).setUp()
vol_tmpdir = tempfile.mkdtemp()
self.flags(compute_driver='nova.virt.fake.FakeDriver',
+ volume_api_class='nova.tests.fake_volume.API',
volumes_dir=vol_tmpdir)
- self.stubs.Set(iscsi.TgtAdm, '_get_target', self.fake_get_target)
- self.stubs.Set(iscsi.TgtAdm, 'remove_iscsi_target',
- self.fake_remove_iscsi_target)
def fake_show(meh, context, id):
return {'id': id,
@@ -137,13 +134,13 @@ class CloudTestCase(test.TestCase):
self.compute = self.start_service('compute')
self.scheduler = self.start_service('scheduler')
self.network = self.start_service('network')
- self.volume = self.start_service('volume')
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id,
self.project_id,
is_admin=True)
+ self.volume_api = volume.API()
# NOTE(comstud): Make 'cast' behave like a 'call' which will
# ensure that operations complete
@@ -156,10 +153,7 @@ class CloudTestCase(test.TestCase):
'76fa36fc-c930-4bf3-8c8a-ea2a2420deb6')
def tearDown(self):
- try:
- shutil.rmtree(FLAGS.volumes_dir)
- except OSError, e:
- pass
+ self.volume_api.reset_fake_api(self.context)
super(CloudTestCase, self).tearDown()
fake.FakeImageService_reset()
@@ -681,63 +675,6 @@ class CloudTestCase(test.TestCase):
self.cloud.delete_security_group(self.context, 'testgrp')
- def test_describe_volumes(self):
- """Makes sure describe_volumes works and filters results."""
- vol1 = db.volume_create(self.context, {'project_id': self.project_id})
- vol2 = db.volume_create(self.context, {'project_id': self.project_id})
- result = self.cloud.describe_volumes(self.context)
- self.assertEqual(len(result['volumeSet']), 2)
- volume_id = ec2utils.id_to_ec2_vol_id(vol2['id'])
- result = self.cloud.describe_volumes(self.context,
- volume_id=[volume_id])
- self.assertEqual(len(result['volumeSet']), 1)
- self.assertEqual(
- ec2utils.ec2_vol_id_to_uuid(
- result['volumeSet'][0]['volumeId']),
- vol2['id'])
- db.volume_destroy(self.context, vol1['id'])
- db.volume_destroy(self.context, vol2['id'])
-
- def test_create_volume_in_availability_zone(self):
- """Makes sure create_volume works when we specify an availability
- zone
- """
- availability_zone = 'zone1:host1'
-
- result = self.cloud.create_volume(self.context,
- size=1,
- availability_zone=availability_zone)
- volume_id = result['volumeId']
- availabilityZone = result['availabilityZone']
- self.assertEqual(availabilityZone, availability_zone)
- result = self.cloud.describe_volumes(self.context)
- self.assertEqual(len(result['volumeSet']), 1)
- self.assertEqual(result['volumeSet'][0]['volumeId'], volume_id)
- self.assertEqual(result['volumeSet'][0]['availabilityZone'],
- availabilityZone)
-
- db.volume_destroy(self.context, ec2utils.ec2_vol_id_to_uuid(volume_id))
-
- def test_create_volume_from_snapshot(self):
- """Makes sure create_volume works when we specify a snapshot."""
- vol = db.volume_create(self.context, {'size': 1,
- 'project_id': self.project_id})
- snap = db.snapshot_create(self.context, {'volume_id': vol['id'],
- 'volume_size': vol['size'],
- 'status': "available"})
- snapshot_id = ec2utils.id_to_ec2_snap_id(snap['id'])
-
- result = self.cloud.create_volume(self.context,
- snapshot_id=snapshot_id)
- volume_id = result['volumeId']
- result = self.cloud.describe_volumes(self.context)
- self.assertEqual(len(result['volumeSet']), 2)
- self.assertEqual(result['volumeSet'][1]['volumeId'], volume_id)
-
- db.volume_destroy(self.context, ec2utils.ec2_vol_id_to_uuid(volume_id))
- db.snapshot_destroy(self.context, snap['id'])
- db.volume_destroy(self.context, vol['id'])
-
def test_describe_availability_zones(self):
"""Makes sure describe_availability_zones works and filters results."""
service1 = db.service_create(self.context, {'host': 'host1_zones',
@@ -772,63 +709,10 @@ class CloudTestCase(test.TestCase):
result = self.cloud.describe_availability_zones(admin_ctxt,
zone_name='verbose')
- self.assertEqual(len(result['availabilityZoneInfo']), 15)
+ self.assertEqual(len(result['availabilityZoneInfo']), 13)
db.service_destroy(self.context, service1['id'])
db.service_destroy(self.context, service2['id'])
- def test_describe_snapshots(self):
- """Makes sure describe_snapshots works and filters results."""
- vol = db.volume_create(self.context, {})
- snap1 = db.snapshot_create(self.context,
- {'volume_id': vol['id'], 'project_id': self.project_id})
- snap2 = db.snapshot_create(self.context,
- {'volume_id': vol['id'], 'project_id': self.project_id})
- result = self.cloud.describe_snapshots(self.context)
- self.assertEqual(len(result['snapshotSet']), 2)
- snapshot_id = ec2utils.id_to_ec2_snap_id(snap2['id'])
- result = self.cloud.describe_snapshots(self.context,
- snapshot_id=[snapshot_id])
- self.assertEqual(len(result['snapshotSet']), 1)
- self.assertEqual(
- ec2utils.ec2_snap_id_to_uuid(
- result['snapshotSet'][0]['snapshotId']),
- snap2['id'])
- db.snapshot_destroy(self.context, snap1['id'])
- db.snapshot_destroy(self.context, snap2['id'])
- db.volume_destroy(self.context, vol['id'])
-
- def test_create_snapshot(self):
- """Makes sure create_snapshot works."""
- vol = db.volume_create(self.context,
- {'status': "available", 'size': 0})
- volume_id = ec2utils.id_to_ec2_vol_id(vol['id'])
-
- result = self.cloud.create_snapshot(self.context,
- volume_id=volume_id)
- snapshot_id = result['snapshotId']
- result = self.cloud.describe_snapshots(self.context)
- self.assertEqual(len(result['snapshotSet']), 1)
- self.assertEqual(result['snapshotSet'][0]['snapshotId'], snapshot_id)
-
- db.snapshot_destroy(self.context, ec2utils.ec2_id_to_id(snapshot_id))
- db.volume_destroy(self.context, vol['id'])
-
- def test_delete_snapshot(self):
- """Makes sure delete_snapshot works."""
- vol = db.volume_create(self.context,
- {'status': "available", 'size': 0})
- snap = db.snapshot_create(self.context,
- {'volume_id': vol['id'],
- 'status': "available",
- 'volume_size': 0})
- snapshot_id = ec2utils.id_to_ec2_snap_id(snap['id'])
-
- result = self.cloud.delete_snapshot(self.context,
- snapshot_id=snapshot_id)
- self.assertTrue(result)
-
- db.volume_destroy(self.context, vol['id'])
-
def test_describe_instances(self):
"""Makes sure describe_instances works and filters results."""
self.flags(use_ipv6=True)
@@ -1035,189 +919,6 @@ class CloudTestCase(test.TestCase):
result = self.cloud.describe_instances(self.context)
self.assertEqual(len(result['reservationSet']), 2)
- def _block_device_mapping_create(self, instance_uuid, mappings):
- volumes = []
- for bdm in mappings:
- db.block_device_mapping_create(self.context, bdm)
- if 'volume_id' in bdm:
- values = {'id': bdm['volume_id']}
- for bdm_key, vol_key in [('snapshot_id', 'snapshot_id'),
- ('snapshot_size', 'volume_size'),
- ('delete_on_termination',
- 'delete_on_termination')]:
- if bdm_key in bdm:
- values[vol_key] = bdm[bdm_key]
- vol = db.volume_create(self.context, values)
- db.volume_attached(self.context, vol['id'],
- instance_uuid, bdm['device_name'])
- volumes.append(vol)
- return volumes
-
- def _setUpBlockDeviceMapping(self):
- image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
- inst1 = db.instance_create(self.context,
- {'image_ref': image_uuid,
- 'instance_type_id': 1,
- 'root_device_name': '/dev/sdb1'})
- inst2 = db.instance_create(self.context,
- {'image_ref': image_uuid,
- 'instance_type_id': 1,
- 'root_device_name': '/dev/sdc1'})
-
- instance_id = inst1['id']
- instance_uuid = inst1['uuid']
- mappings0 = [
- {'instance_uuid': instance_uuid,
- 'device_name': '/dev/sdb1',
- 'snapshot_id': '1',
- 'volume_id': '2'},
- {'instance_uuid': instance_uuid,
- 'device_name': '/dev/sdb2',
- 'volume_id': '3',
- 'volume_size': 1},
- {'instance_uuid': instance_uuid,
- 'device_name': '/dev/sdb3',
- 'delete_on_termination': True,
- 'snapshot_id': '4',
- 'volume_id': '5'},
- {'instance_uuid': instance_uuid,
- 'device_name': '/dev/sdb4',
- 'delete_on_termination': False,
- 'snapshot_id': '6',
- 'volume_id': '7'},
- {'instance_uuid': instance_uuid,
- 'device_name': '/dev/sdb5',
- 'snapshot_id': '8',
- 'volume_id': '9',
- 'volume_size': 0},
- {'instance_uuid': instance_uuid,
- 'device_name': '/dev/sdb6',
- 'snapshot_id': '10',
- 'volume_id': '11',
- 'volume_size': 1},
- {'instance_uuid': instance_uuid,
- 'device_name': '/dev/sdb7',
- 'no_device': True},
- {'instance_uuid': instance_uuid,
- 'device_name': '/dev/sdb8',
- 'virtual_name': 'swap'},
- {'instance_uuid': instance_uuid,
- 'device_name': '/dev/sdb9',
- 'virtual_name': 'ephemeral3'}]
-
- volumes = self._block_device_mapping_create(instance_uuid, mappings0)
- return (inst1, inst2, volumes)
-
- def _tearDownBlockDeviceMapping(self, inst1, inst2, volumes):
- for vol in volumes:
- db.volume_destroy(self.context, vol['id'])
- for uuid in (inst1['uuid'], inst2['uuid']):
- for bdm in db.block_device_mapping_get_all_by_instance(
- self.context, uuid):
- db.block_device_mapping_destroy(self.context, bdm['id'])
- db.instance_destroy(self.context, inst2['uuid'])
- db.instance_destroy(self.context, inst1['uuid'])
-
- _expected_instance_bdm1 = {
- 'instanceId': 'i-00000001',
- 'rootDeviceName': '/dev/sdb1',
- 'rootDeviceType': 'ebs'}
-
- _expected_block_device_mapping0 = [
- {'deviceName': '/dev/sdb1',
- 'ebs': {'status': 'in-use',
- 'deleteOnTermination': False,
- 'volumeId': '2',
- }},
- {'deviceName': '/dev/sdb2',
- 'ebs': {'status': 'in-use',
- 'deleteOnTermination': False,
- 'volumeId': '3',
- }},
- {'deviceName': '/dev/sdb3',
- 'ebs': {'status': 'in-use',
- 'deleteOnTermination': True,
- 'volumeId': '5',
- }},
- {'deviceName': '/dev/sdb4',
- 'ebs': {'status': 'in-use',
- 'deleteOnTermination': False,
- 'volumeId': '7',
- }},
- {'deviceName': '/dev/sdb5',
- 'ebs': {'status': 'in-use',
- 'deleteOnTermination': False,
- 'volumeId': '9',
- }},
- {'deviceName': '/dev/sdb6',
- 'ebs': {'status': 'in-use',
- 'deleteOnTermination': False,
- 'volumeId': '11', }}]
- # NOTE(yamahata): swap/ephemeral device case isn't supported yet.
-
- _expected_instance_bdm2 = {
- 'instanceId': 'i-00000002',
- 'rootDeviceName': '/dev/sdc1',
- 'rootDeviceType': 'instance-store'}
-
- def test_format_instance_bdm(self):
- (inst1, inst2, volumes) = self._setUpBlockDeviceMapping()
-
- result = {}
- self.cloud._format_instance_bdm(self.context, inst1['uuid'],
- '/dev/sdb1', result)
- self.assertSubDictMatch(
- {'rootDeviceType': self._expected_instance_bdm1['rootDeviceType']},
- result)
- self._assertEqualBlockDeviceMapping(
- self._expected_block_device_mapping0, result['blockDeviceMapping'])
-
- result = {}
- self.cloud._format_instance_bdm(self.context, inst2['uuid'],
- '/dev/sdc1', result)
- self.assertSubDictMatch(
- {'rootDeviceType': self._expected_instance_bdm2['rootDeviceType']},
- result)
-
- self._tearDownBlockDeviceMapping(inst1, inst2, volumes)
-
- def _assertInstance(self, instance_id):
- ec2_instance_id = ec2utils.id_to_ec2_inst_id(instance_id)
- result = self.cloud.describe_instances(self.context,
- instance_id=[ec2_instance_id])
- result = result['reservationSet'][0]
- self.assertEqual(len(result['instancesSet']), 1)
- result = result['instancesSet'][0]
- self.assertEqual(result['instanceId'], ec2_instance_id)
- return result
-
- def _assertEqualBlockDeviceMapping(self, expected, result):
- self.assertEqual(len(expected), len(result))
- for x in expected:
- found = False
- for y in result:
- if x['deviceName'] == y['deviceName']:
- self.assertSubDictMatch(x, y)
- found = True
- break
- self.assertTrue(found)
-
- def test_describe_instances_bdm(self):
- """Make sure describe_instances works with root_device_name and
- block device mappings
- """
- (inst1, inst2, volumes) = self._setUpBlockDeviceMapping()
-
- result = self._assertInstance(inst1['uuid'])
- self.assertSubDictMatch(self._expected_instance_bdm1, result)
- self._assertEqualBlockDeviceMapping(
- self._expected_block_device_mapping0, result['blockDeviceMapping'])
-
- result = self._assertInstance(inst2['uuid'])
- self.assertSubDictMatch(self._expected_instance_bdm2, result)
-
- self._tearDownBlockDeviceMapping(inst1, inst2, volumes)
-
def test_describe_images(self):
describe_images = self.cloud.describe_images
@@ -1281,13 +982,17 @@ class CloudTestCase(test.TestCase):
{'device': 'sdc3', 'virtual': 'swap'},
{'device': 'sdc4', 'virtual': 'swap'}]
block_device_mapping1 = [
- {'device_name': '/dev/sdb1', 'snapshot_id': 01234567},
- {'device_name': '/dev/sdb2', 'volume_id': 01234567},
+ {'device_name': '/dev/sdb1',
+ 'snapshot_id': 'ccec42a2-c220-4806-b762-6b12fbb592e3'},
+ {'device_name': '/dev/sdb2',
+ 'volume_id': 'ccec42a2-c220-4806-b762-6b12fbb592e4'},
{'device_name': '/dev/sdb3', 'virtual_name': 'ephemeral5'},
{'device_name': '/dev/sdb4', 'no_device': True},
- {'device_name': '/dev/sdc1', 'snapshot_id': 12345678},
- {'device_name': '/dev/sdc2', 'volume_id': 12345678},
+ {'device_name': '/dev/sdc1',
+ 'snapshot_id': 'ccec42a2-c220-4806-b762-6b12fbb592e5'},
+ {'device_name': '/dev/sdc2',
+ 'volume_id': 'ccec42a2-c220-4806-b762-6b12fbb592e6'},
{'device_name': '/dev/sdc3', 'virtual_name': 'ephemeral6'},
{'device_name': '/dev/sdc4', 'no_device': True}]
image1 = {
@@ -1305,7 +1010,7 @@ class CloudTestCase(test.TestCase):
mappings2 = [{'device': '/dev/sda1', 'virtual': 'root'}]
block_device_mapping2 = [{'device_name': '/dev/sdb1',
- 'snapshot_id': 01234567}]
+ 'snapshot_id': 'ccec42a2-c220-4806-b762-6b12fbb592e7'}]
image2 = {
'id': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
'name': 'fake_name',
@@ -1338,11 +1043,7 @@ class CloudTestCase(test.TestCase):
vol = self._volume_create(bdm['volume_id'])
volumes.append(vol['id'])
if 'snapshot_id' in bdm:
- snap = db.snapshot_create(self.context,
- {'id': bdm['snapshot_id'],
- 'volume_id': 01234567,
- 'status': "available",
- 'volume_size': 1})
+ snap = self._snapshot_create(bdm['snapshot_id'])
snapshots.append(snap['id'])
return (volumes, snapshots)
@@ -1364,24 +1065,24 @@ class CloudTestCase(test.TestCase):
_expected_bdms1 = [
{'deviceName': '/dev/sdb0', 'virtualName': 'ephemeral0'},
{'deviceName': '/dev/sdb1', 'ebs': {'snapshotId':
- 'snap-00053977'}},
+ 'snap-00000001'}},
{'deviceName': '/dev/sdb2', 'ebs': {'snapshotId':
- 'vol-00053977'}},
+ 'vol-00000001'}},
{'deviceName': '/dev/sdb3', 'virtualName': 'ephemeral5'},
# {'deviceName': '/dev/sdb4', 'noDevice': True},
{'deviceName': '/dev/sdc0', 'virtualName': 'swap'},
{'deviceName': '/dev/sdc1', 'ebs': {'snapshotId':
- 'snap-00bc614e'}},
+ 'snap-00000002'}},
{'deviceName': '/dev/sdc2', 'ebs': {'snapshotId':
- 'vol-00bc614e'}},
+ 'vol-00000002'}},
{'deviceName': '/dev/sdc3', 'virtualName': 'ephemeral6'},
# {'deviceName': '/dev/sdc4', 'noDevice': True}
]
_expected_root_device_name2 = '/dev/sdb1'
_expected_bdms2 = [{'deviceName': '/dev/sdb1',
- 'ebs': {'snapshotId': 'snap-00053977'}}]
+ 'ebs': {'snapshotId': 'snap-00000003'}}]
# NOTE(yamahata):
# InstanceBlockDeviceMappingItemType
@@ -2066,228 +1767,30 @@ class CloudTestCase(test.TestCase):
self.assertTrue(result)
def _volume_create(self, volume_id=None):
- location = '10.0.2.15:3260'
- iqn = 'iqn.2010-10.org.openstack:%s' % volume_id
- kwargs = {'status': 'available',
- 'host': self.volume.host,
+ kwargs = {'name': 'test-volume',
+ 'description': 'test volume description',
+ 'status': 'available',
+ 'host': 'fake',
'size': 1,
- 'provider_location': '1 %s,fake %s' % (location, iqn),
- 'attach_status': 'detached', }
+ 'attach_status': 'detached'}
if volume_id:
- kwargs['id'] = volume_id
- return db.volume_create(self.context, kwargs)
-
- def _assert_volume_attached(self, vol, instance_uuid, mountpoint):
- self.assertEqual(vol['instance_uuid'], instance_uuid)
- self.assertEqual(vol['mountpoint'], mountpoint)
- self.assertEqual(vol['status'], "in-use")
- self.assertEqual(vol['attach_status'], "attached")
- self.assertNotEqual(vol['attach_time'], None)
-
- def _assert_volume_detached(self, vol):
- self.assertEqual(vol['instance_uuid'], None)
- self.assertEqual(vol['mountpoint'], None)
- self.assertEqual(vol['status'], "available")
- self.assertEqual(vol['attach_status'], "detached")
- self.assertEqual(vol['attach_time'], None)
-
- def test_stop_start_with_volume(self):
- """Make sure run instance with block device mapping works"""
-
- # enforce periodic tasks run in short time to avoid wait for 60s.
- self._restart_compute_service(periodic_interval=0.3)
-
- vol1 = self._volume_create()
- vol2 = self._volume_create()
- kwargs = {'image_id': 'ami-1',
- 'instance_type': FLAGS.default_instance_type,
- 'max_count': 1,
- 'block_device_mapping': [{'device_name': '/dev/vdb',
- 'volume_id': vol1['id'],
- 'delete_on_termination': False},
- {'device_name': '/dev/vdc',
- 'volume_id': vol2['id'],
- 'delete_on_termination': True},
- ]}
- ec2_instance_id = self._run_instance(**kwargs)
- instance_uuid = ec2utils.ec2_instance_id_to_uuid(self.context,
- ec2_instance_id)
- instance_id = ec2utils.ec2_id_to_id(ec2_instance_id)
-
- vols = db.volume_get_all_by_instance_uuid(self.context, instance_uuid)
- self.assertEqual(len(vols), 2)
- for vol in vols:
- self.assertTrue(vol['id'] == vol1['id'] or vol['id'] == vol2['id'])
-
- vol = db.volume_get(self.context, vol1['id'])
- self._assert_volume_attached(vol, instance_uuid, '/dev/vdb')
-
- vol = db.volume_get(self.context, vol2['id'])
- self._assert_volume_attached(vol, instance_uuid, '/dev/vdc')
-
- result = self.cloud.stop_instances(self.context, [ec2_instance_id])
- self.assertTrue(result)
-
- vol = db.volume_get(self.context, vol1['id'])
- self._assert_volume_attached(vol, instance_uuid, '/dev/vdb')
-
- vol = db.volume_get(self.context, vol2['id'])
- self._assert_volume_attached(vol, instance_uuid, '/dev/vdc')
-
- self.cloud.start_instances(self.context, [ec2_instance_id])
- vols = db.volume_get_all_by_instance_uuid(self.context, instance_uuid)
- self.assertEqual(len(vols), 2)
- for vol in vols:
- self.assertTrue(vol['id'] == vol1['id'] or vol['id'] == vol2['id'])
- self.assertTrue(vol['mountpoint'] == '/dev/vdb' or
- vol['mountpoint'] == '/dev/vdc')
- self.assertEqual(vol['instance_uuid'], instance_uuid)
- self.assertEqual(vol['status'], "in-use")
- self.assertEqual(vol['attach_status'], "attached")
-
- self.cloud.terminate_instances(self.context, [ec2_instance_id])
-
- admin_ctxt = context.get_admin_context(read_deleted="no")
- vol = db.volume_get(admin_ctxt, vol1['id'])
- self.assertFalse(vol['deleted'])
- db.volume_destroy(self.context, vol1['id'])
-
- admin_ctxt = context.get_admin_context(read_deleted="only")
- vol = db.volume_get(admin_ctxt, vol2['id'])
- self.assertTrue(vol['deleted'])
-
- self._restart_compute_service()
-
- def test_stop_with_attached_volume(self):
- """Make sure attach info is reflected to block device mapping"""
- # enforce periodic tasks run in short time to avoid wait for 60s.
- self._restart_compute_service(periodic_interval=0.3)
-
- vol1 = self._volume_create()
- vol2 = self._volume_create()
- kwargs = {'image_id': 'ami-1',
- 'instance_type': FLAGS.default_instance_type,
- 'max_count': 1,
- 'block_device_mapping': [{'device_name': '/dev/sdb',
- 'volume_id': vol1['id'],
- 'delete_on_termination': True}]}
- ec2_instance_id = self._run_instance(**kwargs)
- instance_id = ec2utils.ec2_id_to_id(ec2_instance_id)
- instance_uuid = ec2utils.ec2_instance_id_to_uuid(self.context,
- ec2_instance_id)
-
- vols = db.volume_get_all_by_instance_uuid(self.context, instance_uuid)
- self.assertEqual(len(vols), 1)
- for vol in vols:
- self.assertEqual(vol['id'], vol1['id'])
- self._assert_volume_attached(vol, instance_uuid, '/dev/sdb')
-
- vol = db.volume_get(self.context, vol2['id'])
- self._assert_volume_detached(vol)
-
- instance = db.instance_get(self.context, instance_id)
- self.cloud.compute_api.attach_volume(self.context,
- instance,
- volume_id=vol2['id'],
- device='/dev/vdc')
- vol = db.volume_get(self.context, vol2['id'])
- self._assert_volume_attached(vol, instance_uuid, '/dev/sdc')
-
- self.cloud.compute_api.detach_volume(self.context,
- volume_id=vol1['id'])
- vol = db.volume_get(self.context, vol1['id'])
- self._assert_volume_detached(vol)
-
- result = self.cloud.stop_instances(self.context, [ec2_instance_id])
- self.assertTrue(result)
-
- vol = db.volume_get(self.context, vol2['id'])
- self._assert_volume_attached(vol, instance_uuid, '/dev/sdc')
-
- self.cloud.start_instances(self.context, [ec2_instance_id])
- vols = db.volume_get_all_by_instance_uuid(self.context, instance_uuid)
- self.assertEqual(len(vols), 1)
- for vol in vols:
- self.assertEqual(vol['id'], vol2['id'])
- self._assert_volume_attached(vol, instance_uuid, '/dev/sdc')
-
- vol = db.volume_get(self.context, vol1['id'])
- self._assert_volume_detached(vol)
-
- self.cloud.terminate_instances(self.context, [ec2_instance_id])
-
- for vol_id in (vol1['id'], vol2['id']):
- vol = db.volume_get(self.context, vol_id)
- self.assertEqual(vol['id'], vol_id)
- self._assert_volume_detached(vol)
- db.volume_destroy(self.context, vol_id)
-
- self._restart_compute_service()
+ kwargs['volume_id'] = volume_id
+ return self.volume_api.create_with_kwargs(self.context, **kwargs)
+
+ def _snapshot_create(self, snapshot_id=None):
+ kwargs = {'volume_id': 'ccec42a2-c220-4806-b762-6b12fbb592e4',
+ 'status': "available",
+ 'volume_size': 1}
+ if snapshot_id:
+ kwargs['snap_id'] = snapshot_id
+ return self.volume_api.create_snapshot_with_kwargs(self.context,
+ **kwargs)
def _create_snapshot(self, ec2_volume_id):
result = self.cloud.create_snapshot(self.context,
volume_id=ec2_volume_id)
return result['snapshotId']
- def test_run_with_snapshot(self):
- """Makes sure run/stop/start instance with snapshot works."""
- vol = self._volume_create()
- ec2_volume_id = ec2utils.id_to_ec2_vol_id(vol['id'])
-
- ec2_snapshot1_id = self._create_snapshot(ec2_volume_id)
- snapshot1_id = ec2utils.ec2_snap_id_to_uuid(ec2_snapshot1_id)
- ec2_snapshot2_id = self._create_snapshot(ec2_volume_id)
- snapshot2_id = ec2utils.ec2_snap_id_to_uuid(ec2_snapshot2_id)
-
- kwargs = {'image_id': 'ami-1',
- 'instance_type': FLAGS.default_instance_type,
- 'max_count': 1,
- 'block_device_mapping': [{'device_name': '/dev/vdb',
- 'snapshot_id': snapshot1_id,
- 'delete_on_termination': False, },
- {'device_name': '/dev/vdc',
- 'snapshot_id': snapshot2_id,
- 'delete_on_termination': True}]}
- ec2_instance_id = self._run_instance(**kwargs)
- instance_id = ec2utils.ec2_vol_id_to_uuid(ec2_instance_id)
- instance_uuid = ec2utils.ec2_instance_id_to_uuid(self.context,
- ec2_instance_id)
-
- vols = db.volume_get_all_by_instance_uuid(self.context, instance_uuid)
- self.assertEqual(len(vols), 2)
- vol1_id = None
- vol2_id = None
- for vol in vols:
- snapshot_id = vol['snapshot_id']
- if snapshot_id == snapshot1_id:
- vol1_id = vol['id']
- mountpoint = '/dev/vdb'
- elif snapshot_id == snapshot2_id:
- vol2_id = vol['id']
- mountpoint = '/dev/vdc'
- else:
- self.fail()
-
- self._assert_volume_attached(vol, instance_uuid, mountpoint)
-
- self.assertTrue(vol1_id)
- self.assertTrue(vol2_id)
-
- self.cloud.terminate_instances(self.context, [ec2_instance_id])
-
- admin_ctxt = context.get_admin_context(read_deleted="no")
- vol = db.volume_get(admin_ctxt, vol1_id)
- self._assert_volume_detached(vol)
- self.assertFalse(vol['deleted'])
- db.volume_destroy(self.context, vol1_id)
-
- admin_ctxt = context.get_admin_context(read_deleted="only")
- vol = db.volume_get(admin_ctxt, vol2_id)
- self.assertTrue(vol['deleted'])
-
- for snapshot_id in (ec2_snapshot1_id, ec2_snapshot2_id):
- self.cloud.delete_snapshot(self.context, snapshot_id)
-
def _do_test_create_image(self, no_reboot):
"""Make sure that CreateImage works"""
# enforce periodic tasks run in short time to avoid wait for 60s.
@@ -2363,7 +1866,7 @@ class CloudTestCase(test.TestCase):
self.assertEquals(bdm.get('deviceName'), 'sda1')
self.assertTrue('ebs' in bdm)
self.assertEquals(bdm['ebs'].get('snapshotId'),
- 'snap-%08x' % snapshots[0])
+ ec2utils.id_to_ec2_snap_id(snapshots[0]))
self.assertEquals(created_image.get('kernelId'), 'aki-00000001')
self.assertEquals(created_image.get('ramdiskId'), 'ari-00000002')
self.assertEquals(created_image.get('rootDeviceType'), 'ebs')
@@ -2371,11 +1874,6 @@ class CloudTestCase(test.TestCase):
self.assertNotEqual(virt_driver.get('powered_off'), no_reboot)
self.cloud.terminate_instances(self.context, [ec2_instance_id])
- for vol in volumes:
- db.volume_destroy(self.context, vol)
- for snap in snapshots:
- db.snapshot_destroy(self.context, snap)
- # TODO(yamahata): clean up snapshot created by CreateImage.
self._restart_compute_service()
diff --git a/nova/tests/api/ec2/test_ec2_validate.py b/nova/tests/api/ec2/test_ec2_validate.py
index 9a14a7d7c4..f46262c359 100644
--- a/nova/tests/api/ec2/test_ec2_validate.py
+++ b/nova/tests/api/ec2/test_ec2_validate.py
@@ -50,7 +50,6 @@ class EC2ValidateTestCase(test.TestCase):
self.compute = self.start_service('compute')
self.scheduter = self.start_service('scheduler')
self.network = self.start_service('network')
- self.volume = self.start_service('volume')
self.image_service = fake.FakeImageService()
self.user_id = 'fake'
diff --git a/nova/tests/api/openstack/compute/contrib/test_snapshots.py b/nova/tests/api/openstack/compute/contrib/test_snapshots.py
index ad259c3027..6e76fc04ae 100644
--- a/nova/tests/api/openstack/compute/contrib/test_snapshots.py
+++ b/nova/tests/api/openstack/compute/contrib/test_snapshots.py
@@ -25,7 +25,7 @@ from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
from nova import test
from nova.tests.api.openstack import fakes
-from nova import volume
+from nova.volume import cinder
FLAGS = flags.FLAGS
@@ -92,14 +92,16 @@ class SnapshotApiTest(test.TestCase):
super(SnapshotApiTest, self).setUp()
fakes.stub_out_networking(self.stubs)
fakes.stub_out_rate_limiting(self.stubs)
- self.stubs.Set(volume.api.API, "create_snapshot", stub_snapshot_create)
- self.stubs.Set(volume.api.API, "create_snapshot_force",
+ self.stubs.Set(cinder.API, "create_snapshot",
+ stub_snapshot_create)
+ self.stubs.Set(cinder.API, "create_snapshot_force",
stub_snapshot_create)
- self.stubs.Set(volume.api.API, "delete_snapshot", stub_snapshot_delete)
- self.stubs.Set(volume.api.API, "get_snapshot", stub_snapshot_get)
- self.stubs.Set(volume.api.API, "get_all_snapshots",
+ self.stubs.Set(cinder.API, "delete_snapshot",
+ stub_snapshot_delete)
+ self.stubs.Set(cinder.API, "get_snapshot", stub_snapshot_get)
+ self.stubs.Set(cinder.API, "get_all_snapshots",
stub_snapshot_get_all)
- self.stubs.Set(volume.api.API, "get", fakes.stub_volume_get)
+ self.stubs.Set(cinder.API, "get", fakes.stub_volume_get)
self.flags(
osapi_compute_extension=[
'nova.api.openstack.compute.contrib.select_extensions'],
diff --git a/nova/tests/api/openstack/compute/contrib/test_volume_types.py b/nova/tests/api/openstack/compute/contrib/test_volume_types.py
deleted file mode 100644
index af88cf6016..0000000000
--- a/nova/tests/api/openstack/compute/contrib/test_volume_types.py
+++ /dev/null
@@ -1,224 +0,0 @@
-# Copyright 2011 OpenStack LLC.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from lxml import etree
-import webob
-
-from nova.api.openstack.compute.contrib import volumetypes
-from nova import exception
-from nova.openstack.common import log as logging
-from nova import test
-from nova.tests.api.openstack import fakes
-from nova.volume import volume_types
-
-
-LOG = logging.getLogger(__name__)
-last_param = {}
-
-
-def stub_volume_type(id):
- specs = {
- "key1": "value1",
- "key2": "value2",
- "key3": "value3",
- "key4": "value4",
- "key5": "value5"}
- return dict(id=id, name='vol_type_%s' % str(id), extra_specs=specs)
-
-
-def return_volume_types_get_all_types(context):
- return dict(vol_type_1=stub_volume_type(1),
- vol_type_2=stub_volume_type(2),
- vol_type_3=stub_volume_type(3))
-
-
-def return_empty_volume_types_get_all_types(context):
- return {}
-
-
-def return_volume_types_get_volume_type(context, id):
- if id == "777":
- raise exception.VolumeTypeNotFound(volume_type_id=id)
- return stub_volume_type(int(id))
-
-
-def return_volume_types_destroy(context, name):
- if name == "777":
- raise exception.VolumeTypeNotFoundByName(volume_type_name=name)
- pass
-
-
-def return_volume_types_create(context, name, specs):
- pass
-
-
-def return_volume_types_get_by_name(context, name):
- if name == "777":
- raise exception.VolumeTypeNotFoundByName(volume_type_name=name)
- return stub_volume_type(int(name.split("_")[2]))
-
-
-class VolumeTypesApiTest(test.TestCase):
- def setUp(self):
- super(VolumeTypesApiTest, self).setUp()
- fakes.stub_out_key_pair_funcs(self.stubs)
- self.controller = volumetypes.VolumeTypesController()
-
- def test_volume_types_index(self):
- self.stubs.Set(volume_types, 'get_all_types',
- return_volume_types_get_all_types)
-
- req = fakes.HTTPRequest.blank('/v2/fake/os-volume-types')
- res_dict = self.controller.index(req)
-
- self.assertEqual(3, len(res_dict['volume_types']))
-
- expected_names = ['vol_type_1', 'vol_type_2', 'vol_type_3']
- actual_names = map(lambda e: e['name'], res_dict['volume_types'])
- self.assertEqual(set(actual_names), set(expected_names))
- for entry in res_dict['volume_types']:
- self.assertEqual('value1', entry['extra_specs']['key1'])
-
- def test_volume_types_index_no_data(self):
- self.stubs.Set(volume_types, 'get_all_types',
- return_empty_volume_types_get_all_types)
-
- req = fakes.HTTPRequest.blank('/v2/fake/os-volume-types')
- res_dict = self.controller.index(req)
-
- self.assertEqual(0, len(res_dict['volume_types']))
-
- def test_volume_types_show(self):
- self.stubs.Set(volume_types, 'get_volume_type',
- return_volume_types_get_volume_type)
-
- req = fakes.HTTPRequest.blank('/v2/fake/os-volume-types/1')
- res_dict = self.controller.show(req, 1)
-
- self.assertEqual(1, len(res_dict))
- self.assertEqual('vol_type_1', res_dict['volume_type']['name'])
-
- def test_volume_types_show_not_found(self):
- self.stubs.Set(volume_types, 'get_volume_type',
- return_volume_types_get_volume_type)
-
- req = fakes.HTTPRequest.blank('/v2/fake/os-volume-types/777')
- self.assertRaises(webob.exc.HTTPNotFound, self.controller.show,
- req, '777')
-
- def test_volume_types_delete(self):
- self.stubs.Set(volume_types, 'get_volume_type',
- return_volume_types_get_volume_type)
- self.stubs.Set(volume_types, 'destroy',
- return_volume_types_destroy)
-
- req = fakes.HTTPRequest.blank('/v2/fake/os-volume-types/1')
- self.controller.delete(req, 1)
-
- def test_volume_types_delete_not_found(self):
- self.stubs.Set(volume_types, 'get_volume_type',
- return_volume_types_get_volume_type)
- self.stubs.Set(volume_types, 'destroy',
- return_volume_types_destroy)
-
- req = fakes.HTTPRequest.blank('/v2/fake/os-volume-types/777')
- self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
- req, '777')
-
- def test_create(self):
- self.stubs.Set(volume_types, 'create',
- return_volume_types_create)
- self.stubs.Set(volume_types, 'get_volume_type_by_name',
- return_volume_types_get_by_name)
-
- body = {"volume_type": {"name": "vol_type_1",
- "extra_specs": {"key1": "value1"}}}
- req = fakes.HTTPRequest.blank('/v2/fake/os-volume-types')
- res_dict = self.controller.create(req, body)
-
- self.assertEqual(1, len(res_dict))
- self.assertEqual('vol_type_1', res_dict['volume_type']['name'])
-
-
-class VolumeTypesSerializerTest(test.TestCase):
- def _verify_volume_type(self, vtype, tree):
- self.assertEqual('volume_type', tree.tag)
- self.assertEqual(vtype['name'], tree.get('name'))
- self.assertEqual(str(vtype['id']), tree.get('id'))
- self.assertEqual(1, len(tree))
- extra_specs = tree[0]
- self.assertEqual('extra_specs', extra_specs.tag)
- seen = set(vtype['extra_specs'].keys())
- for child in extra_specs:
- self.assertTrue(child.tag in seen)
- self.assertEqual(vtype['extra_specs'][child.tag], child.text)
- seen.remove(child.tag)
- self.assertEqual(len(seen), 0)
-
- def test_index_serializer(self):
- serializer = volumetypes.VolumeTypesTemplate()
-
- # Just getting some input data
- vtypes = return_volume_types_get_all_types(None)
- text = serializer.serialize({'volume_types': vtypes.values()})
-
- print text
- tree = etree.fromstring(text)
-
- self.assertEqual('volume_types', tree.tag)
- self.assertEqual(len(vtypes), len(tree))
- for child in tree:
- name = child.get('name')
- self.assertTrue(name in vtypes)
- self._verify_volume_type(vtypes[name], child)
-
- def test_voltype_serializer(self):
- serializer = volumetypes.VolumeTypeTemplate()
-
- vtype = stub_volume_type(1)
- text = serializer.serialize(dict(volume_type=vtype))
-
- print text
- tree = etree.fromstring(text)
-
- self._verify_volume_type(vtype, tree)
-
-
-class VolumeTypesUnprocessableEntityTestCase(test.TestCase):
- """
- Tests of places we throw 422 Unprocessable Entity from
- """
-
- def setUp(self):
- super(VolumeTypesUnprocessableEntityTestCase, self).setUp()
- self.controller = volumetypes.VolumeTypesController()
-
- def _unprocessable_volume_type_create(self, body):
- req = fakes.HTTPRequest.blank('/v2/fake/os-volume-types')
- req.method = 'POST'
-
- self.assertRaises(webob.exc.HTTPUnprocessableEntity,
- self.controller.create, req, body)
-
- def test_create_volume_type_no_body(self):
- self._unprocessable_volume_type_create(body=None)
-
- def test_create_volume_type_missing_volume_type(self):
- body = {'foo': {'a': 'b'}}
- self._unprocessable_volume_type_create(body=body)
-
- def test_create_volume_type_malformed_entity(self):
- body = {'volume_type': 'string'}
- self._unprocessable_volume_type_create(body=body)
diff --git a/nova/tests/api/openstack/compute/contrib/test_volume_types_extra_specs.py b/nova/tests/api/openstack/compute/contrib/test_volume_types_extra_specs.py
deleted file mode 100644
index e9c4034f0a..0000000000
--- a/nova/tests/api/openstack/compute/contrib/test_volume_types_extra_specs.py
+++ /dev/null
@@ -1,198 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright (c) 2011 Zadara Storage Inc.
-# Copyright (c) 2011 OpenStack LLC.
-# Copyright 2011 University of Southern California
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from lxml import etree
-import webob
-
-from nova.api.openstack.compute.contrib import volumetypes
-from nova import test
-from nova.tests.api.openstack import fakes
-import nova.wsgi
-
-
-def return_create_volume_type_extra_specs(context, volume_type_id,
- extra_specs):
- return stub_volume_type_extra_specs()
-
-
-def return_volume_type_extra_specs(context, volume_type_id):
- return stub_volume_type_extra_specs()
-
-
-def return_empty_volume_type_extra_specs(context, volume_type_id):
- return {}
-
-
-def delete_volume_type_extra_specs(context, volume_type_id, key):
- pass
-
-
-def stub_volume_type_extra_specs():
- specs = {
- "key1": "value1",
- "key2": "value2",
- "key3": "value3",
- "key4": "value4",
- "key5": "value5"}
- return specs
-
-
-class VolumeTypesExtraSpecsTest(test.TestCase):
-
- def setUp(self):
- super(VolumeTypesExtraSpecsTest, self).setUp()
- fakes.stub_out_key_pair_funcs(self.stubs)
- self.api_path = '/v2/fake/os-volume-types/1/extra_specs'
- self.controller = volumetypes.VolumeTypeExtraSpecsController()
-
- def test_index(self):
- self.stubs.Set(nova.db, 'volume_type_extra_specs_get',
- return_volume_type_extra_specs)
-
- req = fakes.HTTPRequest.blank(self.api_path)
- res_dict = self.controller.index(req, 1)
-
- self.assertEqual('value1', res_dict['extra_specs']['key1'])
-
- def test_index_no_data(self):
- self.stubs.Set(nova.db, 'volume_type_extra_specs_get',
- return_empty_volume_type_extra_specs)
-
- req = fakes.HTTPRequest.blank(self.api_path)
- res_dict = self.controller.index(req, 1)
-
- self.assertEqual(0, len(res_dict['extra_specs']))
-
- def test_show(self):
- self.stubs.Set(nova.db, 'volume_type_extra_specs_get',
- return_volume_type_extra_specs)
-
- req = fakes.HTTPRequest.blank(self.api_path + '/key5')
- res_dict = self.controller.show(req, 1, 'key5')
-
- self.assertEqual('value5', res_dict['key5'])
-
- def test_show_spec_not_found(self):
- self.stubs.Set(nova.db, 'volume_type_extra_specs_get',
- return_empty_volume_type_extra_specs)
-
- req = fakes.HTTPRequest.blank(self.api_path + '/key6')
- self.assertRaises(webob.exc.HTTPNotFound, self.controller.show,
- req, 1, 'key6')
-
- def test_delete(self):
- self.stubs.Set(nova.db, 'volume_type_extra_specs_delete',
- delete_volume_type_extra_specs)
-
- req = fakes.HTTPRequest.blank(self.api_path + '/key5')
- self.controller.delete(req, 1, 'key5')
-
- def test_create(self):
- self.stubs.Set(nova.db,
- 'volume_type_extra_specs_update_or_create',
- return_create_volume_type_extra_specs)
- body = {"extra_specs": {"key1": "value1"}}
-
- req = fakes.HTTPRequest.blank(self.api_path)
- res_dict = self.controller.create(req, 1, body)
-
- self.assertEqual('value1', res_dict['extra_specs']['key1'])
-
- def test_create_empty_body(self):
- self.stubs.Set(nova.db,
- 'volume_type_extra_specs_update_or_create',
- return_create_volume_type_extra_specs)
-
- req = fakes.HTTPRequest.blank(self.api_path)
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
- req, 1, '')
-
- def test_update_item(self):
- self.stubs.Set(nova.db,
- 'volume_type_extra_specs_update_or_create',
- return_create_volume_type_extra_specs)
- body = {"key1": "value1"}
-
- req = fakes.HTTPRequest.blank(self.api_path + '/key1')
- res_dict = self.controller.update(req, 1, 'key1', body)
-
- self.assertEqual('value1', res_dict['key1'])
-
- def test_update_item_empty_body(self):
- self.stubs.Set(nova.db,
- 'volume_type_extra_specs_update_or_create',
- return_create_volume_type_extra_specs)
-
- req = fakes.HTTPRequest.blank(self.api_path + '/key1')
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
- req, 1, 'key1', '')
-
- def test_update_item_too_many_keys(self):
- self.stubs.Set(nova.db,
- 'volume_type_extra_specs_update_or_create',
- return_create_volume_type_extra_specs)
- body = {"key1": "value1", "key2": "value2"}
-
- req = fakes.HTTPRequest.blank(self.api_path + '/key1')
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
- req, 1, 'key1', body)
-
- def test_update_item_body_uri_mismatch(self):
- self.stubs.Set(nova.db,
- 'volume_type_extra_specs_update_or_create',
- return_create_volume_type_extra_specs)
- body = {"key1": "value1"}
-
- req = fakes.HTTPRequest.blank(self.api_path + '/bad')
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
- req, 1, 'bad', body)
-
-
-class VolumeTypeExtraSpecsSerializerTest(test.TestCase):
- def test_index_create_serializer(self):
- serializer = volumetypes.VolumeTypeExtraSpecsTemplate()
-
- # Just getting some input data
- extra_specs = stub_volume_type_extra_specs()
- text = serializer.serialize(dict(extra_specs=extra_specs))
-
- print text
- tree = etree.fromstring(text)
-
- self.assertEqual('extra_specs', tree.tag)
- self.assertEqual(len(extra_specs), len(tree))
- seen = set(extra_specs.keys())
- for child in tree:
- self.assertTrue(child.tag in seen)
- self.assertEqual(extra_specs[child.tag], child.text)
- seen.remove(child.tag)
- self.assertEqual(len(seen), 0)
-
- def test_update_show_serializer(self):
- serializer = volumetypes.VolumeTypeExtraSpecTemplate()
-
- exemplar = dict(key1='value1')
- text = serializer.serialize(exemplar)
-
- print text
- tree = etree.fromstring(text)
-
- self.assertEqual('key1', tree.tag)
- self.assertEqual('value1', tree.text)
- self.assertEqual(0, len(tree))
diff --git a/nova/tests/api/openstack/compute/contrib/test_volumes.py b/nova/tests/api/openstack/compute/contrib/test_volumes.py
index a07c3fa741..6c092cbd4c 100644
--- a/nova/tests/api/openstack/compute/contrib/test_volumes.py
+++ b/nova/tests/api/openstack/compute/contrib/test_volumes.py
@@ -18,7 +18,6 @@ import datetime
from lxml import etree
import webob
-import nova
from nova.api.openstack.compute.contrib import volumes
from nova.compute import api as compute_api
from nova.compute import instance_types
@@ -29,7 +28,7 @@ from nova.openstack.common import jsonutils
from nova.openstack.common import timeutils
from nova import test
from nova.tests.api.openstack import fakes
-from nova.volume import api as volume_api
+from nova.volume import cinder
from webob import exc
@@ -148,9 +147,9 @@ class VolumeApiTest(test.TestCase):
fakes.stub_out_rate_limiting(self.stubs)
self.stubs.Set(db, 'volume_get', return_volume)
- self.stubs.Set(volume_api.API, "delete", fakes.stub_volume_delete)
- self.stubs.Set(volume_api.API, "get", fakes.stub_volume_get)
- self.stubs.Set(volume_api.API, "get_all", fakes.stub_volume_get_all)
+ self.stubs.Set(cinder.API, "delete", fakes.stub_volume_delete)
+ self.stubs.Set(cinder.API, "get", fakes.stub_volume_get)
+ self.stubs.Set(cinder.API, "get_all", fakes.stub_volume_get_all)
self.flags(
osapi_compute_extension=[
'nova.api.openstack.compute.contrib.select_extensions'],
@@ -160,7 +159,7 @@ class VolumeApiTest(test.TestCase):
self.app = fakes.wsgi_app(init_only=('os-volumes',))
def test_volume_create(self):
- self.stubs.Set(volume_api.API, "create", fakes.stub_volume_create)
+ self.stubs.Set(cinder.API, "create", fakes.stub_volume_create)
vol = {"size": 100,
"display_name": "Volume Test Name",
@@ -202,7 +201,7 @@ class VolumeApiTest(test.TestCase):
self.assertEqual(resp.status_int, 200)
def test_volume_show_no_volume(self):
- self.stubs.Set(volume_api.API, "get", fakes.stub_volume_get_notfound)
+ self.stubs.Set(cinder.API, "get", fakes.stub_volume_get_notfound)
req = webob.Request.blank('/v2/fake/os-volumes/456')
resp = req.get_response(self.app)
@@ -215,7 +214,7 @@ class VolumeApiTest(test.TestCase):
self.assertEqual(resp.status_int, 202)
def test_volume_delete_no_volume(self):
- self.stubs.Set(volume_api.API, "get", fakes.stub_volume_get_notfound)
+ self.stubs.Set(cinder.API, "get", fakes.stub_volume_get_notfound)
req = webob.Request.blank('/v2/fake/os-volumes/456')
req.method = 'DELETE'
diff --git a/nova/tests/api/openstack/compute/test_extensions.py b/nova/tests/api/openstack/compute/test_extensions.py
index e07db35199..466fd3636a 100644
--- a/nova/tests/api/openstack/compute/test_extensions.py
+++ b/nova/tests/api/openstack/compute/test_extensions.py
@@ -194,7 +194,6 @@ class ExtensionControllerTest(ExtensionTestCase):
"UserData",
"VirtualInterfaces",
"Volumes",
- "VolumeTypes",
]
self.ext_list.sort()
diff --git a/nova/tests/api/openstack/fakes.py b/nova/tests/api/openstack/fakes.py
index 58dc2df454..4f39e569ed 100644
--- a/nova/tests/api/openstack/fakes.py
+++ b/nova/tests/api/openstack/fakes.py
@@ -30,7 +30,6 @@ from nova.api.openstack import compute
from nova.api.openstack.compute import limits
from nova.api.openstack.compute import versions
from nova.api.openstack import urlmap
-from nova.api.openstack import volume
from nova.api.openstack import wsgi as os_wsgi
from nova.compute import api as compute_api
from nova.compute import instance_types
diff --git a/nova/tests/api/openstack/volume/__init__.py b/nova/tests/api/openstack/volume/__init__.py
deleted file mode 100644
index 7e04e7c73b..0000000000
--- a/nova/tests/api/openstack/volume/__init__.py
+++ /dev/null
@@ -1,19 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2011 OpenStack LLC.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# NOTE(vish): this forces the fixtures from tests/__init.py:setup() to work
-from nova.tests import *
diff --git a/nova/tests/api/openstack/volume/contrib/__init__.py b/nova/tests/api/openstack/volume/contrib/__init__.py
deleted file mode 100644
index 7e04e7c73b..0000000000
--- a/nova/tests/api/openstack/volume/contrib/__init__.py
+++ /dev/null
@@ -1,19 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2011 OpenStack LLC.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# NOTE(vish): this forces the fixtures from tests/__init.py:setup() to work
-from nova.tests import *
diff --git a/nova/tests/api/openstack/volume/contrib/test_admin_actions.py b/nova/tests/api/openstack/volume/contrib/test_admin_actions.py
deleted file mode 100644
index 4ade547796..0000000000
--- a/nova/tests/api/openstack/volume/contrib/test_admin_actions.py
+++ /dev/null
@@ -1,184 +0,0 @@
-import webob
-
-from nova import context
-from nova import db
-from nova import exception
-from nova.openstack.common import jsonutils
-from nova import test
-from nova.tests.api.openstack import fakes
-
-
-def app():
- # no auth, just let environ['nova.context'] pass through
- api = fakes.volume.APIRouter(init_only=('volumes', 'snapshots'))
- mapper = fakes.urlmap.URLMap()
- mapper['/v1'] = api
- return mapper
-
-
-class AdminActionsTest(test.TestCase):
-
- def test_reset_status_as_admin(self):
- # admin context
- ctx = context.RequestContext('admin', 'fake', is_admin=True)
- ctx.elevated() # add roles
- # current status is available
- volume = db.volume_create(ctx, {'status': 'available'})
- req = webob.Request.blank('/v1/fake/volumes/%s/action' % volume['id'])
- req.method = 'POST'
- req.headers['content-type'] = 'application/json'
- # request status of 'error'
- req.body = jsonutils.dumps({'os-reset_status': {'status': 'error'}})
- # attach admin context to request
- req.environ['nova.context'] = ctx
- resp = req.get_response(app())
- # request is accepted
- self.assertEquals(resp.status_int, 202)
- volume = db.volume_get(ctx, volume['id'])
- # status changed to 'error'
- self.assertEquals(volume['status'], 'error')
-
- def test_reset_status_as_non_admin(self):
- # current status is 'error'
- volume = db.volume_create(context.get_admin_context(),
- {'status': 'error'})
- req = webob.Request.blank('/v1/fake/volumes/%s/action' % volume['id'])
- req.method = 'POST'
- req.headers['content-type'] = 'application/json'
- # request changing status to available
- req.body = jsonutils.dumps({'os-reset_status': {'status':
- 'available'}})
- # non-admin context
- req.environ['nova.context'] = context.RequestContext('fake', 'fake')
- resp = req.get_response(app())
- # request is not authorized
- self.assertEquals(resp.status_int, 403)
- volume = db.volume_get(context.get_admin_context(), volume['id'])
- # status is still 'error'
- self.assertEquals(volume['status'], 'error')
-
- def test_malformed_reset_status_body(self):
- # admin context
- ctx = context.RequestContext('admin', 'fake', is_admin=True)
- ctx.elevated() # add roles
- # current status is available
- volume = db.volume_create(ctx, {'status': 'available'})
- req = webob.Request.blank('/v1/fake/volumes/%s/action' % volume['id'])
- req.method = 'POST'
- req.headers['content-type'] = 'application/json'
- # malformed request body
- req.body = jsonutils.dumps({'os-reset_status': {'x-status': 'bad'}})
- # attach admin context to request
- req.environ['nova.context'] = ctx
- resp = req.get_response(app())
- # bad request
- self.assertEquals(resp.status_int, 400)
- volume = db.volume_get(ctx, volume['id'])
- # status is still 'available'
- self.assertEquals(volume['status'], 'available')
-
- def test_invalid_status_for_volume(self):
- # admin context
- ctx = context.RequestContext('admin', 'fake', is_admin=True)
- ctx.elevated() # add roles
- # current status is available
- volume = db.volume_create(ctx, {'status': 'available'})
- req = webob.Request.blank('/v1/fake/volumes/%s/action' % volume['id'])
- req.method = 'POST'
- req.headers['content-type'] = 'application/json'
- # 'invalid' is not a valid status
- req.body = jsonutils.dumps({'os-reset_status': {'status': 'invalid'}})
- # attach admin context to request
- req.environ['nova.context'] = ctx
- resp = req.get_response(app())
- # bad request
- self.assertEquals(resp.status_int, 400)
- volume = db.volume_get(ctx, volume['id'])
- # status is still 'available'
- self.assertEquals(volume['status'], 'available')
-
- def test_reset_status_for_missing_volume(self):
- # admin context
- ctx = context.RequestContext('admin', 'fake', is_admin=True)
- ctx.elevated() # add roles
- # missing-volume-id
- req = webob.Request.blank('/v1/fake/volumes/%s/action' %
- 'missing-volume-id')
- req.method = 'POST'
- req.headers['content-type'] = 'application/json'
- # malformed request body
- req.body = jsonutils.dumps({'os-reset_status': {'status':
- 'available'}})
- # attach admin context to request
- req.environ['nova.context'] = ctx
- resp = req.get_response(app())
- # not found
- self.assertEquals(resp.status_int, 404)
- self.assertRaises(exception.NotFound, db.volume_get, ctx,
- 'missing-volume-id')
-
- def test_snapshot_reset_status(self):
- # admin context
- ctx = context.RequestContext('admin', 'fake', is_admin=True)
- ctx.elevated() # add roles
- # snapshot in 'error_deleting'
- volume = db.volume_create(ctx, {})
- snapshot = db.snapshot_create(ctx, {'status': 'error_deleting',
- 'volume_id': volume['id']})
- req = webob.Request.blank('/v1/fake/snapshots/%s/action' %
- snapshot['id'])
- req.method = 'POST'
- req.headers['content-type'] = 'application/json'
- # request status of 'error'
- req.body = jsonutils.dumps({'os-reset_status': {'status': 'error'}})
- # attach admin context to request
- req.environ['nova.context'] = ctx
- resp = req.get_response(app())
- # request is accepted
- self.assertEquals(resp.status_int, 202)
- snapshot = db.snapshot_get(ctx, snapshot['id'])
- # status changed to 'error'
- self.assertEquals(snapshot['status'], 'error')
-
- def test_invalid_status_for_snapshot(self):
- # admin context
- ctx = context.RequestContext('admin', 'fake', is_admin=True)
- ctx.elevated() # add roles
- # snapshot in 'available'
- volume = db.volume_create(ctx, {})
- snapshot = db.snapshot_create(ctx, {'status': 'available',
- 'volume_id': volume['id']})
- req = webob.Request.blank('/v1/fake/snapshots/%s/action' %
- snapshot['id'])
- req.method = 'POST'
- req.headers['content-type'] = 'application/json'
- # 'attaching' is not a valid status for snapshots
- req.body = jsonutils.dumps({'os-reset_status': {'status':
- 'attaching'}})
- # attach admin context to request
- req.environ['nova.context'] = ctx
- resp = req.get_response(app())
- # request is accepted
- print resp
- self.assertEquals(resp.status_int, 400)
- snapshot = db.snapshot_get(ctx, snapshot['id'])
- # status is still 'available'
- self.assertEquals(snapshot['status'], 'available')
-
- def test_force_delete(self):
- # admin context
- ctx = context.RequestContext('admin', 'fake', is_admin=True)
- ctx.elevated() # add roles
- # current status is creating
- volume = db.volume_create(ctx, {'status': 'creating'})
- req = webob.Request.blank('/v1/fake/volumes/%s/action' % volume['id'])
- req.method = 'POST'
- req.headers['content-type'] = 'application/json'
- req.body = jsonutils.dumps({'os-force_delete': {}})
- # attach admin context to request
- req.environ['nova.context'] = ctx
- resp = req.get_response(app())
- # request is accepted
- self.assertEquals(resp.status_int, 202)
- # volume is deleted
- self.assertRaises(exception.NotFound, db.volume_get, ctx, volume['id'])
diff --git a/nova/tests/api/openstack/volume/contrib/test_types_extra_specs.py b/nova/tests/api/openstack/volume/contrib/test_types_extra_specs.py
deleted file mode 100644
index edc127b8a9..0000000000
--- a/nova/tests/api/openstack/volume/contrib/test_types_extra_specs.py
+++ /dev/null
@@ -1,226 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright (c) 2011 Zadara Storage Inc.
-# Copyright (c) 2011 OpenStack LLC.
-# Copyright 2011 University of Southern California
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from lxml import etree
-import webob
-
-from nova.api.openstack.volume.contrib import types_extra_specs
-from nova import test
-from nova.tests.api.openstack import fakes
-import nova.wsgi
-
-
-def return_create_volume_type_extra_specs(context, volume_type_id,
- extra_specs):
- return stub_volume_type_extra_specs()
-
-
-def return_volume_type_extra_specs(context, volume_type_id):
- return stub_volume_type_extra_specs()
-
-
-def return_empty_volume_type_extra_specs(context, volume_type_id):
- return {}
-
-
-def delete_volume_type_extra_specs(context, volume_type_id, key):
- pass
-
-
-def stub_volume_type_extra_specs():
- specs = {
- "key1": "value1",
- "key2": "value2",
- "key3": "value3",
- "key4": "value4",
- "key5": "value5"}
- return specs
-
-
-def volume_type_get(context, volume_type_id):
- pass
-
-
-class VolumeTypesExtraSpecsTest(test.TestCase):
-
- def setUp(self):
- super(VolumeTypesExtraSpecsTest, self).setUp()
- self.stubs.Set(nova.db, 'volume_type_get', volume_type_get)
- self.api_path = '/v1/fake/os-volume-types/1/extra_specs'
- self.controller = types_extra_specs.VolumeTypeExtraSpecsController()
-
- def test_index(self):
- self.stubs.Set(nova.db, 'volume_type_extra_specs_get',
- return_volume_type_extra_specs)
-
- req = fakes.HTTPRequest.blank(self.api_path)
- res_dict = self.controller.index(req, 1)
-
- self.assertEqual('value1', res_dict['extra_specs']['key1'])
-
- def test_index_no_data(self):
- self.stubs.Set(nova.db, 'volume_type_extra_specs_get',
- return_empty_volume_type_extra_specs)
-
- req = fakes.HTTPRequest.blank(self.api_path)
- res_dict = self.controller.index(req, 1)
-
- self.assertEqual(0, len(res_dict['extra_specs']))
-
- def test_show(self):
- self.stubs.Set(nova.db, 'volume_type_extra_specs_get',
- return_volume_type_extra_specs)
-
- req = fakes.HTTPRequest.blank(self.api_path + '/key5')
- res_dict = self.controller.show(req, 1, 'key5')
-
- self.assertEqual('value5', res_dict['key5'])
-
- def test_show_spec_not_found(self):
- self.stubs.Set(nova.db, 'volume_type_extra_specs_get',
- return_empty_volume_type_extra_specs)
-
- req = fakes.HTTPRequest.blank(self.api_path + '/key6')
- self.assertRaises(webob.exc.HTTPNotFound, self.controller.show,
- req, 1, 'key6')
-
- def test_delete(self):
- self.stubs.Set(nova.db, 'volume_type_extra_specs_delete',
- delete_volume_type_extra_specs)
-
- req = fakes.HTTPRequest.blank(self.api_path + '/key5')
- self.controller.delete(req, 1, 'key5')
-
- def test_create(self):
- self.stubs.Set(nova.db,
- 'volume_type_extra_specs_update_or_create',
- return_create_volume_type_extra_specs)
- body = {"extra_specs": {"key1": "value1"}}
-
- req = fakes.HTTPRequest.blank(self.api_path)
- res_dict = self.controller.create(req, 1, body)
-
- self.assertEqual('value1', res_dict['extra_specs']['key1'])
-
- def test_update_item(self):
- self.stubs.Set(nova.db,
- 'volume_type_extra_specs_update_or_create',
- return_create_volume_type_extra_specs)
- body = {"key1": "value1"}
-
- req = fakes.HTTPRequest.blank(self.api_path + '/key1')
- res_dict = self.controller.update(req, 1, 'key1', body)
-
- self.assertEqual('value1', res_dict['key1'])
-
- def test_update_item_too_many_keys(self):
- self.stubs.Set(nova.db,
- 'volume_type_extra_specs_update_or_create',
- return_create_volume_type_extra_specs)
- body = {"key1": "value1", "key2": "value2"}
-
- req = fakes.HTTPRequest.blank(self.api_path + '/key1')
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
- req, 1, 'key1', body)
-
- def test_update_item_body_uri_mismatch(self):
- self.stubs.Set(nova.db,
- 'volume_type_extra_specs_update_or_create',
- return_create_volume_type_extra_specs)
- body = {"key1": "value1"}
-
- req = fakes.HTTPRequest.blank(self.api_path + '/bad')
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
- req, 1, 'bad', body)
-
-
-class VolumeTypeExtraSpecsSerializerTest(test.TestCase):
- def test_index_create_serializer(self):
- serializer = types_extra_specs.VolumeTypeExtraSpecsTemplate()
-
- # Just getting some input data
- extra_specs = stub_volume_type_extra_specs()
- text = serializer.serialize(dict(extra_specs=extra_specs))
-
- print text
- tree = etree.fromstring(text)
-
- self.assertEqual('extra_specs', tree.tag)
- self.assertEqual(len(extra_specs), len(tree))
- seen = set(extra_specs.keys())
- for child in tree:
- self.assertTrue(child.tag in seen)
- self.assertEqual(extra_specs[child.tag], child.text)
- seen.remove(child.tag)
- self.assertEqual(len(seen), 0)
-
- def test_update_show_serializer(self):
- serializer = types_extra_specs.VolumeTypeExtraSpecTemplate()
-
- exemplar = dict(key1='value1')
- text = serializer.serialize(exemplar)
-
- print text
- tree = etree.fromstring(text)
-
- self.assertEqual('key1', tree.tag)
- self.assertEqual('value1', tree.text)
- self.assertEqual(0, len(tree))
-
-
-class VolumeTypeExtraSpecsUnprocessableEntityTestCase(test.TestCase):
-
- """
- Tests of places we throw 422 Unprocessable Entity from
- """
-
- def setUp(self):
- super(VolumeTypeExtraSpecsUnprocessableEntityTestCase, self).setUp()
- self.controller = types_extra_specs.VolumeTypeExtraSpecsController()
-
- def _unprocessable_extra_specs_create(self, body):
- req = fakes.HTTPRequest.blank('/v2/fake/types/1/extra_specs')
- req.method = 'POST'
-
- self.assertRaises(webob.exc.HTTPUnprocessableEntity,
- self.controller.create, req, '1', body)
-
- def test_create_no_body(self):
- self._unprocessable_extra_specs_create(body=None)
-
- def test_create_missing_volume(self):
- body = {'foo': {'a': 'b'}}
- self._unprocessable_extra_specs_create(body=body)
-
- def test_create_malformed_entity(self):
- body = {'extra_specs': 'string'}
- self._unprocessable_extra_specs_create(body=body)
-
- def _unprocessable_extra_specs_update(self, body):
- req = fakes.HTTPRequest.blank('/v2/fake/types/1/extra_specs')
- req.method = 'POST'
-
- self.assertRaises(webob.exc.HTTPUnprocessableEntity,
- self.controller.update, req, '1', body)
-
- def test_update_no_body(self):
- self._unprocessable_extra_specs_update(body=None)
-
- def test_update_empty_body(self):
- self._unprocessable_extra_specs_update(body={})
diff --git a/nova/tests/api/openstack/volume/contrib/test_types_manage.py b/nova/tests/api/openstack/volume/contrib/test_types_manage.py
deleted file mode 100644
index f690381442..0000000000
--- a/nova/tests/api/openstack/volume/contrib/test_types_manage.py
+++ /dev/null
@@ -1,122 +0,0 @@
-# Copyright 2011 OpenStack LLC.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import webob
-
-from nova.api.openstack.volume.contrib import types_manage
-from nova import exception
-from nova import test
-from nova.tests.api.openstack import fakes
-from nova.volume import volume_types
-
-
-def stub_volume_type(id):
- specs = {
- "key1": "value1",
- "key2": "value2",
- "key3": "value3",
- "key4": "value4",
- "key5": "value5"}
- return dict(id=id, name='vol_type_%s' % str(id), extra_specs=specs)
-
-
-def return_volume_types_get_volume_type(context, id):
- if id == "777":
- raise exception.VolumeTypeNotFound(volume_type_id=id)
- return stub_volume_type(int(id))
-
-
-def return_volume_types_destroy(context, name):
- if name == "777":
- raise exception.VolumeTypeNotFoundByName(volume_type_name=name)
- pass
-
-
-def return_volume_types_create(context, name, specs):
- pass
-
-
-def return_volume_types_get_by_name(context, name):
- if name == "777":
- raise exception.VolumeTypeNotFoundByName(volume_type_name=name)
- return stub_volume_type(int(name.split("_")[2]))
-
-
-class VolumeTypesManageApiTest(test.TestCase):
- def setUp(self):
- super(VolumeTypesManageApiTest, self).setUp()
- self.controller = types_manage.VolumeTypesManageController()
-
- def test_volume_types_delete(self):
- self.stubs.Set(volume_types, 'get_volume_type',
- return_volume_types_get_volume_type)
- self.stubs.Set(volume_types, 'destroy',
- return_volume_types_destroy)
-
- req = fakes.HTTPRequest.blank('/v1/fake/types/1')
- self.controller._delete(req, 1)
-
- def test_volume_types_delete_not_found(self):
- self.stubs.Set(volume_types, 'get_volume_type',
- return_volume_types_get_volume_type)
- self.stubs.Set(volume_types, 'destroy',
- return_volume_types_destroy)
-
- req = fakes.HTTPRequest.blank('/v1/fake/types/777')
- self.assertRaises(webob.exc.HTTPNotFound, self.controller._delete,
- req, '777')
-
- def test_create(self):
- self.stubs.Set(volume_types, 'create',
- return_volume_types_create)
- self.stubs.Set(volume_types, 'get_volume_type_by_name',
- return_volume_types_get_by_name)
-
- body = {"volume_type": {"name": "vol_type_1",
- "extra_specs": {"key1": "value1"}}}
- req = fakes.HTTPRequest.blank('/v1/fake/types')
- res_dict = self.controller._create(req, body)
-
- self.assertEqual(1, len(res_dict))
- self.assertEqual('vol_type_1', res_dict['volume_type']['name'])
-
-
-class VolumeTypesUnprocessableEntityTestCase(test.TestCase):
-
- """
- Tests of places we throw 422 Unprocessable Entity from
- """
-
- def setUp(self):
- super(VolumeTypesUnprocessableEntityTestCase, self).setUp()
- self.controller = types_manage.VolumeTypesManageController()
-
- def _unprocessable_volume_type_create(self, body):
- req = fakes.HTTPRequest.blank('/v2/fake/types')
- req.method = 'POST'
-
- self.assertRaises(webob.exc.HTTPUnprocessableEntity,
- self.controller._create, req, body)
-
- def test_create_no_body(self):
- self._unprocessable_volume_type_create(body=None)
-
- def test_create_missing_volume(self):
- body = {'foo': {'a': 'b'}}
- self._unprocessable_volume_type_create(body=body)
-
- def test_create_malformed_entity(self):
- body = {'volume_type': 'string'}
- self._unprocessable_volume_type_create(body=body)
diff --git a/nova/tests/api/openstack/volume/contrib/test_volume_actions.py b/nova/tests/api/openstack/volume/contrib/test_volume_actions.py
deleted file mode 100644
index 4dd79d3660..0000000000
--- a/nova/tests/api/openstack/volume/contrib/test_volume_actions.py
+++ /dev/null
@@ -1,162 +0,0 @@
-# Copyright 2012 OpenStack LLC.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import datetime
-import webob
-
-from nova.api.openstack.volume.contrib import volume_actions
-from nova import exception
-from nova.openstack.common.rpc import common as rpc_common
-from nova import test
-from nova.tests.api.openstack import fakes
-from nova.volume import api as volume_api
-
-
-def stub_volume_get(self, context, volume_id):
- volume = fakes.stub_volume(volume_id)
- if volume_id == 5:
- volume['status'] = 'in-use'
- else:
- volume['status'] = 'available'
- return volume
-
-
-def stub_upload_volume_to_image_service(self, context, volume, metadata,
- force):
- ret = {"id": volume['id'],
- "updated_at": datetime.datetime(1, 1, 1, 1, 1, 1),
- "status": 'uploading',
- "display_description": volume['display_description'],
- "size": volume['size'],
- "volume_type": volume['volume_type'],
- "image_id": 1,
- "container_format": 'bare',
- "disk_format": 'raw',
- "image_name": 'image_name'}
- return ret
-
-
-class VolumeImageActionsTest(test.TestCase):
- def setUp(self):
- super(VolumeImageActionsTest, self).setUp()
- self.controller = volume_actions.VolumeActionsController()
-
- self.stubs.Set(volume_api.API, 'get', stub_volume_get)
-
- def test_copy_volume_to_image(self):
- self.stubs.Set(volume_api.API,
- "copy_volume_to_image",
- stub_upload_volume_to_image_service)
-
- id = 1
- vol = {"container_format": 'bare',
- "disk_format": 'raw',
- "image_name": 'image_name',
- "force": True}
- body = {"os-volume_upload_image": vol}
- req = fakes.HTTPRequest.blank('/v1/tenant1/volumes/%s/action' % id)
- res_dict = self.controller._volume_upload_image(req, id, body)
- expected = {'os-volume_upload_image': {'id': id,
- 'updated_at': datetime.datetime(1, 1, 1, 1, 1, 1),
- 'status': 'uploading',
- 'display_description': 'displaydesc',
- 'size': 1,
- 'volume_type': {'name': 'vol_type_name'},
- 'image_id': 1,
- 'container_format': 'bare',
- 'disk_format': 'raw',
- 'image_name': 'image_name'}}
- self.assertDictMatch(res_dict, expected)
-
- def test_copy_volume_to_image_volumenotfound(self):
- def stub_volume_get_raise_exc(self, context, volume_id):
- raise exception.VolumeNotFound(volume_id=volume_id)
-
- self.stubs.Set(volume_api.API, 'get', stub_volume_get_raise_exc)
-
- id = 1
- vol = {"container_format": 'bare',
- "disk_format": 'raw',
- "image_name": 'image_name',
- "force": True}
- body = {"os-volume_upload_image": vol}
- req = fakes.HTTPRequest.blank('/v1/tenant1/volumes/%s/action' % id)
- self.assertRaises(webob.exc.HTTPNotFound,
- self.controller._volume_upload_image,
- req,
- id,
- body)
-
- def test_copy_volume_to_image_invalidvolume(self):
- def stub_upload_volume_to_image_service_raise(self, context, volume,
- metadata, force):
- raise exception.InvalidVolume
- self.stubs.Set(volume_api.API,
- "copy_volume_to_image",
- stub_upload_volume_to_image_service_raise)
-
- id = 1
- vol = {"container_format": 'bare',
- "disk_format": 'raw',
- "image_name": 'image_name',
- "force": True}
- body = {"os-volume_upload_image": vol}
- req = fakes.HTTPRequest.blank('/v1/tenant1/volumes/%s/action' % id)
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller._volume_upload_image,
- req,
- id,
- body)
-
- def test_copy_volume_to_image_valueerror(self):
- def stub_upload_volume_to_image_service_raise(self, context, volume,
- metadata, force):
- raise ValueError
- self.stubs.Set(volume_api.API,
- "copy_volume_to_image",
- stub_upload_volume_to_image_service_raise)
-
- id = 1
- vol = {"container_format": 'bare',
- "disk_format": 'raw',
- "image_name": 'image_name',
- "force": True}
- body = {"os-volume_upload_image": vol}
- req = fakes.HTTPRequest.blank('/v1/tenant1/volumes/%s/action' % id)
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller._volume_upload_image,
- req,
- id,
- body)
-
- def test_copy_volume_to_image_remoteerror(self):
- def stub_upload_volume_to_image_service_raise(self, context, volume,
- metadata, force):
- raise rpc_common.RemoteError
- self.stubs.Set(volume_api.API,
- "copy_volume_to_image",
- stub_upload_volume_to_image_service_raise)
-
- id = 1
- vol = {"container_format": 'bare',
- "disk_format": 'raw',
- "image_name": 'image_name',
- "force": True}
- body = {"os-volume_upload_image": vol}
- req = fakes.HTTPRequest.blank('/v1/tenant1/volumes/%s/action' % id)
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller._volume_upload_image,
- req,
- id,
- body)
diff --git a/nova/tests/api/openstack/volume/extensions/__init__.py b/nova/tests/api/openstack/volume/extensions/__init__.py
deleted file mode 100644
index 848908a953..0000000000
--- a/nova/tests/api/openstack/volume/extensions/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2011 OpenStack LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
diff --git a/nova/tests/api/openstack/volume/extensions/foxinsocks.py b/nova/tests/api/openstack/volume/extensions/foxinsocks.py
deleted file mode 100644
index cf901472c6..0000000000
--- a/nova/tests/api/openstack/volume/extensions/foxinsocks.py
+++ /dev/null
@@ -1,94 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2011 OpenStack LLC.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import webob.exc
-
-from nova.api.openstack import extensions
-from nova.api.openstack import wsgi
-
-
-class FoxInSocksController(object):
-
- def index(self, req):
- return "Try to say this Mr. Knox, sir..."
-
-
-class FoxInSocksServerControllerExtension(wsgi.Controller):
- @wsgi.action('add_tweedle')
- def _add_tweedle(self, req, id, body):
-
- return "Tweedle Beetle Added."
-
- @wsgi.action('delete_tweedle')
- def _delete_tweedle(self, req, id, body):
-
- return "Tweedle Beetle Deleted."
-
- @wsgi.action('fail')
- def _fail(self, req, id, body):
-
- raise webob.exc.HTTPBadRequest(explanation='Tweedle fail')
-
-
-class FoxInSocksFlavorGooseControllerExtension(wsgi.Controller):
- @wsgi.extends
- def show(self, req, resp_obj, id):
- #NOTE: This only handles JSON responses.
- # You can use content type header to test for XML.
- resp_obj.obj['flavor']['googoose'] = req.GET.get('chewing')
-
-
-class FoxInSocksFlavorBandsControllerExtension(wsgi.Controller):
- @wsgi.extends
- def show(self, req, resp_obj, id):
- #NOTE: This only handles JSON responses.
- # You can use content type header to test for XML.
- resp_obj.obj['big_bands'] = 'Pig Bands!'
-
-
-class Foxinsocks(extensions.ExtensionDescriptor):
- """The Fox In Socks Extension"""
-
- name = "Fox In Socks"
- alias = "FOXNSOX"
- namespace = "http://www.fox.in.socks/api/ext/pie/v1.0"
- updated = "2011-01-22T13:25:27-06:00"
-
- def __init__(self, ext_mgr):
- ext_mgr.register(self)
-
- def get_resources(self):
- resources = []
- resource = extensions.ResourceExtension('foxnsocks',
- FoxInSocksController())
- resources.append(resource)
- return resources
-
- def get_controller_extensions(self):
- extension_list = []
-
- extension_set = [
- (FoxInSocksServerControllerExtension, 'servers'),
- (FoxInSocksFlavorGooseControllerExtension, 'flavors'),
- (FoxInSocksFlavorBandsControllerExtension, 'flavors'),
- ]
- for klass, collection in extension_set:
- controller = klass()
- ext = extensions.ControllerExtension(self, collection, controller)
- extension_list.append(ext)
-
- return extension_list
diff --git a/nova/tests/api/openstack/volume/test_extensions.py b/nova/tests/api/openstack/volume/test_extensions.py
deleted file mode 100644
index e291e74ddf..0000000000
--- a/nova/tests/api/openstack/volume/test_extensions.py
+++ /dev/null
@@ -1,155 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
-# Copyright 2011 OpenStack LLC.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import iso8601
-from lxml import etree
-import webob
-
-from nova.api.openstack import volume
-from nova.api.openstack import xmlutil
-from nova import flags
-from nova.openstack.common import jsonutils
-from nova import test
-
-FLAGS = flags.FLAGS
-NS = "{http://docs.openstack.org/common/api/v1.0}"
-
-
-class ExtensionTestCase(test.TestCase):
- def setUp(self):
- super(ExtensionTestCase, self).setUp()
- ext_list = FLAGS.osapi_volume_extension[:]
- fox = ('nova.tests.api.openstack.volume.extensions.'
- 'foxinsocks.Foxinsocks')
- if fox not in ext_list:
- ext_list.append(fox)
- self.flags(osapi_volume_extension=ext_list)
-
-
-class ExtensionControllerTest(ExtensionTestCase):
-
- def setUp(self):
- super(ExtensionControllerTest, self).setUp()
- self.ext_list = [
- "TypesManage",
- "TypesExtraSpecs",
- ]
- self.ext_list.sort()
-
- def test_list_extensions_json(self):
- app = volume.APIRouter()
- request = webob.Request.blank("/fake/extensions")
- response = request.get_response(app)
- self.assertEqual(200, response.status_int)
-
- # Make sure we have all the extensions, extra extensions being OK.
- data = jsonutils.loads(response.body)
- names = [str(x['name']) for x in data['extensions']
- if str(x['name']) in self.ext_list]
- names.sort()
- self.assertEqual(names, self.ext_list)
-
- # Ensure all the timestamps are valid according to iso8601
- for ext in data['extensions']:
- iso8601.parse_date(ext['updated'])
-
- # Make sure that at least Fox in Sox is correct.
- (fox_ext, ) = [
- x for x in data['extensions'] if x['alias'] == 'FOXNSOX']
- self.assertEqual(fox_ext, {
- 'namespace': 'http://www.fox.in.socks/api/ext/pie/v1.0',
- 'name': 'Fox In Socks',
- 'updated': '2011-01-22T13:25:27-06:00',
- 'description': 'The Fox In Socks Extension',
- 'alias': 'FOXNSOX',
- 'links': []
- },
- )
-
- for ext in data['extensions']:
- url = '/fake/extensions/%s' % ext['alias']
- request = webob.Request.blank(url)
- response = request.get_response(app)
- output = jsonutils.loads(response.body)
- self.assertEqual(output['extension']['alias'], ext['alias'])
-
- def test_get_extension_json(self):
- app = volume.APIRouter()
- request = webob.Request.blank("/fake/extensions/FOXNSOX")
- response = request.get_response(app)
- self.assertEqual(200, response.status_int)
-
- data = jsonutils.loads(response.body)
- self.assertEqual(data['extension'], {
- "namespace": "http://www.fox.in.socks/api/ext/pie/v1.0",
- "name": "Fox In Socks",
- "updated": "2011-01-22T13:25:27-06:00",
- "description": "The Fox In Socks Extension",
- "alias": "FOXNSOX",
- "links": []})
-
- def test_get_non_existing_extension_json(self):
- app = volume.APIRouter()
- request = webob.Request.blank("/fake/extensions/4")
- response = request.get_response(app)
- self.assertEqual(404, response.status_int)
-
- def test_list_extensions_xml(self):
- app = volume.APIRouter()
- request = webob.Request.blank("/fake/extensions")
- request.accept = "application/xml"
- response = request.get_response(app)
- self.assertEqual(200, response.status_int)
-
- root = etree.XML(response.body)
- self.assertEqual(root.tag.split('extensions')[0], NS)
-
- # Make sure we have all the extensions, extras extensions being OK.
- exts = root.findall('{0}extension'.format(NS))
- self.assert_(len(exts) >= len(self.ext_list))
-
- # Make sure that at least Fox in Sox is correct.
- (fox_ext, ) = [x for x in exts if x.get('alias') == 'FOXNSOX']
- self.assertEqual(fox_ext.get('name'), 'Fox In Socks')
- self.assertEqual(fox_ext.get('namespace'),
- 'http://www.fox.in.socks/api/ext/pie/v1.0')
- self.assertEqual(fox_ext.get('updated'), '2011-01-22T13:25:27-06:00')
- self.assertEqual(fox_ext.findtext('{0}description'.format(NS)),
- 'The Fox In Socks Extension')
-
- xmlutil.validate_schema(root, 'extensions')
-
- def test_get_extension_xml(self):
- app = volume.APIRouter()
- request = webob.Request.blank("/fake/extensions/FOXNSOX")
- request.accept = "application/xml"
- response = request.get_response(app)
- self.assertEqual(200, response.status_int)
- xml = response.body
-
- root = etree.XML(xml)
- self.assertEqual(root.tag.split('extension')[0], NS)
- self.assertEqual(root.get('alias'), 'FOXNSOX')
- self.assertEqual(root.get('name'), 'Fox In Socks')
- self.assertEqual(root.get('namespace'),
- 'http://www.fox.in.socks/api/ext/pie/v1.0')
- self.assertEqual(root.get('updated'), '2011-01-22T13:25:27-06:00')
- self.assertEqual(root.findtext('{0}description'.format(NS)),
- 'The Fox In Socks Extension')
-
- xmlutil.validate_schema(root, 'extension')
diff --git a/nova/tests/api/openstack/volume/test_router.py b/nova/tests/api/openstack/volume/test_router.py
deleted file mode 100644
index 5934a21e57..0000000000
--- a/nova/tests/api/openstack/volume/test_router.py
+++ /dev/null
@@ -1,112 +0,0 @@
-# Copyright 2011 Denali Systems, Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-from nova.api.openstack import volume
-from nova.api.openstack.volume import snapshots
-from nova.api.openstack.volume import versions
-from nova.api.openstack.volume import volumes
-from nova.api.openstack import wsgi
-from nova import flags
-from nova.openstack.common import log as logging
-from nova import test
-from nova.tests.api.openstack import fakes
-
-FLAGS = flags.FLAGS
-
-LOG = logging.getLogger(__name__)
-
-
-class FakeController(object):
- def __init__(self, ext_mgr=None):
- self.ext_mgr = ext_mgr
-
- def index(self, req):
- return {}
-
- def detail(self, req):
- return {}
-
-
-def create_resource(ext_mgr):
- return wsgi.Resource(FakeController(ext_mgr))
-
-
-def create_volume_resource(ext_mgr):
- return wsgi.Resource(FakeController(ext_mgr))
-
-
-class VolumeRouterTestCase(test.TestCase):
- def setUp(self):
- super(VolumeRouterTestCase, self).setUp()
- # NOTE(vish): versions is just returning text so, no need to stub.
- self.stubs.Set(snapshots, 'create_resource', create_resource)
- self.stubs.Set(volumes, 'create_resource', create_volume_resource)
- self.app = volume.APIRouter()
-
- def test_versions(self):
- req = fakes.HTTPRequest.blank('')
- req.method = 'GET'
- req.content_type = 'application/json'
- response = req.get_response(self.app)
- self.assertEqual(302, response.status_int)
- req = fakes.HTTPRequest.blank('/')
- req.method = 'GET'
- req.content_type = 'application/json'
- response = req.get_response(self.app)
- self.assertEqual(200, response.status_int)
-
- def test_versions_dispatch(self):
- req = fakes.HTTPRequest.blank('/')
- req.method = 'GET'
- req.content_type = 'application/json'
- resource = versions.Versions()
- result = resource.dispatch(resource.index, req, {})
- self.assertTrue(result)
-
- def test_volumes(self):
- req = fakes.HTTPRequest.blank('/fake/volumes')
- req.method = 'GET'
- req.content_type = 'application/json'
- response = req.get_response(self.app)
- self.assertEqual(200, response.status_int)
-
- def test_volumes_detail(self):
- req = fakes.HTTPRequest.blank('/fake/volumes/detail')
- req.method = 'GET'
- req.content_type = 'application/json'
- response = req.get_response(self.app)
- self.assertEqual(200, response.status_int)
-
- def test_types(self):
- req = fakes.HTTPRequest.blank('/fake/types')
- req.method = 'GET'
- req.content_type = 'application/json'
- response = req.get_response(self.app)
- self.assertEqual(200, response.status_int)
-
- def test_snapshots(self):
- req = fakes.HTTPRequest.blank('/fake/snapshots')
- req.method = 'GET'
- req.content_type = 'application/json'
- response = req.get_response(self.app)
- self.assertEqual(200, response.status_int)
-
- def test_snapshots_detail(self):
- req = fakes.HTTPRequest.blank('/fake/snapshots/detail')
- req.method = 'GET'
- req.content_type = 'application/json'
- response = req.get_response(self.app)
- self.assertEqual(200, response.status_int)
diff --git a/nova/tests/api/openstack/volume/test_snapshots.py b/nova/tests/api/openstack/volume/test_snapshots.py
deleted file mode 100644
index c6e703f83c..0000000000
--- a/nova/tests/api/openstack/volume/test_snapshots.py
+++ /dev/null
@@ -1,299 +0,0 @@
-# Copyright 2011 Denali Systems, Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from lxml import etree
-import webob
-
-from nova.api.openstack.volume import snapshots
-from nova import db
-from nova import exception
-from nova import flags
-from nova.openstack.common import log as logging
-from nova.openstack.common import timeutils
-from nova import test
-from nova.tests.api.openstack import fakes
-from nova import volume
-
-FLAGS = flags.FLAGS
-
-LOG = logging.getLogger(__name__)
-
-
-def _get_default_snapshot_param():
- return {
- 'id': 123,
- 'volume_id': 12,
- 'status': 'available',
- 'volume_size': 100,
- 'created_at': None,
- 'display_name': 'Default name',
- 'display_description': 'Default description',
- }
-
-
-def stub_snapshot_create(self, context, volume_id, name, description):
- snapshot = _get_default_snapshot_param()
- snapshot['volume_id'] = volume_id
- snapshot['display_name'] = name
- snapshot['display_description'] = description
- return snapshot
-
-
-def stub_snapshot_delete(self, context, snapshot):
- if snapshot['id'] != 123:
- raise exception.NotFound
-
-
-def stub_snapshot_get(self, context, snapshot_id):
- if snapshot_id != 123:
- raise exception.NotFound
-
- param = _get_default_snapshot_param()
- return param
-
-
-def stub_snapshot_get_all(self, context, search_opts=None):
- param = _get_default_snapshot_param()
- return [param]
-
-
-class SnapshotApiTest(test.TestCase):
- def setUp(self):
- super(SnapshotApiTest, self).setUp()
- self.controller = snapshots.SnapshotsController()
-
- self.stubs.Set(db, 'snapshot_get_all_by_project',
- fakes.stub_snapshot_get_all_by_project)
- self.stubs.Set(db, 'snapshot_get_all',
- fakes.stub_snapshot_get_all)
-
- def test_snapshot_create(self):
- self.stubs.Set(volume.api.API, "create_snapshot", stub_snapshot_create)
- self.stubs.Set(volume.api.API, 'get', fakes.stub_volume_get)
- snapshot = {"volume_id": '12',
- "force": False,
- "display_name": "Snapshot Test Name",
- "display_description": "Snapshot Test Desc"}
- body = dict(snapshot=snapshot)
- req = fakes.HTTPRequest.blank('/v1/snapshots')
- resp_dict = self.controller.create(req, body)
-
- self.assertTrue('snapshot' in resp_dict)
- self.assertEqual(resp_dict['snapshot']['display_name'],
- snapshot['display_name'])
- self.assertEqual(resp_dict['snapshot']['display_description'],
- snapshot['display_description'])
-
- def test_snapshot_create_force(self):
- self.stubs.Set(volume.api.API, "create_snapshot_force",
- stub_snapshot_create)
- self.stubs.Set(volume.api.API, 'get', fakes.stub_volume_get)
- snapshot = {"volume_id": '12',
- "force": True,
- "display_name": "Snapshot Test Name",
- "display_description": "Snapshot Test Desc"}
- body = dict(snapshot=snapshot)
- req = fakes.HTTPRequest.blank('/v1/snapshots')
- resp_dict = self.controller.create(req, body)
-
- self.assertTrue('snapshot' in resp_dict)
- self.assertEqual(resp_dict['snapshot']['display_name'],
- snapshot['display_name'])
- self.assertEqual(resp_dict['snapshot']['display_description'],
- snapshot['display_description'])
-
- # Test invalid force paramter
- snapshot = {"volume_id": 12,
- "force": '**&&^^%%$$##@@'}
- body = dict(snapshot=snapshot)
- req = fakes.HTTPRequest.blank('/v1/snapshots')
- self.assertRaises(exception.InvalidParameterValue,
- self.controller.create,
- req,
- body)
-
- def test_snapshot_create_nonexistent_volume_id(self):
- self.stubs.Set(volume.api.API, 'get', fakes.stub_volume_get_notfound)
-
- snapshot = {"volume_id": 13,
- "force": False,
- "display_name": "Snapshot Test Name",
- "display_description": "Snapshot Test Desc"}
- body = dict(snapshot=snapshot)
- req = fakes.HTTPRequest.blank('/v1/snapshots')
- self.assertRaises(webob.exc.HTTPNotFound,
- self.controller.create,
- req,
- body)
-
- def test_snapshot_delete(self):
- self.stubs.Set(volume.api.API, "get_snapshot", stub_snapshot_get)
- self.stubs.Set(volume.api.API, "delete_snapshot", stub_snapshot_delete)
-
- snapshot_id = 123
- req = fakes.HTTPRequest.blank('/v1/snapshots/%d' % snapshot_id)
- resp = self.controller.delete(req, snapshot_id)
- self.assertEqual(resp.status_int, 202)
-
- def test_snapshot_delete_invalid_id(self):
- self.stubs.Set(volume.api.API, "delete_snapshot", stub_snapshot_delete)
- snapshot_id = 234
- req = fakes.HTTPRequest.blank('/v1/snapshots/%d' % snapshot_id)
- self.assertRaises(webob.exc.HTTPNotFound,
- self.controller.delete,
- req,
- snapshot_id)
-
- def test_snapshot_show(self):
- self.stubs.Set(volume.api.API, "get_snapshot", stub_snapshot_get)
- req = fakes.HTTPRequest.blank('/v1/snapshots/123')
- resp_dict = self.controller.show(req, 123)
-
- self.assertTrue('snapshot' in resp_dict)
- self.assertEqual(resp_dict['snapshot']['id'], '123')
-
- def test_snapshot_show_invalid_id(self):
- snapshot_id = 234
- req = fakes.HTTPRequest.blank('/v1/snapshots/%d' % snapshot_id)
- self.assertRaises(webob.exc.HTTPNotFound,
- self.controller.show,
- req,
- snapshot_id)
-
- def test_snapshot_detail(self):
- self.stubs.Set(volume.api.API, "get_all_snapshots",
- stub_snapshot_get_all)
- req = fakes.HTTPRequest.blank('/v1/snapshots/detail')
- resp_dict = self.controller.detail(req)
-
- self.assertTrue('snapshots' in resp_dict)
- resp_snapshots = resp_dict['snapshots']
- self.assertEqual(len(resp_snapshots), 1)
-
- resp_snapshot = resp_snapshots.pop()
- self.assertEqual(resp_snapshot['id'], '123')
-
- def test_admin_list_snapshots_limited_to_project(self):
- req = fakes.HTTPRequest.blank('/v1/fake/snapshots',
- use_admin_context=True)
- res = self.controller.index(req)
-
- self.assertTrue('snapshots' in res)
- self.assertEqual(1, len(res['snapshots']))
-
- def test_admin_list_snapshots_all_tenants(self):
- req = fakes.HTTPRequest.blank('/v1/fake/snapshots?all_tenants=1',
- use_admin_context=True)
- res = self.controller.index(req)
- self.assertTrue('snapshots' in res)
- self.assertEqual(3, len(res['snapshots']))
-
- def test_all_tenants_non_admin_gets_all_tenants(self):
- req = fakes.HTTPRequest.blank('/v1/fake/snapshots?all_tenants=1')
- res = self.controller.index(req)
- self.assertTrue('snapshots' in res)
- self.assertEqual(1, len(res['snapshots']))
-
- def test_non_admin_get_by_project(self):
- req = fakes.HTTPRequest.blank('/v1/fake/snapshots')
- res = self.controller.index(req)
- self.assertTrue('snapshots' in res)
- self.assertEqual(1, len(res['snapshots']))
-
-
-class SnapshotSerializerTest(test.TestCase):
- def _verify_snapshot(self, snap, tree):
- self.assertEqual(tree.tag, 'snapshot')
-
- for attr in ('id', 'status', 'size', 'created_at',
- 'display_name', 'display_description', 'volume_id'):
- self.assertEqual(str(snap[attr]), tree.get(attr))
-
- def test_snapshot_show_create_serializer(self):
- serializer = snapshots.SnapshotTemplate()
- raw_snapshot = dict(
- id='snap_id',
- status='snap_status',
- size=1024,
- created_at=timeutils.utcnow(),
- display_name='snap_name',
- display_description='snap_desc',
- volume_id='vol_id',
- )
- text = serializer.serialize(dict(snapshot=raw_snapshot))
-
- print text
- tree = etree.fromstring(text)
-
- self._verify_snapshot(raw_snapshot, tree)
-
- def test_snapshot_index_detail_serializer(self):
- serializer = snapshots.SnapshotsTemplate()
- raw_snapshots = [dict(
- id='snap1_id',
- status='snap1_status',
- size=1024,
- created_at=timeutils.utcnow(),
- display_name='snap1_name',
- display_description='snap1_desc',
- volume_id='vol1_id',
- ),
- dict(
- id='snap2_id',
- status='snap2_status',
- size=1024,
- created_at=timeutils.utcnow(),
- display_name='snap2_name',
- display_description='snap2_desc',
- volume_id='vol2_id',
- )]
- text = serializer.serialize(dict(snapshots=raw_snapshots))
-
- print text
- tree = etree.fromstring(text)
-
- self.assertEqual('snapshots', tree.tag)
- self.assertEqual(len(raw_snapshots), len(tree))
- for idx, child in enumerate(tree):
- self._verify_snapshot(raw_snapshots[idx], child)
-
-
-class SnapshotsUnprocessableEntityTestCase(test.TestCase):
-
- """
- Tests of places we throw 422 Unprocessable Entity from
- """
-
- def setUp(self):
- super(SnapshotsUnprocessableEntityTestCase, self).setUp()
- self.controller = snapshots.SnapshotsController()
-
- def _unprocessable_snapshot_create(self, body):
- req = fakes.HTTPRequest.blank('/v2/fake/snapshots')
- req.method = 'POST'
-
- self.assertRaises(webob.exc.HTTPUnprocessableEntity,
- self.controller.create, req, body)
-
- def test_create_no_body(self):
- self._unprocessable_snapshot_create(body=None)
-
- def test_create_missing_snapshot(self):
- body = {'foo': {'a': 'b'}}
- self._unprocessable_snapshot_create(body=body)
-
- def test_create_malformed_entity(self):
- body = {'snapshot': 'string'}
- self._unprocessable_snapshot_create(body=body)
diff --git a/nova/tests/api/openstack/volume/test_types.py b/nova/tests/api/openstack/volume/test_types.py
deleted file mode 100644
index e7aa4cc0ab..0000000000
--- a/nova/tests/api/openstack/volume/test_types.py
+++ /dev/null
@@ -1,194 +0,0 @@
-# Copyright 2011 OpenStack LLC.
-# aLL Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from lxml import etree
-import webob
-
-from nova.api.openstack.volume import types
-from nova.api.openstack.volume.views import types as views_types
-from nova import exception
-from nova.openstack.common import timeutils
-from nova import test
-from nova.tests.api.openstack import fakes
-from nova.volume import volume_types
-
-
-def stub_volume_type(id):
- specs = {
- "key1": "value1",
- "key2": "value2",
- "key3": "value3",
- "key4": "value4",
- "key5": "value5"}
- return dict(id=id, name='vol_type_%s' % str(id), extra_specs=specs)
-
-
-def return_volume_types_get_all_types(context):
- return dict(vol_type_1=stub_volume_type(1),
- vol_type_2=stub_volume_type(2),
- vol_type_3=stub_volume_type(3))
-
-
-def return_empty_volume_types_get_all_types(context):
- return {}
-
-
-def return_volume_types_get_volume_type(context, id):
- if id == "777":
- raise exception.VolumeTypeNotFound(volume_type_id=id)
- return stub_volume_type(int(id))
-
-
-def return_volume_types_get_by_name(context, name):
- if name == "777":
- raise exception.VolumeTypeNotFoundByName(volume_type_name=name)
- return stub_volume_type(int(name.split("_")[2]))
-
-
-class VolumeTypesApiTest(test.TestCase):
- def setUp(self):
- super(VolumeTypesApiTest, self).setUp()
- self.controller = types.VolumeTypesController()
-
- def test_volume_types_index(self):
- self.stubs.Set(volume_types, 'get_all_types',
- return_volume_types_get_all_types)
-
- req = fakes.HTTPRequest.blank('/v1/fake/types')
- res_dict = self.controller.index(req)
-
- self.assertEqual(3, len(res_dict['volume_types']))
-
- expected_names = ['vol_type_1', 'vol_type_2', 'vol_type_3']
- actual_names = map(lambda e: e['name'], res_dict['volume_types'])
- self.assertEqual(set(actual_names), set(expected_names))
- for entry in res_dict['volume_types']:
- self.assertEqual('value1', entry['extra_specs']['key1'])
-
- def test_volume_types_index_no_data(self):
- self.stubs.Set(volume_types, 'get_all_types',
- return_empty_volume_types_get_all_types)
-
- req = fakes.HTTPRequest.blank('/v1/fake/types')
- res_dict = self.controller.index(req)
-
- self.assertEqual(0, len(res_dict['volume_types']))
-
- def test_volume_types_show(self):
- self.stubs.Set(volume_types, 'get_volume_type',
- return_volume_types_get_volume_type)
-
- req = fakes.HTTPRequest.blank('/v1/fake/types/1')
- res_dict = self.controller.show(req, 1)
-
- self.assertEqual(1, len(res_dict))
- self.assertEqual('1', res_dict['volume_type']['id'])
- self.assertEqual('vol_type_1', res_dict['volume_type']['name'])
-
- def test_volume_types_show_not_found(self):
- self.stubs.Set(volume_types, 'get_volume_type',
- return_volume_types_get_volume_type)
-
- req = fakes.HTTPRequest.blank('/v1/fake/types/777')
- self.assertRaises(webob.exc.HTTPNotFound, self.controller.show,
- req, '777')
-
- def test_view_builder_show(self):
- view_builder = views_types.ViewBuilder()
-
- now = timeutils.isotime()
- raw_volume_type = dict(name='new_type',
- deleted=False,
- created_at=now,
- updated_at=now,
- extra_specs={},
- deleted_at=None,
- id=42)
-
- request = fakes.HTTPRequest.blank("/v1")
- output = view_builder.show(request, raw_volume_type)
-
- self.assertTrue('volume_type' in output)
- expected_volume_type = dict(name='new_type',
- extra_specs={},
- id=42)
- self.assertDictMatch(output['volume_type'], expected_volume_type)
-
- def test_view_builder_list(self):
- view_builder = views_types.ViewBuilder()
-
- now = timeutils.isotime()
- raw_volume_types = []
- for i in range(0, 10):
- raw_volume_types.append(dict(name='new_type',
- deleted=False,
- created_at=now,
- updated_at=now,
- extra_specs={},
- deleted_at=None,
- id=42 + i))
-
- request = fakes.HTTPRequest.blank("/v1")
- output = view_builder.index(request, raw_volume_types)
-
- self.assertTrue('volume_types' in output)
- for i in range(0, 10):
- expected_volume_type = dict(name='new_type',
- extra_specs={},
- id=42 + i)
- self.assertDictMatch(output['volume_types'][i],
- expected_volume_type)
-
-
-class VolumeTypesSerializerTest(test.TestCase):
- def _verify_volume_type(self, vtype, tree):
- self.assertEqual('volume_type', tree.tag)
- self.assertEqual(vtype['name'], tree.get('name'))
- self.assertEqual(str(vtype['id']), tree.get('id'))
- self.assertEqual(1, len(tree))
- extra_specs = tree[0]
- self.assertEqual('extra_specs', extra_specs.tag)
- seen = set(vtype['extra_specs'].keys())
- for child in extra_specs:
- self.assertTrue(child.tag in seen)
- self.assertEqual(vtype['extra_specs'][child.tag], child.text)
- seen.remove(child.tag)
- self.assertEqual(len(seen), 0)
-
- def test_index_serializer(self):
- serializer = types.VolumeTypesTemplate()
-
- # Just getting some input data
- vtypes = return_volume_types_get_all_types(None)
- text = serializer.serialize({'volume_types': vtypes.values()})
-
- tree = etree.fromstring(text)
-
- self.assertEqual('volume_types', tree.tag)
- self.assertEqual(len(vtypes), len(tree))
- for child in tree:
- name = child.get('name')
- self.assertTrue(name in vtypes)
- self._verify_volume_type(vtypes[name], child)
-
- def test_voltype_serializer(self):
- serializer = types.VolumeTypeTemplate()
-
- vtype = stub_volume_type(1)
- text = serializer.serialize(dict(volume_type=vtype))
-
- tree = etree.fromstring(text)
-
- self._verify_volume_type(vtype, tree)
diff --git a/nova/tests/api/openstack/volume/test_volumes.py b/nova/tests/api/openstack/volume/test_volumes.py
deleted file mode 100644
index e7a0e66f30..0000000000
--- a/nova/tests/api/openstack/volume/test_volumes.py
+++ /dev/null
@@ -1,602 +0,0 @@
-# Copyright 2013 Josh Durgin
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import datetime
-
-from lxml import etree
-import webob
-
-from nova.api.openstack.volume import extensions
-from nova.api.openstack.volume import volumes
-from nova import db
-from nova import exception
-from nova import flags
-from nova.openstack.common import timeutils
-from nova import test
-from nova.tests.api.openstack import fakes
-from nova.tests.image import fake as fake_image
-from nova.volume import api as volume_api
-
-
-FLAGS = flags.FLAGS
-
-TEST_SNAPSHOT_UUID = '00000000-0000-0000-0000-000000000001'
-
-
-def stub_snapshot_get(self, context, snapshot_id):
- if snapshot_id != TEST_SNAPSHOT_UUID:
- raise exception.NotFound
-
- return {
- 'id': snapshot_id,
- 'volume_id': 12,
- 'status': 'available',
- 'volume_size': 100,
- 'created_at': None,
- 'display_name': 'Default name',
- 'display_description': 'Default description',
- }
-
-
-class VolumeApiTest(test.TestCase):
- def setUp(self):
- super(VolumeApiTest, self).setUp()
- self.ext_mgr = extensions.ExtensionManager()
- self.ext_mgr.extensions = {}
- self.controller = volumes.VolumeController(self.ext_mgr)
-
- self.stubs.Set(db, 'volume_get_all', fakes.stub_volume_get_all)
- self.stubs.Set(db, 'volume_get_all_by_project',
- fakes.stub_volume_get_all_by_project)
- self.stubs.Set(volume_api.API, 'get', fakes.stub_volume_get)
- self.stubs.Set(volume_api.API, 'delete', fakes.stub_volume_delete)
-
- def test_volume_create(self):
- self.stubs.Set(volume_api.API, "create", fakes.stub_volume_create)
-
- vol = {"size": 100,
- "display_name": "Volume Test Name",
- "display_description": "Volume Test Desc",
- "availability_zone": "zone1:host1"}
- body = {"volume": vol}
- req = fakes.HTTPRequest.blank('/v1/volumes')
- res = self.controller.create(req, body)
- expected = {'volume': {'status': 'fakestatus',
- 'display_description': 'Volume Test Desc',
- 'availability_zone': 'zone1:host1',
- 'display_name': 'Volume Test Name',
- 'attachments': [{'device': '/',
- 'server_id': 'fakeuuid',
- 'id': '1',
- 'volume_id': '1'}],
- 'volume_type': 'vol_type_name',
- 'snapshot_id': None,
- 'metadata': {},
- 'id': '1',
- 'created_at': datetime.datetime(1999, 1, 1,
- 1, 1, 1),
- 'size': 100}}
- self.assertEqual(res.obj, expected)
- self.assertEqual(res.code, 200)
- self.assertTrue('location' in res.headers)
-
- def test_volume_creation_fails_with_bad_size(self):
- vol = {"size": '',
- "display_name": "Volume Test Name",
- "display_description": "Volume Test Desc",
- "availability_zone": "zone1:host1"}
- body = {"volume": vol}
- req = fakes.HTTPRequest.blank('/v1/volumes')
- self.assertRaises(exception.InvalidInput,
- self.controller.create,
- req,
- body)
-
- def test_volume_create_no_body(self):
- body = {}
- req = fakes.HTTPRequest.blank('/v1/volumes')
- self.assertRaises(webob.exc.HTTPUnprocessableEntity,
- self.controller.create,
- req,
- body)
-
- def test_volume_create_with_image_id(self):
- self.stubs.Set(volume_api.API, "create", fakes.stub_volume_create)
- self.ext_mgr.extensions = {'os-image-create': 'fake'}
- vol = {"size": '1',
- "display_name": "Volume Test Name",
- "display_description": "Volume Test Desc",
- "availability_zone": "nova",
- "imageRef": 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'}
- expected = {'volume': {'status': 'fakestatus',
- 'display_description': 'Volume Test Desc',
- 'availability_zone': 'nova',
- 'display_name': 'Volume Test Name',
- 'attachments': [{'device': '/',
- 'server_id': 'fakeuuid',
- 'id': '1',
- 'volume_id': '1'}],
- 'volume_type': 'vol_type_name',
- 'image_id': 'c905cedb-7281-47e4-8a62-f26bc5fc4c77',
- 'snapshot_id': None,
- 'metadata': {},
- 'id': '1',
- 'created_at': datetime.datetime(1999, 1, 1,
- 1, 1, 1),
- 'size': '1'}
- }
- body = {"volume": vol}
- req = fakes.HTTPRequest.blank('/v1/volumes')
- res = self.controller.create(req, body)
- self.maxDiff = 4096
- self.assertEqual(res.obj, expected)
-
- def test_volume_create_with_image_id_and_snapshot_id(self):
- self.stubs.Set(volume_api.API, "create", fakes.stub_volume_create)
- self.stubs.Set(volume_api.API, "get_snapshot", stub_snapshot_get)
- self.ext_mgr.extensions = {'os-image-create': 'fake'}
- vol = {"size": '1',
- "display_name": "Volume Test Name",
- "display_description": "Volume Test Desc",
- "availability_zone": "nova",
- "imageRef": 'c905cedb-7281-47e4-8a62-f26bc5fc4c77',
- "snapshot_id": TEST_SNAPSHOT_UUID}
- body = {"volume": vol}
- req = fakes.HTTPRequest.blank('/v1/volumes')
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller.create,
- req,
- body)
-
- def test_volume_create_with_image_id_is_integer(self):
- self.stubs.Set(volume_api.API, "create", fakes.stub_volume_create)
- self.ext_mgr.extensions = {'os-image-create': 'fake'}
- vol = {"size": '1',
- "display_name": "Volume Test Name",
- "display_description": "Volume Test Desc",
- "availability_zone": "nova",
- "imageRef": 1234}
- body = {"volume": vol}
- req = fakes.HTTPRequest.blank('/v1/volumes')
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller.create,
- req,
- body)
-
- def test_volume_create_with_image_id_not_uuid_format(self):
- self.stubs.Set(volume_api.API, "create", fakes.stub_volume_create)
- self.ext_mgr.extensions = {'os-image-create': 'fake'}
- vol = {"size": '1',
- "display_name": "Volume Test Name",
- "display_description": "Volume Test Desc",
- "availability_zone": "nova",
- "imageRef": '12345'}
- body = {"volume": vol}
- req = fakes.HTTPRequest.blank('/v1/volumes')
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller.create,
- req,
- body)
-
- def test_volume_list(self):
- self.stubs.Set(volume_api.API, 'get_all',
- fakes.stub_volume_get_all_by_project)
-
- req = fakes.HTTPRequest.blank('/v1/volumes')
- res_dict = self.controller.index(req)
- expected = {'volumes': [{'status': 'fakestatus',
- 'display_description': 'displaydesc',
- 'availability_zone': 'fakeaz',
- 'display_name': 'displayname',
- 'attachments': [{'device': '/',
- 'server_id': 'fakeuuid',
- 'id': '1',
- 'volume_id': '1'}],
- 'volume_type': 'vol_type_name',
- 'snapshot_id': None,
- 'metadata': {},
- 'id': '1',
- 'created_at': datetime.datetime(1999, 1, 1,
- 1, 1, 1),
- 'size': 1}]}
- self.maxDiff = None
- self.assertEqual(res_dict, expected)
-
- def test_volume_list_detail(self):
- self.stubs.Set(volume_api.API, 'get_all',
- fakes.stub_volume_get_all_by_project)
-
- req = fakes.HTTPRequest.blank('/v1/volumes/detail')
- res_dict = self.controller.index(req)
- expected = {'volumes': [{'status': 'fakestatus',
- 'display_description': 'displaydesc',
- 'availability_zone': 'fakeaz',
- 'display_name': 'displayname',
- 'attachments': [{'device': '/',
- 'server_id': 'fakeuuid',
- 'id': '1',
- 'volume_id': '1'}],
- 'volume_type': 'vol_type_name',
- 'snapshot_id': None,
- 'metadata': {},
- 'id': '1',
- 'created_at': datetime.datetime(1999, 1, 1,
- 1, 1, 1),
- 'size': 1}]}
- self.assertEqual(res_dict, expected)
-
- def test_volume_show(self):
- req = fakes.HTTPRequest.blank('/v1/volumes/1')
- res_dict = self.controller.show(req, '1')
- expected = {'volume': {'status': 'fakestatus',
- 'display_description': 'displaydesc',
- 'availability_zone': 'fakeaz',
- 'display_name': 'displayname',
- 'attachments': [{'device': '/',
- 'server_id': 'fakeuuid',
- 'id': '1',
- 'volume_id': '1'}],
- 'volume_type': 'vol_type_name',
- 'snapshot_id': None,
- 'metadata': {},
- 'id': '1',
- 'created_at': datetime.datetime(1999, 1, 1,
- 1, 1, 1),
- 'size': 1}}
- self.assertEqual(res_dict, expected)
-
- def test_volume_show_no_attachments(self):
- def stub_volume_get(self, context, volume_id):
- return fakes.stub_volume(volume_id, attach_status='detached')
-
- self.stubs.Set(volume_api.API, 'get', stub_volume_get)
-
- req = fakes.HTTPRequest.blank('/v1/volumes/1')
- res_dict = self.controller.show(req, '1')
- expected = {'volume': {'status': 'fakestatus',
- 'display_description': 'displaydesc',
- 'availability_zone': 'fakeaz',
- 'display_name': 'displayname',
- 'attachments': [],
- 'volume_type': 'vol_type_name',
- 'snapshot_id': None,
- 'metadata': {},
- 'id': '1',
- 'created_at': datetime.datetime(1999, 1, 1,
- 1, 1, 1),
- 'size': 1}}
- self.assertEqual(res_dict, expected)
-
- def test_volume_show_no_volume(self):
- self.stubs.Set(volume_api.API, "get", fakes.stub_volume_get_notfound)
-
- req = fakes.HTTPRequest.blank('/v1/volumes/1')
- self.assertRaises(webob.exc.HTTPNotFound,
- self.controller.show,
- req,
- 1)
-
- def test_volume_delete(self):
- req = fakes.HTTPRequest.blank('/v1/volumes/1')
- resp = self.controller.delete(req, 1)
- self.assertEqual(resp.status_int, 202)
-
- def test_volume_delete_no_volume(self):
- self.stubs.Set(volume_api.API, "get", fakes.stub_volume_get_notfound)
-
- req = fakes.HTTPRequest.blank('/v1/volumes/1')
- self.assertRaises(webob.exc.HTTPNotFound,
- self.controller.delete,
- req,
- 1)
-
- def test_admin_list_volumes_limited_to_project(self):
- req = fakes.HTTPRequest.blank('/v1/fake/volumes',
- use_admin_context=True)
- res = self.controller.index(req)
-
- self.assertTrue('volumes' in res)
- self.assertEqual(1, len(res['volumes']))
-
- def test_admin_list_volumes_all_tenants(self):
- req = fakes.HTTPRequest.blank('/v1/fake/volumes?all_tenants=1',
- use_admin_context=True)
- res = self.controller.index(req)
- self.assertTrue('volumes' in res)
- self.assertEqual(3, len(res['volumes']))
-
- def test_all_tenants_non_admin_gets_all_tenants(self):
- req = fakes.HTTPRequest.blank('/v1/fake/volumes?all_tenants=1')
- res = self.controller.index(req)
- self.assertTrue('volumes' in res)
- self.assertEqual(1, len(res['volumes']))
-
- def test_non_admin_get_by_project(self):
- req = fakes.HTTPRequest.blank('/v1/fake/volumes')
- res = self.controller.index(req)
- self.assertTrue('volumes' in res)
- self.assertEqual(1, len(res['volumes']))
-
-
-class VolumeSerializerTest(test.TestCase):
- def _verify_volume_attachment(self, attach, tree):
- for attr in ('id', 'volume_id', 'server_id', 'device'):
- self.assertEqual(str(attach[attr]), tree.get(attr))
-
- def _verify_volume(self, vol, tree):
- self.assertEqual(tree.tag, 'volume')
-
- for attr in ('id', 'status', 'size', 'availability_zone', 'created_at',
- 'display_name', 'display_description', 'volume_type',
- 'snapshot_id'):
- self.assertEqual(str(vol[attr]), tree.get(attr))
-
- for child in tree:
- self.assertTrue(child.tag in ('attachments', 'metadata'))
- if child.tag == 'attachments':
- self.assertEqual(1, len(child))
- self.assertEqual('attachment', child[0].tag)
- self._verify_volume_attachment(vol['attachments'][0], child[0])
- elif child.tag == 'metadata':
- not_seen = set(vol['metadata'].keys())
- for gr_child in child:
- self.assertTrue(gr_child.get("key") in not_seen)
- self.assertEqual(str(vol['metadata'][gr_child.get("key")]),
- gr_child.text)
- not_seen.remove(gr_child.get("key"))
- self.assertEqual(0, len(not_seen))
-
- def test_volume_show_create_serializer(self):
- serializer = volumes.VolumeTemplate()
- raw_volume = dict(
- id='vol_id',
- status='vol_status',
- size=1024,
- availability_zone='vol_availability',
- created_at=timeutils.utcnow(),
- attachments=[dict(
- id='vol_id',
- volume_id='vol_id',
- server_id='instance_uuid',
- device='/foo')],
- display_name='vol_name',
- display_description='vol_desc',
- volume_type='vol_type',
- snapshot_id='snap_id',
- metadata=dict(
- foo='bar',
- baz='quux',
- ),
- )
- text = serializer.serialize(dict(volume=raw_volume))
-
- print text
- tree = etree.fromstring(text)
-
- self._verify_volume(raw_volume, tree)
-
- def test_volume_index_detail_serializer(self):
- serializer = volumes.VolumesTemplate()
- raw_volumes = [dict(
- id='vol1_id',
- status='vol1_status',
- size=1024,
- availability_zone='vol1_availability',
- created_at=timeutils.utcnow(),
- attachments=[dict(
- id='vol1_id',
- volume_id='vol1_id',
- server_id='instance_uuid',
- device='/foo1')],
- display_name='vol1_name',
- display_description='vol1_desc',
- volume_type='vol1_type',
- snapshot_id='snap1_id',
- metadata=dict(
- foo='vol1_foo',
- bar='vol1_bar',
- ),
- ),
- dict(
- id='vol2_id',
- status='vol2_status',
- size=1024,
- availability_zone='vol2_availability',
- created_at=timeutils.utcnow(),
- attachments=[dict(
- id='vol2_id',
- volume_id='vol2_id',
- server_id='instance_uuid',
- device='/foo2')],
- display_name='vol2_name',
- display_description='vol2_desc',
- volume_type='vol2_type',
- snapshot_id='snap2_id',
- metadata=dict(
- foo='vol2_foo',
- bar='vol2_bar',
- ),
- )]
- text = serializer.serialize(dict(volumes=raw_volumes))
-
- print text
- tree = etree.fromstring(text)
-
- self.assertEqual('volumes', tree.tag)
- self.assertEqual(len(raw_volumes), len(tree))
- for idx, child in enumerate(tree):
- self._verify_volume(raw_volumes[idx], child)
-
-
-class TestVolumeCreateRequestXMLDeserializer(test.TestCase):
-
- def setUp(self):
- super(TestVolumeCreateRequestXMLDeserializer, self).setUp()
- self.deserializer = volumes.CreateDeserializer()
-
- def test_minimal_volume(self):
- self_request = """
-<volume xmlns="http://docs.openstack.org/compute/api/v1.1"
- size="1"></volume>"""
- request = self.deserializer.deserialize(self_request)
- expected = {
- "volume": {
- "size": "1",
- },
- }
- self.assertEquals(request['body'], expected)
-
- def test_display_name(self):
- self_request = """
-<volume xmlns="http://docs.openstack.org/compute/api/v1.1"
- size="1"
- display_name="Volume-xml"></volume>"""
- request = self.deserializer.deserialize(self_request)
- expected = {
- "volume": {
- "size": "1",
- "display_name": "Volume-xml",
- },
- }
- self.assertEquals(request['body'], expected)
-
- def test_display_description(self):
- self_request = """
-<volume xmlns="http://docs.openstack.org/compute/api/v1.1"
- size="1"
- display_name="Volume-xml"
- display_description="description"></volume>"""
- request = self.deserializer.deserialize(self_request)
- expected = {
- "volume": {
- "size": "1",
- "display_name": "Volume-xml",
- "display_description": "description",
- },
- }
- self.assertEquals(request['body'], expected)
-
- def test_volume_type(self):
- self_request = """
-<volume xmlns="http://docs.openstack.org/compute/api/v1.1"
- size="1"
- display_name="Volume-xml"
- display_description="description"
- volume_type="289da7f8-6440-407c-9fb4-7db01ec49164"></volume>"""
- request = self.deserializer.deserialize(self_request)
- expected = {
- "volume": {
- "display_name": "Volume-xml",
- "size": "1",
- "display_name": "Volume-xml",
- "display_description": "description",
- "volume_type": "289da7f8-6440-407c-9fb4-7db01ec49164",
- },
- }
- self.assertEquals(request['body'], expected)
-
- def test_availability_zone(self):
- self_request = """
-<volume xmlns="http://docs.openstack.org/compute/api/v1.1"
- size="1"
- display_name="Volume-xml"
- display_description="description"
- volume_type="289da7f8-6440-407c-9fb4-7db01ec49164"
- availability_zone="us-east1"></volume>"""
- request = self.deserializer.deserialize(self_request)
- expected = {
- "volume": {
- "size": "1",
- "display_name": "Volume-xml",
- "display_description": "description",
- "volume_type": "289da7f8-6440-407c-9fb4-7db01ec49164",
- "availability_zone": "us-east1",
- },
- }
- self.assertEquals(request['body'], expected)
-
- def test_metadata(self):
- self_request = """
-<volume xmlns="http://docs.openstack.org/compute/api/v1.1"
- display_name="Volume-xml"
- size="1">
- <metadata><meta key="Type">work</meta></metadata></volume>"""
- request = self.deserializer.deserialize(self_request)
- expected = {
- "volume": {
- "display_name": "Volume-xml",
- "size": "1",
- "metadata": {
- "Type": "work",
- },
- },
- }
- self.assertEquals(request['body'], expected)
-
- def test_full_volume(self):
- self_request = """
-<volume xmlns="http://docs.openstack.org/compute/api/v1.1"
- size="1"
- display_name="Volume-xml"
- display_description="description"
- volume_type="289da7f8-6440-407c-9fb4-7db01ec49164"
- availability_zone="us-east1">
- <metadata><meta key="Type">work</meta></metadata></volume>"""
- request = self.deserializer.deserialize(self_request)
- expected = {
- "volume": {
- "size": "1",
- "display_name": "Volume-xml",
- "display_description": "description",
- "volume_type": "289da7f8-6440-407c-9fb4-7db01ec49164",
- "availability_zone": "us-east1",
- "metadata": {
- "Type": "work",
- },
- },
- }
- self.maxDiff = None
- self.assertEquals(request['body'], expected)
-
-
-class VolumesUnprocessableEntityTestCase(test.TestCase):
-
- """
- Tests of places we throw 422 Unprocessable Entity from
- """
-
- def setUp(self):
- super(VolumesUnprocessableEntityTestCase, self).setUp()
- self.ext_mgr = extensions.ExtensionManager()
- self.ext_mgr.extensions = {}
- self.controller = volumes.VolumeController(self.ext_mgr)
-
- def _unprocessable_volume_create(self, body):
- req = fakes.HTTPRequest.blank('/v2/fake/volumes')
- req.method = 'POST'
-
- self.assertRaises(webob.exc.HTTPUnprocessableEntity,
- self.controller.create, req, body)
-
- def test_create_no_body(self):
- self._unprocessable_volume_create(body=None)
-
- def test_create_missing_volume(self):
- body = {'foo': {'a': 'b'}}
- self._unprocessable_volume_create(body=body)
-
- def test_create_malformed_entity(self):
- body = {'volume': 'string'}
- self._unprocessable_volume_create(body=body)
diff --git a/nova/tests/compute/test_compute.py b/nova/tests/compute/test_compute.py
index d989c488e0..72886d1a6d 100644
--- a/nova/tests/compute/test_compute.py
+++ b/nova/tests/compute/test_compute.py
@@ -59,7 +59,7 @@ from nova.tests.db.fakes import FakeModel
from nova.tests import fake_network
from nova.tests.image import fake as fake_image
from nova import utils
-import nova.volume
+from nova.volume import cinder
QUOTAS = quota.QUOTAS
@@ -653,9 +653,9 @@ class ComputeTestCase(BaseTestCase):
def fake_volume_get(self, context, volume_id):
return {'id': volume_id}
- self.stubs.Set(nova.volume.api.API, 'get', fake_volume_get)
- self.stubs.Set(nova.volume.api.API, 'check_attach', fake_check_attach)
- self.stubs.Set(nova.volume.api.API, 'reserve_volume',
+ self.stubs.Set(cinder.API, 'get', fake_volume_get)
+ self.stubs.Set(cinder.API, 'check_attach', fake_check_attach)
+ self.stubs.Set(cinder.API, 'reserve_volume',
fake_reserve_volume)
self.compute_api.attach_volume(self.context, instance, 1,
@@ -2192,11 +2192,16 @@ class ComputeTestCase(BaseTestCase):
topic = rpc.queue_get_for(c, FLAGS.compute_topic, instance['host'])
# creating volume testdata
- volume_id = db.volume_create(c, {'size': 1})['id']
+ volume_id = 'fake'
values = {'instance_uuid': inst_uuid, 'device_name': '/dev/vdc',
'delete_on_termination': False, 'volume_id': volume_id}
db.block_device_mapping_create(c, values)
+ def fake_volume_get(self, context, volume_id):
+ return {'id': volume_id}
+
+ self.stubs.Set(cinder.API, 'get', fake_volume_get)
+
# creating mocks
self.mox.StubOutWithMock(rpc, 'call')
@@ -2242,7 +2247,6 @@ class ComputeTestCase(BaseTestCase):
for bdms in db.block_device_mapping_get_all_by_instance(
c, inst_uuid):
db.block_device_mapping_destroy(c, bdms['id'])
- db.volume_destroy(c, volume_id)
db.instance_destroy(c, inst_uuid)
def test_live_migration_works_correctly(self):
@@ -2291,7 +2295,6 @@ class ComputeTestCase(BaseTestCase):
db.instance_update(c, inst_uuid,
{'task_state': task_states.MIGRATING,
'power_state': power_state.PAUSED})
- v_ref = db.volume_create(c, {'size': 1, 'instance_id': inst_id})
fix_addr = db.fixed_ip_create(c, {'address': '1.1.1.1',
'instance_uuid': inst_ref['uuid']})
fix_ref = db.fixed_ip_get_by_address(c, fix_addr)
@@ -2326,7 +2329,6 @@ class ComputeTestCase(BaseTestCase):
# cleanup
db.instance_destroy(c, inst_uuid)
- db.volume_destroy(c, v_ref['id'])
db.floating_ip_destroy(c, flo_addr)
def test_run_kill_vm(self):
@@ -3376,8 +3378,7 @@ class ComputeAPITestCase(BaseTestCase):
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
- volume_id = db.volume_create(context.get_admin_context(),
- {'size': 1})['id']
+ volume_id = 'fake'
volume = {'instance_uuid': instance['uuid'],
'device_name': '/dev/vdc',
'delete_on_termination': False,
@@ -3402,8 +3403,7 @@ class ComputeAPITestCase(BaseTestCase):
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
- volume_id = db.volume_create(context.get_admin_context(),
- {'size': 1})['id']
+ volume_id = 'fake'
volume = {'instance_uuid': instance['uuid'],
'device_name': '/dev/vdc',
'delete_on_termination': False,
@@ -4580,9 +4580,9 @@ class ComputeAPITestCase(BaseTestCase):
def fake_rpc_attach_volume(self, context, **kwargs):
called['fake_rpc_attach_volume'] = True
- self.stubs.Set(nova.volume.api.API, 'get', fake_volume_get)
- self.stubs.Set(nova.volume.api.API, 'check_attach', fake_check_attach)
- self.stubs.Set(nova.volume.api.API, 'reserve_volume',
+ self.stubs.Set(cinder.API, 'get', fake_volume_get)
+ self.stubs.Set(cinder.API, 'check_attach', fake_check_attach)
+ self.stubs.Set(cinder.API, 'reserve_volume',
fake_reserve_volume)
self.stubs.Set(compute_rpcapi.ComputeAPI, 'attach_volume',
fake_rpc_attach_volume)
@@ -4611,9 +4611,9 @@ class ComputeAPITestCase(BaseTestCase):
def fake_rpc_attach_volume(self, context, **kwargs):
called['fake_rpc_attach_volume'] = True
- self.stubs.Set(nova.volume.api.API, 'get', fake_volume_get)
- self.stubs.Set(nova.volume.api.API, 'check_attach', fake_check_attach)
- self.stubs.Set(nova.volume.api.API, 'reserve_volume',
+ self.stubs.Set(cinder.API, 'get', fake_volume_get)
+ self.stubs.Set(cinder.API, 'check_attach', fake_check_attach)
+ self.stubs.Set(cinder.API, 'reserve_volume',
fake_reserve_volume)
self.stubs.Set(compute_rpcapi.ComputeAPI, 'attach_volume',
fake_rpc_attach_volume)
@@ -4623,37 +4623,35 @@ class ComputeAPITestCase(BaseTestCase):
admin = context.get_admin_context()
instance = self._create_fake_instance()
- # Create a volume and attach it to our instance
- volume_id = db.volume_create(admin, {'size': 1})['id']
+ volume_id = 'fake'
values = {'instance_uuid': instance['uuid'],
'device_name': '/dev/vdc',
'delete_on_termination': False,
'volume_id': volume_id,
}
db.block_device_mapping_create(admin, values)
- db.volume_attached(admin, volume_id, instance["uuid"],
- "/dev/vdc")
+
+ def fake_volume_get(self, context, volume):
+ return {'id': volume_id}
+ self.stubs.Set(cinder.API, "get", fake_volume_get)
# Stub out and record whether it gets detached
result = {"detached": False}
def fake_detach(self, context, volume):
result["detached"] = volume["id"] == volume_id
- self.stubs.Set(nova.volume.api.API, "detach", fake_detach)
+ self.stubs.Set(cinder.API, "detach", fake_detach)
+
+ def fake_terminate_connection(self, context, volume, connector):
+ return {}
+ self.stubs.Set(cinder.API, "terminate_connection",
+ fake_terminate_connection)
# Kill the instance and check that it was detached
self.compute.terminate_instance(admin, instance=instance)
self.assertTrue(result["detached"])
def test_inject_network_info(self):
- instance = self._create_fake_instance()
- self.compute_api.attach_volume(self.context, instance, 1, device=None)
- self.assertTrue(called.get('fake_check_attach'))
- self.assertTrue(called.get('fake_reserve_volume'))
- self.assertTrue(called.get('fake_reserve_volume'))
- self.assertTrue(called.get('fake_rpc_attach_volume'))
-
- def test_inject_network_info(self):
instance = self._create_fake_instance(params={'host': FLAGS.host})
self.compute.run_instance(self.context,
instance=jsonutils.to_primitive(instance))
diff --git a/nova/tests/fake_flags.py b/nova/tests/fake_flags.py
index 32f34b3e76..0b50d45e28 100644
--- a/nova/tests/fake_flags.py
+++ b/nova/tests/fake_flags.py
@@ -26,7 +26,6 @@ flags.DECLARE('iscsi_num_targets', 'nova.volume.driver')
flags.DECLARE('network_size', 'nova.network.manager')
flags.DECLARE('num_networks', 'nova.network.manager')
flags.DECLARE('policy_file', 'nova.policy')
-flags.DECLARE('volume_driver', 'nova.volume.manager')
def set_defaults(conf):
@@ -44,7 +43,6 @@ def set_defaults(conf):
conf.set_default('sqlite_synchronous', False)
conf.set_default('use_ipv6', True)
conf.set_default('verbose', True)
- conf.set_default('volume_driver', 'nova.volume.driver.FakeISCSIDriver')
conf.set_default('api_paste_config', '$state_path/etc/nova/api-paste.ini')
conf.set_default('rpc_response_timeout', 5)
conf.set_default('rpc_cast_timeout', 5)
diff --git a/nova/tests/fake_volume.py b/nova/tests/fake_volume.py
index 37aaa83b4b..54fd85fe54 100644
--- a/nova/tests/fake_volume.py
+++ b/nova/tests/fake_volume.py
@@ -27,21 +27,21 @@ class fake_volume():
instance_uuid = '4a3cd441-b9c2-11e1-afa6-0800200c9a66'
def __init__(self, size, name,
- description, id, snapshot,
+ description, volume_id, snapshot,
volume_type, metadata,
availability_zone):
snapshot_id = None
if snapshot is not None:
snapshot_id = snapshot['id']
- if id is None:
- id = str(utils.gen_uuid())
+ if volume_id is None:
+ volume_id = str(utils.gen_uuid())
self.vol = {
'created_at': timeutils.utcnow(),
'deleted_at': None,
'updated_at': timeutils.utcnow(),
'uuid': 'WTF',
'deleted': False,
- 'id': id,
+ 'id': volume_id,
'user_id': self.user_uuid,
'project_id': 'fake-project-id',
'snapshot_id': snapshot_id,
@@ -133,14 +133,15 @@ class API(object):
return v.vol
def create_with_kwargs(self, context, **kwargs):
+ volume_id = kwargs.get('volume_id', None)
v = fake_volume(kwargs['size'],
kwargs['name'],
kwargs['description'],
- str(kwargs.get('volume_id', None)),
+ str(volume_id),
None,
None,
None,
None)
if kwargs.get('status', None) is not None:
v.vol['status'] = kwargs['status']
if kwargs['host'] is not None:
diff --git a/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.json.tpl b/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.json.tpl
index f566a5020f..9581a0e7e7 100644
--- a/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.json.tpl
+++ b/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.json.tpl
@@ -329,14 +329,6 @@
"updated": "%(timestamp)s"
},
{
- "alias": "os-volume-types",
- "description": "%(text)s",
- "links": [],
- "name": "VolumeTypes",
- "namespace": "http://docs.openstack.org/compute/ext/volume_types/api/v1.1",
- "updated": "%(timestamp)s"
- },
- {
"alias": "os-volumes",
"description": "%(text)s",
"links": [],
diff --git a/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.xml.tpl b/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.xml.tpl
index 20e650d7c4..e8246aad8a 100644
--- a/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.xml.tpl
+++ b/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.xml.tpl
@@ -123,9 +123,6 @@
<extension alias="os-virtual-interfaces" updated="%(timestamp)s" namespace="http://docs.openstack.org/compute/ext/virtual_interfaces/api/v1.1" name="VirtualInterfaces">
<description>%(text)s</description>
</extension>
- <extension alias="os-volume-types" updated="%(timestamp)s" namespace="http://docs.openstack.org/compute/ext/volume_types/api/v1.1" name="VolumeTypes">
- <description>%(text)s</description>
- </extension>
<extension alias="os-volumes" updated="%(timestamp)s" namespace="http://docs.openstack.org/compute/ext/volumes/api/v1.1" name="Volumes">
<description>%(text)s</description>
</extension>
diff --git a/nova/tests/integrated/integrated_helpers.py b/nova/tests/integrated/integrated_helpers.py
index d810139204..f3bd944da3 100644
--- a/nova/tests/integrated/integrated_helpers.py
+++ b/nova/tests/integrated/integrated_helpers.py
@@ -75,7 +75,6 @@ class _IntegratedTestBase(test.TestCase):
# set up services
self.compute = self.start_service('compute')
self.scheduler = self.start_service('cert')
- self.volume = self.start_service('volume')
self.network = self.start_service('network')
self.scheduler = self.start_service('scheduler')
@@ -101,13 +100,11 @@ class _IntegratedTestBase(test.TestCase):
# Ensure tests only listen on localhost
f['ec2_listen'] = '127.0.0.1'
f['osapi_compute_listen'] = '127.0.0.1'
- f['osapi_volume_listen'] = '127.0.0.1'
f['metadata_listen'] = '127.0.0.1'
# Auto-assign ports to allow concurrent tests
f['ec2_listen_port'] = 0
f['osapi_compute_listen_port'] = 0
- f['osapi_volume_listen_port'] = 0
f['metadata_listen_port'] = 0
f['fake_network'] = True
diff --git a/nova/tests/integrated/test_volumes.py b/nova/tests/integrated/test_volumes.py
deleted file mode 100644
index fe70c3ce8f..0000000000
--- a/nova/tests/integrated/test_volumes.py
+++ /dev/null
@@ -1,181 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2011 Justin Santa Barbara
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import time
-import unittest
-
-from nova.openstack.common.log import logging
-from nova import service
-from nova.tests.integrated.api import client
-from nova.tests.integrated import integrated_helpers
-from nova.volume import driver
-
-
-LOG = logging.getLogger(__name__)
-
-
-class VolumesTest(integrated_helpers._IntegratedTestBase):
- def setUp(self):
- super(VolumesTest, self).setUp()
- driver.LoggingVolumeDriver.clear_logs()
-
- def _start_api_service(self):
- self.osapi = service.WSGIService("osapi_volume")
- self.osapi.start()
- self.auth_url = 'http://%s:%s/v1' % (self.osapi.host, self.osapi.port)
- LOG.warn(self.auth_url)
-
- def _get_flags(self):
- f = super(VolumesTest, self)._get_flags()
- f['use_local_volumes'] = False # Avoids calling local_path
- f['volume_driver'] = 'nova.volume.driver.LoggingVolumeDriver'
- return f
-
- def test_get_volumes_summary(self):
- """Simple check that listing volumes works."""
- volumes = self.api.get_volumes(False)
- for volume in volumes:
- LOG.debug("volume: %s" % volume)
-
- def test_get_volumes(self):
- """Simple check that listing volumes works."""
- volumes = self.api.get_volumes()
- for volume in volumes:
- LOG.debug("volume: %s" % volume)
-
- def _poll_while(self, volume_id, continue_states, max_retries=5):
- """Poll (briefly) while the state is in continue_states."""
- retries = 0
- while True:
- try:
- found_volume = self.api.get_volume(volume_id)
- except client.OpenStackApiNotFoundException:
- found_volume = None
- LOG.debug("Got 404, proceeding")
- break
-
- LOG.debug("Found %s" % found_volume)
-
- self.assertEqual(volume_id, found_volume['id'])
-
- if not found_volume['status'] in continue_states:
- break
-
- time.sleep(1)
- retries = retries + 1
- if retries > max_retries:
- break
- return found_volume
-
- def test_create_and_delete_volume(self):
- """Creates and deletes a volume."""
-
- # Create volume
- created_volume = self.api.post_volume({'volume': {'size': 1}})
- LOG.debug("created_volume: %s" % created_volume)
- self.assertTrue(created_volume['id'])
- created_volume_id = created_volume['id']
-
- # Check it's there
- found_volume = self.api.get_volume(created_volume_id)
- self.assertEqual(created_volume_id, found_volume['id'])
-
- # It should also be in the all-volume list
- volumes = self.api.get_volumes()
- volume_names = [volume['id'] for volume in volumes]
- self.assertTrue(created_volume_id in volume_names)
-
- # Wait (briefly) for creation. Delay is due to the 'message queue'
- found_volume = self._poll_while(created_volume_id, ['creating'])
-
- # It should be available...
- self.assertEqual('available', found_volume['status'])
-
- # Delete the volume
- self.api.delete_volume(created_volume_id)
-
- # Wait (briefly) for deletion. Delay is due to the 'message queue'
- found_volume = self._poll_while(created_volume_id, ['deleting'])
-
- # Should be gone
- self.assertFalse(found_volume)
-
- LOG.debug("Logs: %s" % driver.LoggingVolumeDriver.all_logs())
-
- create_actions = driver.LoggingVolumeDriver.logs_like(
- 'create_volume',
- id=created_volume_id)
- LOG.debug("Create_Actions: %s" % create_actions)
-
- self.assertEquals(1, len(create_actions))
- create_action = create_actions[0]
- self.assertEquals(create_action['id'], created_volume_id)
- self.assertEquals(create_action['availability_zone'], 'nova')
- self.assertEquals(create_action['size'], 1)
-
- export_actions = driver.LoggingVolumeDriver.logs_like(
- 'create_export',
- id=created_volume_id)
- self.assertEquals(1, len(export_actions))
- export_action = export_actions[0]
- self.assertEquals(export_action['id'], created_volume_id)
- self.assertEquals(export_action['availability_zone'], 'nova')
-
- delete_actions = driver.LoggingVolumeDriver.logs_like(
- 'delete_volume',
- id=created_volume_id)
- self.assertEquals(1, len(delete_actions))
- delete_action = export_actions[0]
- self.assertEquals(delete_action['id'], created_volume_id)
-
- def test_create_volume_with_metadata(self):
- """Creates a volume with metadata."""
-
- # Create volume
- metadata = {'key1': 'value1',
- 'key2': 'value2'}
- created_volume = self.api.post_volume(
- {'volume': {'size': 1,
- 'metadata': metadata}})
- LOG.debug("created_volume: %s" % created_volume)
- self.assertTrue(created_volume['id'])
- created_volume_id = created_volume['id']
-
- # Check it's there and metadata present
- found_volume = self.api.get_volume(created_volume_id)
- self.assertEqual(created_volume_id, found_volume['id'])
- self.assertEqual(metadata, found_volume['metadata'])
-
- def test_create_volume_in_availability_zone(self):
- """Creates a volume in availability_zone."""
-
- # Create volume
- availability_zone = 'zone1:host1'
- created_volume = self.api.post_volume(
- {'volume': {'size': 1,
- 'availability_zone': availability_zone}})
- LOG.debug("created_volume: %s" % created_volume)
- self.assertTrue(created_volume['id'])
- created_volume_id = created_volume['id']
-
- # Check it's there and availability zone present
- found_volume = self.api.get_volume(created_volume_id)
- self.assertEqual(created_volume_id, found_volume['id'])
- self.assertEqual(availability_zone, found_volume['availability_zone'])
-
-if __name__ == "__main__":
- unittest.main()
diff --git a/nova/tests/scheduler/test_scheduler.py b/nova/tests/scheduler/test_scheduler.py
index c6b2d7dd31..6a0e93b7d5 100644
--- a/nova/tests/scheduler/test_scheduler.py
+++ b/nova/tests/scheduler/test_scheduler.py
@@ -175,18 +175,6 @@ class SchedulerManagerTestCase(test.TestCase):
self.manager.run_instance(self.context, request_spec,
None, None, None, None, {})
- def test_create_volume_no_valid_host_puts_volume_in_error(self):
- self._mox_schedule_method_helper('schedule_create_volume')
- self.mox.StubOutWithMock(db, 'volume_update')
-
- self.manager.driver.schedule_create_volume(self.context, '1', '2',
- None).AndRaise(exception.NoValidHost(reason=''))
- db.volume_update(self.context, '1', {'status': 'error'})
-
- self.mox.ReplayAll()
- self.assertRaises(exception.NoValidHost, self.manager.create_volume,
- self.context, '1', '2')
-
def test_prep_resize_no_valid_host_back_in_active_state(self):
fake_instance_uuid = 'fake-instance-id'
inst = {"vm_state": "", "task_state": ""}
@@ -305,13 +293,10 @@ class SchedulerTestCase(test.TestCase):
self.assertEqual(result, ['host2'])
def _live_migration_instance(self):
- volume1 = {'id': 31338}
- volume2 = {'id': 31339}
return {'id': 31337,
'uuid': 'fake_uuid',
'name': 'fake-instance',
'host': 'fake_host1',
- 'volumes': [volume1, volume2],
'power_state': power_state.RUNNING,
'memory_mb': 1024,
'root_gb': 1024,
@@ -656,48 +641,6 @@ class SchedulerDriverModuleTestCase(test.TestCase):
super(SchedulerDriverModuleTestCase, self).setUp()
self.context = context.RequestContext('fake_user', 'fake_project')
- def test_cast_to_volume_host_update_db_with_volume_id(self):
- host = 'fake_host1'
- method = 'fake_method'
- fake_kwargs = {'volume_id': 31337,
- 'extra_arg': 'meow'}
- queue = 'fake_queue'
-
- self.mox.StubOutWithMock(timeutils, 'utcnow')
- self.mox.StubOutWithMock(db, 'volume_update')
- self.mox.StubOutWithMock(rpc, 'queue_get_for')
- self.mox.StubOutWithMock(rpc, 'cast')
-
- timeutils.utcnow().AndReturn('fake-now')
- db.volume_update(self.context, 31337,
- {'host': host, 'scheduled_at': 'fake-now'})
- rpc.queue_get_for(self.context, 'volume', host).AndReturn(queue)
- rpc.cast(self.context, queue,
- {'method': method,
- 'args': fake_kwargs})
-
- self.mox.ReplayAll()
- driver.cast_to_volume_host(self.context, host, method,
- **fake_kwargs)
-
- def test_cast_to_volume_host_update_db_without_volume_id(self):
- host = 'fake_host1'
- method = 'fake_method'
- fake_kwargs = {'extra_arg': 'meow'}
- queue = 'fake_queue'
-
- self.mox.StubOutWithMock(rpc, 'queue_get_for')
- self.mox.StubOutWithMock(rpc, 'cast')
-
- rpc.queue_get_for(self.context, 'volume', host).AndReturn(queue)
- rpc.cast(self.context, queue,
- {'method': method,
- 'args': fake_kwargs})
-
- self.mox.ReplayAll()
- driver.cast_to_volume_host(self.context, host, method,
- **fake_kwargs)
-
def test_cast_to_compute_host_update_db_with_instance_uuid(self):
host = 'fake_host1'
method = 'fake_method'
@@ -753,19 +696,6 @@ class SchedulerDriverModuleTestCase(test.TestCase):
driver.cast_to_host(self.context, 'compute', host, method,
**fake_kwargs)
- def test_cast_to_host_volume_topic(self):
- host = 'fake_host1'
- method = 'fake_method'
- fake_kwargs = {'extra_arg': 'meow'}
-
- self.mox.StubOutWithMock(driver, 'cast_to_volume_host')
- driver.cast_to_volume_host(self.context, host, method,
- **fake_kwargs)
-
- self.mox.ReplayAll()
- driver.cast_to_host(self.context, 'volume', host, method,
- **fake_kwargs)
-
def test_cast_to_host_unknown_topic(self):
host = 'fake_host1'
method = 'fake_method'
diff --git a/nova/tests/test_cinder.py b/nova/tests/test_cinder.py
index 18532d6425..3302aedb84 100644
--- a/nova/tests/test_cinder.py
+++ b/nova/tests/test_cinder.py
@@ -118,9 +118,6 @@ class CinderTestCase(test.TestCase):
self.fake_client_factory = FakeClientFactory()
self.stubs.Set(cinder.cinder_client, "Client",
self.fake_client_factory)
- self.flags(
- volume_api_class='nova.volume.cinder.API',
- )
self.api = cinder.API()
catalog = [{
"type": "volume",
diff --git a/nova/tests/test_iscsi.py b/nova/tests/test_iscsi.py
deleted file mode 100644
index 09a6e9e896..0000000000
--- a/nova/tests/test_iscsi.py
+++ /dev/null
@@ -1,121 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2011 Red Hat, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import os.path
-import shutil
-import string
-import tempfile
-
-from nova import test
-from nova.volume import iscsi
-
-
-class TargetAdminTestCase(object):
-
- def setUp(self):
- self.cmds = []
-
- self.tid = 1
- self.target_name = 'iqn.2011-09.org.foo.bar:blaa'
- self.lun = 10
- self.path = '/foo'
- self.vol_id = 'blaa'
-
- self.script_template = None
- self.stubs.Set(os.path, 'isfile', lambda _: True)
- self.stubs.Set(os, 'unlink', lambda _: '')
- self.stubs.Set(iscsi.TgtAdm, '_get_target', self.fake_get_target)
-
- def fake_get_target(obj, iqn):
- return 1
-
- def get_script_params(self):
- return {'tid': self.tid,
- 'target_name': self.target_name,
- 'lun': self.lun,
- 'path': self.path}
-
- def get_script(self):
- return self.script_template % self.get_script_params()
-
- def fake_execute(self, *cmd, **kwargs):
- self.cmds.append(string.join(cmd))
- return "", None
-
- def clear_cmds(self):
- cmds = []
-
- def verify_cmds(self, cmds):
- self.assertEqual(len(cmds), len(self.cmds))
- for a, b in zip(cmds, self.cmds):
- self.assertEqual(a, b)
-
- def verify(self):
- script = self.get_script()
- cmds = []
- for line in script.split('\n'):
- if not line.strip():
- continue
- cmds.append(line)
- self.verify_cmds(cmds)
-
- def run_commands(self):
- tgtadm = iscsi.get_target_admin()
- tgtadm.set_execute(self.fake_execute)
- tgtadm.create_iscsi_target(self.target_name, self.tid,
- self.lun, self.path)
- tgtadm.show_target(self.tid, iqn=self.target_name)
- tgtadm.remove_iscsi_target(self.tid, self.lun, self.vol_id)
-
- def test_target_admin(self):
- self.clear_cmds()
- self.run_commands()
- self.verify()
-
-
-class TgtAdmTestCase(test.TestCase, TargetAdminTestCase):
-
- def setUp(self):
- super(TgtAdmTestCase, self).setUp()
- TargetAdminTestCase.setUp(self)
- self.persist_tempdir = tempfile.mkdtemp()
- self.flags(iscsi_helper='tgtadm')
- self.flags(volumes_dir=self.persist_tempdir)
- self.script_template = "\n".join([
- 'tgt-admin --update iqn.2011-09.org.foo.bar:blaa',
- 'tgt-admin --delete iqn.2010-10.org.openstack:volume-blaa'])
-
- def tearDown(self):
- try:
- shutil.rmtree(self.persist_tempdir)
- except OSError:
- pass
- super(TgtAdmTestCase, self).tearDown()
-
-
-class IetAdmTestCase(test.TestCase, TargetAdminTestCase):
-
- def setUp(self):
- super(IetAdmTestCase, self).setUp()
- TargetAdminTestCase.setUp(self)
- self.flags(iscsi_helper='ietadm')
- self.script_template = "\n".join([
- 'ietadm --op new --tid=%(tid)s --params Name=%(target_name)s',
- 'ietadm --op new --tid=%(tid)s --lun=%(lun)s '
- '--params Path=%(path)s,Type=fileio',
- 'ietadm --op show --tid=%(tid)s',
- 'ietadm --op delete --tid=%(tid)s --lun=%(lun)s',
- 'ietadm --op delete --tid=%(tid)s'])
diff --git a/nova/tests/test_libvirt.py b/nova/tests/test_libvirt.py
index c84440e580..f8bc3c3392 100644
--- a/nova/tests/test_libvirt.py
+++ b/nova/tests/test_libvirt.py
@@ -155,7 +155,6 @@ class LibvirtVolumeTestCase(test.TestCase):
}
def test_libvirt_volume_driver_serial(self):
- vol_driver = volume_driver.VolumeDriver()
libvirt_driver = volume.LibvirtVolumeDriver(self.fake_conn)
name = 'volume-00000001'
vol = {'id': 1, 'name': name}
diff --git a/nova/tests/test_netapp.py b/nova/tests/test_netapp.py
deleted file mode 100644
index 79a8526ee2..0000000000
--- a/nova/tests/test_netapp.py
+++ /dev/null
@@ -1,1380 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright (c) 2012 NetApp, Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""
-Tests for NetApp volume driver
-
-"""
-
-import BaseHTTPServer
-import httplib
-import StringIO
-
-from lxml import etree
-
-from nova.openstack.common import log as logging
-from nova import test
-from nova.volume import netapp
-
-
-LOG = logging.getLogger(__name__)
-
-
-WSDL_HEADER = """<?xml version="1.0" encoding="UTF-8" standalone="no"?>
-<definitions xmlns="http://schemas.xmlsoap.org/wsdl/"
- xmlns:na="http://www.netapp.com/management/v1"
- xmlns:soap="http://schemas.xmlsoap.org/wsdl/soap/"
- xmlns:xsd="http://www.w3.org/2001/XMLSchema" name="NetAppDfm"
- targetNamespace="http://www.netapp.com/management/v1">"""
-
-WSDL_TYPES = """<types>
-<xsd:schema attributeFormDefault="unqualified" elementFormDefault="qualified"
- targetNamespace="http://www.netapp.com/management/v1">
-<xsd:element name="ApiProxy">
- <xsd:complexType>
- <xsd:all>
- <xsd:element name="Request" type="na:Request"/>
- <xsd:element name="Target" type="xsd:string"/>
- <xsd:element minOccurs="0" name="Timeout" type="xsd:integer"/>
- <xsd:element minOccurs="0" name="Username" type="xsd:string"/>
- </xsd:all>
- </xsd:complexType>
-</xsd:element>
-<xsd:element name="ApiProxyResult">
- <xsd:complexType>
- <xsd:all>
- <xsd:element name="Response" type="na:Response"/>
- </xsd:all>
- </xsd:complexType>
-</xsd:element>
-<xsd:element name="DatasetEditBegin">
- <xsd:complexType>
- <xsd:all>
- <xsd:element name="DatasetNameOrId" type="na:ObjNameOrId"/>
- <xsd:element minOccurs="0" name="Force" type="xsd:boolean"/>
- </xsd:all>
- </xsd:complexType>
-</xsd:element>
-<xsd:element name="DatasetEditBeginResult">
- <xsd:complexType>
- <xsd:all>
- <xsd:element name="EditLockId" type="xsd:integer"/>
- </xsd:all>
- </xsd:complexType>
-</xsd:element>
-<xsd:element name="DatasetEditCommit">
- <xsd:complexType>
- <xsd:all>
- <xsd:element minOccurs="0" name="AssumeConfirmation"
- type="xsd:boolean"/>
- <xsd:element name="EditLockId" type="xsd:integer"/>
- </xsd:all>
- </xsd:complexType>
-</xsd:element>
-<xsd:element name="DatasetEditCommitResult">
- <xsd:complexType>
- <xsd:all>
- <xsd:element minOccurs="0" name="IsProvisioningFailure"
- type="xsd:boolean"/>
- <xsd:element minOccurs="0" name="JobIds" type="na:ArrayOfJobInfo"/>
- </xsd:all>
- </xsd:complexType>
-</xsd:element>
-<xsd:element name="DatasetEditRollback">
- <xsd:complexType>
- <xsd:all>
- <xsd:element name="EditLockId" type="xsd:integer"/>
- </xsd:all>
- </xsd:complexType>
-</xsd:element>
-<xsd:element name="DatasetEditRollbackResult">
- <xsd:complexType/>
-</xsd:element>
-<xsd:element name="DatasetListInfoIterEnd">
- <xsd:complexType>
- <xsd:all>
- <xsd:element name="Tag" type="xsd:string"/>
- </xsd:all>
- </xsd:complexType>
-</xsd:element>
-<xsd:element name="DatasetListInfoIterEndResult">
- <xsd:complexType/>
-</xsd:element>
-<xsd:element name="DatasetListInfoIterNext">
- <xsd:complexType>
- <xsd:all>
- <xsd:element name="Maximum" type="xsd:integer"/>
- <xsd:element name="Tag" type="xsd:string"/>
- </xsd:all>
- </xsd:complexType>
-</xsd:element>
-<xsd:element name="DatasetListInfoIterNextResult">
- <xsd:complexType>
- <xsd:all>
- <xsd:element name="Datasets" type="na:ArrayOfDatasetInfo"/>
- <xsd:element name="Records" type="xsd:integer"/>
- </xsd:all>
- </xsd:complexType>
-</xsd:element>
-<xsd:element name="DatasetListInfoIterStart">
- <xsd:complexType>
- <xsd:all>
- <xsd:element minOccurs="0" name="ObjectNameOrId"
- type="na:ObjNameOrId"/>
- </xsd:all>
- </xsd:complexType>
-</xsd:element>
-<xsd:element name="DatasetListInfoIterStartResult">
- <xsd:complexType>
- <xsd:all>
- <xsd:element name="Records" type="xsd:integer"/>
- <xsd:element name="Tag" type="xsd:string"/>
- </xsd:all>
- </xsd:complexType>
-</xsd:element>
-<xsd:element name="DatasetMemberListInfoIterEnd">
- <xsd:complexType>
- <xsd:all>
- <xsd:element name="Tag" type="xsd:string"/>
- </xsd:all>
- </xsd:complexType>
-</xsd:element>
-<xsd:element name="DatasetMemberListInfoIterEndResult">
- <xsd:complexType/>
-</xsd:element>
-<xsd:element name="DatasetMemberListInfoIterNext">
- <xsd:complexType>
- <xsd:all>
- <xsd:element name="Maximum" type="xsd:integer"/>
- <xsd:element name="Tag" type="xsd:string"/>
- </xsd:all>
- </xsd:complexType>
-</xsd:element>
-<xsd:element name="DatasetMemberListInfoIterNextResult">
- <xsd:complexType>
- <xsd:all>
- <xsd:element name="DatasetMembers"
- type="na:ArrayOfDatasetMemberInfo"/>
- <xsd:element name="Records" type="xsd:integer"/>
- </xsd:all>
- </xsd:complexType>
-</xsd:element>
-<xsd:element name="DatasetMemberListInfoIterStart">
- <xsd:complexType>
- <xsd:all>
- <xsd:element name="DatasetNameOrId" type="na:ObjNameOrId"/>
- <xsd:element minOccurs="0" name="IncludeExportsInfo"
- type="xsd:boolean"/>
- <xsd:element minOccurs="0" name="IncludeIndirect"
- type="xsd:boolean"/>
- <xsd:element minOccurs="0" name="MemberType" type="xsd:string"/>
- </xsd:all>
- </xsd:complexType>
-</xsd:element>
-<xsd:element name="DatasetMemberListInfoIterStartResult">
- <xsd:complexType>
- <xsd:all>
- <xsd:element name="Records" type="xsd:integer"/>
- <xsd:element name="Tag" type="xsd:string"/>
- </xsd:all>
- </xsd:complexType>
-</xsd:element>
-<xsd:element name="DatasetProvisionMember">
- <xsd:complexType>
- <xsd:all>
- <xsd:element name="EditLockId" type="xsd:integer"/>
- <xsd:element name="ProvisionMemberRequestInfo"
- type="na:ProvisionMemberRequestInfo"/>
- </xsd:all>
- </xsd:complexType>
-</xsd:element>
-<xsd:element name="DatasetProvisionMemberResult">
- <xsd:complexType/>
-</xsd:element>
-<xsd:element name="DatasetRemoveMember">
- <xsd:complexType>
- <xsd:all>
- <xsd:element name="DatasetMemberParameters"
- type="na:ArrayOfDatasetMemberParameter"/>
- <xsd:element minOccurs="0" name="Destroy" type="xsd:boolean"/>
- <xsd:element name="EditLockId" type="xsd:integer"/>
- </xsd:all>
- </xsd:complexType>
-</xsd:element>
-<xsd:element name="DatasetRemoveMemberResult">
- <xsd:complexType/>
-</xsd:element>
-<xsd:element name="DpJobProgressEventListIterEnd">
- <xsd:complexType>
- <xsd:all>
- <xsd:element name="Tag" type="xsd:string"/>
- </xsd:all>
- </xsd:complexType>
-</xsd:element>
-<xsd:element name="DpJobProgressEventListIterEndResult">
- <xsd:complexType/>
-</xsd:element>
-<xsd:element name="DpJobProgressEventListIterNext">
- <xsd:complexType>
- <xsd:all>
- <xsd:element name="Maximum" type="xsd:integer"/>
- <xsd:element name="Tag" type="xsd:string"/>
- </xsd:all>
- </xsd:complexType>
-</xsd:element>
-<xsd:element name="DpJobProgressEventListIterNextResult">
- <xsd:complexType>
- <xsd:all>
- <xsd:element name="ProgressEvents"
- type="na:ArrayOfDpJobProgressEventInfo"/>
- <xsd:element name="Records" type="xsd:integer"/>
- </xsd:all>
- </xsd:complexType>
-</xsd:element>
-<xsd:element name="DpJobProgressEventListIterStart">
- <xsd:complexType>
- <xsd:all>
- <xsd:element minOccurs="0" name="JobId" type="xsd:integer"/>
- </xsd:all>
- </xsd:complexType>
-</xsd:element>
-<xsd:element name="DpJobProgressEventListIterStartResult">
- <xsd:complexType>
- <xsd:all>
- <xsd:element name="Records" type="xsd:integer"/>
- <xsd:element name="Tag" type="xsd:string"/>
- </xsd:all>
- </xsd:complexType>
-</xsd:element>
-<xsd:element name="DfmAbout">
- <xsd:complexType>
- <xsd:all>
- <xsd:element minOccurs="0" name="IncludeDirectorySizeInfo"
- type="xsd:boolean"/>
- </xsd:all>
- </xsd:complexType>
-</xsd:element>
-<xsd:element name="DfmAboutResult">
- <xsd:complexType>
- <xsd:all/>
- </xsd:complexType>
-</xsd:element>
-<xsd:element name="HostListInfoIterEnd">
- <xsd:complexType>
- <xsd:all>
- <xsd:element name="Tag" type="xsd:string"/>
- </xsd:all>
- </xsd:complexType>
-</xsd:element>
-<xsd:element name="HostListInfoIterEndResult">
- <xsd:complexType/>
-</xsd:element>
-<xsd:element name="HostListInfoIterNext">
- <xsd:complexType>
- <xsd:all>
- <xsd:element name="Maximum" type="xsd:integer"/>
- <xsd:element name="Tag" type="xsd:string"/>
- </xsd:all>
- </xsd:complexType>
-</xsd:element>
-<xsd:element name="HostListInfoIterNextResult">
- <xsd:complexType>
- <xsd:all>
- <xsd:element name="Hosts" type="na:ArrayOfHostInfo"/>
- <xsd:element name="Records" type="xsd:integer"/>
- </xsd:all>
- </xsd:complexType>
-</xsd:element>
-<xsd:element name="HostListInfoIterStart">
- <xsd:complexType>
- <xsd:all>
- <xsd:element minOccurs="0" name="ObjectNameOrId"
- type="na:ObjNameOrId"/>
- </xsd:all>
- </xsd:complexType>
-</xsd:element>
-<xsd:element name="HostListInfoIterStartResult">
- <xsd:complexType>
- <xsd:all>
- <xsd:element name="Records" type="xsd:integer"/>
- <xsd:element name="Tag" type="xsd:string"/>
- </xsd:all>
- </xsd:complexType>
-</xsd:element>
-<xsd:element name="LunListInfoIterEnd">
- <xsd:complexType>
- <xsd:all>
- <xsd:element name="Tag" type="xsd:string"/>
- </xsd:all>
- </xsd:complexType>
-</xsd:element>
-<xsd:element name="LunListInfoIterEndResult">
- <xsd:complexType/>
-</xsd:element>
-<xsd:element name="LunListInfoIterNext">
- <xsd:complexType>
- <xsd:all>
- <xsd:element name="Maximum" type="xsd:integer"/>
- <xsd:element name="Tag" type="xsd:string"/>
- </xsd:all>
- </xsd:complexType>
-</xsd:element>
-<xsd:element name="LunListInfoIterNextResult">
- <xsd:complexType>
- <xsd:all>
- <xsd:element name="Luns" type="na:ArrayOfLunInfo"/>
- <xsd:element name="Records" type="xsd:integer"/>
- </xsd:all>
- </xsd:complexType>
-</xsd:element>
-<xsd:element name="LunListInfoIterStart">
- <xsd:complexType>
- <xsd:all>
- <xsd:element minOccurs="0" name="ObjectNameOrId"
- type="na:ObjNameOrId"/>
- </xsd:all>
- </xsd:complexType>
-</xsd:element>
-<xsd:element name="LunListInfoIterStartResult">
- <xsd:complexType>
- <xsd:all>
- <xsd:element name="Records" type="xsd:integer"/>
- <xsd:element name="Tag" type="xsd:string"/>
- </xsd:all>
- </xsd:complexType>
-</xsd:element>
-<xsd:element name="StorageServiceDatasetProvision">
- <xsd:complexType>
- <xsd:all>
- <xsd:element minOccurs="0" name="AssumeConfirmation"
- type="xsd:boolean"/>
- <xsd:element name="DatasetName" type="na:ObjName"/>
- <xsd:element name="StorageServiceNameOrId" type="na:ObjNameOrId"/>
- <xsd:element minOccurs="0" name="StorageSetDetails"
- type="na:ArrayOfStorageSetInfo"/>
- </xsd:all>
- </xsd:complexType>
-</xsd:element>
-<xsd:element name="StorageServiceDatasetProvisionResult">
- <xsd:complexType>
- <xsd:all>
- <xsd:element minOccurs="0" name="ConformanceAlerts"
- type="na:ArrayOfConformanceAlert"/>
- <xsd:element name="DatasetId" type="na:ObjId"/>
- <xsd:element minOccurs="0" name="DryRunResults"
- type="na:ArrayOfDryRunResult"/>
- </xsd:all>
- </xsd:complexType>
-</xsd:element>
-<xsd:complexType name="ArrayOfDatasetInfo">
- <xsd:sequence>
- <xsd:element maxOccurs="unbounded" name="DatasetInfo"
- type="na:DatasetInfo"/>
- </xsd:sequence>
-</xsd:complexType>
-<xsd:complexType name="ArrayOfDatasetMemberInfo">
- <xsd:sequence>
- <xsd:element maxOccurs="unbounded" name="DatasetMemberInfo"
- type="na:DatasetMemberInfo"/>
- </xsd:sequence>
-</xsd:complexType>
-<xsd:complexType name="ArrayOfDatasetMemberParameter">
- <xsd:sequence>
- <xsd:element maxOccurs="unbounded" name="DatasetMemberParameter"
- type="na:DatasetMemberParameter"/>
- </xsd:sequence>
-</xsd:complexType>
-<xsd:complexType name="ArrayOfDfmMetadataField">
- <xsd:sequence>
- <xsd:element maxOccurs="unbounded" name="DfmMetadataField"
- type="na:DfmMetadataField"/>
- </xsd:sequence>
-</xsd:complexType>
-<xsd:complexType name="ArrayOfDpJobProgressEventInfo">
- <xsd:sequence>
- <xsd:element maxOccurs="unbounded" name="DpJobProgressEventInfo"
- type="na:DpJobProgressEventInfo"/>
- </xsd:sequence>
-</xsd:complexType>
-<xsd:complexType name="ArrayOfHostInfo">
- <xsd:sequence>
- <xsd:element maxOccurs="unbounded" name="HostInfo" type="na:HostInfo"/>
- </xsd:sequence>
-</xsd:complexType>
-<xsd:complexType name="ArrayOfJobInfo">
- <xsd:sequence>
- <xsd:element maxOccurs="unbounded" name="JobInfo" type="na:JobInfo"/>
- </xsd:sequence>
-</xsd:complexType>
-<xsd:complexType name="ArrayOfLunInfo">
- <xsd:sequence>
- <xsd:element maxOccurs="unbounded" name="LunInfo" type="na:LunInfo"/>
- </xsd:sequence>
-</xsd:complexType>
-<xsd:complexType name="ArrayOfStorageSetInfo">
- <xsd:sequence>
- <xsd:element maxOccurs="unbounded" name="StorageSetInfo"
- type="na:StorageSetInfo"/>
- </xsd:sequence>
-</xsd:complexType>
-<xsd:complexType name="DatasetExportInfo">
- <xsd:all>
- <xsd:element minOccurs="0" name="DatasetExportProtocol"
- type="na:DatasetExportProtocol"/>
- <xsd:element minOccurs="0" name="DatasetLunMappingInfo"
- type="na:DatasetLunMappingInfo"/>
- </xsd:all>
-</xsd:complexType>
-<xsd:simpleType name="DatasetExportProtocol">
- <xsd:restriction base="xsd:string"/>
-</xsd:simpleType>
-<xsd:complexType name="DatasetInfo">
- <xsd:all>
- <xsd:element name="DatasetId" type="na:ObjId"/>
- <xsd:element name="DatasetName" type="na:ObjName"/>
- <xsd:element name="DatasetMetadata" type="na:ArrayOfDfmMetadataField"/>
- </xsd:all>
-</xsd:complexType>
-<xsd:complexType name="DatasetLunMappingInfo">
- <xsd:all>
- <xsd:element name="IgroupOsType" type="xsd:string"/>
- </xsd:all>
-</xsd:complexType>
-<xsd:complexType name="DatasetMemberInfo">
- <xsd:all>
- <xsd:element name="MemberId" type="na:ObjId"/>
- <xsd:element name="MemberName" type="na:ObjName"/>
- </xsd:all>
-</xsd:complexType>
-<xsd:complexType name="DatasetMemberParameter">
- <xsd:all>
- <xsd:element name="ObjectNameOrId" type="na:ObjNameOrId"/>
- </xsd:all>
-</xsd:complexType>
-<xsd:complexType name="DfmMetadataField">
- <xsd:all>
- <xsd:element name="FieldName" type="xsd:string"/>
- <xsd:element name="FieldValue" type="xsd:string"/>
- </xsd:all>
-</xsd:complexType>
-<xsd:complexType name="DpJobProgressEventInfo">
- <xsd:all>
- <xsd:element name="EventStatus" type="na:ObjStatus"/>
- <xsd:element name="EventType" type="xsd:string"/>
- <xsd:element minOccurs="0" name="ProgressLunInfo"
- type="na:ProgressLunInfo"/>
- </xsd:all>
-</xsd:complexType>
-<xsd:simpleType name="DpPolicyNodeName">
- <xsd:restriction base="xsd:string"/>
-</xsd:simpleType>
-<xsd:simpleType name="HostId">
- <xsd:restriction base="xsd:integer"/>
-</xsd:simpleType>
-<xsd:complexType name="HostInfo">
- <xsd:all>
- <xsd:element name="HostAddress" type="xsd:string"/>
- <xsd:element name="HostId" type="na:HostId"/>
- <xsd:element name="HostName" type="xsd:string"/>
- </xsd:all>
-</xsd:complexType>
-<xsd:complexType name="JobInfo">
- <xsd:all>
- <xsd:element name="JobId" type="xsd:integer"/>
- </xsd:all>
-</xsd:complexType>
-<xsd:complexType name="LunInfo">
- <xsd:all>
- <xsd:element name="HostId" type="na:ObjId"/>
- <xsd:element name="LunPath" type="na:ObjName"/>
- </xsd:all>
-</xsd:complexType>
-<xsd:simpleType name="ObjId">
- <xsd:restriction base="xsd:integer"/>
-</xsd:simpleType>
-<xsd:simpleType name="ObjName">
- <xsd:restriction base="xsd:string"/>
-</xsd:simpleType>
-<xsd:simpleType name="ObjNameOrId">
- <xsd:restriction base="xsd:string"/>
-</xsd:simpleType>
-<xsd:simpleType name="ObjStatus">
- <xsd:restriction base="xsd:string"/>
-</xsd:simpleType>
-<xsd:complexType name="ProgressLunInfo">
- <xsd:all>
- <xsd:element name="LunPathId" type="na:ObjId"/>
- <xsd:element name="LunName" type="na:ObjName"/>
- </xsd:all>
-</xsd:complexType>
-<xsd:complexType name="ProvisionMemberRequestInfo">
- <xsd:all>
- <xsd:element minOccurs="0" name="Description" type="xsd:string"/>
- <xsd:element minOccurs="0" name="MaximumSnapshotSpace"
- type="xsd:integer"/>
- <xsd:element name="Name" type="xsd:string"/>
- <xsd:element name="Size" type="xsd:integer"/>
- </xsd:all>
-</xsd:complexType>
-<xsd:complexType name="Request">
- <xsd:all>
- <xsd:element minOccurs="0" name="Args">
- <xsd:complexType>
- <xsd:sequence>
- <xsd:any maxOccurs="unbounded" minOccurs="0"/>
- </xsd:sequence>
- </xsd:complexType>
- </xsd:element>
- <xsd:element name="Name" type="xsd:string">
- </xsd:element>
- </xsd:all>
-</xsd:complexType>
-<xsd:complexType name="Response">
- <xsd:all>
- <xsd:element minOccurs="0" name="Errno" type="xsd:integer"/>
- <xsd:element minOccurs="0" name="Reason" type="xsd:string"/>
- <xsd:element minOccurs="0" name="Results">
- <xsd:complexType>
- <xsd:sequence>
- <xsd:any maxOccurs="unbounded" minOccurs="0"/>
- </xsd:sequence>
- </xsd:complexType>
- </xsd:element>
- <xsd:element name="Status" type="xsd:string"/>
- </xsd:all>
-</xsd:complexType>
-<xsd:complexType name="StorageSetInfo">
- <xsd:all>
- <xsd:element minOccurs="0" name="DatasetExportInfo"
- type="na:DatasetExportInfo"/>
- <xsd:element minOccurs="0" name="DpNodeName"
- type="na:DpPolicyNodeName"/>
- <xsd:element minOccurs="0" name="ServerNameOrId"
- type="na:ObjNameOrId"/>
- </xsd:all>
-</xsd:complexType>
-</xsd:schema></types>"""
-
-WSDL_TRAILER = """<service name="DfmService">
-<port binding="na:DfmBinding" name="DfmPort">
-<soap:address location="https://HOST_NAME:8488/apis/soap/v1"/>
-</port></service></definitions>"""
-
-RESPONSE_PREFIX = """<?xml version="1.0" encoding="UTF-8"?>
-<env:Envelope xmlns:env="http://schemas.xmlsoap.org/soap/envelope/"
- xmlns:na="http://www.netapp.com/management/v1"><env:Header/><env:Body>"""
-
-RESPONSE_SUFFIX = """</env:Body></env:Envelope>"""
-
-APIS = ['ApiProxy', 'DatasetListInfoIterStart', 'DatasetListInfoIterNext',
- 'DatasetListInfoIterEnd', 'DatasetEditBegin', 'DatasetEditCommit',
- 'DatasetProvisionMember', 'DatasetRemoveMember', 'DfmAbout',
- 'DpJobProgressEventListIterStart', 'DpJobProgressEventListIterNext',
- 'DpJobProgressEventListIterEnd', 'DatasetMemberListInfoIterStart',
- 'DatasetMemberListInfoIterNext', 'DatasetMemberListInfoIterEnd',
- 'HostListInfoIterStart', 'HostListInfoIterNext', 'HostListInfoIterEnd',
- 'LunListInfoIterStart', 'LunListInfoIterNext', 'LunListInfoIterEnd',
- 'StorageServiceDatasetProvision']
-
-iter_count = 0
-iter_table = {}
-
-
-class FakeDfmServerHandler(BaseHTTPServer.BaseHTTPRequestHandler):
- """HTTP handler that fakes enough stuff to allow the driver to run"""
-
- def do_GET(s):
- """Respond to a GET request."""
- if '/dfm.wsdl' != s.path:
- s.send_response(404)
- s.end_headers
- return
- s.send_response(200)
- s.send_header("Content-Type", "application/wsdl+xml")
- s.end_headers()
- out = s.wfile
- out.write(WSDL_HEADER)
- out.write(WSDL_TYPES)
- for api in APIS:
- out.write('<message name="%sRequest">' % api)
- out.write('<part element="na:%s" name="parameters"/>' % api)
- out.write('</message>')
- out.write('<message name="%sResponse">' % api)
- out.write('<part element="na:%sResult" name="results"/>' % api)
- out.write('</message>')
- out.write('<portType name="DfmInterface">')
- for api in APIS:
- out.write('<operation name="%s">' % api)
- out.write('<input message="na:%sRequest"/>' % api)
- out.write('<output message="na:%sResponse"/>' % api)
- out.write('</operation>')
- out.write('</portType>')
- out.write('<binding name="DfmBinding" type="na:DfmInterface">')
- out.write('<soap:binding style="document" ' +
- 'transport="http://schemas.xmlsoap.org/soap/http"/>')
- for api in APIS:
- out.write('<operation name="%s">' % api)
- out.write('<soap:operation soapAction="urn:%s"/>' % api)
- out.write('<input><soap:body use="literal"/></input>')
- out.write('<output><soap:body use="literal"/></output>')
- out.write('</operation>')
- out.write('</binding>')
- out.write(WSDL_TRAILER)
-
- def do_POST(s):
- """Respond to a POST request."""
- if '/apis/soap/v1' != s.path:
- s.send_response(404)
- s.end_headers
- return
- request_xml = s.rfile.read(int(s.headers['Content-Length']))
- ntap_ns = 'http://www.netapp.com/management/v1'
- nsmap = {'env': 'http://schemas.xmlsoap.org/soap/envelope/',
- 'na': ntap_ns}
- root = etree.fromstring(request_xml)
-
- body = root.xpath('/env:Envelope/env:Body', namespaces=nsmap)[0]
- request = body.getchildren()[0]
- tag = request.tag
- if not tag.startswith('{' + ntap_ns + '}'):
- s.send_response(500)
- s.end_headers
- return
- api = tag[(2 + len(ntap_ns)):]
- global iter_count
- global iter_table
- if 'DatasetListInfoIterStart' == api:
- iter_name = 'dataset_%s' % iter_count
- iter_count = iter_count + 1
- iter_table[iter_name] = 0
- body = """<na:DatasetListInfoIterStartResult>
- <na:Records>1</na:Records>
- <na:Tag>%s</na:Tag>
- </na:DatasetListInfoIterStartResult>""" % iter_name
- elif 'DatasetListInfoIterNext' == api:
- tags = body.xpath('na:DatasetListInfoIterNext/na:Tag',
- namespaces=nsmap)
- iter_name = tags[0].text
- if iter_table[iter_name]:
- body = """<na:DatasetListInfoIterNextResult>
- <na:Datasets></na:Datasets>
- <na:Records>0</na:Records>
- </na:DatasetListInfoIterNextResult>"""
- else:
- iter_table[iter_name] = 1
- body = """<na:DatasetListInfoIterNextResult>
- <na:Datasets>
- <na:DatasetInfo>
- <na:DatasetId>0</na:DatasetId>
- <na:DatasetMetadata>
- <na:DfmMetadataField>
- <na:FieldName>OpenStackProject</na:FieldName>
- <na:FieldValue>testproj</na:FieldValue>
- </na:DfmMetadataField>
- <na:DfmMetadataField>
- <na:FieldName>OpenStackVolType</na:FieldName>
- <na:FieldValue></na:FieldValue>
- </na:DfmMetadataField>
- </na:DatasetMetadata>
- <na:DatasetName>OpenStack_testproj</na:DatasetName>
- </na:DatasetInfo>
- </na:Datasets>
- <na:Records>1</na:Records>
- </na:DatasetListInfoIterNextResult>"""
- elif 'DatasetListInfoIterEnd' == api:
- body = """<na:DatasetListInfoIterEndResult/>"""
- elif 'DatasetEditBegin' == api:
- body = """<na:DatasetEditBeginResult>
- <na:EditLockId>0</na:EditLockId>
- </na:DatasetEditBeginResult>"""
- elif 'DatasetEditCommit' == api:
- body = """<na:DatasetEditCommitResult>
- <na:IsProvisioningFailure>false</na:IsProvisioningFailure>
- <na:JobIds>
- <na:JobInfo>
- <na:JobId>0</na:JobId>
- </na:JobInfo>
- </na:JobIds>
- </na:DatasetEditCommitResult>"""
- elif 'DatasetProvisionMember' == api:
- body = """<na:DatasetProvisionMemberResult/>"""
- elif 'DatasetRemoveMember' == api:
- body = """<na:DatasetRemoveMemberResult/>"""
- elif 'DfmAbout' == api:
- body = """<na:DfmAboutResult/>"""
- elif 'DpJobProgressEventListIterStart' == api:
- iter_name = 'dpjobprogress_%s' % iter_count
- iter_count = iter_count + 1
- iter_table[iter_name] = 0
- body = """<na:DpJobProgressEventListIterStartResult>
- <na:Records>2</na:Records>
- <na:Tag>%s</na:Tag>
- </na:DpJobProgressEventListIterStartResult>""" % iter_name
- elif 'DpJobProgressEventListIterNext' == api:
- tags = body.xpath('na:DpJobProgressEventListIterNext/na:Tag',
- namespaces=nsmap)
- iter_name = tags[0].text
- if iter_table[iter_name]:
- body = """<na:DpJobProgressEventListIterNextResult/>"""
- else:
- iter_table[iter_name] = 1
- name = ('filer:/OpenStack_testproj/volume-00000001/'
- 'volume-00000001')
- body = """<na:DpJobProgressEventListIterNextResult>
- <na:ProgressEvents>
- <na:DpJobProgressEventInfo>
- <na:EventStatus>normal</na:EventStatus>
- <na:EventType>lun-create</na:EventType>
- <na:ProgressLunInfo>
- <na:LunPathId>0</na:LunPathId>
- <na:LunName>%s</na:LunName>
- </na:ProgressLunInfo>
- </na:DpJobProgressEventInfo>
- <na:DpJobProgressEventInfo>
- <na:EventStatus>normal</na:EventStatus>
- <na:EventType>job-end</na:EventType>
- </na:DpJobProgressEventInfo>
- </na:ProgressEvents>
- <na:Records>2</na:Records>
- </na:DpJobProgressEventListIterNextResult>""" % name
- elif 'DpJobProgressEventListIterEnd' == api:
- body = """<na:DpJobProgressEventListIterEndResult/>"""
- elif 'DatasetMemberListInfoIterStart' == api:
- iter_name = 'datasetmember_%s' % iter_count
- iter_count = iter_count + 1
- iter_table[iter_name] = 0
- body = """<na:DatasetMemberListInfoIterStartResult>
- <na:Records>1</na:Records>
- <na:Tag>%s</na:Tag>
- </na:DatasetMemberListInfoIterStartResult>""" % iter_name
- elif 'DatasetMemberListInfoIterNext' == api:
- tags = body.xpath('na:DatasetMemberListInfoIterNext/na:Tag',
- namespaces=nsmap)
- iter_name = tags[0].text
- if iter_table[iter_name]:
- body = """<na:DatasetMemberListInfoIterNextResult>
- <na:DatasetMembers></na:DatasetMembers>
- <na:Records>0</na:Records>
- </na:DatasetMemberListInfoIterNextResult>"""
- else:
- iter_table[iter_name] = 1
- name = ('filer:/OpenStack_testproj/volume-00000001/'
- 'volume-00000001')
- body = """<na:DatasetMemberListInfoIterNextResult>
- <na:DatasetMembers>
- <na:DatasetMemberInfo>
- <na:MemberId>0</na:MemberId>
- <na:MemberName>%s</na:MemberName>
- </na:DatasetMemberInfo>
- </na:DatasetMembers>
- <na:Records>1</na:Records>
- </na:DatasetMemberListInfoIterNextResult>""" % name
- elif 'DatasetMemberListInfoIterEnd' == api:
- body = """<na:DatasetMemberListInfoIterEndResult/>"""
- elif 'HostListInfoIterStart' == api:
- body = """<na:HostListInfoIterStartResult>
- <na:Records>1</na:Records>
- <na:Tag>host</na:Tag>
- </na:HostListInfoIterStartResult>"""
- elif 'HostListInfoIterNext' == api:
- body = """<na:HostListInfoIterNextResult>
- <na:Hosts>
- <na:HostInfo>
- <na:HostAddress>1.2.3.4</na:HostAddress>
- <na:HostId>0</na:HostId>
- <na:HostName>filer</na:HostName>
- </na:HostInfo>
- </na:Hosts>
- <na:Records>1</na:Records>
- </na:HostListInfoIterNextResult>"""
- elif 'HostListInfoIterEnd' == api:
- body = """<na:HostListInfoIterEndResult/>"""
- elif 'LunListInfoIterStart' == api:
- body = """<na:LunListInfoIterStartResult>
- <na:Records>1</na:Records>
- <na:Tag>lun</na:Tag>
- </na:LunListInfoIterStartResult>"""
- elif 'LunListInfoIterNext' == api:
- path = 'OpenStack_testproj/volume-00000001/volume-00000001'
- body = """<na:LunListInfoIterNextResult>
- <na:Luns>
- <na:LunInfo>
- <na:HostId>0</na:HostId>
- <na:LunPath>%s</na:LunPath>
- </na:LunInfo>
- </na:Luns>
- <na:Records>1</na:Records>
- </na:LunListInfoIterNextResult>""" % path
- elif 'LunListInfoIterEnd' == api:
- body = """<na:LunListInfoIterEndResult/>"""
- elif 'ApiProxy' == api:
- names = body.xpath('na:ApiProxy/na:Request/na:Name',
- namespaces=nsmap)
- proxy = names[0].text
- if 'igroup-list-info' == proxy:
- igroup = 'openstack-iqn.1993-08.org.debian:01:23456789'
- initiator = 'iqn.1993-08.org.debian:01:23456789'
- proxy_body = """<initiator-groups>
- <initiator-group-info>
- <initiator-group-name>%s</initiator-group-name>
- <initiator-group-type>iscsi</initiator-group-type>
- <initiator-group-os-type>linux</initiator-group-os-type>
- <initiators>
- <initiator-info>
- <initiator-name>%s</initiator-name>
- </initiator-info>
- </initiators>
- </initiator-group-info>
- </initiator-groups>""" % (igroup, initiator)
- elif 'igroup-create' == proxy:
- proxy_body = ''
- elif 'igroup-add' == proxy:
- proxy_body = ''
- elif 'lun-map-list-info' == proxy:
- proxy_body = '<initiator-groups/>'
- elif 'lun-map' == proxy:
- proxy_body = '<lun-id-assigned>0</lun-id-assigned>'
- elif 'lun-unmap' == proxy:
- proxy_body = ''
- elif 'iscsi-portal-list-info' == proxy:
- proxy_body = """<iscsi-portal-list-entries>
- <iscsi-portal-list-entry-info>
- <ip-address>1.2.3.4</ip-address>
- <ip-port>3260</ip-port>
- <tpgroup-tag>1000</tpgroup-tag>
- </iscsi-portal-list-entry-info>
- </iscsi-portal-list-entries>"""
- elif 'iscsi-node-get-name' == proxy:
- target = 'iqn.1992-08.com.netapp:sn.111111111'
- proxy_body = '<node-name>%s</node-name>' % target
- else:
- # Unknown proxy API
- s.send_response(500)
- s.end_headers
- return
- api = api + ':' + proxy
- proxy_header = '<na:ApiProxyResult><na:Response><na:Results>'
- proxy_trailer = """</na:Results><na:Status>passed</na:Status>
- </na:Response></na:ApiProxyResult>"""
- body = proxy_header + proxy_body + proxy_trailer
- else:
- # Unknown API
- s.send_response(500)
- s.end_headers
- return
- s.send_response(200)
- s.send_header("Content-Type", "text/xml; charset=utf-8")
- s.end_headers()
- s.wfile.write(RESPONSE_PREFIX)
- s.wfile.write(body)
- s.wfile.write(RESPONSE_SUFFIX)
-
-
-class FakeHttplibSocket(object):
- """A fake socket implementation for httplib.HTTPResponse"""
- def __init__(self, value):
- self._rbuffer = StringIO.StringIO(value)
- self._wbuffer = StringIO.StringIO('')
- oldclose = self._wbuffer.close
-
- def newclose():
- self.result = self._wbuffer.getvalue()
- oldclose()
- self._wbuffer.close = newclose
-
- def makefile(self, mode, _other):
- """Returns the socket's internal buffer"""
- if mode == 'r' or mode == 'rb':
- return self._rbuffer
- if mode == 'w' or mode == 'wb':
- return self._wbuffer
-
-
-class FakeHTTPConnection(object):
- """A fake httplib.HTTPConnection for netapp tests
-
- Requests made via this connection actually get translated and routed into
- the fake Dfm handler above, we then turn the response into
- the httplib.HTTPResponse that the caller expects.
- """
- def __init__(self, host, timeout=None):
- self.host = host
-
- def request(self, method, path, data=None, headers=None):
- if not headers:
- headers = {}
- req_str = '%s %s HTTP/1.1\r\n' % (method, path)
- for key, value in headers.iteritems():
- req_str += "%s: %s\r\n" % (key, value)
- if data:
- req_str += '\r\n%s' % data
-
- # NOTE(vish): normally the http transport normailizes from unicode
- sock = FakeHttplibSocket(req_str.decode("latin-1").encode("utf-8"))
- # NOTE(vish): stop the server from trying to look up address from
- # the fake socket
- FakeDfmServerHandler.address_string = lambda x: '127.0.0.1'
- self.app = FakeDfmServerHandler(sock, '127.0.0.1:8088', None)
-
- self.sock = FakeHttplibSocket(sock.result)
- self.http_response = httplib.HTTPResponse(self.sock)
-
- def set_debuglevel(self, level):
- pass
-
- def getresponse(self):
- self.http_response.begin()
- return self.http_response
-
- def getresponsebody(self):
- return self.sock.result
-
-
-class NetAppDriverTestCase(test.TestCase):
- """Test case for NetAppISCSIDriver"""
- STORAGE_SERVICE = 'Openstack Service'
- STORAGE_SERVICE_PREFIX = 'Openstack Service-'
- PROJECT_ID = 'testproj'
- VOLUME_NAME = 'volume-00000001'
- VOLUME_TYPE = ''
- VOLUME_SIZE = 2147483648L # 2 GB
- INITIATOR = 'iqn.1993-08.org.debian:01:23456789'
-
- def setUp(self):
- super(NetAppDriverTestCase, self).setUp()
- driver = netapp.NetAppISCSIDriver()
- self.stubs.Set(httplib, 'HTTPConnection', FakeHTTPConnection)
- driver._create_client(wsdl_url='http://localhost:8088/dfm.wsdl',
- login='root', password='password',
- hostname='localhost', port=8088, cache=False)
- driver._set_storage_service(self.STORAGE_SERVICE)
- driver._set_storage_service_prefix(self.STORAGE_SERVICE_PREFIX)
- driver._set_vfiler('')
- self.driver = driver
-
- def test_connect(self):
- self.driver.check_for_setup_error()
-
- def test_create_destroy(self):
- self.driver._discover_luns()
- self.driver._provision(self.VOLUME_NAME, None, self.PROJECT_ID,
- self.VOLUME_TYPE, self.VOLUME_SIZE)
- self.driver._remove_destroy(self.VOLUME_NAME, self.PROJECT_ID)
-
- def test_map_unmap(self):
- self.driver._discover_luns()
- self.driver._provision(self.VOLUME_NAME, None, self.PROJECT_ID,
- self.VOLUME_TYPE, self.VOLUME_SIZE)
- volume = {'name': self.VOLUME_NAME, 'project_id': self.PROJECT_ID,
- 'id': 0, 'provider_auth': None}
- updates = self.driver._get_export(volume)
- self.assertTrue(updates['provider_location'])
- volume['provider_location'] = updates['provider_location']
- connector = {'initiator': self.INITIATOR}
- connection_info = self.driver.initialize_connection(volume, connector)
- self.assertEqual(connection_info['driver_volume_type'], 'iscsi')
- properties = connection_info['data']
- self.driver.terminate_connection(volume, connector)
- self.driver._remove_destroy(self.VOLUME_NAME, self.PROJECT_ID)
-
-
-WSDL_HEADER_CMODE = """<?xml version="1.0" encoding="UTF-8"?>
-<definitions xmlns:soap="http://schemas.xmlsoap.org/wsdl/soap/"
- xmlns:na="http://cloud.netapp.com/"
-xmlns:xsd="http://www.w3.org/2001/XMLSchema"
-xmlns="http://schemas.xmlsoap.org/wsdl/"
-targetNamespace="http://cloud.netapp.com/" name="CloudStorageService">
-"""
-
-WSDL_TYPES_CMODE = """<types>
-<xs:schema xmlns:na="http://cloud.netapp.com/"
-xmlns:xs="http://www.w3.org/2001/XMLSchema" version="1.0"
-targetNamespace="http://cloud.netapp.com/">
-
- <xs:element name="ProvisionLun">
- <xs:complexType>
- <xs:all>
- <xs:element name="Name" type="xs:string"/>
- <xs:element name="Size" type="xsd:long"/>
- <xs:element name="Metadata" type="na:Metadata" minOccurs="0"
- maxOccurs="unbounded"/>
- </xs:all>
- </xs:complexType>
- </xs:element>
- <xs:element name="ProvisionLunResult">
- <xs:complexType>
- <xs:all>
- <xs:element name="Lun" type="na:Lun"/>
- </xs:all>
- </xs:complexType>
- </xs:element>
-
- <xs:element name="DestroyLun">
- <xs:complexType>
- <xs:all>
- <xs:element name="Handle" type="xsd:string"/>
- </xs:all>
- </xs:complexType>
- </xs:element>
- <xs:element name="DestroyLunResult">
- <xs:complexType>
- <xs:all/>
- </xs:complexType>
- </xs:element>
-
- <xs:element name="CloneLun">
- <xs:complexType>
- <xs:all>
- <xs:element name="Handle" type="xsd:string"/>
- <xs:element name="NewName" type="xsd:string"/>
- <xs:element name="Metadata" type="na:Metadata" minOccurs="0"
- maxOccurs="unbounded"/>
- </xs:all>
- </xs:complexType>
- </xs:element>
- <xs:element name="CloneLunResult">
- <xs:complexType>
- <xs:all>
- <xs:element name="Lun" type="na:Lun"/>
- </xs:all>
- </xs:complexType>
- </xs:element>
-
- <xs:element name="MapLun">
- <xs:complexType>
- <xs:all>
- <xs:element name="Handle" type="xsd:string"/>
- <xs:element name="InitiatorType" type="xsd:string"/>
- <xs:element name="InitiatorName" type="xsd:string"/>
- </xs:all>
- </xs:complexType>
- </xs:element>
- <xs:element name="MapLunResult">
- <xs:complexType>
- <xs:all/>
- </xs:complexType>
- </xs:element>
-
- <xs:element name="UnmapLun">
- <xs:complexType>
- <xs:all>
- <xs:element name="Handle" type="xsd:string"/>
- <xs:element name="InitiatorType" type="xsd:string"/>
- <xs:element name="InitiatorName" type="xsd:string"/>
- </xs:all>
- </xs:complexType>
- </xs:element>
- <xs:element name="UnmapLunResult">
- <xs:complexType>
- <xs:all/>
- </xs:complexType>
- </xs:element>
-
- <xs:element name="ListLuns">
- <xs:complexType>
- <xs:all>
- <xs:element name="NameFilter" type="xsd:string" minOccurs="0"/>
- </xs:all>
- </xs:complexType>
- </xs:element>
- <xs:element name="ListLunsResult">
- <xs:complexType>
- <xs:all>
- <xs:element name="Lun" type="na:Lun" minOccurs="0"
- maxOccurs="unbounded"/>
- </xs:all>
- </xs:complexType>
- </xs:element>
-
- <xs:element name="GetLunTargetDetails">
- <xs:complexType>
- <xs:all>
- <xs:element name="Handle" type="xsd:string"/>
- <xs:element name="InitiatorType" type="xsd:string"/>
- <xs:element name="InitiatorName" type="xsd:string"/>
- </xs:all>
- </xs:complexType>
- </xs:element>
- <xs:element name="GetLunTargetDetailsResult">
- <xs:complexType>
- <xs:all>
- <xs:element name="TargetDetails" type="na:TargetDetails"
- minOccurs="0" maxOccurs="unbounded"/>
- </xs:all>
- </xs:complexType>
- </xs:element>
-
- <xs:complexType name="Metadata">
- <xs:sequence>
- <xs:element name="Key" type="xs:string"/>
- <xs:element name="Value" type="xs:string"/>
- </xs:sequence>
- </xs:complexType>
-
- <xs:complexType name="Lun">
- <xs:sequence>
- <xs:element name="Name" type="xs:string"/>
- <xs:element name="Size" type="xs:long"/>
- <xs:element name="Handle" type="xs:string"/>
- <xs:element name="Metadata" type="na:Metadata" minOccurs="0"
- maxOccurs="unbounded"/>
- </xs:sequence>
- </xs:complexType>
-
- <xs:complexType name="TargetDetails">
- <xs:sequence>
- <xs:element name="Address" type="xs:string"/>
- <xs:element name="Port" type="xs:int"/>
- <xs:element name="Portal" type="xs:int"/>
- <xs:element name="Iqn" type="xs:string"/>
- <xs:element name="LunNumber" type="xs:int"/>
- </xs:sequence>
- </xs:complexType>
-
- </xs:schema></types>"""
-
-WSDL_TRAILER_CMODE = """<service name="CloudStorageService">
- <port name="CloudStoragePort" binding="na:CloudStorageBinding">
- <soap:address location="http://hostname:8080/ws/ntapcloud"/>
- </port>
- </service>
-</definitions>"""
-
-RESPONSE_PREFIX_CMODE = """<?xml version='1.0' encoding='UTF-8'?>
-<soapenv:Envelope xmlns:soapenv="http://schemas.xmlsoap.org/soap/envelope/">
-<soapenv:Body>"""
-
-RESPONSE_SUFFIX_CMODE = """</soapenv:Body></soapenv:Envelope>"""
-
-CMODE_APIS = ['ProvisionLun', 'DestroyLun', 'CloneLun', 'MapLun', 'UnmapLun',
- 'ListLuns', 'GetLunTargetDetails']
-
-
-class FakeCMODEServerHandler(BaseHTTPServer.BaseHTTPRequestHandler):
- """HTTP handler that fakes enough stuff to allow the driver to run"""
-
- def do_GET(s):
- """Respond to a GET request."""
- if '/ntap_cloud.wsdl' != s.path:
- s.send_response(404)
- s.end_headers
- return
- s.send_response(200)
- s.send_header("Content-Type", "application/wsdl+xml")
- s.end_headers()
- out = s.wfile
- out.write(WSDL_HEADER_CMODE)
- out.write(WSDL_TYPES_CMODE)
- for api in CMODE_APIS:
- out.write('<message name="%sRequest">' % api)
- out.write('<part element="na:%s" name="req"/>' % api)
- out.write('</message>')
- out.write('<message name="%sResponse">' % api)
- out.write('<part element="na:%sResult" name="res"/>' % api)
- out.write('</message>')
- out.write('<portType name="CloudStorage">')
- for api in CMODE_APIS:
- out.write('<operation name="%s">' % api)
- out.write('<input message="na:%sRequest"/>' % api)
- out.write('<output message="na:%sResponse"/>' % api)
- out.write('</operation>')
- out.write('</portType>')
- out.write('<binding name="CloudStorageBinding" '
- 'type="na:CloudStorage">')
- out.write('<soap:binding style="document" ' +
- 'transport="http://schemas.xmlsoap.org/soap/http"/>')
- for api in CMODE_APIS:
- out.write('<operation name="%s">' % api)
- out.write('<soap:operation soapAction=""/>')
- out.write('<input><soap:body use="literal"/></input>')
- out.write('<output><soap:body use="literal"/></output>')
- out.write('</operation>')
- out.write('</binding>')
- out.write(WSDL_TRAILER_CMODE)
-
- def do_POST(s):
- """Respond to a POST request."""
- if '/ws/ntapcloud' != s.path:
- s.send_response(404)
- s.end_headers
- return
- request_xml = s.rfile.read(int(s.headers['Content-Length']))
- ntap_ns = 'http://cloud.netapp.com/'
- nsmap = {'soapenv': 'http://schemas.xmlsoap.org/soap/envelope/',
- 'na': ntap_ns}
- root = etree.fromstring(request_xml)
-
- body = root.xpath('/soapenv:Envelope/soapenv:Body',
- namespaces=nsmap)[0]
- request = body.getchildren()[0]
- tag = request.tag
- if not tag.startswith('{' + ntap_ns + '}'):
- s.send_response(500)
- s.end_headers
- return
- api = tag[(2 + len(ntap_ns)):]
- if 'ProvisionLun' == api:
- body = """<ns:ProvisionLunResult xmlns:ns=
- "http://cloud.netapp.com/">
- <Lun><Name>lun1</Name><Size>20</Size>
- <Handle>1d9c006c-a406-42f6-a23f-5ed7a6dc33e3</Handle>
- <Metadata><Key>OsType</Key>
- <Value>linux</Value></Metadata></Lun>
- </ns:ProvisionLunResult>"""
- elif 'DestroyLun' == api:
- body = """<ns:DestroyLunResult xmlns:ns="http://cloud.netapp.com/"
- />"""
- elif 'CloneLun' == api:
- body = """<ns:CloneLunResult xmlns:ns="http://cloud.netapp.com/">
- <Lun><Name>lun2</Name><Size>2</Size>
- <Handle>98ea1791d228453899d422b4611642c3</Handle>
- <Metadata><Key>OsType</Key>
- <Value>linux</Value></Metadata>
- </Lun></ns:CloneLunResult>"""
- elif 'MapLun' == api:
- body = """<ns1:MapLunResult xmlns:ns="http://cloud.netapp.com/"
- />"""
- elif 'Unmap' == api:
- body = """<ns1:UnmapLunResult xmlns:ns="http://cloud.netapp.com/"
- />"""
- elif 'ListLuns' == api:
- body = """<ns:ListLunsResult xmlns:ns="http://cloud.netapp.com/">
- <Lun>
- <Name>lun1</Name>
- <Size>20</Size>
- <Handle>asdjdnsd</Handle>
- </Lun>
- </ns:ListLunsResult>"""
- elif 'GetLunTargetDetails' == api:
- body = """<ns:GetLunTargetDetailsResult
- xmlns:ns="http://cloud.netapp.com/">
- <TargetDetail>
- <Address>1.2.3.4</Address>
- <Port>3260</Port>
- <Portal>1000</Portal>
- <Iqn>iqn.199208.com.netapp:sn.123456789</Iqn>
- <LunNumber>0</LunNumber>
- </TargetDetail>
- </ns:GetLunTargetDetailsResult>"""
- else:
- # Unknown API
- s.send_response(500)
- s.end_headers
- return
- s.send_response(200)
- s.send_header("Content-Type", "text/xml; charset=utf-8")
- s.end_headers()
- s.wfile.write(RESPONSE_PREFIX_CMODE)
- s.wfile.write(body)
- s.wfile.write(RESPONSE_SUFFIX_CMODE)
-
-
-class FakeCmodeHTTPConnection(object):
- """A fake httplib.HTTPConnection for netapp tests
-
- Requests made via this connection actually get translated and routed into
- the fake Dfm handler above, we then turn the response into
- the httplib.HTTPResponse that the caller expects.
- """
- def __init__(self, host, timeout=None):
- self.host = host
-
- def request(self, method, path, data=None, headers=None):
- if not headers:
- headers = {}
- req_str = '%s %s HTTP/1.1\r\n' % (method, path)
- for key, value in headers.iteritems():
- req_str += "%s: %s\r\n" % (key, value)
- if data:
- req_str += '\r\n%s' % data
-
- # NOTE(vish): normally the http transport normailizes from unicode
- sock = FakeHttplibSocket(req_str.decode("latin-1").encode("utf-8"))
- # NOTE(vish): stop the server from trying to look up address from
- # the fake socket
- FakeCMODEServerHandler.address_string = lambda x: '127.0.0.1'
- self.app = FakeCMODEServerHandler(sock, '127.0.0.1:8080', None)
-
- self.sock = FakeHttplibSocket(sock.result)
- self.http_response = httplib.HTTPResponse(self.sock)
-
- def set_debuglevel(self, level):
- pass
-
- def getresponse(self):
- self.http_response.begin()
- return self.http_response
-
- def getresponsebody(self):
- return self.sock.result
-
-
-class NetAppCmodeISCSIDriverTestCase(test.TestCase):
- """Test case for NetAppISCSIDriver"""
- volume = {
- 'name': 'lun1', 'size': 1, 'volume_name': 'lun1',
- 'os_type': 'linux', 'provider_location': 'lun1',
- 'id': 'lun1', 'provider_auth': None, 'project_id': 'project',
- 'display_name': None, 'display_description': 'lun1',
- 'volume_type_id': None
- }
- snapshot = {
- 'name': 'lun2', 'size': 1, 'volume_name': 'lun1',
- 'volume_size': 1, 'project_id': 'project'
- }
- volume_sec = {
- 'name': 'vol_snapshot', 'size': 1, 'volume_name': 'lun1',
- 'os_type': 'linux', 'provider_location': 'lun1',
- 'id': 'lun1', 'provider_auth': None, 'project_id': 'project',
- 'display_name': None, 'display_description': 'lun1',
- 'volume_type_id': None
- }
-
- def setUp(self):
- super(NetAppCmodeISCSIDriverTestCase, self).setUp()
- driver = netapp.NetAppCmodeISCSIDriver()
- self.stubs.Set(httplib, 'HTTPConnection', FakeCmodeHTTPConnection)
- driver._create_client(wsdl_url='http://localhost:8080/ntap_cloud.wsdl',
- login='root', password='password',
- hostname='localhost', port=8080, cache=False)
- self.driver = driver
-
- def test_connect(self):
- self.driver.check_for_setup_error()
-
- def test_create_destroy(self):
- self.driver.create_volume(self.volume)
- self.driver.delete_volume(self.volume)
-
- def test_create_vol_snapshot_destroy(self):
- self.driver.create_volume(self.volume)
- self.driver.create_snapshot(self.snapshot)
- self.driver.create_volume_from_snapshot(self.volume_sec, self.snapshot)
- self.driver.delete_snapshot(self.snapshot)
- self.driver.delete_volume(self.volume)
-
- def test_map_unmap(self):
- self.driver.create_volume(self.volume)
- updates = self.driver.create_export(None, self.volume)
- self.assertTrue(updates['provider_location'])
- self.volume['provider_location'] = updates['provider_location']
- connector = {'initiator': 'init1'}
- connection_info = self.driver.initialize_connection(self.volume,
- connector)
- self.assertEqual(connection_info['driver_volume_type'], 'iscsi')
- properties = connection_info['data']
- self.driver.terminate_connection(self.volume, connector)
- self.driver.delete_volume(self.volume)
diff --git a/nova/tests/test_netapp_nfs.py b/nova/tests/test_netapp_nfs.py
deleted file mode 100644
index 1a8824386a..0000000000
--- a/nova/tests/test_netapp_nfs.py
+++ /dev/null
@@ -1,234 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright (c) 2012 NetApp, Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""Unit tests for the NetApp-specific NFS driver module (netapp_nfs)"""
-
-from nova import context
-from nova import exception
-from nova import test
-
-from nova.volume import netapp
-from nova.volume import netapp_nfs
-from nova.volume import nfs
-
-from mox import IgnoreArg
-from mox import IsA
-from mox import MockObject
-
-import mox
-import suds
-import types
-
-
-class FakeVolume(object):
- def __init__(self, size=0):
- self.size = size
- self.id = hash(self)
- self.name = None
-
- def __getitem__(self, key):
- return self.__dict__[key]
-
-
-class FakeSnapshot(object):
- def __init__(self, volume_size=0):
- self.volume_name = None
- self.name = None
- self.volume_id = None
- self.volume_size = volume_size
- self.user_id = None
- self.status = None
-
- def __getitem__(self, key):
- return self.__dict__[key]
-
-
-class FakeResponce(object):
- def __init__(self, status):
- """
- :param status: Either 'failed' or 'passed'
- """
- self.Status = status
-
- if status == 'failed':
- self.Reason = 'Sample error'
-
-
-class NetappNfsDriverTestCase(test.TestCase):
- """Test case for NetApp specific NFS clone driver"""
-
- def setUp(self):
- self._driver = netapp_nfs.NetAppNFSDriver()
- super(NetappNfsDriverTestCase, self).setUp()
-
- def test_check_for_setup_error(self):
- mox = self.mox
- drv = self._driver
-
- # check exception raises when flags are not set
- self.assertRaises(exception.NovaException,
- drv.check_for_setup_error)
-
- # set required flags
- self.flags(netapp_wsdl_url='val',
- netapp_login='val',
- netapp_password='val',
- netapp_server_hostname='val',
- netapp_server_port='val')
-
- mox.StubOutWithMock(nfs.NfsDriver, 'check_for_setup_error')
- nfs.NfsDriver.check_for_setup_error()
- mox.ReplayAll()
-
- drv.check_for_setup_error()
-
- def test_do_setup(self):
- mox = self.mox
- drv = self._driver
-
- mox.StubOutWithMock(drv, 'check_for_setup_error')
- mox.StubOutWithMock(netapp_nfs.NetAppNFSDriver, '_get_client')
-
- drv.check_for_setup_error()
- netapp_nfs.NetAppNFSDriver._get_client()
-
- mox.ReplayAll()
-
- drv.do_setup(IsA(context.RequestContext))
-
- def test_create_snapshot(self):
- """Test snapshot can be created and deleted"""
- mox = self.mox
- drv = self._driver
-
- mox.StubOutWithMock(drv, '_clone_volume')
- drv._clone_volume(IgnoreArg(), IgnoreArg(), IgnoreArg())
- mox.ReplayAll()
-
- drv.create_snapshot(FakeSnapshot())
-
- def test_create_volume_from_snapshot(self):
- """Tests volume creation from snapshot"""
- drv = self._driver
- mox = self.mox
- volume = FakeVolume(1)
- snapshot = FakeSnapshot(2)
-
- self.assertRaises(exception.NovaException,
- drv.create_volume_from_snapshot,
- volume,
- snapshot)
-
- snapshot = FakeSnapshot(1)
-
- location = '127.0.0.1:/nfs'
- expected_result = {'provider_location': location}
- mox.StubOutWithMock(drv, '_clone_volume')
- mox.StubOutWithMock(drv, '_get_volume_location')
- drv._clone_volume(IgnoreArg(), IgnoreArg(), IgnoreArg())
- drv._get_volume_location(IgnoreArg()).AndReturn(location)
-
- mox.ReplayAll()
-
- loc = drv.create_volume_from_snapshot(volume, snapshot)
-
- self.assertEquals(loc, expected_result)
-
- def _prepare_delete_snapshot_mock(self, snapshot_exists):
- drv = self._driver
- mox = self.mox
-
- mox.StubOutWithMock(drv, '_get_provider_location')
- mox.StubOutWithMock(drv, '_volume_not_present')
-
- if snapshot_exists:
- mox.StubOutWithMock(drv, '_execute')
- mox.StubOutWithMock(drv, '_get_volume_path')
-
- drv._get_provider_location(IgnoreArg())
- drv._volume_not_present(IgnoreArg(), IgnoreArg())\
- .AndReturn(not snapshot_exists)
-
- if snapshot_exists:
- drv._get_volume_path(IgnoreArg(), IgnoreArg())
- drv._execute('rm', None, run_as_root=True)
-
- mox.ReplayAll()
-
- return mox
-
- def test_delete_existing_snapshot(self):
- drv = self._driver
- self._prepare_delete_snapshot_mock(True)
-
- drv.delete_snapshot(FakeSnapshot())
-
- def test_delete_missing_snapshot(self):
- drv = self._driver
- self._prepare_delete_snapshot_mock(False)
-
- drv.delete_snapshot(FakeSnapshot())
-
- def _prepare_clone_mock(self, status):
- drv = self._driver
- mox = self.mox
-
- volume = FakeVolume()
- setattr(volume, 'provider_location', '127.0.0.1:/nfs')
-
- drv._client = MockObject(suds.client.Client)
- drv._client.factory = MockObject(suds.client.Factory)
- drv._client.service = MockObject(suds.client.ServiceSelector)
-
- # ApiProxy() method is generated by ServiceSelector at runtime from the
- # XML, so mocking is impossible.
- setattr(drv._client.service,
- 'ApiProxy',
- types.MethodType(lambda *args, **kwargs: FakeResponce(status),
- suds.client.ServiceSelector))
- mox.StubOutWithMock(drv, '_get_host_id')
- mox.StubOutWithMock(drv, '_get_full_export_path')
-
- drv._get_host_id(IgnoreArg()).AndReturn('10')
- drv._get_full_export_path(IgnoreArg(), IgnoreArg()).AndReturn('/nfs')
-
- return mox
-
- def test_successfull_clone_volume(self):
- drv = self._driver
- mox = self._prepare_clone_mock('passed')
-
- mox.ReplayAll()
-
- volume_name = 'volume_name'
- clone_name = 'clone_name'
- volume_id = volume_name + str(hash(volume_name))
-
- drv._clone_volume(volume_name, clone_name, volume_id)
-
- def test_failed_clone_volume(self):
- drv = self._driver
- mox = self._prepare_clone_mock('failed')
-
- mox.ReplayAll()
-
- volume_name = 'volume_name'
- clone_name = 'clone_name'
- volume_id = volume_name + str(hash(volume_name))
-
- self.assertRaises(exception.NovaException,
- drv._clone_volume,
- volume_name, clone_name, volume_id)
diff --git a/nova/tests/test_nexenta.py b/nova/tests/test_nexenta.py
deleted file mode 100644
index aac877cc14..0000000000
--- a/nova/tests/test_nexenta.py
+++ /dev/null
@@ -1,278 +0,0 @@
-#!/usr/bin/env python
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-#
-# Copyright 2011 Nexenta Systems, Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""
-Unit tests for OpenStack Nova volume driver
-"""
-
-import base64
-import urllib2
-
-import nova.flags
-import nova.test
-from nova.volume import nexenta
-from nova.volume.nexenta import jsonrpc
-from nova.volume.nexenta import volume
-
-FLAGS = nova.flags.FLAGS
-
-
-class TestNexentaDriver(nova.test.TestCase):
- TEST_VOLUME_NAME = 'volume1'
- TEST_VOLUME_NAME2 = 'volume2'
- TEST_SNAPSHOT_NAME = 'snapshot1'
- TEST_VOLUME_REF = {
- 'name': TEST_VOLUME_NAME,
- 'size': 1,
- }
- TEST_VOLUME_REF2 = {
- 'name': TEST_VOLUME_NAME2,
- 'size': 1,
- }
- TEST_SNAPSHOT_REF = {
- 'name': TEST_SNAPSHOT_NAME,
- 'volume_name': TEST_VOLUME_NAME,
- }
-
- def setUp(self):
- super(TestNexentaDriver, self).setUp()
- self.flags(
- nexenta_host='1.1.1.1',
- nexenta_volume='nova',
- nexenta_target_prefix='iqn:',
- nexenta_target_group_prefix='nova/',
- nexenta_blocksize='8K',
- nexenta_sparse=True,
- )
- self.nms_mock = self.mox.CreateMockAnything()
- for mod in ['volume', 'zvol', 'iscsitarget',
- 'stmf', 'scsidisk', 'snapshot']:
- setattr(self.nms_mock, mod, self.mox.CreateMockAnything())
- self.stubs.Set(jsonrpc, 'NexentaJSONProxy',
- lambda *_, **__: self.nms_mock)
- self.drv = volume.NexentaDriver()
- self.drv.do_setup({})
-
- def test_setup_error(self):
- self.nms_mock.volume.object_exists('nova').AndReturn(True)
- self.mox.ReplayAll()
- self.drv.check_for_setup_error()
-
- def test_setup_error_fail(self):
- self.nms_mock.volume.object_exists('nova').AndReturn(False)
- self.mox.ReplayAll()
- self.assertRaises(LookupError, self.drv.check_for_setup_error)
-
- def test_local_path(self):
- self.assertRaises(NotImplementedError, self.drv.local_path, '')
-
- def test_create_volume(self):
- self.nms_mock.zvol.create('nova/volume1', '1G', '8K', True)
- self.mox.ReplayAll()
- self.drv.create_volume(self.TEST_VOLUME_REF)
-
- def test_delete_volume(self):
- self.nms_mock.zvol.destroy('nova/volume1', '')
- self.mox.ReplayAll()
- self.drv.delete_volume(self.TEST_VOLUME_REF)
-
- def test_create_snapshot(self):
- self.nms_mock.zvol.create_snapshot('nova/volume1', 'snapshot1', '')
- self.mox.ReplayAll()
- self.drv.create_snapshot(self.TEST_SNAPSHOT_REF)
-
- def test_create_volume_from_snapshot(self):
- self.nms_mock.zvol.clone('nova/volume1@snapshot1', 'nova/volume2')
- self.mox.ReplayAll()
- self.drv.create_volume_from_snapshot(self.TEST_VOLUME_REF2,
- self.TEST_SNAPSHOT_REF)
-
- def test_delete_snapshot(self):
- self.nms_mock.snapshot.destroy('nova/volume1@snapshot1', '')
- self.mox.ReplayAll()
- self.drv.delete_snapshot(self.TEST_SNAPSHOT_REF)
-
- _CREATE_EXPORT_METHODS = [
- ('iscsitarget', 'create_target', ({'target_name': 'iqn:volume1'},),
- u'Unable to create iscsi target\n'
- u' iSCSI target iqn.1986-03.com.sun:02:nova-volume1 already'
- u' configured\n'
- u' itadm create-target failed with error 17\n',
- ),
- ('stmf', 'create_targetgroup', ('nova/volume1',),
- u'Unable to create targetgroup: stmfadm: nova/volume1:'
- u' already exists\n',
- ),
- ('stmf', 'add_targetgroup_member', ('nova/volume1', 'iqn:volume1'),
- u'Unable to add member to targetgroup: stmfadm:'
- u' iqn.1986-03.com.sun:02:nova-volume1: already exists\n',
- ),
- ('scsidisk', 'create_lu', ('nova/volume1', {}),
- u"Unable to create lu with zvol 'nova/volume1':\n"
- u" sbdadm: filename /dev/zvol/rdsk/nova/volume1: in use\n",
- ),
- ('scsidisk', 'add_lun_mapping_entry', ('nova/volume1', {
- 'target_group': 'nova/volume1', 'lun': '0'}),
- u"Unable to add view to zvol 'nova/volume1' (LUNs in use: ):\n"
- u" stmfadm: view entry exists\n",
- ),
- ]
-
- def _stub_export_method(self, module, method, args, error, fail=False):
- m = getattr(self.nms_mock, module)
- m = getattr(m, method)
- mock = m(*args)
- if fail:
- mock.AndRaise(nexenta.NexentaException(error))
-
- def _stub_all_export_methods(self, fail=False):
- for params in self._CREATE_EXPORT_METHODS:
- self._stub_export_method(*params, fail=fail)
-
- def test_create_export(self):
- self._stub_all_export_methods()
- self.mox.ReplayAll()
- retval = self.drv.create_export({}, self.TEST_VOLUME_REF)
- self.assertEquals(retval,
- {'provider_location':
- '%s:%s,1 %s%s' % (FLAGS.nexenta_host,
- FLAGS.nexenta_iscsi_target_portal_port,
- FLAGS.nexenta_target_prefix,
- self.TEST_VOLUME_NAME)})
-
- def __get_test(i):
- def _test_create_export_fail(self):
- for params in self._CREATE_EXPORT_METHODS[:i]:
- self._stub_export_method(*params)
- self._stub_export_method(*self._CREATE_EXPORT_METHODS[i],
- fail=True)
- self.mox.ReplayAll()
- self.assertRaises(nexenta.NexentaException,
- self.drv.create_export, {}, self.TEST_VOLUME_REF)
- return _test_create_export_fail
-
- for i in range(len(_CREATE_EXPORT_METHODS)):
- locals()['test_create_export_fail_%d' % i] = __get_test(i)
-
- def test_ensure_export(self):
- self._stub_all_export_methods(fail=True)
- self.mox.ReplayAll()
- self.drv.ensure_export({}, self.TEST_VOLUME_REF)
-
- def test_remove_export(self):
- self.nms_mock.scsidisk.delete_lu('nova/volume1')
- self.nms_mock.stmf.destroy_targetgroup('nova/volume1')
- self.nms_mock.iscsitarget.delete_target('iqn:volume1')
- self.mox.ReplayAll()
- self.drv.remove_export({}, self.TEST_VOLUME_REF)
-
- def test_remove_export_fail_0(self):
- self.nms_mock.scsidisk.delete_lu('nova/volume1')
- self.nms_mock.stmf.destroy_targetgroup('nova/volume1').AndRaise(
- nexenta.NexentaException())
- self.nms_mock.iscsitarget.delete_target('iqn:volume1')
- self.mox.ReplayAll()
- self.drv.remove_export({}, self.TEST_VOLUME_REF)
-
- def test_remove_export_fail_1(self):
- self.nms_mock.scsidisk.delete_lu('nova/volume1')
- self.nms_mock.stmf.destroy_targetgroup('nova/volume1')
- self.nms_mock.iscsitarget.delete_target('iqn:volume1').AndRaise(
- nexenta.NexentaException())
- self.mox.ReplayAll()
- self.drv.remove_export({}, self.TEST_VOLUME_REF)
-
-
-class TestNexentaJSONRPC(nova.test.TestCase):
- URL = 'http://example.com/'
- URL_S = 'https://example.com/'
- USER = 'user'
- PASSWORD = 'password'
- HEADERS = {'Authorization': 'Basic %s' % (base64.b64encode(
- ':'.join((USER, PASSWORD))),),
- 'Content-Type': 'application/json'}
- REQUEST = 'the request'
-
- def setUp(self):
- super(TestNexentaJSONRPC, self).setUp()
- self.proxy = jsonrpc.NexentaJSONProxy(
- self.URL, self.USER, self.PASSWORD, auto=True)
- self.mox.StubOutWithMock(urllib2, 'Request', True)
- self.mox.StubOutWithMock(urllib2, 'urlopen')
- self.resp_mock = self.mox.CreateMockAnything()
- self.resp_info_mock = self.mox.CreateMockAnything()
- self.resp_mock.info().AndReturn(self.resp_info_mock)
- urllib2.urlopen(self.REQUEST).AndReturn(self.resp_mock)
-
- def test_call(self):
- urllib2.Request(self.URL,
- '{"object": null, "params": ["arg1", "arg2"], "method": null}',
- self.HEADERS).AndReturn(self.REQUEST)
- self.resp_info_mock.status = ''
- self.resp_mock.read().AndReturn(
- '{"error": null, "result": "the result"}')
- self.mox.ReplayAll()
- result = self.proxy('arg1', 'arg2')
- self.assertEquals("the result", result)
-
- def test_call_deep(self):
- urllib2.Request(self.URL,
- '{"object": "obj1.subobj", "params": ["arg1", "arg2"],'
- ' "method": "meth"}',
- self.HEADERS).AndReturn(self.REQUEST)
- self.resp_info_mock.status = ''
- self.resp_mock.read().AndReturn(
- '{"error": null, "result": "the result"}')
- self.mox.ReplayAll()
- result = self.proxy.obj1.subobj.meth('arg1', 'arg2')
- self.assertEquals("the result", result)
-
- def test_call_auto(self):
- urllib2.Request(self.URL,
- '{"object": null, "params": ["arg1", "arg2"], "method": null}',
- self.HEADERS).AndReturn(self.REQUEST)
- urllib2.Request(self.URL_S,
- '{"object": null, "params": ["arg1", "arg2"], "method": null}',
- self.HEADERS).AndReturn(self.REQUEST)
- self.resp_info_mock.status = 'EOF in headers'
- self.resp_mock.read().AndReturn(
- '{"error": null, "result": "the result"}')
- urllib2.urlopen(self.REQUEST).AndReturn(self.resp_mock)
- self.mox.ReplayAll()
- result = self.proxy('arg1', 'arg2')
- self.assertEquals("the result", result)
-
- def test_call_error(self):
- urllib2.Request(self.URL,
- '{"object": null, "params": ["arg1", "arg2"], "method": null}',
- self.HEADERS).AndReturn(self.REQUEST)
- self.resp_info_mock.status = ''
- self.resp_mock.read().AndReturn(
- '{"error": {"message": "the error"}, "result": "the result"}')
- self.mox.ReplayAll()
- self.assertRaises(jsonrpc.NexentaJSONException,
- self.proxy, 'arg1', 'arg2')
-
- def test_call_fail(self):
- urllib2.Request(self.URL,
- '{"object": null, "params": ["arg1", "arg2"], "method": null}',
- self.HEADERS).AndReturn(self.REQUEST)
- self.resp_info_mock.status = 'EOF in headers'
- self.proxy.auto = False
- self.mox.ReplayAll()
- self.assertRaises(jsonrpc.NexentaJSONException,
- self.proxy, 'arg1', 'arg2')
diff --git a/nova/tests/test_nfs.py b/nova/tests/test_nfs.py
deleted file mode 100644
index d0d235b1b1..0000000000
--- a/nova/tests/test_nfs.py
+++ /dev/null
@@ -1,569 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright (c) 2012 NetApp, Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""Unit tests for the NFS driver module"""
-
-import __builtin__
-import errno
-import os
-
-import mox as mox_lib
-from mox import IgnoreArg
-from mox import IsA
-from mox import stubout
-
-from nova import context
-from nova import exception
-from nova.exception import ProcessExecutionError
-from nova import test
-
-from nova.volume import nfs
-
-
-class DumbVolume(object):
- fields = {}
-
- def __setitem__(self, key, value):
- self.fields[key] = value
-
- def __getitem__(self, item):
- return self.fields[item]
-
-
-class NfsDriverTestCase(test.TestCase):
- """Test case for NFS driver"""
-
- TEST_NFS_EXPORT1 = 'nfs-host1:/export'
- TEST_NFS_EXPORT2 = 'nfs-host2:/export'
- TEST_SIZE_IN_GB = 1
- TEST_MNT_POINT = '/mnt/nfs'
- TEST_MNT_POINT_BASE = '/mnt/test'
- TEST_LOCAL_PATH = '/mnt/nfs/volume-123'
- TEST_FILE_NAME = 'test.txt'
- TEST_SHARES_CONFIG_FILE = '/etc/cinder/test-shares.conf'
- ONE_GB_IN_BYTES = 1024 * 1024 * 1024
-
- def setUp(self):
- self._driver = nfs.NfsDriver()
- super(NfsDriverTestCase, self).setUp()
-
- def stub_out_not_replaying(self, obj, attr_name):
- attr_to_replace = getattr(obj, attr_name)
- stub = mox_lib.MockObject(attr_to_replace)
- self.stubs.Set(obj, attr_name, stub)
-
- def test_path_exists_should_return_true(self):
- """_path_exists should return True if stat returns 0"""
- mox = self.mox
- drv = self._driver
-
- mox.StubOutWithMock(drv, '_execute')
- drv._execute('stat', self.TEST_FILE_NAME, run_as_root=True)
-
- mox.ReplayAll()
-
- self.assertTrue(drv._path_exists(self.TEST_FILE_NAME))
-
- def test_path_exists_should_return_false(self):
- """_path_exists should return True if stat doesn't return 0"""
- mox = self.mox
- drv = self._driver
-
- mox.StubOutWithMock(drv, '_execute')
- drv._execute('stat', self.TEST_FILE_NAME, run_as_root=True).\
- AndRaise(ProcessExecutionError(
- stderr="stat: cannot stat `test.txt': No such file or directory"))
-
- mox.ReplayAll()
-
- self.assertFalse(drv._path_exists(self.TEST_FILE_NAME))
-
- def test_local_path(self):
- """local_path common use case"""
- self.flags(nfs_mount_point_base=self.TEST_MNT_POINT_BASE)
- drv = self._driver
-
- volume = DumbVolume()
- volume['provider_location'] = self.TEST_NFS_EXPORT1
- volume['name'] = 'volume-123'
-
- self.assertEqual(
- '/mnt/test/2f4f60214cf43c595666dd815f0360a4/volume-123',
- drv.local_path(volume))
-
- def test_mount_nfs_should_mount_correctly(self):
- """_mount_nfs common case usage"""
- mox = self.mox
- drv = self._driver
-
- mox.StubOutWithMock(drv, '_path_exists')
- drv._path_exists(self.TEST_MNT_POINT).AndReturn(True)
-
- mox.StubOutWithMock(drv, '_execute')
- drv._execute('mount', '-t', 'nfs', self.TEST_NFS_EXPORT1,
- self.TEST_MNT_POINT, run_as_root=True)
-
- mox.ReplayAll()
-
- drv._mount_nfs(self.TEST_NFS_EXPORT1, self.TEST_MNT_POINT)
-
- def test_mount_nfs_should_suppress_already_mounted_error(self):
- """_mount_nfs should suppress already mounted error if ensure=True
- """
- mox = self.mox
- drv = self._driver
-
- mox.StubOutWithMock(drv, '_path_exists')
- drv._path_exists(self.TEST_MNT_POINT).AndReturn(True)
-
- mox.StubOutWithMock(drv, '_execute')
- drv._execute('mount', '-t', 'nfs', self.TEST_NFS_EXPORT1,
- self.TEST_MNT_POINT, run_as_root=True).\
- AndRaise(ProcessExecutionError(
- stderr='is busy or already mounted'))
-
- mox.ReplayAll()
-
- drv._mount_nfs(self.TEST_NFS_EXPORT1, self.TEST_MNT_POINT, ensure=True)
-
- def test_mount_nfs_should_reraise_already_mounted_error(self):
- """_mount_nfs should not suppress already mounted error if ensure=False
- """
- mox = self.mox
- drv = self._driver
-
- mox.StubOutWithMock(drv, '_path_exists')
- drv._path_exists(self.TEST_MNT_POINT).AndReturn(True)
-
- mox.StubOutWithMock(drv, '_execute')
- drv._execute('mount', '-t', 'nfs', self.TEST_NFS_EXPORT1,
- self.TEST_MNT_POINT, run_as_root=True).\
- AndRaise(ProcessExecutionError(stderr='is busy or already mounted'))
-
- mox.ReplayAll()
-
- self.assertRaises(ProcessExecutionError, drv._mount_nfs,
- self.TEST_NFS_EXPORT1, self.TEST_MNT_POINT,
- ensure=False)
-
- def test_mount_nfs_should_create_mountpoint_if_not_yet(self):
- """_mount_nfs should create mountpoint if it doesn't exist"""
- mox = self.mox
- drv = self._driver
-
- mox.StubOutWithMock(drv, '_path_exists')
- drv._path_exists(self.TEST_MNT_POINT).AndReturn(False)
-
- mox.StubOutWithMock(drv, '_execute')
- drv._execute('mkdir', '-p', self.TEST_MNT_POINT)
- drv._execute(*([IgnoreArg()] * 5), run_as_root=IgnoreArg())
-
- mox.ReplayAll()
-
- drv._mount_nfs(self.TEST_NFS_EXPORT1, self.TEST_MNT_POINT)
-
- def test_mount_nfs_should_not_create_mountpoint_if_already(self):
- """_mount_nfs should not create mountpoint if it already exists"""
- mox = self.mox
- drv = self._driver
-
- mox.StubOutWithMock(drv, '_path_exists')
- drv._path_exists(self.TEST_MNT_POINT).AndReturn(True)
-
- mox.StubOutWithMock(drv, '_execute')
- drv._execute(*([IgnoreArg()] * 5), run_as_root=IgnoreArg())
-
- mox.ReplayAll()
-
- drv._mount_nfs(self.TEST_NFS_EXPORT1, self.TEST_MNT_POINT)
-
- def test_get_hash_str(self):
- """_get_hash_str should calculation correct value"""
- drv = self._driver
-
- self.assertEqual('2f4f60214cf43c595666dd815f0360a4',
- drv._get_hash_str(self.TEST_NFS_EXPORT1))
-
- def test_get_mount_point_for_share(self):
- """_get_mount_point_for_share should calculate correct value"""
- drv = self._driver
-
- self.flags(nfs_mount_point_base=self.TEST_MNT_POINT_BASE)
-
- self.assertEqual('/mnt/test/2f4f60214cf43c595666dd815f0360a4',
- drv._get_mount_point_for_share(self.TEST_NFS_EXPORT1))
-
- def test_get_available_capacity_with_df(self):
- """_get_available_capacity should calculate correct value"""
- mox = self.mox
- drv = self._driver
-
- df_avail = 1490560
- df_head = 'Filesystem 1K-blocks Used Available Use% Mounted on\n'
- df_data = 'nfs-host:/export 2620544 996864 %d 41%% /mnt' % df_avail
- df_output = df_head + df_data
-
- self.flags(nfs_disk_util='df')
-
- mox.StubOutWithMock(drv, '_get_mount_point_for_share')
- drv._get_mount_point_for_share(self.TEST_NFS_EXPORT1).\
- AndReturn(self.TEST_MNT_POINT)
-
- mox.StubOutWithMock(drv, '_execute')
- drv._execute('df', '-P', '-B', '1', self.TEST_MNT_POINT,
- run_as_root=True).AndReturn((df_output, None))
-
- mox.ReplayAll()
-
- self.assertEquals(df_avail,
- drv._get_available_capacity(self.TEST_NFS_EXPORT1))
-
- def test_get_available_capacity_with_du(self):
- """_get_available_capacity should calculate correct value"""
- mox = self.mox
- drv = self._driver
-
- self.flags(nfs_disk_util='du')
-
- df_total_size = 2620544
- df_used_size = 996864
- df_avail_size = 1490560
- df_title = 'Filesystem 1-blocks Used Available Use% Mounted on\n'
- df_mnt_data = 'nfs-host:/export %d %d %d 41%% /mnt' % (df_total_size,
- df_used_size,
- df_avail_size)
- df_output = df_title + df_mnt_data
-
- du_used = 490560
- du_output = '%d /mnt' % du_used
-
- mox.StubOutWithMock(drv, '_get_mount_point_for_share')
- drv._get_mount_point_for_share(self.TEST_NFS_EXPORT1).\
- AndReturn(self.TEST_MNT_POINT)
-
- mox.StubOutWithMock(drv, '_execute')
- drv._execute('df', '-P', '-B', '1', self.TEST_MNT_POINT,
- run_as_root=True).\
- AndReturn((df_output, None))
- drv._execute('du', '-sb', '--apparent-size',
- '--exclude', '*snapshot*',
- self.TEST_MNT_POINT,
- run_as_root=True).AndReturn((du_output, None))
-
- mox.ReplayAll()
-
- self.assertEquals(df_total_size - du_used,
- drv._get_available_capacity(self.TEST_NFS_EXPORT1))
-
- def test_load_shares_config(self):
- mox = self.mox
- drv = self._driver
-
- self.flags(nfs_shares_config=self.TEST_SHARES_CONFIG_FILE)
-
- mox.StubOutWithMock(__builtin__, 'open')
- config_data = []
- config_data.append(self.TEST_NFS_EXPORT1)
- config_data.append('#' + self.TEST_NFS_EXPORT2)
- config_data.append('')
- __builtin__.open(self.TEST_SHARES_CONFIG_FILE).AndReturn(config_data)
- mox.ReplayAll()
-
- shares = drv._load_shares_config()
-
- self.assertEqual([self.TEST_NFS_EXPORT1], shares)
-
- def test_ensure_share_mounted(self):
- """_ensure_share_mounted simple use case"""
- mox = self.mox
- drv = self._driver
-
- mox.StubOutWithMock(drv, '_get_mount_point_for_share')
- drv._get_mount_point_for_share(self.TEST_NFS_EXPORT1).\
- AndReturn(self.TEST_MNT_POINT)
-
- mox.StubOutWithMock(drv, '_mount_nfs')
- drv._mount_nfs(self.TEST_NFS_EXPORT1, self.TEST_MNT_POINT, ensure=True)
-
- mox.ReplayAll()
-
- drv._ensure_share_mounted(self.TEST_NFS_EXPORT1)
-
- def test_ensure_shares_mounted_should_save_mounting_successfully(self):
- """_ensure_shares_mounted should save share if mounted with success"""
- mox = self.mox
- drv = self._driver
-
- mox.StubOutWithMock(drv, '_load_shares_config')
- drv._load_shares_config().AndReturn([self.TEST_NFS_EXPORT1])
- mox.StubOutWithMock(drv, '_ensure_share_mounted')
- drv._ensure_share_mounted(self.TEST_NFS_EXPORT1)
-
- mox.ReplayAll()
-
- drv._ensure_shares_mounted()
-
- self.assertEqual(1, len(drv._mounted_shares))
- self.assertEqual(self.TEST_NFS_EXPORT1, drv._mounted_shares[0])
-
- def test_ensure_shares_mounted_should_not_save_mounting_with_error(self):
- """_ensure_shares_mounted should not save share if failed to mount"""
- mox = self.mox
- drv = self._driver
-
- mox.StubOutWithMock(drv, '_load_shares_config')
- drv._load_shares_config().AndReturn([self.TEST_NFS_EXPORT1])
- mox.StubOutWithMock(drv, '_ensure_share_mounted')
- drv._ensure_share_mounted(self.TEST_NFS_EXPORT1).AndRaise(Exception())
-
- mox.ReplayAll()
-
- drv._ensure_shares_mounted()
-
- self.assertEqual(0, len(drv._mounted_shares))
-
- def test_setup_should_throw_error_if_shares_config_not_configured(self):
- """do_setup should throw error if shares config is not configured """
- drv = self._driver
-
- self.flags(nfs_shares_config=self.TEST_SHARES_CONFIG_FILE)
-
- self.assertRaises(exception.NfsException,
- drv.do_setup, IsA(context.RequestContext))
-
- def test_setup_should_throw_exception_if_nfs_client_is_not_installed(self):
- """do_setup should throw error if nfs client is not installed """
- mox = self.mox
- drv = self._driver
-
- self.flags(nfs_shares_config=self.TEST_SHARES_CONFIG_FILE)
-
- mox.StubOutWithMock(os.path, 'exists')
- os.path.exists(self.TEST_SHARES_CONFIG_FILE).AndReturn(True)
- mox.StubOutWithMock(drv, '_execute')
- drv._execute('mount.nfs', check_exit_code=False).\
- AndRaise(OSError(errno.ENOENT, 'No such file or directory'))
-
- mox.ReplayAll()
-
- self.assertRaises(exception.NfsException,
- drv.do_setup, IsA(context.RequestContext))
-
- def test_find_share_should_throw_error_if_there_is_no_mounted_shares(self):
- """_find_share should throw error if there is no mounted shares"""
- drv = self._driver
-
- drv._mounted_shares = []
-
- self.assertRaises(exception.NotFound, drv._find_share,
- self.TEST_SIZE_IN_GB)
-
- def test_find_share(self):
- """_find_share simple use case"""
- mox = self.mox
- drv = self._driver
-
- drv._mounted_shares = [self.TEST_NFS_EXPORT1, self.TEST_NFS_EXPORT2]
-
- mox.StubOutWithMock(drv, '_get_available_capacity')
- drv._get_available_capacity(self.TEST_NFS_EXPORT1).\
- AndReturn(2 * self.ONE_GB_IN_BYTES)
- drv._get_available_capacity(self.TEST_NFS_EXPORT2).\
- AndReturn(3 * self.ONE_GB_IN_BYTES)
-
- mox.ReplayAll()
-
- self.assertEqual(self.TEST_NFS_EXPORT2,
- drv._find_share(self.TEST_SIZE_IN_GB))
-
- def test_find_share_should_throw_error_if_there_is_no_enough_place(self):
- """_find_share should throw error if there is no share to host vol"""
- mox = self.mox
- drv = self._driver
-
- drv._mounted_shares = [self.TEST_NFS_EXPORT1, self.TEST_NFS_EXPORT2]
-
- mox.StubOutWithMock(drv, '_get_available_capacity')
- drv._get_available_capacity(self.TEST_NFS_EXPORT1).\
- AndReturn(0)
- drv._get_available_capacity(self.TEST_NFS_EXPORT2).\
- AndReturn(0)
-
- mox.ReplayAll()
-
- self.assertRaises(exception.NfsNoSuitableShareFound, drv._find_share,
- self.TEST_SIZE_IN_GB)
-
- def _simple_volume(self):
- volume = DumbVolume()
- volume['provider_location'] = '127.0.0.1:/mnt'
- volume['name'] = 'volume_name'
- volume['size'] = 10
-
- return volume
-
- def test_create_sparsed_volume(self):
- mox = self.mox
- drv = self._driver
- volume = self._simple_volume()
-
- self.flags(nfs_sparsed_volumes=True)
-
- mox.StubOutWithMock(drv, '_create_sparsed_file')
- mox.StubOutWithMock(drv, '_set_rw_permissions_for_all')
-
- drv._create_sparsed_file(IgnoreArg(), IgnoreArg())
- drv._set_rw_permissions_for_all(IgnoreArg())
-
- mox.ReplayAll()
-
- drv._do_create_volume(volume)
-
- def test_create_nonsparsed_volume(self):
- mox = self.mox
- drv = self._driver
- volume = self._simple_volume()
-
- self.flags(nfs_sparsed_volumes=False)
-
- mox.StubOutWithMock(drv, '_create_regular_file')
- mox.StubOutWithMock(drv, '_set_rw_permissions_for_all')
-
- drv._create_regular_file(IgnoreArg(), IgnoreArg())
- drv._set_rw_permissions_for_all(IgnoreArg())
-
- mox.ReplayAll()
-
- drv._do_create_volume(volume)
-
- def test_create_volume_should_ensure_nfs_mounted(self):
- """create_volume should ensure shares provided in config are mounted"""
- mox = self.mox
- drv = self._driver
-
- self.stub_out_not_replaying(nfs, 'LOG')
- self.stub_out_not_replaying(drv, '_find_share')
- self.stub_out_not_replaying(drv, '_do_create_volume')
-
- mox.StubOutWithMock(drv, '_ensure_shares_mounted')
- drv._ensure_shares_mounted()
-
- mox.ReplayAll()
-
- volume = DumbVolume()
- volume['size'] = self.TEST_SIZE_IN_GB
- drv.create_volume(volume)
-
- def test_create_volume_should_return_provider_location(self):
- """create_volume should return provider_location with found share """
- mox = self.mox
- drv = self._driver
-
- self.stub_out_not_replaying(nfs, 'LOG')
- self.stub_out_not_replaying(drv, '_ensure_shares_mounted')
- self.stub_out_not_replaying(drv, '_do_create_volume')
-
- mox.StubOutWithMock(drv, '_find_share')
- drv._find_share(self.TEST_SIZE_IN_GB).AndReturn(self.TEST_NFS_EXPORT1)
-
- mox.ReplayAll()
-
- volume = DumbVolume()
- volume['size'] = self.TEST_SIZE_IN_GB
- result = drv.create_volume(volume)
- self.assertEqual(self.TEST_NFS_EXPORT1, result['provider_location'])
-
- def test_delete_volume(self):
- """delete_volume simple test case"""
- mox = self.mox
- drv = self._driver
-
- self.stub_out_not_replaying(drv, '_ensure_share_mounted')
-
- volume = DumbVolume()
- volume['name'] = 'volume-123'
- volume['provider_location'] = self.TEST_NFS_EXPORT1
-
- mox.StubOutWithMock(drv, 'local_path')
- drv.local_path(volume).AndReturn(self.TEST_LOCAL_PATH)
-
- mox.StubOutWithMock(drv, '_path_exists')
- drv._path_exists(self.TEST_LOCAL_PATH).AndReturn(True)
-
- mox.StubOutWithMock(drv, '_execute')
- drv._execute('rm', '-f', self.TEST_LOCAL_PATH, run_as_root=True)
-
- mox.ReplayAll()
-
- drv.delete_volume(volume)
-
- def test_delete_should_ensure_share_mounted(self):
- """delete_volume should ensure that corresponding share is mounted"""
- mox = self.mox
- drv = self._driver
-
- self.stub_out_not_replaying(drv, '_execute')
-
- volume = DumbVolume()
- volume['name'] = 'volume-123'
- volume['provider_location'] = self.TEST_NFS_EXPORT1
-
- mox.StubOutWithMock(drv, '_ensure_share_mounted')
- drv._ensure_share_mounted(self.TEST_NFS_EXPORT1)
-
- mox.ReplayAll()
-
- drv.delete_volume(volume)
-
- def test_delete_should_not_delete_if_provider_location_not_provided(self):
- """delete_volume shouldn't try to delete if provider_location missed"""
- mox = self.mox
- drv = self._driver
-
- self.stub_out_not_replaying(drv, '_ensure_share_mounted')
-
- volume = DumbVolume()
- volume['name'] = 'volume-123'
- volume['provider_location'] = None
-
- mox.StubOutWithMock(drv, '_execute')
-
- mox.ReplayAll()
-
- drv.delete_volume(volume)
-
- def test_delete_should_not_delete_if_there_is_no_file(self):
- """delete_volume should not try to delete if file missed"""
- mox = self.mox
- drv = self._driver
-
- self.stub_out_not_replaying(drv, '_ensure_share_mounted')
-
- volume = DumbVolume()
- volume['name'] = 'volume-123'
- volume['provider_location'] = self.TEST_NFS_EXPORT1
-
- mox.StubOutWithMock(drv, 'local_path')
- drv.local_path(volume).AndReturn(self.TEST_LOCAL_PATH)
-
- mox.StubOutWithMock(drv, '_path_exists')
- drv._path_exists(self.TEST_LOCAL_PATH).AndReturn(False)
-
- mox.StubOutWithMock(drv, '_execute')
-
- mox.ReplayAll()
-
- drv.delete_volume(volume)
diff --git a/nova/tests/test_plugin_api_extensions.py b/nova/tests/test_plugin_api_extensions.py
index af30c10d1a..a40dd3276f 100644
--- a/nova/tests/test_plugin_api_extensions.py
+++ b/nova/tests/test_plugin_api_extensions.py
@@ -72,7 +72,6 @@ class APITestCase(test.TestCase):
# Marking out the default extension paths makes this test MUCH faster.
self.flags(osapi_compute_extension=[])
- self.flags(osapi_volume_extension=[])
found = False
mgr = computeextensions.ExtensionManager()
diff --git a/nova/tests/test_quota.py b/nova/tests/test_quota.py
index dd86c7c038..5ec753efee 100644
--- a/nova/tests/test_quota.py
+++ b/nova/tests/test_quota.py
@@ -127,27 +127,6 @@ class QuotaIntegrationTestCase(test.TestCase):
image_href=image_uuid)
db.instance_destroy(self.context, instance['uuid'])
- def test_too_many_volumes(self):
- volume_ids = []
- for i in range(FLAGS.quota_volumes):
- volume_id = self._create_volume()
- volume_ids.append(volume_id)
- self.assertRaises(exception.QuotaError,
- volume.API().create,
- self.context, 10, '', '', None)
- for volume_id in volume_ids:
- db.volume_destroy(self.context, volume_id)
-
- def test_too_many_gigabytes(self):
- volume_ids = []
- volume_id = self._create_volume(size=20)
- volume_ids.append(volume_id)
- self.assertRaises(exception.QuotaError,
- volume.API().create,
- self.context, 10, '', '', None)
- for volume_id in volume_ids:
- db.volume_destroy(self.context, volume_id)
-
def test_too_many_addresses(self):
address = '192.168.0.100'
db.floating_ip_create(context.get_admin_context(),
@@ -720,8 +699,6 @@ class DbQuotaDriverTestCase(test.TestCase):
self.flags(quota_instances=10,
quota_cores=20,
quota_ram=50 * 1024,
- quota_volumes=10,
- quota_gigabytes=1000,
quota_floating_ips=10,
quota_metadata_items=128,
quota_injected_files=5,
diff --git a/nova/tests/test_rbd.py b/nova/tests/test_rbd.py
deleted file mode 100644
index 8e90f3ae86..0000000000
--- a/nova/tests/test_rbd.py
+++ /dev/null
@@ -1,161 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2012 Josh Durgin
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from nova import db
-from nova import exception
-from nova.openstack.common import log as logging
-from nova.openstack.common import timeutils
-from nova import test
-from nova.tests.image import fake as fake_image
-from nova.tests.test_volume import DriverTestCase
-from nova.volume.driver import RBDDriver
-
-LOG = logging.getLogger(__name__)
-
-
-class RBDTestCase(test.TestCase):
-
- def setUp(self):
- super(RBDTestCase, self).setUp()
-
- def fake_execute(*args):
- pass
- self.driver = RBDDriver(execute=fake_execute)
-
- def test_good_locations(self):
- locations = [
- 'rbd://fsid/pool/image/snap',
- 'rbd://%2F/%2F/%2F/%2F',
- ]
- map(self.driver._parse_location, locations)
-
- def test_bad_locations(self):
- locations = [
- 'rbd://image',
- 'http://path/to/somewhere/else',
- 'rbd://image/extra',
- 'rbd://image/',
- 'rbd://fsid/pool/image/',
- 'rbd://fsid/pool/image/snap/',
- 'rbd://///',
- ]
- for loc in locations:
- self.assertRaises(exception.ImageUnacceptable,
- self.driver._parse_location,
- loc)
- self.assertFalse(self.driver._is_cloneable(loc))
-
- def test_cloneable(self):
- self.stubs.Set(self.driver, '_get_fsid', lambda: 'abc')
- location = 'rbd://abc/pool/image/snap'
- self.assertTrue(self.driver._is_cloneable(location))
-
- def test_uncloneable_different_fsid(self):
- self.stubs.Set(self.driver, '_get_fsid', lambda: 'abc')
- location = 'rbd://def/pool/image/snap'
- self.assertFalse(self.driver._is_cloneable(location))
-
- def test_uncloneable_unreadable(self):
- def fake_exc(*args):
- raise exception.ProcessExecutionError()
- self.stubs.Set(self.driver, '_get_fsid', lambda: 'abc')
- self.stubs.Set(self.driver, '_execute', fake_exc)
- location = 'rbd://abc/pool/image/snap'
- self.assertFalse(self.driver._is_cloneable(location))
-
-
-class FakeRBDDriver(RBDDriver):
-
- def _clone(self):
- pass
-
- def _resize(self):
- pass
-
-
-class ManagedRBDTestCase(DriverTestCase):
- driver_name = "nova.tests.test_rbd.FakeRBDDriver"
-
- def setUp(self):
- super(ManagedRBDTestCase, self).setUp()
- fake_image.stub_out_image_service(self.stubs)
-
- def _clone_volume_from_image(self, expected_status,
- clone_works=True):
- """Try to clone a volume from an image, and check the status
- afterwards"""
- def fake_clone_image(volume, image_location):
- pass
-
- def fake_clone_error(volume, image_location):
- raise exception.NovaException()
-
- self.stubs.Set(self.volume.driver, '_is_cloneable', lambda x: True)
- if clone_works:
- self.stubs.Set(self.volume.driver, 'clone_image', fake_clone_image)
- else:
- self.stubs.Set(self.volume.driver, 'clone_image', fake_clone_error)
-
- image_id = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
- volume_id = 1
- # creating volume testdata
- db.volume_create(self.context, {'id': volume_id,
- 'updated_at': timeutils.utcnow(),
- 'display_description': 'Test Desc',
- 'size': 20,
- 'status': 'creating',
- 'instance_uuid': None,
- 'host': 'dummy'})
- try:
- if clone_works:
- self.volume.create_volume(self.context,
- volume_id,
- image_id=image_id)
- else:
- self.assertRaises(exception.NovaException,
- self.volume.create_volume,
- self.context,
- volume_id,
- image_id=image_id)
-
- volume = db.volume_get(self.context, volume_id)
- self.assertEqual(volume['status'], expected_status)
- finally:
- # cleanup
- db.volume_destroy(self.context, volume_id)
-
- def test_clone_image_status_available(self):
- """Verify that before cloning, an image is in the available state."""
- self._clone_volume_from_image('available', True)
-
- def test_clone_image_status_error(self):
- """Verify that before cloning, an image is in the available state."""
- self._clone_volume_from_image('error', False)
-
- def test_clone_success(self):
- self.stubs.Set(self.volume.driver, '_is_cloneable', lambda x: True)
- self.stubs.Set(self.volume.driver, 'clone_image', lambda a, b: True)
- image_id = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
- self.assertTrue(self.volume.driver.clone_image({}, image_id))
-
- def test_clone_bad_image_id(self):
- self.stubs.Set(self.volume.driver, '_is_cloneable', lambda x: True)
- self.assertFalse(self.volume.driver.clone_image({}, None))
-
- def test_clone_uncloneable(self):
- self.stubs.Set(self.volume.driver, '_is_cloneable', lambda x: False)
- self.assertFalse(self.volume.driver.clone_image({}, 'dne'))
diff --git a/nova/tests/test_solidfire.py b/nova/tests/test_solidfire.py
deleted file mode 100644
index 87a211da53..0000000000
--- a/nova/tests/test_solidfire.py
+++ /dev/null
@@ -1,208 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2012 OpenStack LLC.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from nova import exception
-from nova.openstack.common import log as logging
-from nova import test
-from nova.volume.solidfire import SolidFire
-
-LOG = logging.getLogger(__name__)
-
-
-class SolidFireVolumeTestCase(test.TestCase):
- def fake_issue_api_request(obj, method, params):
- if method is 'GetClusterInfo':
- LOG.info('Called Fake GetClusterInfo...')
- results = {'result': {'clusterInfo':
- {'name': 'fake-cluster',
- 'mvip': '1.1.1.1',
- 'svip': '1.1.1.1',
- 'uniqueID': 'unqid',
- 'repCount': 2,
- 'attributes': {}}}}
- return results
-
- elif method is 'AddAccount':
- LOG.info('Called Fake AddAccount...')
- return {'result': {'accountID': 25}, 'id': 1}
-
- elif method is 'GetAccountByName':
- LOG.info('Called Fake GetAccountByName...')
- results = {'result': {'account':
- {'accountID': 25,
- 'username': params['username'],
- 'status': 'active',
- 'initiatorSecret': '123456789012',
- 'targetSecret': '123456789012',
- 'attributes': {},
- 'volumes': [6, 7, 20]}},
- "id": 1}
- return results
-
- elif method is 'CreateVolume':
- LOG.info('Called Fake CreateVolume...')
- return {'result': {'volumeID': 5}, 'id': 1}
-
- elif method is 'DeleteVolume':
- LOG.info('Called Fake DeleteVolume...')
- return {'result': {}, 'id': 1}
-
- elif method is 'ListVolumesForAccount':
- test_name = 'OS-VOLID-a720b3c0-d1f0-11e1-9b23-0800200c9a66'
- LOG.info('Called Fake ListVolumesForAccount...')
- result = {'result': {
- 'volumes': [{'volumeID': 5,
- 'name': test_name,
- 'accountID': 25,
- 'sliceCount': 1,
- 'totalSize': 1048576 * 1024,
- 'enable512e': True,
- 'access': "readWrite",
- 'status': "active",
- 'attributes':None,
- 'qos': None,
- 'iqn': test_name}]}}
- return result
-
- else:
- LOG.error('Crap, unimplemented API call in Fake:%s' % method)
-
- def fake_issue_api_request_no_volume(obj, method, params):
- if method is 'ListVolumesForAccount':
- LOG.info('Called Fake ListVolumesForAccount...')
- return {'result': {'volumes': []}}
- else:
- return obj.fake_issue_api_request(method, params)
-
- def fake_issue_api_request_fails(obj, method, params):
- return {'error': {'code': 000,
- 'name': 'DummyError',
- 'message': 'This is a fake error response'},
- 'id': 1}
-
- def fake_volume_get(obj, key, default=None):
- return {'qos': 'fast'}
-
- def test_create_volume(self):
- self.stubs.Set(SolidFire, '_issue_api_request',
- self.fake_issue_api_request)
- testvol = {'project_id': 'testprjid',
- 'name': 'testvol',
- 'size': 1,
- 'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66'}
- sfv = SolidFire()
- model_update = sfv.create_volume(testvol)
-
- def test_create_volume_with_qos(self):
- preset_qos = {}
- preset_qos['qos'] = 'fast'
- self.stubs.Set(SolidFire, '_issue_api_request',
- self.fake_issue_api_request)
-
- testvol = {'project_id': 'testprjid',
- 'name': 'testvol',
- 'size': 1,
- 'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66',
- 'metadata': [preset_qos]}
-
- sfv = SolidFire()
- model_update = sfv.create_volume(testvol)
-
- def test_create_volume_fails(self):
- self.stubs.Set(SolidFire, '_issue_api_request',
- self.fake_issue_api_request_fails)
- testvol = {'project_id': 'testprjid',
- 'name': 'testvol',
- 'size': 1,
- 'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66'}
- sfv = SolidFire()
- self.assertRaises(exception.SolidFireAPIDataException,
- sfv.create_volume, testvol)
-
- def test_create_sfaccount(self):
- sfv = SolidFire()
- self.stubs.Set(SolidFire, '_issue_api_request',
- self.fake_issue_api_request)
- account = sfv._create_sfaccount('project-id')
- self.assertNotEqual(account, None)
-
- def test_create_sfaccount_fails(self):
- sfv = SolidFire()
- self.stubs.Set(SolidFire, '_issue_api_request',
- self.fake_issue_api_request_fails)
- account = sfv._create_sfaccount('project-id')
- self.assertEqual(account, None)
-
- def test_get_sfaccount_by_name(self):
- sfv = SolidFire()
- self.stubs.Set(SolidFire, '_issue_api_request',
- self.fake_issue_api_request)
- account = sfv._get_sfaccount_by_name('some-name')
- self.assertNotEqual(account, None)
-
- def test_get_sfaccount_by_name_fails(self):
- sfv = SolidFire()
- self.stubs.Set(SolidFire, '_issue_api_request',
- self.fake_issue_api_request_fails)
- account = sfv._get_sfaccount_by_name('some-name')
- self.assertEqual(account, None)
-
- def test_delete_volume(self):
- self.stubs.Set(SolidFire, '_issue_api_request',
- self.fake_issue_api_request)
- testvol = {'project_id': 'testprjid',
- 'name': 'test_volume',
- 'size': 1,
- 'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66'}
- sfv = SolidFire()
- model_update = sfv.delete_volume(testvol)
-
- def test_delete_volume_fails_no_volume(self):
- self.stubs.Set(SolidFire, '_issue_api_request',
- self.fake_issue_api_request_no_volume)
- testvol = {'project_id': 'testprjid',
- 'name': 'no-name',
- 'size': 1,
- 'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66'}
- sfv = SolidFire()
- self.assertRaises(exception.VolumeNotFound,
- sfv.delete_volume, testvol)
-
- def test_delete_volume_fails_account_lookup(self):
- self.stubs.Set(SolidFire, '_issue_api_request',
- self.fake_issue_api_request_fails)
- testvol = {'project_id': 'testprjid',
- 'name': 'no-name',
- 'size': 1,
- 'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66'}
- sfv = SolidFire()
- self.assertRaises(exception.SfAccountNotFound,
- sfv.delete_volume,
- testvol)
-
- def test_get_cluster_info(self):
- self.stubs.Set(SolidFire, '_issue_api_request',
- self.fake_issue_api_request)
- sfv = SolidFire()
- sfv._get_cluster_info()
-
- def test_get_cluster_info_fail(self):
- self.stubs.Set(SolidFire, '_issue_api_request',
- self.fake_issue_api_request_fails)
- sfv = SolidFire()
- self.assertRaises(exception.SolidFireAPIException,
- sfv._get_cluster_info)
diff --git a/nova/tests/test_storwize_svc.py b/nova/tests/test_storwize_svc.py
deleted file mode 100644
index 5130432569..0000000000
--- a/nova/tests/test_storwize_svc.py
+++ /dev/null
@@ -1,1376 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright (c) 2012 IBM, Inc.
-# Copyright (c) 2012 OpenStack LLC.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# Authors:
-# Ronen Kat <ronenkat@il.ibm.com>
-# Avishay Traeger <avishay@il.ibm.com>
-
-"""
-Tests for the IBM Storwize V7000 and SVC volume driver.
-"""
-
-import random
-import socket
-
-from nova import exception
-from nova import flags
-from nova.openstack.common import excutils
-from nova.openstack.common import log as logging
-from nova import test
-from nova.volume import storwize_svc
-
-FLAGS = flags.FLAGS
-
-LOG = logging.getLogger(__name__)
-
-
-class StorwizeSVCManagementSimulator:
- def __init__(self, pool_name):
- self._flags = {"storwize_svc_volpool_name": pool_name}
- self._volumes_list = {}
- self._hosts_list = {}
- self._mappings_list = {}
- self._fcmappings_list = {}
- self._next_cmd_error = {
- "lsportip": "",
- "lsnodecanister": "",
- "mkvdisk": "",
- "lsvdisk": "",
- "lsfcmap": "",
- "prestartfcmap": "",
- "startfcmap": "",
- "rmfcmap": "",
- }
- self._errors = {
- "CMMVC5701E": ("", "CMMVC5701E No object ID was specified."),
- "CMMVC6035E": ("", "CMMVC6035E The action failed as the " +
- "object already exists."),
- "CMMVC5753E": ("", "CMMVC5753E The specified object does not " +
- "exist or is not a suitable candidate."),
- "CMMVC5707E": ("", "CMMVC5707E Required parameters are missing."),
- "CMMVC6581E": ("", "CMMVC6581E The command has failed because " +
- "the maximum number of allowed iSCSI " +
- "qualified names (IQNs) has been reached, " +
- "or the IQN is already assigned or is not " +
- "valid."),
- "CMMVC5754E": ("", "CMMVC5754E The specified object does not " +
- "exist, or the name supplied does not meet " +
- "the naming rules."),
- "CMMVC6071E": ("", "CMMVC6071E The VDisk-to-host mapping was " +
- "not created because the VDisk is already " +
- "mapped to a host."),
- "CMMVC5879E": ("", "CMMVC5879E The VDisk-to-host mapping was " +
- "not created because a VDisk is already " +
- "mapped to this host with this SCSI LUN."),
- "CMMVC5840E": ("", "CMMVC5840E The virtual disk (VDisk) was " +
- "not deleted because it is mapped to a " +
- "host or because it is part of a FlashCopy " +
- "or Remote Copy mapping, or is involved in " +
- "an image mode migrate."),
- "CMMVC6527E": ("", "CMMVC6527E The name that you have entered " +
- "is not valid. The name can contain letters, " +
- "numbers, spaces, periods, dashes, and " +
- "underscores. The name must begin with a " +
- "letter or an underscore. The name must not " +
- "begin or end with a space."),
- "CMMVC5871E": ("", "CMMVC5871E The action failed because one or " +
- "more of the configured port names is in a " +
- "mapping."),
- "CMMVC5924E": ("", "CMMVC5924E The FlashCopy mapping was not " +
- "created because the source and target " +
- "virtual disks (VDisks) are different sizes."),
- "CMMVC6303E": ("", "CMMVC6303E The create failed because the " +
- "source and target VDisks are the same."),
- "CMMVC7050E": ("", "CMMVC7050E The command failed because at " +
- "least one node in the I/O group does not " +
- "support compressed VDisks."),
- }
-
- # Find an unused ID
- def _find_unused_id(self, d):
- ids = []
- for k, v in d.iteritems():
- ids.append(int(v["id"]))
- ids.sort()
- for index, n in enumerate(ids):
- if n > index:
- return str(index)
- return str(len(ids))
-
- # Check if name is valid
- def _is_invalid_name(self, name):
- if (name[0] == " ") or (name[-1] == " "):
- return True
- for c in name:
- if ((not c.isalnum()) and (c != " ") and (c != ".")
- and (c != "-") and (c != "_")):
- return True
- return False
-
- # Convert argument string to dictionary
- def _cmd_to_dict(self, cmd):
- arg_list = cmd.split()
- no_param_args = [
- "autodelete",
- "autoexpand",
- "bytes",
- "compressed",
- "force",
- "nohdr",
- ]
- one_param_args = [
- "cleanrate",
- "delim",
- "filtervalue",
- "grainsize",
- "host",
- "iogrp",
- "iscsiname",
- "mdiskgrp",
- "name",
- "rsize",
- "scsi",
- "size",
- "source",
- "target",
- "unit",
- "easytier",
- "warning",
- ]
-
- # Handle the special case of lsnode which is a two-word command
- # Use the one word version of the command internally
- if arg_list[0] == "svcinfo" and arg_list[1] == "lsnode":
- ret = {"cmd": "lsnodecanister"}
- arg_list.pop(0)
- else:
- ret = {"cmd": arg_list[0]}
-
- skip = False
- for i in range(1, len(arg_list)):
- if skip:
- skip = False
- continue
- if arg_list[i][0] == "-":
- if arg_list[i][1:] in no_param_args:
- ret[arg_list[i][1:]] = True
- elif arg_list[i][1:] in one_param_args:
- ret[arg_list[i][1:]] = arg_list[i + 1]
- skip = True
- else:
- raise exception.InvalidInput(
- reason=_('unrecognized argument %s') % arg_list[i])
- else:
- ret["obj"] = arg_list[i]
- return ret
-
- # Generic function for printing information
- def _print_info_cmd(self, rows, delim=" ", nohdr=False, **kwargs):
- if nohdr:
- del rows[0]
-
- for index in range(len(rows)):
- rows[index] = delim.join(rows[index])
- return ("%s" % "\n".join(rows), "")
-
- # Print mostly made-up stuff in the correct syntax
- def _cmd_lsmdiskgrp(self, **kwargs):
- rows = [None] * 3
- rows[0] = ["id", "name", "status", "mdisk_count",
- "vdisk_count capacity", "extent_size", "free_capacity",
- "virtual_capacity", "used_capacity", "real_capacity",
- "overallocation", "warning", "easy_tier",
- "easy_tier_status"]
- rows[1] = ["1", self._flags["storwize_svc_volpool_name"], "online",
- "1", str(len(self._volumes_list)), "3.25TB", "256",
- "3.21TB", "1.54TB", "264.97MB", "35.58GB", "47", "80",
- "auto", "inactive"]
- rows[2] = ["2", "volpool2", "online",
- "1", "0", "3.25TB", "256",
- "3.21TB", "1.54TB", "264.97MB", "35.58GB", "47", "80",
- "auto", "inactive"]
- return self._print_info_cmd(rows=rows, **kwargs)
-
- # Print mostly made-up stuff in the correct syntax
- def _cmd_lsnodecanister(self, **kwargs):
- rows = [None] * 3
- rows[0] = ["id", "name", "UPS_serial_number", "WWNN", "status",
- "IO_group_id", "IO_group_name", "config_node",
- "UPS_unique_id", "hardware", "iscsi_name", "iscsi_alias",
- "panel_name", "enclosure_id", "canister_id",
- "enclosure_serial_number"]
- rows[1] = ["5", "node1", "", "123456789ABCDEF0", "online", "0",
- "io_grp0",
- "yes", "123456789ABCDEF0", "100",
- "iqn.1982-01.com.ibm:1234.sim.node1", "", "01-1", "1", "1",
- "0123ABC"]
- rows[2] = ["6", "node2", "", "123456789ABCDEF1", "online", "0",
- "io_grp0",
- "no", "123456789ABCDEF1", "100",
- "iqn.1982-01.com.ibm:1234.sim.node2", "", "01-2", "1", "2",
- "0123ABC"]
-
- if self._next_cmd_error["lsnodecanister"] == "header_mismatch":
- rows[0].pop(2)
- self._next_cmd_error["lsnodecanister"] = ""
- if self._next_cmd_error["lsnodecanister"] == "remove_field":
- for row in rows:
- row.pop(0)
- self._next_cmd_error["lsnodecanister"] = ""
-
- return self._print_info_cmd(rows=rows, **kwargs)
-
- # Print mostly made-up stuff in the correct syntax
- def _cmd_lsportip(self, **kwargs):
- if self._next_cmd_error["lsportip"] == "ip_no_config":
- self._next_cmd_error["lsportip"] = ""
- ip_addr1 = ""
- ip_addr2 = ""
- gw = ""
- else:
- ip_addr1 = "1.234.56.78"
- ip_addr2 = "1.234.56.79"
- gw = "1.234.56.1"
-
- rows = [None] * 17
- rows[0] = ["id", "node_id", "node_name", "IP_address", "mask",
- "gateway", "IP_address_6", "prefix_6", "gateway_6", "MAC",
- "duplex", "state", "speed", "failover"]
- rows[1] = ["1", "5", "node1", ip_addr1, "255.255.255.0",
- gw, "", "", "", "01:23:45:67:89:00", "Full",
- "online", "1Gb/s", "no"]
- rows[2] = ["1", "5", "node1", "", "", "", "", "", "",
- "01:23:45:67:89:00", "Full", "online", "1Gb/s", "yes"]
- rows[3] = ["2", "5", "node1", "", "", "", "", "", "",
- "01:23:45:67:89:01", "Full", "unconfigured", "1Gb/s", "no"]
- rows[4] = ["2", "5", "node1", "", "", "", "", "", "",
- "01:23:45:67:89:01", "Full", "unconfigured", "1Gb/s", "yes"]
- rows[5] = ["3", "5", "node1", "", "", "", "", "", "", "", "",
- "unconfigured", "", "no"]
- rows[6] = ["3", "5", "node1", "", "", "", "", "", "", "", "",
- "unconfigured", "", "yes"]
- rows[7] = ["4", "5", "node1", "", "", "", "", "", "", "", "",
- "unconfigured", "", "no"]
- rows[8] = ["4", "5", "node1", "", "", "", "", "", "", "", "",
- "unconfigured", "", "yes"]
- rows[9] = ["1", "6", "node2", ip_addr2, "255.255.255.0",
- gw, "", "", "", "01:23:45:67:89:02", "Full",
- "online", "1Gb/s", "no"]
- rows[10] = ["1", "6", "node2", "", "", "", "", "", "",
- "01:23:45:67:89:02", "Full", "online", "1Gb/s", "yes"]
- rows[11] = ["2", "6", "node2", "", "", "", "", "", "",
- "01:23:45:67:89:03", "Full", "unconfigured", "1Gb/s", "no"]
- rows[12] = ["2", "6", "node2", "", "", "", "", "", "",
- "01:23:45:67:89:03", "Full", "unconfigured", "1Gb/s",
- "yes"]
- rows[13] = ["3", "6", "node2", "", "", "", "", "", "", "", "",
- "unconfigured", "", "no"]
- rows[14] = ["3", "6", "node2", "", "", "", "", "", "", "", "",
- "unconfigured", "", "yes"]
- rows[15] = ["4", "6", "node2", "", "", "", "", "", "", "", "",
- "unconfigured", "", "no"]
- rows[16] = ["4", "6", "node2", "", "", "", "", "", "", "", "",
- "unconfigured", "", "yes"]
-
- if self._next_cmd_error["lsportip"] == "header_mismatch":
- rows[0].pop(2)
- self._next_cmd_error["lsportip"] = ""
- if self._next_cmd_error["lsportip"] == "remove_field":
- for row in rows:
- row.pop(1)
- self._next_cmd_error["lsportip"] = ""
-
- return self._print_info_cmd(rows=rows, **kwargs)
-
- # Create a vdisk
- def _cmd_mkvdisk(self, **kwargs):
- # We only save the id/uid, name, and size - all else will be made up
- volume_info = {}
- volume_info["id"] = self._find_unused_id(self._volumes_list)
- volume_info["uid"] = ("ABCDEF" * 3) + ("0" * 14) + volume_info["id"]
-
- if "name" in kwargs:
- volume_info["name"] = kwargs["name"].strip('\'\"')
- else:
- volume_info["name"] = "vdisk" + volume_info["id"]
-
- # Assume size and unit are given, store it in bytes
- capacity = int(kwargs["size"])
- unit = kwargs["unit"]
-
- if unit == "b":
- cap_bytes = capacity
- elif unit == "kb":
- cap_bytes = capacity * pow(1024, 1)
- elif unit == "mb":
- cap_bytes = capacity * pow(1024, 2)
- elif unit == "gb":
- cap_bytes = capacity * pow(1024, 3)
- elif unit == "tb":
- cap_bytes = capacity * pow(1024, 4)
- elif unit == "pb":
- cap_bytes = capacity * pow(1024, 5)
- volume_info["cap_bytes"] = str(cap_bytes)
- volume_info["capacity"] = str(capacity) + unit.upper()
-
- if "easytier" in kwargs:
- if kwargs["easytier"] == "on":
- volume_info["easy_tier"] = "on"
- else:
- volume_info["easy_tier"] = "off"
-
- if "rsize" in kwargs:
- # Fake numbers
- volume_info["used_capacity"] = "0.75MB"
- volume_info["real_capacity"] = "36.98MB"
- volume_info["free_capacity"] = "36.23MB"
- volume_info["used_capacity_bytes"] = "786432"
- volume_info["real_capacity_bytes"] = "38776340"
- volume_info["free_capacity_bytes"] = "37989908"
- if "warning" in kwargs:
- volume_info["warning"] = kwargs["warning"].rstrip('%')
- else:
- volume_info["warning"] = "80"
- if "autoexpand" in kwargs:
- volume_info["autoexpand"] = "on"
- else:
- volume_info["autoexpand"] = "off"
- if "grainsize" in kwargs:
- volume_info["grainsize"] = kwargs["grainsize"]
- else:
- volume_info["grainsize"] = "32"
- if "compressed" in kwargs:
- if self._next_cmd_error["mkvdisk"] == "no_compression":
- self._next_cmd_error["mkvdisk"] = ""
- return self._errors["CMMVC7050E"]
- volume_info["compressed_copy"] = "yes"
- else:
- volume_info["compressed_copy"] = "no"
- else:
- volume_info["used_capacity"] = volume_info["capacity"]
- volume_info["real_capacity"] = volume_info["capacity"]
- volume_info["free_capacity"] = "0.00MB"
- volume_info["used_capacity_bytes"] = volume_info["cap_bytes"]
- volume_info["real_capacity_bytes"] = volume_info["cap_bytes"]
- volume_info["free_capacity_bytes"] = "0"
- volume_info["warning"] = ""
- volume_info["autoexpand"] = ""
- volume_info["grainsize"] = ""
- volume_info["compressed_copy"] = "no"
-
- if volume_info["name"] in self._volumes_list:
- return self._errors["CMMVC6035E"]
- else:
- self._volumes_list[volume_info["name"]] = volume_info
- return ("Virtual Disk, id [%s], successfully created" %
- (volume_info["id"]), "")
-
- # Delete a vdisk
- def _cmd_rmvdisk(self, **kwargs):
- force = 0
- if "force" in kwargs:
- force = 1
-
- if "obj" not in kwargs:
- return self._errors["CMMVC5701E"]
- vol_name = kwargs["obj"].strip('\'\"')
-
- if not vol_name in self._volumes_list:
- return self._errors["CMMVC5753E"]
-
- if force == 0:
- for k, mapping in self._mappings_list.iteritems():
- if mapping["vol"] == vol_name:
- return self._errors["CMMVC5840E"]
- for k, fcmap in self._fcmappings_list.iteritems():
- if ((fcmap["source"] == vol_name) or
- (fcmap["target"] == vol_name)):
- return self._errors["CMMVC5840E"]
-
- del self._volumes_list[vol_name]
- return ("", "")
-
- def _get_fcmap_info(self, vol_name):
- ret_vals = {
- "fc_id": "",
- "fc_name": "",
- "fc_map_count": "0",
- }
- for k, fcmap in self._fcmappings_list.iteritems():
- if ((fcmap["source"] == vol_name) or
- (fcmap["target"] == vol_name)):
- ret_vals["fc_id"] = fcmap["id"]
- ret_vals["fc_name"] = fcmap["name"]
- ret_vals["fc_map_count"] = "1"
- return ret_vals
-
- # List information about vdisks
- def _cmd_lsvdisk(self, **kwargs):
- if "obj" not in kwargs:
- rows = []
- rows.append(["id", "name", "IO_group_id", "IO_group_name",
- "status", "mdisk_grp_id", "mdisk_grp_name",
- "capacity", "type", "FC_id", "FC_name", "RC_id",
- "RC_name", "vdisk_UID", "fc_map_count", "copy_count",
- "fast_write_state", "se_copy_count", "RC_change"])
-
- for k, vol in self._volumes_list.iteritems():
- if (("filtervalue" not in kwargs) or
- (kwargs["filtervalue"] == "name=" + vol["name"])):
- fcmap_info = self._get_fcmap_info(vol["name"])
-
- if "bytes" in kwargs:
- cap = vol["cap_bytes"]
- else:
- cap = vol["capacity"]
- rows.append([str(vol["id"]), vol["name"], "0", "io_grp0",
- "online", "0",
- self._flags["storwize_svc_volpool_name"],
- cap, "striped",
- fcmap_info["fc_id"], fcmap_info["fc_name"],
- "", "", vol["uid"],
- fcmap_info["fc_map_count"], "1", "empty",
- "1", "no"])
-
- return self._print_info_cmd(rows=rows, **kwargs)
-
- else:
- if kwargs["obj"] not in self._volumes_list:
- return self._errors["CMMVC5754E"]
- vol = self._volumes_list[kwargs["obj"]]
- fcmap_info = self._get_fcmap_info(vol["name"])
- if "bytes" in kwargs:
- cap = vol["cap_bytes"]
- cap_u = vol["used_capacity_bytes"]
- cap_r = vol["real_capacity_bytes"]
- cap_f = vol["free_capacity_bytes"]
- else:
- cap = vol["capacity"]
- cap_u = vol["used_capacity"]
- cap_r = vol["real_capacity"]
- cap_f = vol["free_capacity"]
- rows = []
-
- rows.append(["id", str(vol["id"])])
- rows.append(["name", vol["name"]])
- rows.append(["IO_group_id", "0"])
- rows.append(["IO_group_name", "io_grp0"])
- rows.append(["status", "online"])
- rows.append(["mdisk_grp_id", "0"])
- rows.append(["mdisk_grp_name",
- self._flags["storwize_svc_volpool_name"]])
- rows.append(["capacity", cap])
- rows.append(["type", "striped"])
- rows.append(["formatted", "no"])
- rows.append(["mdisk_id", ""])
- rows.append(["mdisk_name", ""])
- rows.append(["FC_id", fcmap_info["fc_id"]])
- rows.append(["FC_name", fcmap_info["fc_name"]])
- rows.append(["RC_id", ""])
- rows.append(["RC_name", ""])
- rows.append(["vdisk_UID", vol["uid"]])
- rows.append(["throttling", "0"])
-
- if self._next_cmd_error["lsvdisk"] == "blank_pref_node":
- rows.append(["preferred_node_id", ""])
- self._next_cmd_error["lsvdisk"] = ""
- elif self._next_cmd_error["lsvdisk"] == "no_pref_node":
- self._next_cmd_error["lsvdisk"] = ""
- else:
- rows.append(["preferred_node_id", "6"])
- rows.append(["fast_write_state", "empty"])
- rows.append(["cache", "readwrite"])
- rows.append(["udid", ""])
- rows.append(["fc_map_count", fcmap_info["fc_map_count"]])
- rows.append(["sync_rate", "50"])
- rows.append(["copy_count", "1"])
- rows.append(["se_copy_count", "0"])
- rows.append(["mirror_write_priority", "latency"])
- rows.append(["RC_change", "no"])
- rows.append(["used_capacity", cap_u])
- rows.append(["real_capacity", cap_r])
- rows.append(["free_capacity", cap_f])
- rows.append(["autoexpand", vol["autoexpand"]])
- rows.append(["warning", vol["warning"]])
- rows.append(["grainsize", vol["grainsize"]])
- rows.append(["easy_tier", vol["easy_tier"]])
- rows.append(["compressed_copy", vol["compressed_copy"]])
-
- if "nohdr" in kwargs:
- for index in range(len(rows)):
- rows[index] = " ".join(rows[index][1:])
-
- if "delim" in kwargs:
- for index in range(len(rows)):
- rows[index] = kwargs["delim"].join(rows[index])
-
- return ("%s" % "\n".join(rows), "")
-
- # Make a host
- def _cmd_mkhost(self, **kwargs):
- host_info = {}
- host_info["id"] = self._find_unused_id(self._hosts_list)
-
- if "name" in kwargs:
- host_name = kwargs["name"].strip('\'\"')
- else:
- host_name = "host" + str(host_info["id"])
- host_info["host_name"] = host_name
-
- if "iscsiname" not in kwargs:
- return self._errors["CMMVC5707E"]
- host_info["iscsi_name"] = kwargs["iscsiname"].strip('\'\"')
-
- if self._is_invalid_name(host_name):
- return self._errors["CMMVC6527E"]
-
- if host_name in self._hosts_list:
- return self._errors["CMMVC6035E"]
-
- for k, v in self._hosts_list.iteritems():
- if v["iscsi_name"] == host_info["iscsi_name"]:
- return self._errors["CMMVC6581E"]
-
- self._hosts_list[host_name] = host_info
- return ("Host, id [%s], successfully created" %
- (host_info["id"]), "")
-
- # Remove a host
- def _cmd_rmhost(self, **kwargs):
- if "obj" not in kwargs:
- return self._errors["CMMVC5701E"]
-
- host_name = kwargs["obj"].strip('\'\"')
- if host_name not in self._hosts_list:
- return self._errors["CMMVC5753E"]
-
- for k, v in self._mappings_list.iteritems():
- if (v["host"] == host_name):
- return self._errors["CMMVC5871E"]
-
- del self._hosts_list[host_name]
- return ("", "")
-
- # List information about hosts
- def _cmd_lshost(self, **kwargs):
- if "obj" not in kwargs:
- rows = []
- rows.append(["id", "name", "port_count", "iogrp_count", "status"])
-
- found = False
- for k, host in self._hosts_list.iteritems():
- filterstr = "name=" + host["host_name"]
- if (("filtervalue" not in kwargs) or
- (kwargs["filtervalue"] == filterstr)):
- rows.append([host["id"], host["host_name"], "1", "4",
- "offline"])
- found = True
- if found:
- return self._print_info_cmd(rows=rows, **kwargs)
- else:
- return ("", "")
- else:
- if kwargs["obj"] not in self._hosts_list:
- return self._errors["CMMVC5754E"]
- host = self._hosts_list[kwargs["obj"]]
- rows = []
- rows.append(["id", host["id"]])
- rows.append(["name", host["host_name"]])
- rows.append(["port_count", "1"])
- rows.append(["type", "generic"])
- rows.append(["mask", "1111"])
- rows.append(["iogrp_count", "4"])
- rows.append(["status", "offline"])
- rows.append(["iscsi_name", host["iscsi_name"]])
- rows.append(["node_logged_in_count", "0"])
- rows.append(["state", "offline"])
-
- if "nohdr" in kwargs:
- for index in range(len(rows)):
- rows[index] = " ".join(rows[index][1:])
-
- if "delim" in kwargs:
- for index in range(len(rows)):
- rows[index] = kwargs["delim"].join(rows[index])
-
- return ("%s" % "\n".join(rows), "")
-
- # Create a vdisk-host mapping
- def _cmd_mkvdiskhostmap(self, **kwargs):
- mapping_info = {}
- mapping_info["id"] = self._find_unused_id(self._mappings_list)
-
- if "host" not in kwargs:
- return self._errors["CMMVC5707E"]
- mapping_info["host"] = kwargs["host"].strip('\'\"')
-
- if "scsi" not in kwargs:
- return self._errors["CMMVC5707E"]
- mapping_info["lun"] = kwargs["scsi"].strip('\'\"')
-
- if "obj" not in kwargs:
- return self._errors["CMMVC5707E"]
- mapping_info["vol"] = kwargs["obj"].strip('\'\"')
-
- if not mapping_info["vol"] in self._volumes_list:
- return self._errors["CMMVC5753E"]
-
- if not mapping_info["host"] in self._hosts_list:
- return self._errors["CMMVC5754E"]
-
- if mapping_info["vol"] in self._mappings_list:
- return self._errors["CMMVC6071E"]
-
- for k, v in self._mappings_list.iteritems():
- if ((v["host"] == mapping_info["host"]) and
- (v["lun"] == mapping_info["lun"])):
- return self._errors["CMMVC5879E"]
-
- self._mappings_list[mapping_info["vol"]] = mapping_info
- return ("Virtual Disk to Host map, id [%s], successfully created"
- % (mapping_info["id"]), "")
-
- # Delete a vdisk-host mapping
- def _cmd_rmvdiskhostmap(self, **kwargs):
- if "host" not in kwargs:
- return self._errors["CMMVC5707E"]
- host = kwargs["host"].strip('\'\"')
-
- if "obj" not in kwargs:
- return self._errors["CMMVC5701E"]
- vol = kwargs["obj"].strip('\'\"')
-
- if not vol in self._mappings_list:
- return self._errors["CMMVC5753E"]
-
- if self._mappings_list[vol]["host"] != host:
- return self._errors["CMMVC5753E"]
-
- del self._mappings_list[vol]
- return ("", "")
-
- # List information about vdisk-host mappings
- def _cmd_lshostvdiskmap(self, **kwargs):
- index = 1
- no_hdr = 0
- delimeter = ""
- host_name = kwargs["obj"]
-
- if host_name not in self._hosts_list:
- return self._errors["CMMVC5754E"]
-
- rows = []
- rows.append(["id", "name", "SCSI_id", "vdisk_id", "vdisk_name",
- "vdisk_UID"])
-
- for k, mapping in self._mappings_list.iteritems():
- if (host_name == "") or (mapping["host"] == host_name):
- volume = self._volumes_list[mapping["vol"]]
- rows.append([mapping["id"], mapping["host"],
- mapping["lun"], volume["id"],
- volume["name"], volume["uid"]])
-
- return self._print_info_cmd(rows=rows, **kwargs)
-
- # Create a FlashCopy mapping
- def _cmd_mkfcmap(self, **kwargs):
- source = ""
- target = ""
-
- if "source" not in kwargs:
- return self._errors["CMMVC5707E"]
- source = kwargs["source"].strip('\'\"')
- if not source in self._volumes_list:
- return self._errors["CMMVC5754E"]
-
- if "target" not in kwargs:
- return self._errors["CMMVC5707E"]
- target = kwargs["target"].strip('\'\"')
- if not target in self._volumes_list:
- return self._errors["CMMVC5754E"]
-
- if source == target:
- return self._errors["CMMVC6303E"]
-
- if (self._volumes_list[source]["cap_bytes"] !=
- self._volumes_list[target]["cap_bytes"]):
- return self._errors["CMMVC5924E"]
-
- fcmap_info = {}
- fcmap_info["source"] = source
- fcmap_info["target"] = target
- fcmap_info["id"] = self._find_unused_id(self._fcmappings_list)
- fcmap_info["name"] = "fcmap" + fcmap_info["id"]
- fcmap_info["status"] = "idle_or_copied"
- fcmap_info["progress"] = "0"
- self._fcmappings_list[target] = fcmap_info
-
- return("FlashCopy Mapping, id [" + fcmap_info["id"] +
- "], successfully created", "")
-
- # Same function used for both prestartfcmap and startfcmap
- def _cmd_gen_startfcmap(self, mode, **kwargs):
- if "obj" not in kwargs:
- return self._errors["CMMVC5701E"]
- id_num = kwargs["obj"]
-
- if mode == "pre":
- if self._next_cmd_error["prestartfcmap"] == "bad_id":
- id_num = -1
- self._next_cmd_error["prestartfcmap"] = ""
- else:
- if self._next_cmd_error["startfcmap"] == "bad_id":
- id_num = -1
- self._next_cmd_error["startfcmap"] = ""
-
- for k, fcmap in self._fcmappings_list.iteritems():
- if fcmap["id"] == id_num:
- if mode == "pre":
- fcmap["status"] = "preparing"
- else:
- fcmap["status"] = "copying"
- fcmap["progress"] = "0"
- return ("", "")
- return self._errors["CMMVC5753E"]
-
- # Same function used for both stopfcmap and rmfcmap
- # Assumes it is called with "-force <fc_map_id>"
- def _cmd_stoprmfcmap(self, mode, **kwargs):
- if "obj" not in kwargs:
- return self._errors["CMMVC5701E"]
- id_num = kwargs["obj"]
-
- if self._next_cmd_error["rmfcmap"] == "bad_id":
- id_num = -1
- self._next_cmd_error["rmfcmap"] = ""
-
- to_delete = None
- found = False
- for k, fcmap in self._fcmappings_list.iteritems():
- if fcmap["id"] == id_num:
- found = True
- if mode == "rm":
- to_delete = k
-
- if to_delete:
- del self._fcmappings_list[to_delete]
-
- if found:
- return ("", "")
- else:
- return self._errors["CMMVC5753E"]
-
- def _cmd_lsfcmap(self, **kwargs):
- rows = []
- rows.append(["id", "name", "source_vdisk_id", "source_vdisk_name",
- "target_vdisk_id", "target_vdisk_name", "group_id",
- "group_name", "status", "progress", "copy_rate",
- "clean_progress", "incremental", "partner_FC_id",
- "partner_FC_name", "restoring", "start_time",
- "rc_controlled"])
-
- # Assume we always get a filtervalue argument
- filter_key = kwargs["filtervalue"].split("=")[0]
- filter_value = kwargs["filtervalue"].split("=")[1]
- to_delete = []
- for k, v in self._fcmappings_list.iteritems():
- if str(v[filter_key]) == filter_value:
- source = self._volumes_list[v["source"]]
- target = self._volumes_list[v["target"]]
- old_status = v["status"]
- if old_status == "preparing":
- new_status = "prepared"
- if self._next_cmd_error["lsfcmap"] == "bogus_prepare":
- new_status = "bogus"
- elif (old_status == "copying") and (v["progress"] == "0"):
- new_status = "copying"
- v["progress"] = "50"
- elif (old_status == "copying") and (v["progress"] == "50"):
- new_status = "idle_or_copied"
- to_delete.append(k)
- else:
- new_status = old_status
- v["status"] = new_status
-
- if ((self._next_cmd_error["lsfcmap"] == "speed_up") or
- (self._next_cmd_error["lsfcmap"] == "bogus_prepare")):
- print_status = new_status
- self._next_cmd_error["lsfcmap"] = ""
- else:
- print_status = old_status
-
- rows.append([v["id"], v["name"], source["id"],
- source["name"], target["id"], target["name"], "",
- "", print_status, v["progress"], "50", "100",
- "off", "", "", "no", "", "no"])
-
- for d in to_delete:
- del self._fcmappings_list[k]
-
- return self._print_info_cmd(rows=rows, **kwargs)
-
- # The main function to run commands on the management simulator
- def execute_command(self, cmd, check_exit_code=True):
- try:
- kwargs = self._cmd_to_dict(cmd)
- except IndexError:
- return self._errors["CMMVC5707E"]
-
- command = kwargs["cmd"]
- del kwargs["cmd"]
- arg_list = cmd.split()
-
- if command == "lsmdiskgrp":
- out, err = self._cmd_lsmdiskgrp(**kwargs)
- elif command == "lsnodecanister":
- out, err = self._cmd_lsnodecanister(**kwargs)
- elif command == "lsportip":
- out, err = self._cmd_lsportip(**kwargs)
- elif command == "mkvdisk":
- out, err = self._cmd_mkvdisk(**kwargs)
- elif command == "rmvdisk":
- out, err = self._cmd_rmvdisk(**kwargs)
- elif command == "lsvdisk":
- out, err = self._cmd_lsvdisk(**kwargs)
- elif command == "mkhost":
- out, err = self._cmd_mkhost(**kwargs)
- elif command == "rmhost":
- out, err = self._cmd_rmhost(**kwargs)
- elif command == "lshost":
- out, err = self._cmd_lshost(**kwargs)
- elif command == "mkvdiskhostmap":
- out, err = self._cmd_mkvdiskhostmap(**kwargs)
- elif command == "rmvdiskhostmap":
- out, err = self._cmd_rmvdiskhostmap(**kwargs)
- elif command == "lshostvdiskmap":
- out, err = self._cmd_lshostvdiskmap(**kwargs)
- elif command == "mkfcmap":
- out, err = self._cmd_mkfcmap(**kwargs)
- elif command == "prestartfcmap":
- out, err = self._cmd_gen_startfcmap(mode="pre", **kwargs)
- elif command == "startfcmap":
- out, err = self._cmd_gen_startfcmap(mode="start", **kwargs)
- elif command == "stopfcmap":
- out, err = self._cmd_stoprmfcmap(mode="stop", **kwargs)
- elif command == "rmfcmap":
- out, err = self._cmd_stoprmfcmap(mode="rm", **kwargs)
- elif command == "lsfcmap":
- out, err = self._cmd_lsfcmap(**kwargs)
- else:
- out, err = ("", "ERROR: Unsupported command")
-
- if (check_exit_code) and (len(err) != 0):
- raise exception.ProcessExecutionError(exit_code=1,
- stdout=out,
- stderr=err,
- cmd=' '.join(cmd))
-
- return (out, err)
-
- # After calling this function, the next call to the specified command will
- # result in in the error specified
- def error_injection(self, cmd, error):
- self._next_cmd_error[cmd] = error
-
-
-class StorwizeSVCFakeDriver(storwize_svc.StorwizeSVCDriver):
- def set_fake_storage(self, fake):
- self.fake_storage = fake
-
- def _run_ssh(self, cmd, check_exit_code=True):
- try:
- LOG.debug(_('Run CLI command: %s') % cmd)
- ret = self.fake_storage.execute_command(cmd, check_exit_code)
- (stdout, stderr) = ret
- LOG.debug(_('CLI output:\n stdout: %(out)s\n stderr: %(err)s') %
- {'out': stdout, 'err': stderr})
-
- except exception.ProcessExecutionError as e:
- with excutils.save_and_reraise_exception():
- LOG.debug(_('CLI Exception output:\n stdout: %(out)s\n '
- 'stderr: %(err)s') % {'out': e.stdout,
- 'err': e.stderr})
-
- return ret
-
-
-class StorwizeSVCDriverTestCase(test.TestCase):
- def setUp(self):
- super(StorwizeSVCDriverTestCase, self).setUp()
- self.USESIM = 1
- if self.USESIM == 1:
- self.flags(
- san_ip="hostname",
- san_login="user",
- san_password="pass",
- storwize_svc_flashcopy_timeout="20",
- )
- self.sim = StorwizeSVCManagementSimulator("volpool")
- self.driver = StorwizeSVCFakeDriver()
- self.driver.set_fake_storage(self.sim)
- else:
- self.flags(
- san_ip="-1.-1.-1.-1",
- san_login="user",
- san_password="password",
- storwize_svc_volpool_name="pool",
- )
- self.driver = storwize_svc.StorwizeSVCDriver()
-
- self.driver.do_setup(None)
- self.driver.check_for_setup_error()
-
- def test_storwize_svc_volume_tests(self):
- self.flags(storwize_svc_vol_rsize="-1")
- volume = {}
- volume["name"] = "test1_volume%s" % random.randint(10000, 99999)
- volume["size"] = 10
- volume["id"] = 1
- self.driver.create_volume(volume)
- # Make sure that the volume has been created
- is_volume_defined = self.driver._is_volume_defined(volume["name"])
- self.assertEqual(is_volume_defined, True)
- self.driver.delete_volume(volume)
-
- if self.USESIM == 1:
- self.flags(storwize_svc_vol_rsize="2%")
- self.flags(storwize_svc_vol_compression=True)
- self.driver.create_volume(volume)
- is_volume_defined = self.driver._is_volume_defined(volume["name"])
- self.assertEqual(is_volume_defined, True)
- self.driver.delete_volume(volume)
- FLAGS.reset()
-
- def test_storwize_svc_ip_connectivity(self):
- # Check for missing san_ip
- self.flags(san_ip=None)
- self.assertRaises(exception.InvalidInput,
- self.driver._check_flags)
-
- if self.USESIM != 1:
- # Check for invalid ip
- self.flags(san_ip="-1.-1.-1.-1")
- self.assertRaises(socket.gaierror,
- self.driver.check_for_setup_error)
-
- # Check for unreachable IP
- self.flags(san_ip="1.1.1.1")
- self.assertRaises(socket.error,
- self.driver.check_for_setup_error)
-
- def test_storwize_svc_connectivity(self):
- # Make sure we detect if the pool doesn't exist
- no_exist_pool = "i-dont-exist-%s" % random.randint(10000, 99999)
- self.flags(storwize_svc_volpool_name=no_exist_pool)
- self.assertRaises(exception.InvalidInput,
- self.driver.check_for_setup_error)
- FLAGS.reset()
-
- # Check the case where the user didn't configure IP addresses
- # as well as receiving unexpected results from the storage
- if self.USESIM == 1:
- self.sim.error_injection("lsnodecanister", "header_mismatch")
- self.assertRaises(exception.VolumeBackendAPIException,
- self.driver.check_for_setup_error)
- self.sim.error_injection("lsnodecanister", "remove_field")
- self.assertRaises(exception.VolumeBackendAPIException,
- self.driver.check_for_setup_error)
- self.sim.error_injection("lsportip", "ip_no_config")
- self.assertRaises(exception.VolumeBackendAPIException,
- self.driver.check_for_setup_error)
- self.sim.error_injection("lsportip", "header_mismatch")
- self.assertRaises(exception.VolumeBackendAPIException,
- self.driver.check_for_setup_error)
- self.sim.error_injection("lsportip", "remove_field")
- self.assertRaises(exception.VolumeBackendAPIException,
- self.driver.check_for_setup_error)
-
- # Check with bad parameters
- self.flags(san_password=None)
- self.flags(san_private_key=None)
- self.assertRaises(exception.InvalidInput,
- self.driver._check_flags)
- FLAGS.reset()
-
- self.flags(storwize_svc_vol_rsize="invalid")
- self.assertRaises(exception.InvalidInput,
- self.driver._check_flags)
- FLAGS.reset()
-
- self.flags(storwize_svc_vol_warning="invalid")
- self.assertRaises(exception.InvalidInput,
- self.driver._check_flags)
- FLAGS.reset()
-
- self.flags(storwize_svc_vol_autoexpand="invalid")
- self.assertRaises(exception.InvalidInput,
- self.driver._check_flags)
- FLAGS.reset()
-
- self.flags(storwize_svc_vol_grainsize=str(42))
- self.assertRaises(exception.InvalidInput,
- self.driver._check_flags)
- FLAGS.reset()
-
- self.flags(storwize_svc_flashcopy_timeout=str(601))
- self.assertRaises(exception.InvalidInput,
- self.driver._check_flags)
- FLAGS.reset()
-
- self.flags(storwize_svc_vol_compression=True)
- self.flags(storwize_svc_vol_rsize="-1")
- self.assertRaises(exception.InvalidInput,
- self.driver._check_flags)
- FLAGS.reset()
-
- # Finally, check with good parameters
- self.driver.check_for_setup_error()
-
- def test_storwize_svc_flashcopy(self):
- volume1 = {}
- volume1["name"] = "test1_volume%s" % random.randint(10000, 99999)
- volume1["size"] = 10
- volume1["id"] = 10
- self.driver.create_volume(volume1)
-
- snapshot = {}
- snapshot["name"] = "snap_volume%s" % random.randint(10000, 99999)
- snapshot["volume_name"] = volume1["name"]
-
- # Test timeout and volume cleanup
- self.flags(storwize_svc_flashcopy_timeout=str(1))
- self.assertRaises(exception.InvalidSnapshot,
- self.driver.create_snapshot, snapshot)
- is_volume_defined = self.driver._is_volume_defined(snapshot["name"])
- self.assertEqual(is_volume_defined, False)
- FLAGS.reset()
-
- # Test bogus statuses
- if self.USESIM == 1:
- self.sim.error_injection("lsfcmap", "bogus_prepare")
- self.assertRaises(exception.VolumeBackendAPIException,
- self.driver.create_snapshot, snapshot)
-
- # Test prestartfcmap, startfcmap, and rmfcmap failing
- if self.USESIM == 1:
- self.sim.error_injection("prestartfcmap", "bad_id")
- self.assertRaises(exception.ProcessExecutionError,
- self.driver.create_snapshot, snapshot)
- self.sim.error_injection("lsfcmap", "speed_up")
- self.sim.error_injection("startfcmap", "bad_id")
- self.assertRaises(exception.ProcessExecutionError,
- self.driver.create_snapshot, snapshot)
- self.sim.error_injection("prestartfcmap", "bad_id")
- self.sim.error_injection("rmfcmap", "bad_id")
- self.assertRaises(exception.ProcessExecutionError,
- self.driver.create_snapshot, snapshot)
-
- # Test successful snapshot
- self.driver.create_snapshot(snapshot)
-
- # Ensure snapshot is defined
- is_volume_defined = self.driver._is_volume_defined(snapshot["name"])
- self.assertEqual(is_volume_defined, True)
-
- # Try to create a snapshot from an non-existing volume - should fail
- snapshot2 = {}
- snapshot2["name"] = "snap_volume%s" % random.randint(10000, 99999)
- snapshot2["volume_name"] = "undefined-vol"
- self.assertRaises(exception.VolumeNotFound,
- self.driver.create_snapshot,
- snapshot2)
-
- # Create volume from snapshot
- volume2 = {}
- volume2["name"] = "snap2vol_volume%s" % random.randint(10000, 99999)
-
- # Create volume from snapshot into an existsing volume
- self.assertRaises(exception.InvalidSnapshot,
- self.driver.create_volume_from_snapshot,
- volume1,
- snapshot)
-
- # Try to create a volume from a non-existing snapshot
- self.assertRaises(exception.SnapshotNotFound,
- self.driver.create_volume_from_snapshot,
- volume2,
- snapshot2)
-
- # Fail the snapshot
- if self.USESIM == 1:
- self.sim.error_injection("prestartfcmap", "bad_id")
- self.assertRaises(exception.ProcessExecutionError,
- self.driver.create_volume_from_snapshot, volume2, snapshot)
-
- # Succeed
- if self.USESIM == 1:
- self.sim.error_injection("lsfcmap", "speed_up")
- self.driver.create_volume_from_snapshot(volume2, snapshot)
-
- # Ensure volume is defined
- is_volume_defined = self.driver._is_volume_defined(volume2["name"])
- self.assertEqual(is_volume_defined, True)
-
- self.driver._delete_volume(volume2, True)
- self.driver._delete_snapshot(snapshot, True)
-
- # Check with target with different size
- volume3 = {}
- volume3["name"] = "test3_volume%s" % random.randint(10000, 99999)
- volume3["size"] = 11
- volume3["id"] = 11
- self.driver.create_volume(volume3)
- snapshot["name"] = volume3["name"]
- self.assertRaises(exception.InvalidSnapshot,
- self.driver.create_snapshot, snapshot)
- self.driver._delete_volume(volume1, True)
- self.driver._delete_volume(volume3, True)
-
- # Snapshot volume that doesn't exist
- snapshot = {}
- snapshot["name"] = "snap_volume%s" % random.randint(10000, 99999)
- snapshot["volume_name"] = "no_exist"
- self.assertRaises(exception.VolumeNotFound,
- self.driver.create_snapshot, snapshot)
-
- def test_storwize_svc_volumes(self):
- # Create a first volume
- volume = {}
- volume["name"] = "test1_volume%s" % random.randint(10000, 99999)
- volume["size"] = 10
- volume["id"] = 1
-
- self.driver.create_volume(volume)
-
- self.driver.ensure_export(None, volume)
-
- # Do nothing
- self.driver.create_export(None, volume)
- self.driver.remove_export(None, volume)
- self.assertRaises(NotImplementedError,
- self.driver.check_for_export, None, volume["id"])
-
- # Make sure volume attributes are as they should be
- attributes = self.driver._get_volume_attributes(volume["name"])
- attr_size = float(attributes["capacity"]) / 1073741824 # bytes to GB
- self.assertEqual(attr_size, float(volume["size"]))
- pool = storwize_svc.FLAGS.storwize_svc_volpool_name
- self.assertEqual(attributes["mdisk_grp_name"], pool)
-
- # Try to create the volume again (should fail)
- self.assertRaises(exception.ProcessExecutionError,
- self.driver.create_volume, volume)
-
- # Try to delete a volume that doesn't exist (should not fail)
- vol_no_exist = {"name": "i_dont_exist"}
- self.driver.delete_volume(vol_no_exist)
- # Ensure export for volume that doesn't exist (should not fail)
- self.driver.ensure_export(None, vol_no_exist)
-
- # Delete the volume
- self.driver.delete_volume(volume)
-
- def _create_test_vol(self):
- volume = {}
- volume["name"] = "testparam_volume%s" % random.randint(10000, 99999)
- volume["size"] = 1
- volume["id"] = 1
- self.driver.create_volume(volume)
-
- attrs = self.driver._get_volume_attributes(volume["name"])
- self.driver.delete_volume(volume)
- return attrs
-
- def test_storwize_svc_volume_params(self):
- # Option test matrix
- # Option Value Covered by test #
- # rsize -1 1
- # rsize 2% 2,3
- # warning 0 2
- # warning 80% 3
- # autoexpand True 2
- # autoexpand False 3
- # grainsize 32 2
- # grainsize 256 3
- # compression True 4
- # compression False 2,3
- # easytier True 1,3
- # easytier False 2
-
- # Test 1
- self.flags(storwize_svc_vol_rsize="-1")
- self.flags(storwize_svc_vol_easytier=True)
- attrs = self._create_test_vol()
- self.assertEquals(attrs["free_capacity"], "0")
- self.assertEquals(attrs["easy_tier"], "on")
- FLAGS.reset()
-
- # Test 2
- self.flags(storwize_svc_vol_rsize="2%")
- self.flags(storwize_svc_vol_compression=False)
- self.flags(storwize_svc_vol_warning="0")
- self.flags(storwize_svc_vol_autoexpand=True)
- self.flags(storwize_svc_vol_grainsize="32")
- self.flags(storwize_svc_vol_easytier=False)
- attrs = self._create_test_vol()
- self.assertNotEqual(attrs["capacity"], attrs["real_capacity"])
- self.assertEquals(attrs["compressed_copy"], "no")
- self.assertEquals(attrs["warning"], "0")
- self.assertEquals(attrs["autoexpand"], "on")
- self.assertEquals(attrs["grainsize"], "32")
- self.assertEquals(attrs["easy_tier"], "off")
- FLAGS.reset()
-
- # Test 3
- self.flags(storwize_svc_vol_rsize="2%")
- self.flags(storwize_svc_vol_compression=False)
- self.flags(storwize_svc_vol_warning="80%")
- self.flags(storwize_svc_vol_autoexpand=False)
- self.flags(storwize_svc_vol_grainsize="256")
- self.flags(storwize_svc_vol_easytier=True)
- attrs = self._create_test_vol()
- self.assertNotEqual(attrs["capacity"], attrs["real_capacity"])
- self.assertEquals(attrs["compressed_copy"], "no")
- self.assertEquals(attrs["warning"], "80")
- self.assertEquals(attrs["autoexpand"], "off")
- self.assertEquals(attrs["grainsize"], "256")
- self.assertEquals(attrs["easy_tier"], "on")
- FLAGS.reset()
-
- # Test 4
- self.flags(storwize_svc_vol_rsize="2%")
- self.flags(storwize_svc_vol_compression=True)
- try:
- attrs = self._create_test_vol()
- self.assertNotEqual(attrs["capacity"], attrs["real_capacity"])
- self.assertEquals(attrs["compressed_copy"], "yes")
- except exception.ProcessExecutionError as e:
- if "CMMVC7050E" not in e.stderr:
- raise exception.ProcessExecutionError(exit_code=e.exit_code,
- stdout=e.stdout,
- stderr=e.stderr,
- cmd=e.cmd)
- if self.USESIM == 1:
- self.sim.error_injection("mkvdisk", "no_compression")
- self.assertRaises(exception.ProcessExecutionError,
- self._create_test_vol)
- FLAGS.reset()
-
- def test_storwize_svc_unicode_host_and_volume_names(self):
- volume1 = {}
- volume1["name"] = u"unicode1_volume%s" % random.randint(10000, 99999)
- volume1["size"] = 2
- volume1["id"] = 1
- self.driver.create_volume(volume1)
- # Make sure that the volumes have been created
- is_volume_defined = self.driver._is_volume_defined(volume1["name"])
- self.assertEqual(is_volume_defined, True)
- conn = {}
- conn["initiator"] = u"unicode:init:%s" % random.randint(10000, 99999)
- conn["ip"] = "10.10.10.10" # Bogus ip for testing
- self.driver.initialize_connection(volume1, conn)
- self.driver.terminate_connection(volume1, conn)
- self.driver.delete_volume(volume1)
-
- def test_storwize_svc_host_maps(self):
- # Create two volumes to be used in mappings
- volume1 = {}
- volume1["name"] = "test1_volume%s" % random.randint(10000, 99999)
- volume1["size"] = 2
- volume1["id"] = 1
- self.driver.create_volume(volume1)
- volume2 = {}
- volume2["name"] = "test2_volume%s" % random.randint(10000, 99999)
- volume2["size"] = 2
- volume2["id"] = 1
- self.driver.create_volume(volume2)
-
- # Check case where no hosts exist
- if self.USESIM == 1:
- ret = self.driver._get_host_from_iscsiname("foo")
- self.assertEquals(ret, None)
- ret = self.driver._is_host_defined("foo")
- self.assertEquals(ret, False)
-
- # Make sure that the volumes have been created
- is_volume_defined = self.driver._is_volume_defined(volume1["name"])
- self.assertEqual(is_volume_defined, True)
- is_volume_defined = self.driver._is_volume_defined(volume2["name"])
- self.assertEqual(is_volume_defined, True)
-
- # Initialize connection from the first volume to a host
- # Add some characters to the initiator name that should be converted
- # when used for the host name
- conn = {}
- conn["initiator"] = "test:init:%s" % random.randint(10000, 99999)
- conn["ip"] = "10.10.10.10" # Bogus ip for testing
- self.driver.initialize_connection(volume1, conn)
-
- # Initialize again, should notice it and do nothing
- self.driver.initialize_connection(volume1, conn)
-
- # Try to delete the 1st volume (should fail because it is mapped)
- self.assertRaises(exception.ProcessExecutionError,
- self.driver.delete_volume, volume1)
-
- # Test no preferred node
- self.driver.terminate_connection(volume1, conn)
- if self.USESIM == 1:
- self.sim.error_injection("lsvdisk", "no_pref_node")
- self.driver.initialize_connection(volume1, conn)
-
- # Initialize connection from the second volume to the host with no
- # preferred node set if in simulation mode, otherwise, just
- # another initialize connection.
- if self.USESIM == 1:
- self.sim.error_injection("lsvdisk", "blank_pref_node")
- self.driver.initialize_connection(volume2, conn)
-
- # Try to remove connection from host that doesn't exist (should fail)
- conn_no_exist = {"initiator": "i_dont_exist"}
- self.assertRaises(exception.VolumeBackendAPIException,
- self.driver.terminate_connection, volume1, conn_no_exist)
-
- # Try to remove connection from volume that isn't mapped (should print
- # message but NOT fail)
- vol_no_exist = {"name": "i_dont_exist"}
- self.driver.terminate_connection(vol_no_exist, conn)
-
- # Remove the mapping from the 1st volume and delete it
- self.driver.terminate_connection(volume1, conn)
- self.driver.delete_volume(volume1)
- vol_def = self.driver._is_volume_defined(volume1["name"])
- self.assertEqual(vol_def, False)
-
- # Make sure our host still exists
- host_name = self.driver._get_host_from_iscsiname(conn["initiator"])
- host_def = self.driver._is_host_defined(host_name)
- self.assertEquals(host_def, True)
-
- # Remove the mapping from the 2nd volume and delete it. The host should
- # be automatically removed because there are no more mappings.
- self.driver.terminate_connection(volume2, conn)
- self.driver.delete_volume(volume2)
- vol_def = self.driver._is_volume_defined(volume2["name"])
- self.assertEqual(vol_def, False)
-
- # Check if our host still exists (it should not)
- ret = self.driver._get_host_from_iscsiname(conn["initiator"])
- self.assertEquals(ret, None)
- ret = self.driver._is_host_defined(host_name)
- self.assertEquals(ret, False)
diff --git a/nova/tests/test_volume.py b/nova/tests/test_volume.py
deleted file mode 100644
index 7b14d8a99b..0000000000
--- a/nova/tests/test_volume.py
+++ /dev/null
@@ -1,931 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""
-Tests for Volume Code.
-
-"""
-
-import cStringIO
-import datetime
-
-import mox
-import os
-import shutil
-import tempfile
-
-from nova import context
-from nova import db
-from nova import exception
-from nova import flags
-from nova.openstack.common import importutils
-from nova.openstack.common.notifier import api as notifier_api
-from nova.openstack.common.notifier import test_notifier
-from nova.openstack.common import rpc
-import nova.policy
-from nova import quota
-from nova import test
-from nova.tests.image import fake as fake_image
-import nova.volume.api
-from nova.volume import iscsi
-
-QUOTAS = quota.QUOTAS
-
-
-FLAGS = flags.FLAGS
-
-
-class VolumeTestCase(test.TestCase):
- """Test Case for volumes."""
-
- def setUp(self):
- super(VolumeTestCase, self).setUp()
- self.compute = importutils.import_object(FLAGS.compute_manager)
- vol_tmpdir = tempfile.mkdtemp()
- self.flags(compute_driver='nova.virt.fake.FakeDriver',
- volumes_dir=vol_tmpdir,
- notification_driver=[test_notifier.__name__])
- self.stubs.Set(iscsi.TgtAdm, '_get_target', self.fake_get_target)
- self.volume = importutils.import_object(FLAGS.volume_manager)
- self.context = context.get_admin_context()
- instance = db.instance_create(self.context, {})
- self.instance_id = instance['id']
- self.instance_uuid = instance['uuid']
- test_notifier.NOTIFICATIONS = []
- fake_image.stub_out_image_service(self.stubs)
-
- def tearDown(self):
- try:
- shutil.rmtree(FLAGS.volumes_dir)
- except OSError:
- pass
- db.instance_destroy(self.context, self.instance_uuid)
- notifier_api._reset_drivers()
- super(VolumeTestCase, self).tearDown()
-
- def fake_get_target(obj, iqn):
- return 1
-
- @staticmethod
- def _create_volume(size=0, snapshot_id=None, image_id=None, metadata=None):
- """Create a volume object."""
- vol = {}
- vol['size'] = size
- vol['snapshot_id'] = snapshot_id
- vol['image_id'] = image_id
- vol['user_id'] = 'fake'
- vol['project_id'] = 'fake'
- vol['availability_zone'] = FLAGS.storage_availability_zone
- vol['status'] = "creating"
- vol['attach_status'] = "detached"
- if metadata is not None:
- vol['metadata'] = metadata
- return db.volume_create(context.get_admin_context(), vol)
-
- def test_ec2_uuid_mapping(self):
- ec2_vol = db.ec2_volume_create(context.get_admin_context(),
- 'aaaaaaaa-bbbb-bbbb-bbbb-aaaaaaaaaaaa', 5)
- self.assertEqual(5, ec2_vol['id'])
- self.assertEqual('aaaaaaaa-bbbb-bbbb-bbbb-aaaaaaaaaaaa',
- db.get_volume_uuid_by_ec2_id(context.get_admin_context(), 5))
-
- ec2_vol = db.ec2_volume_create(context.get_admin_context(),
- 'aaaaaaaa-bbbb-bbbb-bbbb-aaaaaaaaaaaa', 1)
- self.assertEqual(1, ec2_vol['id'])
-
- ec2_vol = db.ec2_volume_create(context.get_admin_context(),
- 'aaaaaaaa-bbbb-bbbb-bbbb-aaaaaaaaazzz')
- self.assertEqual(6, ec2_vol['id'])
-
- def test_create_delete_volume(self):
- """Test volume can be created and deleted."""
- # Need to stub out reserve, commit, and rollback
- def fake_reserve(context, expire=None, **deltas):
- return ["RESERVATION"]
-
- def fake_commit(context, reservations):
- pass
-
- def fake_rollback(context, reservations):
- pass
-
- self.stubs.Set(QUOTAS, "reserve", fake_reserve)
- self.stubs.Set(QUOTAS, "commit", fake_commit)
- self.stubs.Set(QUOTAS, "rollback", fake_rollback)
-
- volume = self._create_volume()
- volume_id = volume['id']
- self.assertEquals(len(test_notifier.NOTIFICATIONS), 0)
- self.volume.create_volume(self.context, volume_id)
- self.assertEquals(len(test_notifier.NOTIFICATIONS), 2)
- self.assertEqual(volume_id, db.volume_get(context.get_admin_context(),
- volume_id).id)
-
- self.volume.delete_volume(self.context, volume_id)
- self.assertEquals(len(test_notifier.NOTIFICATIONS), 4)
- self.assertRaises(exception.NotFound,
- db.volume_get,
- self.context,
- volume_id)
-
- def test_create_delete_volume_with_metadata(self):
- """Test volume can be created and deleted."""
- test_meta = {'fake_key': 'fake_value'}
- volume = self._create_volume('0', None, metadata=test_meta)
- volume_id = volume['id']
- self.volume.create_volume(self.context, volume_id)
- result_meta = {
- volume.volume_metadata[0].key: volume.volume_metadata[0].value}
- self.assertEqual(result_meta, test_meta)
-
- self.volume.delete_volume(self.context, volume_id)
- self.assertRaises(exception.NotFound,
- db.volume_get,
- self.context,
- volume_id)
-
- def _do_test_create_over_quota(self, resource, expected):
- """Test volume creation over quota."""
-
- def fake_reserve(context, **deltas):
- kwargs = dict(overs=[resource],
- quotas=dict(gigabytes=1000, volumes=10),
- usages=dict(gigabytes=dict(reserved=1, in_use=999),
- volumes=dict(reserved=1, in_use=9)))
- raise exception.OverQuota(**kwargs)
-
- def fake_commit(context, reservations):
- self.fail('should not commit over quota')
-
- self.stubs.Set(QUOTAS, 'reserve', fake_reserve)
- self.stubs.Set(QUOTAS, 'commit', fake_commit)
-
- volume_api = nova.volume.api.API()
-
- self.assertRaises(expected,
- volume_api.create,
- self.context,
- 2,
- 'name',
- 'description')
-
- def test_create_volumes_over_quota(self):
- self._do_test_create_over_quota('volumes',
- exception.VolumeLimitExceeded)
-
- def test_create_gigabytes_over_quota(self):
- self._do_test_create_over_quota('gigabytes',
- exception.VolumeSizeTooLarge)
-
- def test_delete_busy_volume(self):
- """Test volume survives deletion if driver reports it as busy."""
- volume = self._create_volume()
- volume_id = volume['id']
- self.volume.create_volume(self.context, volume_id)
-
- self.mox.StubOutWithMock(self.volume.driver, 'delete_volume')
- self.volume.driver.delete_volume(mox.IgnoreArg()).AndRaise(
- exception.VolumeIsBusy)
- self.mox.ReplayAll()
- res = self.volume.delete_volume(self.context, volume_id)
- self.assertEqual(True, res)
- volume_ref = db.volume_get(context.get_admin_context(), volume_id)
- self.assertEqual(volume_id, volume_ref.id)
- self.assertEqual("available", volume_ref.status)
-
- self.mox.UnsetStubs()
- self.volume.delete_volume(self.context, volume_id)
-
- def test_create_volume_from_snapshot(self):
- """Test volume can be created from a snapshot."""
- volume_src = self._create_volume()
- self.volume.create_volume(self.context, volume_src['id'])
- snapshot_id = self._create_snapshot(volume_src['id'])
- self.volume.create_snapshot(self.context, volume_src['id'],
- snapshot_id)
- volume_dst = self._create_volume(0, snapshot_id)
- self.volume.create_volume(self.context, volume_dst['id'], snapshot_id)
- self.assertEqual(volume_dst['id'],
- db.volume_get(
- context.get_admin_context(),
- volume_dst['id']).id)
- self.assertEqual(snapshot_id, db.volume_get(
- context.get_admin_context(),
- volume_dst['id']).snapshot_id)
-
- self.volume.delete_volume(self.context, volume_dst['id'])
- self.volume.delete_snapshot(self.context, snapshot_id)
- self.volume.delete_volume(self.context, volume_src['id'])
-
- def test_too_big_volume(self):
- """Ensure failure if a too large of a volume is requested."""
- # FIXME(vish): validation needs to move into the data layer in
- # volume_create
- return True
- try:
- volume = self._create_volume('1001')
- self.volume.create_volume(self.context, volume)
- self.fail("Should have thrown TypeError")
- except TypeError:
- pass
-
- def test_run_attach_detach_volume(self):
- """Make sure volume can be attached and detached from instance."""
- inst = {}
- inst['image_id'] = 1
- inst['reservation_id'] = 'r-fakeres'
- inst['launch_time'] = '10'
- inst['user_id'] = 'fake'
- inst['project_id'] = 'fake'
- inst['instance_type_id'] = '2' # m1.tiny
- inst['ami_launch_index'] = 0
- instance = db.instance_create(self.context, {})
- instance_id = instance['id']
- instance_uuid = instance['uuid']
- mountpoint = "/dev/sdf"
- volume = self._create_volume()
- volume_id = volume['id']
- self.volume.create_volume(self.context, volume_id)
- if FLAGS.fake_tests:
- db.volume_attached(self.context, volume_id, instance_uuid,
- mountpoint)
- else:
- self.compute.attach_volume(self.context,
- instance_uuid,
- volume_id,
- mountpoint)
- vol = db.volume_get(context.get_admin_context(), volume_id)
- self.assertEqual(vol['status'], "in-use")
- self.assertEqual(vol['attach_status'], "attached")
- self.assertEqual(vol['mountpoint'], mountpoint)
- self.assertEqual(vol['instance_uuid'], instance_uuid)
- self.assertNotEqual(vol['attach_time'], None)
-
- self.assertRaises(exception.VolumeAttached,
- self.volume.delete_volume,
- self.context,
- volume_id)
- if FLAGS.fake_tests:
- db.volume_detached(self.context, volume_id)
- else:
- self.compute.detach_volume(self.context,
- instance_uuid,
- volume_id)
- vol = db.volume_get(self.context, volume_id)
- self.assertEqual(vol['status'], "available")
- self.assertEqual(vol['attach_time'], None)
-
- self.volume.delete_volume(self.context, volume_id)
- self.assertRaises(exception.VolumeNotFound,
- db.volume_get,
- self.context,
- volume_id)
- db.instance_destroy(self.context, instance_uuid)
-
- def test_concurrent_volumes_get_different_targets(self):
- """Ensure multiple concurrent volumes get different targets."""
- volume_ids = []
- targets = []
-
- def _check(volume_id):
- """Make sure targets aren't duplicated."""
- volume_ids.append(volume_id)
- admin_context = context.get_admin_context()
- iscsi_target = db.volume_get_iscsi_target_num(admin_context,
- volume_id)
- self.assert_(iscsi_target not in targets)
- targets.append(iscsi_target)
-
- total_slots = FLAGS.iscsi_num_targets
- for _index in xrange(total_slots):
- self._create_volume()
- for volume_id in volume_ids:
- self.volume.delete_volume(self.context, volume_id)
-
- def test_multi_node(self):
- # TODO(termie): Figure out how to test with two nodes,
- # each of them having a different FLAG for storage_node
- # This will allow us to test cross-node interactions
- pass
-
- @staticmethod
- def _create_snapshot(volume_id, size='0'):
- """Create a snapshot object."""
- snap = {}
- snap['volume_size'] = size
- snap['user_id'] = 'fake'
- snap['project_id'] = 'fake'
- snap['volume_id'] = volume_id
- snap['status'] = "creating"
- return db.snapshot_create(context.get_admin_context(), snap)['id']
-
- def test_create_delete_snapshot(self):
- """Test snapshot can be created and deleted."""
- volume = self._create_volume()
- self.volume.create_volume(self.context, volume['id'])
- snapshot_id = self._create_snapshot(volume['id'])
- self.volume.create_snapshot(self.context, volume['id'], snapshot_id)
- self.assertEqual(snapshot_id,
- db.snapshot_get(context.get_admin_context(),
- snapshot_id).id)
-
- self.volume.delete_snapshot(self.context, snapshot_id)
- self.assertRaises(exception.NotFound,
- db.snapshot_get,
- self.context,
- snapshot_id)
- self.volume.delete_volume(self.context, volume['id'])
-
- def test_cant_delete_volume_in_use(self):
- """Test volume can't be deleted in invalid stats."""
- # create a volume and assign to host
- volume = self._create_volume()
- self.volume.create_volume(self.context, volume['id'])
- volume['status'] = 'in-use'
- volume['host'] = 'fakehost'
-
- volume_api = nova.volume.api.API()
-
- # 'in-use' status raises InvalidVolume
- self.assertRaises(exception.InvalidVolume,
- volume_api.delete,
- self.context,
- volume)
-
- # clean up
- self.volume.delete_volume(self.context, volume['id'])
-
- def test_force_delete_volume(self):
- """Test volume can be forced to delete."""
- # create a volume and assign to host
- volume = self._create_volume()
- self.volume.create_volume(self.context, volume['id'])
- volume['status'] = 'error_deleting'
- volume['host'] = 'fakehost'
-
- volume_api = nova.volume.api.API()
-
- # 'error_deleting' volumes can't be deleted
- self.assertRaises(exception.InvalidVolume,
- volume_api.delete,
- self.context,
- volume)
-
- # delete with force
- volume_api.delete(self.context, volume, force=True)
-
- # status is deleting
- volume = db.volume_get(context.get_admin_context(), volume['id'])
- self.assertEquals(volume['status'], 'deleting')
-
- # clean up
- self.volume.delete_volume(self.context, volume['id'])
-
- def test_cant_delete_volume_with_snapshots(self):
- """Test snapshot can be created and deleted."""
- volume = self._create_volume()
- self.volume.create_volume(self.context, volume['id'])
- snapshot_id = self._create_snapshot(volume['id'])
- self.volume.create_snapshot(self.context, volume['id'], snapshot_id)
- self.assertEqual(snapshot_id,
- db.snapshot_get(context.get_admin_context(),
- snapshot_id).id)
-
- volume['status'] = 'available'
- volume['host'] = 'fakehost'
-
- volume_api = nova.volume.api.API()
-
- self.assertRaises(exception.InvalidVolume,
- volume_api.delete,
- self.context,
- volume)
- self.volume.delete_snapshot(self.context, snapshot_id)
- self.volume.delete_volume(self.context, volume['id'])
-
- def test_can_delete_errored_snapshot(self):
- """Test snapshot can be created and deleted."""
- volume = self._create_volume()
- self.volume.create_volume(self.context, volume['id'])
- snapshot_id = self._create_snapshot(volume['id'])
- self.volume.create_snapshot(self.context, volume['id'], snapshot_id)
- snapshot = db.snapshot_get(context.get_admin_context(),
- snapshot_id)
-
- volume_api = nova.volume.api.API()
-
- snapshot['status'] = 'badstatus'
- self.assertRaises(exception.InvalidVolume,
- volume_api.delete_snapshot,
- self.context,
- snapshot)
-
- snapshot['status'] = 'error'
- self.volume.delete_snapshot(self.context, snapshot_id)
- self.volume.delete_volume(self.context, volume['id'])
-
- def test_create_snapshot_force(self):
- """Test snapshot in use can be created forcibly."""
-
- def fake_cast(ctxt, topic, msg):
- pass
- self.stubs.Set(rpc, 'cast', fake_cast)
-
- volume = self._create_volume()
- self.volume.create_volume(self.context, volume['id'])
- db.volume_attached(self.context, volume['id'], self.instance_uuid,
- '/dev/sda1')
-
- volume_api = nova.volume.api.API()
- volume = volume_api.get(self.context, volume['id'])
- self.assertRaises(exception.InvalidVolume,
- volume_api.create_snapshot,
- self.context, volume,
- 'fake_name', 'fake_description')
- snapshot_ref = volume_api.create_snapshot_force(self.context,
- volume,
- 'fake_name',
- 'fake_description')
- db.snapshot_destroy(self.context, snapshot_ref['id'])
- db.volume_destroy(self.context, volume['id'])
-
- def test_delete_busy_snapshot(self):
- """Test snapshot can be created and deleted."""
- volume = self._create_volume()
- volume_id = volume['id']
- self.volume.create_volume(self.context, volume_id)
- snapshot_id = self._create_snapshot(volume_id)
- self.volume.create_snapshot(self.context, volume_id, snapshot_id)
-
- self.mox.StubOutWithMock(self.volume.driver, 'delete_snapshot')
- self.volume.driver.delete_snapshot(mox.IgnoreArg()).AndRaise(
- exception.SnapshotIsBusy)
- self.mox.ReplayAll()
- self.volume.delete_snapshot(self.context, snapshot_id)
- snapshot_ref = db.snapshot_get(self.context, snapshot_id)
- self.assertEqual(snapshot_id, snapshot_ref.id)
- self.assertEqual("available", snapshot_ref.status)
-
- self.mox.UnsetStubs()
- self.volume.delete_snapshot(self.context, snapshot_id)
- self.volume.delete_volume(self.context, volume_id)
-
- def test_create_volume_usage_notification(self):
- """Ensure create volume generates appropriate usage notification"""
- volume = self._create_volume()
- volume_id = volume['id']
- self.assertEquals(len(test_notifier.NOTIFICATIONS), 0)
- self.volume.create_volume(self.context, volume_id)
- self.assertEquals(len(test_notifier.NOTIFICATIONS), 2)
- msg = test_notifier.NOTIFICATIONS[0]
- self.assertEquals(msg['event_type'], 'volume.create.start')
- payload = msg['payload']
- self.assertEquals(payload['status'], 'creating')
- msg = test_notifier.NOTIFICATIONS[1]
- self.assertEquals(msg['priority'], 'INFO')
- self.assertEquals(msg['event_type'], 'volume.create.end')
- payload = msg['payload']
- self.assertEquals(payload['tenant_id'], volume['project_id'])
- self.assertEquals(payload['user_id'], volume['user_id'])
- self.assertEquals(payload['volume_id'], volume['id'])
- self.assertEquals(payload['status'], 'available')
- self.assertEquals(payload['size'], volume['size'])
- self.assertTrue('display_name' in payload)
- self.assertTrue('snapshot_id' in payload)
- self.assertTrue('launched_at' in payload)
- self.assertTrue('created_at' in payload)
- self.volume.delete_volume(self.context, volume_id)
-
- def _do_test_create_volume_with_size(self, size):
- def fake_reserve(context, expire=None, **deltas):
- return ["RESERVATION"]
-
- def fake_commit(context, reservations):
- pass
-
- def fake_rollback(context, reservations):
- pass
-
- self.stubs.Set(QUOTAS, "reserve", fake_reserve)
- self.stubs.Set(QUOTAS, "commit", fake_commit)
- self.stubs.Set(QUOTAS, "rollback", fake_rollback)
-
- volume_api = nova.volume.api.API()
-
- volume = volume_api.create(self.context,
- size,
- 'name',
- 'description')
- self.assertEquals(volume['size'], int(size))
-
- def test_create_volume_int_size(self):
- """Test volume creation with int size."""
- self._do_test_create_volume_with_size(2)
-
- def test_create_volume_string_size(self):
- """Test volume creation with string size."""
- self._do_test_create_volume_with_size('2')
-
- def test_create_volume_with_bad_size(self):
- def fake_reserve(context, expire=None, **deltas):
- return ["RESERVATION"]
-
- def fake_commit(context, reservations):
- pass
-
- def fake_rollback(context, reservations):
- pass
-
- self.stubs.Set(QUOTAS, "reserve", fake_reserve)
- self.stubs.Set(QUOTAS, "commit", fake_commit)
- self.stubs.Set(QUOTAS, "rollback", fake_rollback)
-
- volume_api = nova.volume.api.API()
-
- self.assertRaises(exception.InvalidInput,
- volume_api.create,
- self.context,
- '2Gb',
- 'name',
- 'description')
-
- def test_begin_roll_detaching_volume(self):
- """Test begin_detaching and roll_detaching functions."""
- volume = self._create_volume()
- volume_api = nova.volume.api.API()
- volume_api.begin_detaching(self.context, volume)
- volume = db.volume_get(self.context, volume['id'])
- self.assertEqual(volume['status'], "detaching")
- volume_api.roll_detaching(self.context, volume)
- volume = db.volume_get(self.context, volume['id'])
- self.assertEqual(volume['status'], "in-use")
-
- def _create_volume_from_image(self, expected_status,
- fakeout_copy_image_to_volume=False):
- """Call copy image to volume, Test the status of volume after calling
- copying image to volume."""
- def fake_local_path(volume):
- return dst_path
-
- def fake_copy_image_to_volume(context, volume, image_id):
- pass
-
- dst_fd, dst_path = tempfile.mkstemp()
- os.close(dst_fd)
- self.stubs.Set(self.volume.driver, 'local_path', fake_local_path)
- if fakeout_copy_image_to_volume:
- self.stubs.Set(self.volume, '_copy_image_to_volume',
- fake_copy_image_to_volume)
-
- image_id = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
- volume_id = 1
- # creating volume testdata
- db.volume_create(self.context, {'id': volume_id,
- 'updated_at': datetime.datetime(1, 1, 1, 1, 1, 1),
- 'display_description': 'Test Desc',
- 'size': 20,
- 'status': 'creating',
- 'instance_uuid': None,
- 'host': 'dummy'})
- try:
- self.volume.create_volume(self.context,
- volume_id,
- image_id=image_id)
-
- volume = db.volume_get(self.context, volume_id)
- self.assertEqual(volume['status'], expected_status)
- finally:
- # cleanup
- db.volume_destroy(self.context, volume_id)
- os.unlink(dst_path)
-
- def test_create_volume_from_image_status_downloading(self):
- """Verify that before copying image to volume, it is in downloading
- state."""
- self._create_volume_from_image('downloading', True)
-
- def test_create_volume_from_image_status_available(self):
- """Verify that before copying image to volume, it is in available
- state."""
- self._create_volume_from_image('available')
-
- def test_create_volume_from_image_exception(self):
- """Verify that create volume from image, the volume status is
- 'downloading'."""
- dst_fd, dst_path = tempfile.mkstemp()
- os.close(dst_fd)
-
- self.stubs.Set(self.volume.driver, 'local_path', lambda x: dst_path)
-
- image_id = 'aaaaaaaa-0000-0000-0000-000000000000'
- # creating volume testdata
- volume_id = 1
- db.volume_create(self.context, {'id': volume_id,
- 'updated_at': datetime.datetime(1, 1, 1, 1, 1, 1),
- 'display_description': 'Test Desc',
- 'size': 20,
- 'status': 'creating',
- 'host': 'dummy'})
-
- self.assertRaises(exception.ImageNotFound,
- self.volume.create_volume,
- self.context,
- volume_id,
- None,
- image_id)
- volume = db.volume_get(self.context, volume_id)
- self.assertEqual(volume['status'], "error")
- # cleanup
- db.volume_destroy(self.context, volume_id)
- os.unlink(dst_path)
-
- def test_copy_volume_to_image_status_available(self):
- dst_fd, dst_path = tempfile.mkstemp()
- os.close(dst_fd)
-
- def fake_local_path(volume):
- return dst_path
-
- self.stubs.Set(self.volume.driver, 'local_path', fake_local_path)
-
- image_id = '70a599e0-31e7-49b7-b260-868f441e862b'
- # creating volume testdata
- volume_id = 1
- db.volume_create(self.context, {'id': volume_id,
- 'updated_at': datetime.datetime(1, 1, 1, 1, 1, 1),
- 'display_description': 'Test Desc',
- 'size': 20,
- 'status': 'uploading',
- 'instance_uuid': None,
- 'host': 'dummy'})
-
- try:
- # start test
- self.volume.copy_volume_to_image(self.context,
- volume_id,
- image_id)
-
- volume = db.volume_get(self.context, volume_id)
- self.assertEqual(volume['status'], 'available')
- finally:
- # cleanup
- db.volume_destroy(self.context, volume_id)
- os.unlink(dst_path)
-
- def test_copy_volume_to_image_status_use(self):
- dst_fd, dst_path = tempfile.mkstemp()
- os.close(dst_fd)
-
- def fake_local_path(volume):
- return dst_path
-
- self.stubs.Set(self.volume.driver, 'local_path', fake_local_path)
-
- #image_id = '70a599e0-31e7-49b7-b260-868f441e862b'
- image_id = 'a440c04b-79fa-479c-bed1-0b816eaec379'
- # creating volume testdata
- volume_id = 1
- db.volume_create(self.context,
- {'id': volume_id,
- 'updated_at': datetime.datetime(1, 1, 1, 1, 1, 1),
- 'display_description': 'Test Desc',
- 'size': 20,
- 'status': 'uploading',
- 'instance_uuid':
- 'b21f957d-a72f-4b93-b5a5-45b1161abb02',
- 'host': 'dummy'})
-
- try:
- # start test
- self.volume.copy_volume_to_image(self.context,
- volume_id,
- image_id)
-
- volume = db.volume_get(self.context, volume_id)
- self.assertEqual(volume['status'], 'in-use')
- finally:
- # cleanup
- db.volume_destroy(self.context, volume_id)
- os.unlink(dst_path)
-
- def test_copy_volume_to_image_exception(self):
- dst_fd, dst_path = tempfile.mkstemp()
- os.close(dst_fd)
-
- def fake_local_path(volume):
- return dst_path
-
- self.stubs.Set(self.volume.driver, 'local_path', fake_local_path)
-
- image_id = 'aaaaaaaa-0000-0000-0000-000000000000'
- # creating volume testdata
- volume_id = 1
- db.volume_create(self.context, {'id': volume_id,
- 'updated_at': datetime.datetime(1, 1, 1, 1, 1, 1),
- 'display_description': 'Test Desc',
- 'size': 20,
- 'status': 'in-use',
- 'host': 'dummy'})
-
- try:
- # start test
- self.assertRaises(exception.ImageNotFound,
- self.volume.copy_volume_to_image,
- self.context,
- volume_id,
- image_id)
-
- volume = db.volume_get(self.context, volume_id)
- self.assertEqual(volume['status'], 'available')
- finally:
- # cleanup
- db.volume_destroy(self.context, volume_id)
- os.unlink(dst_path)
-
- def test_create_volume_from_exact_sized_image(self):
- """Verify that an image which is exactly the same size as the
- volume, will work correctly."""
- class _FakeImageService:
- def __init__(self, db_driver=None, image_service=None):
- pass
-
- def show(self, context, image_id):
- return {'size': 2 * 1024 * 1024 * 1024}
-
- image_id = '70a599e0-31e7-49b7-b260-868f441e862b'
-
- try:
- volume_id = None
- volume_api = nova.volume.api.API(
- image_service=_FakeImageService())
- volume = volume_api.create(self.context, 2, 'name', 'description',
- image_id=1)
- volume_id = volume['id']
- self.assertEqual(volume['status'], 'creating')
-
- finally:
- # cleanup
- db.volume_destroy(self.context, volume_id)
-
- def test_create_volume_from_oversized_image(self):
- """Verify that an image which is too big will fail correctly."""
- class _FakeImageService:
- def __init__(self, db_driver=None, image_service=None):
- pass
-
- def show(self, context, image_id):
- return {'size': 2 * 1024 * 1024 * 1024 + 1}
-
- image_id = '70a599e0-31e7-49b7-b260-868f441e862b'
-
- volume_api = nova.volume.api.API(image_service=_FakeImageService())
-
- self.assertRaises(exception.InvalidInput,
- volume_api.create,
- self.context, 2,
- 'name', 'description', image_id=1)
-
-
-class DriverTestCase(test.TestCase):
- """Base Test class for Drivers."""
- driver_name = "nova.volume.driver.FakeBaseDriver"
-
- def setUp(self):
- super(DriverTestCase, self).setUp()
- vol_tmpdir = tempfile.mkdtemp()
- self.flags(volume_driver=self.driver_name,
- volumes_dir=vol_tmpdir)
- self.volume = importutils.import_object(FLAGS.volume_manager)
- self.context = context.get_admin_context()
- self.output = ""
- self.stubs.Set(iscsi.TgtAdm, '_get_target', self.fake_get_target)
-
- def _fake_execute(_command, *_args, **_kwargs):
- """Fake _execute."""
- return self.output, None
- self.volume.driver.set_execute(_fake_execute)
-
- instance = db.instance_create(self.context, {})
- self.instance_id = instance['id']
- self.instance_uuid = instance['uuid']
-
- def tearDown(self):
- try:
- shutil.rmtree(FLAGS.volumes_dir)
- except OSError:
- pass
- super(DriverTestCase, self).tearDown()
-
- def fake_get_target(obj, iqn):
- return 1
-
- def _attach_volume(self):
- """Attach volumes to an instance."""
- return []
-
- def _detach_volume(self, volume_id_list):
- """Detach volumes from an instance."""
- for volume_id in volume_id_list:
- db.volume_detached(self.context, volume_id)
- self.volume.delete_volume(self.context, volume_id)
-
-
-class VolumeDriverTestCase(DriverTestCase):
- """Test case for VolumeDriver"""
- driver_name = "nova.volume.driver.VolumeDriver"
-
- def test_delete_busy_volume(self):
- """Test deleting a busy volume."""
- self.stubs.Set(self.volume.driver, '_volume_not_present',
- lambda x: False)
- self.stubs.Set(self.volume.driver, '_delete_volume',
- lambda x, y: False)
- # Want DriverTestCase._fake_execute to return 'o' so that
- # volume.driver.delete_volume() raises the VolumeIsBusy exception.
- self.output = 'o'
- self.assertRaises(exception.VolumeIsBusy,
- self.volume.driver.delete_volume,
- {'name': 'test1', 'size': 1024})
- # when DriverTestCase._fake_execute returns something other than
- # 'o' volume.driver.delete_volume() does not raise an exception.
- self.output = 'x'
- self.volume.driver.delete_volume({'name': 'test1', 'size': 1024})
-
-
-class ISCSITestCase(DriverTestCase):
- """Test Case for ISCSIDriver"""
- driver_name = "nova.volume.driver.ISCSIDriver"
-
- def _attach_volume(self):
- """Attach volumes to an instance. """
- volume_id_list = []
- for index in xrange(3):
- vol = {}
- vol['size'] = 0
- vol_ref = db.volume_create(self.context, vol)
- self.volume.create_volume(self.context, vol_ref['id'])
- vol_ref = db.volume_get(self.context, vol_ref['id'])
-
- # each volume has a different mountpoint
- mountpoint = "/dev/sd" + chr((ord('b') + index))
- db.volume_attached(self.context, vol_ref['id'], self.instance_uuid,
- mountpoint)
- volume_id_list.append(vol_ref['id'])
-
- return volume_id_list
-
- def test_check_for_export_with_no_volume(self):
- self.volume.check_for_export(self.context, self.instance_id)
-
-
-class VolumePolicyTestCase(test.TestCase):
-
- def setUp(self):
- super(VolumePolicyTestCase, self).setUp()
-
- nova.policy.reset()
- nova.policy.init()
-
- self.context = context.get_admin_context()
-
- def tearDown(self):
- super(VolumePolicyTestCase, self).tearDown()
- nova.policy.reset()
-
- def _set_rules(self, rules):
- nova.common.policy.set_brain(nova.common.policy.HttpBrain(rules))
-
- def test_check_policy(self):
- self.mox.StubOutWithMock(nova.policy, 'enforce')
- target = {
- 'project_id': self.context.project_id,
- 'user_id': self.context.user_id,
- }
- nova.policy.enforce(self.context, 'volume:attach', target)
- self.mox.ReplayAll()
- nova.volume.api.check_policy(self.context, 'attach')
-
- def test_check_policy_with_target(self):
- self.mox.StubOutWithMock(nova.policy, 'enforce')
- target = {
- 'project_id': self.context.project_id,
- 'user_id': self.context.user_id,
- 'id': 2,
- }
- nova.policy.enforce(self.context, 'volume:attach', target)
- self.mox.ReplayAll()
- nova.volume.api.check_policy(self.context, 'attach', {'id': 2})
diff --git a/nova/tests/test_volume_types.py b/nova/tests/test_volume_types.py
deleted file mode 100644
index 5e178d5e13..0000000000
--- a/nova/tests/test_volume_types.py
+++ /dev/null
@@ -1,167 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright (c) 2011 Zadara Storage Inc.
-# Copyright (c) 2011 OpenStack LLC.
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""
-Unit Tests for volume types code
-"""
-import time
-
-from nova import context
-from nova.db.sqlalchemy import models
-from nova.db.sqlalchemy import session as sql_session
-from nova import exception
-from nova import flags
-from nova.openstack.common import log as logging
-from nova import test
-from nova.volume import volume_types
-
-FLAGS = flags.FLAGS
-LOG = logging.getLogger(__name__)
-
-
-class VolumeTypeTestCase(test.TestCase):
- """Test cases for volume type code"""
- def setUp(self):
- super(VolumeTypeTestCase, self).setUp()
-
- self.ctxt = context.get_admin_context()
- self.vol_type1_name = str(int(time.time()))
- self.vol_type1_specs = dict(
- type="physical drive",
- drive_type="SAS",
- size="300",
- rpm="7200",
- visible="True")
-
- def test_volume_type_create_then_destroy(self):
- """Ensure volume types can be created and deleted"""
- prev_all_vtypes = volume_types.get_all_types(self.ctxt)
-
- volume_types.create(self.ctxt,
- self.vol_type1_name,
- self.vol_type1_specs)
- new = volume_types.get_volume_type_by_name(self.ctxt,
- self.vol_type1_name)
-
- LOG.info(_("Given data: %s"), self.vol_type1_specs)
- LOG.info(_("Result data: %s"), new)
-
- for k, v in self.vol_type1_specs.iteritems():
- self.assertEqual(v, new['extra_specs'][k],
- 'one of fields does not match')
-
- new_all_vtypes = volume_types.get_all_types(self.ctxt)
- self.assertEqual(len(prev_all_vtypes) + 1,
- len(new_all_vtypes),
- 'drive type was not created')
-
- volume_types.destroy(self.ctxt, self.vol_type1_name)
- new_all_vtypes = volume_types.get_all_types(self.ctxt)
- self.assertEqual(prev_all_vtypes,
- new_all_vtypes,
- 'drive type was not deleted')
-
- def test_get_all_volume_types(self):
- """Ensures that all volume types can be retrieved"""
- session = sql_session.get_session()
- total_volume_types = session.query(models.VolumeTypes).count()
- vol_types = volume_types.get_all_types(self.ctxt)
- self.assertEqual(total_volume_types, len(vol_types))
-
- def test_non_existent_vol_type_shouldnt_delete(self):
- """Ensures that volume type creation fails with invalid args"""
- self.assertRaises(exception.VolumeTypeNotFoundByName,
- volume_types.destroy, self.ctxt, "sfsfsdfdfs")
-
- def test_repeated_vol_types_shouldnt_raise(self):
- """Ensures that volume duplicates don't raise"""
- new_name = self.vol_type1_name + "dup"
- volume_types.create(self.ctxt, new_name)
- volume_types.destroy(self.ctxt, new_name)
- volume_types.create(self.ctxt, new_name)
-
- def test_invalid_volume_types_params(self):
- """Ensures that volume type creation fails with invalid args"""
- self.assertRaises(exception.InvalidVolumeType,
- volume_types.destroy, self.ctxt, None)
- self.assertRaises(exception.InvalidVolumeType,
- volume_types.get_volume_type, self.ctxt, None)
- self.assertRaises(exception.InvalidVolumeType,
- volume_types.get_volume_type_by_name,
- self.ctxt, None)
-
- def test_volume_type_get_by_id_and_name(self):
- """Ensure volume types get returns same entry"""
- volume_types.create(self.ctxt,
- self.vol_type1_name,
- self.vol_type1_specs)
- new = volume_types.get_volume_type_by_name(self.ctxt,
- self.vol_type1_name)
-
- new2 = volume_types.get_volume_type(self.ctxt, new['id'])
- self.assertEqual(new, new2)
-
- def test_volume_type_search_by_extra_spec(self):
- """Ensure volume types get by extra spec returns correct type"""
- volume_types.create(self.ctxt, "type1", {"key1": "val1",
- "key2": "val2"})
- volume_types.create(self.ctxt, "type2", {"key2": "val2",
- "key3": "val3"})
- volume_types.create(self.ctxt, "type3", {"key3": "another_value",
- "key4": "val4"})
-
- vol_types = volume_types.get_all_types(self.ctxt,
- search_opts={'extra_specs': {"key1": "val1"}})
- LOG.info("vol_types: %s" % vol_types)
- self.assertEqual(len(vol_types), 1)
- self.assertTrue("type1" in vol_types.keys())
- self.assertEqual(vol_types['type1']['extra_specs'],
- {"key1": "val1", "key2": "val2"})
-
- vol_types = volume_types.get_all_types(self.ctxt,
- search_opts={'extra_specs': {"key2": "val2"}})
- LOG.info("vol_types: %s" % vol_types)
- self.assertEqual(len(vol_types), 2)
- self.assertTrue("type1" in vol_types.keys())
- self.assertTrue("type2" in vol_types.keys())
-
- vol_types = volume_types.get_all_types(self.ctxt,
- search_opts={'extra_specs': {"key3": "val3"}})
- LOG.info("vol_types: %s" % vol_types)
- self.assertEqual(len(vol_types), 1)
- self.assertTrue("type2" in vol_types.keys())
-
- def test_volume_type_search_by_extra_spec_multiple(self):
- """Ensure volume types get by extra spec returns correct type"""
- volume_types.create(self.ctxt, "type1", {"key1": "val1",
- "key2": "val2",
- "key3": "val3"})
- volume_types.create(self.ctxt, "type2", {"key2": "val2",
- "key3": "val3"})
- volume_types.create(self.ctxt, "type3", {"key1": "val1",
- "key3": "val3",
- "key4": "val4"})
-
- vol_types = volume_types.get_all_types(self.ctxt,
- search_opts={'extra_specs': {"key1": "val1",
- "key3": "val3"}})
- LOG.info("vol_types: %s" % vol_types)
- self.assertEqual(len(vol_types), 2)
- self.assertTrue("type1" in vol_types.keys())
- self.assertTrue("type3" in vol_types.keys())
- self.assertEqual(vol_types['type1']['extra_specs'],
- {"key1": "val1", "key2": "val2", "key3": "val3"})
- self.assertEqual(vol_types['type3']['extra_specs'],
- {"key1": "val1", "key3": "val3", "key4": "val4"})
diff --git a/nova/tests/test_volume_types_extra_specs.py b/nova/tests/test_volume_types_extra_specs.py
deleted file mode 100644
index ed7840e0a0..0000000000
--- a/nova/tests/test_volume_types_extra_specs.py
+++ /dev/null
@@ -1,130 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright (c) 2011 Zadara Storage Inc.
-# Copyright (c) 2011 OpenStack LLC.
-# Copyright 2011 University of Southern California
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""
-Unit Tests for volume types extra specs code
-"""
-
-from nova import context
-from nova import db
-from nova import test
-
-
-class VolumeTypeExtraSpecsTestCase(test.TestCase):
-
- def setUp(self):
- super(VolumeTypeExtraSpecsTestCase, self).setUp()
- self.context = context.get_admin_context()
- self.vol_type1 = dict(name="TEST: Regular volume test")
- self.vol_type1_specs = dict(vol_extra1="value1",
- vol_extra2="value2",
- vol_extra3=3)
- self.vol_type1['extra_specs'] = self.vol_type1_specs
- ref = db.volume_type_create(self.context, self.vol_type1)
- self.volume_type1_id = ref.id
- for k, v in self.vol_type1_specs.iteritems():
- self.vol_type1_specs[k] = str(v)
-
- self.vol_type2_noextra = dict(name="TEST: Volume type without extra")
- ref = db.volume_type_create(self.context, self.vol_type2_noextra)
- self.vol_type2_id = ref.id
-
- def tearDown(self):
- # Remove the volume type from the database
- db.volume_type_destroy(context.get_admin_context(),
- self.vol_type1['name'])
- db.volume_type_destroy(context.get_admin_context(),
- self.vol_type2_noextra['name'])
- super(VolumeTypeExtraSpecsTestCase, self).tearDown()
-
- def test_volume_type_specs_get(self):
- expected_specs = self.vol_type1_specs.copy()
- actual_specs = db.volume_type_extra_specs_get(
- context.get_admin_context(),
- self.volume_type1_id)
- self.assertEquals(expected_specs, actual_specs)
-
- def test_volume_type_extra_specs_delete(self):
- expected_specs = self.vol_type1_specs.copy()
- del expected_specs['vol_extra2']
- db.volume_type_extra_specs_delete(context.get_admin_context(),
- self.volume_type1_id,
- 'vol_extra2')
- actual_specs = db.volume_type_extra_specs_get(
- context.get_admin_context(),
- self.volume_type1_id)
- self.assertEquals(expected_specs, actual_specs)
-
- def test_volume_type_extra_specs_update(self):
- expected_specs = self.vol_type1_specs.copy()
- expected_specs['vol_extra3'] = "4"
- db.volume_type_extra_specs_update_or_create(
- context.get_admin_context(),
- self.volume_type1_id,
- dict(vol_extra3=4))
- actual_specs = db.volume_type_extra_specs_get(
- context.get_admin_context(),
- self.volume_type1_id)
- self.assertEquals(expected_specs, actual_specs)
-
- def test_volume_type_extra_specs_create(self):
- expected_specs = self.vol_type1_specs.copy()
- expected_specs['vol_extra4'] = 'value4'
- expected_specs['vol_extra5'] = 'value5'
- db.volume_type_extra_specs_update_or_create(
- context.get_admin_context(),
- self.volume_type1_id,
- dict(vol_extra4="value4",
- vol_extra5="value5"))
- actual_specs = db.volume_type_extra_specs_get(
- context.get_admin_context(),
- self.volume_type1_id)
- self.assertEquals(expected_specs, actual_specs)
-
- def test_volume_type_get_with_extra_specs(self):
- volume_type = db.volume_type_get(
- context.get_admin_context(),
- self.volume_type1_id)
- self.assertEquals(volume_type['extra_specs'],
- self.vol_type1_specs)
-
- volume_type = db.volume_type_get(
- context.get_admin_context(),
- self.vol_type2_id)
- self.assertEquals(volume_type['extra_specs'], {})
-
- def test_volume_type_get_by_name_with_extra_specs(self):
- volume_type = db.volume_type_get_by_name(
- context.get_admin_context(),
- self.vol_type1['name'])
- self.assertEquals(volume_type['extra_specs'],
- self.vol_type1_specs)
-
- volume_type = db.volume_type_get_by_name(
- context.get_admin_context(),
- self.vol_type2_noextra['name'])
- self.assertEquals(volume_type['extra_specs'], {})
-
- def test_volume_type_get_all(self):
- expected_specs = self.vol_type1_specs.copy()
-
- types = db.volume_type_get_all(context.get_admin_context())
-
- self.assertEquals(
- types[self.vol_type1['name']]['extra_specs'], expected_specs)
-
- self.assertEquals(
- types[self.vol_type2_noextra['name']]['extra_specs'], {})
diff --git a/nova/tests/test_volume_utils.py b/nova/tests/test_volume_utils.py
deleted file mode 100644
index 89ad7c3db7..0000000000
--- a/nova/tests/test_volume_utils.py
+++ /dev/null
@@ -1,91 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2011 OpenStack LLC.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""Tests For miscellaneous util methods used with volume."""
-
-from nova import context
-from nova import db
-from nova import flags
-from nova.openstack.common import importutils
-from nova.openstack.common import log as logging
-from nova.openstack.common.notifier import api as notifier_api
-from nova.openstack.common.notifier import test_notifier
-from nova import test
-from nova.tests import fake_network
-from nova.volume import utils as volume_utils
-
-
-LOG = logging.getLogger(__name__)
-FLAGS = flags.FLAGS
-
-
-class UsageInfoTestCase(test.TestCase):
-
- def setUp(self):
- super(UsageInfoTestCase, self).setUp()
- self.flags(compute_driver='nova.virt.fake.FakeDriver',
- host='fake',
- notification_driver=[test_notifier.__name__])
- fake_network.set_stub_network_methods(self.stubs)
-
- self.volume = importutils.import_object(FLAGS.volume_manager)
- self.user_id = 'fake'
- self.project_id = 'fake'
- self.snapshot_id = 'fake'
- self.volume_size = 0
- self.context = context.RequestContext(self.user_id, self.project_id)
- test_notifier.NOTIFICATIONS = []
-
- def tearDown(self):
- notifier_api._reset_drivers()
- super(UsageInfoTestCase, self).tearDown()
-
- def _create_volume(self, params={}):
- """Create a test volume"""
- vol = {}
- vol['snapshot_id'] = self.snapshot_id
- vol['user_id'] = self.user_id
- vol['project_id'] = self.project_id
- vol['host'] = FLAGS.host
- vol['availability_zone'] = FLAGS.storage_availability_zone
- vol['status'] = "creating"
- vol['attach_status'] = "detached"
- vol['size'] = self.volume_size
- vol.update(params)
- return db.volume_create(self.context, vol)['id']
-
- def test_notify_usage_exists(self):
- """Ensure 'exists' notification generates appropriate usage data."""
- volume_id = self._create_volume()
- volume = db.volume_get(self.context, volume_id)
- volume_utils.notify_usage_exists(self.context, volume)
- self.assertEquals(len(test_notifier.NOTIFICATIONS), 1)
- msg = test_notifier.NOTIFICATIONS[0]
- self.assertEquals(msg['priority'], 'INFO')
- self.assertEquals(msg['event_type'], 'volume.exists')
- payload = msg['payload']
- self.assertEquals(payload['tenant_id'], self.project_id)
- self.assertEquals(payload['user_id'], self.user_id)
- self.assertEquals(payload['snapshot_id'], self.snapshot_id)
- self.assertEquals(payload['volume_id'], volume.id)
- self.assertEquals(payload['size'], self.volume_size)
- for attr in ('display_name', 'created_at', 'launched_at',
- 'status', 'audit_period_beginning',
- 'audit_period_ending'):
- self.assertTrue(attr in payload,
- msg="Key %s not in payload" % attr)
- db.volume_destroy(context.get_admin_context(), volume['id'])
diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py
index a7bfa75482..11e8844c46 100644
--- a/nova/tests/test_xenapi.py
+++ b/nova/tests/test_xenapi.py
@@ -170,7 +170,7 @@ class XenAPIVolumeTestCase(stubs.XenAPITestBase):
vol['user_id'] = 'fake'
vol['project_id'] = 'fake'
vol['host'] = 'localhost'
- vol['availability_zone'] = FLAGS.storage_availability_zone
+ vol['availability_zone'] = FLAGS.node_availability_zone
vol['status'] = "creating"
vol['attach_status'] = "detached"
return db.volume_create(self.context, vol)
diff --git a/nova/tests/test_xensm.py b/nova/tests/test_xensm.py
deleted file mode 100644
index 2f2108aa4c..0000000000
--- a/nova/tests/test_xensm.py
+++ /dev/null
@@ -1,140 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright (c) 2010 Citrix Systems, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""Test suite for Xen Storage Manager Volume Driver."""
-
-
-from nova import context
-from nova import db
-from nova import exception
-from nova import flags
-from nova.openstack.common import log as logging
-from nova.tests.xenapi import stubs
-from nova.virt.xenapi import fake as xenapi_fake
-from nova.volume import xensm
-
-LOG = logging.getLogger(__name__)
-
-FLAGS = flags.FLAGS
-
-
-class XenSMTestCase(stubs.XenAPITestBase):
- """Unit tests for Xen Storage Manager Volume operations."""
-
- def _get_sm_backend_params(self):
- config_params = ("name_label=testsmbackend "
- "server=localhost "
- "serverpath=/tmp/nfspath")
- params = dict(flavor_id=1,
- sr_uuid=None,
- sr_type='nfs',
- config_params=config_params)
- return params
-
- def setUp(self):
- super(XenSMTestCase, self).setUp()
- self.user_id = 'fake'
- self.project_id = 'fake'
- self.context = context.RequestContext(self.user_id, self.project_id)
- self.flags(compute_driver='xenapi.XenAPIDriver',
- xenapi_connection_url='http://test_url',
- xenapi_connection_username='test_user',
- xenapi_connection_password='test_pass')
- stubs.stubout_session(self.stubs, xenapi_fake.SessionBase)
- xenapi_fake.reset()
- self.driver = xensm.XenSMDriver()
- self.driver.db = db
-
- def _setup_step(self, ctxt):
- # Create a fake backend conf
- params = self._get_sm_backend_params()
- beconf = db.sm_backend_conf_create(ctxt,
- params)
- # Call setup, the one time operation that will create a backend SR
- self.driver.do_setup(ctxt)
- return beconf
-
- def test_do_setup(self):
- ctxt = context.get_admin_context()
- beconf = self._setup_step(ctxt)
- beconf = db.sm_backend_conf_get(ctxt, beconf['id'])
- self.assertIsInstance(beconf['sr_uuid'], basestring)
-
- def _create_volume(self, size=0):
- """Create a volume object."""
- vol = {}
- vol['size'] = size
- vol['user_id'] = 'fake'
- vol['project_id'] = 'fake'
- vol['host'] = 'localhost'
- vol['availability_zone'] = FLAGS.storage_availability_zone
- vol['status'] = "creating"
- vol['attach_status'] = "detached"
- return db.volume_create(self.context, vol)
-
- def test_create_volume(self):
- ctxt = context.get_admin_context()
- beconf = self._setup_step(ctxt)
- volume = self._create_volume()
- self.driver.create_volume(volume)
- db.sm_volume_get(ctxt, volume['id'])
-
- def test_local_path(self):
- ctxt = context.get_admin_context()
- volume = self._create_volume()
- val = self.driver.local_path(volume)
- self.assertIsInstance(val, basestring)
-
- def test_delete_volume(self):
- ctxt = context.get_admin_context()
- beconf = self._setup_step(ctxt)
- volume = self._create_volume()
- self.driver.create_volume(volume)
- self.driver.delete_volume(volume)
- self.assertRaises(exception.NotFound,
- db.sm_volume_get,
- ctxt,
- volume['id'])
-
- def test_delete_volume_raises_notfound(self):
- ctxt = context.get_admin_context()
- beconf = self._setup_step(ctxt)
- self.assertRaises(exception.NotFound,
- self.driver.delete_volume,
- {'id': "FA15E-1D"})
-
- def _get_expected_conn(self, beconf, vol):
- expected = {}
- expected['volume_id'] = unicode(vol['id'])
- expected['flavor_id'] = beconf['flavor_id']
- expected['sr_uuid'] = unicode(beconf['sr_uuid'])
- expected['sr_type'] = unicode(beconf['sr_type'])
- return expected
-
- def test_initialize_connection(self):
- ctxt = context.get_admin_context()
- beconf = self._setup_step(ctxt)
- beconf = db.sm_backend_conf_get(ctxt, beconf['id'])
- volume = self._create_volume()
- self.driver.create_volume(volume)
- expected = self._get_expected_conn(beconf, volume)
- conn = self.driver.initialize_connection(volume, 'fakeConnector')
- res = {}
- for key in ['volume_id', 'flavor_id', 'sr_uuid', 'sr_type']:
- res[key] = conn['data'][key]
- self.assertDictMatch(expected, res)
- self.assertEqual(set(conn['data']['introduce_sr_keys']),
- set([u'sr_type', u'server', u'serverpath']))
diff --git a/nova/tests/volume/test_HpSanISCSIDriver.py b/nova/tests/volume/test_HpSanISCSIDriver.py
deleted file mode 100644
index d6d3f34886..0000000000
--- a/nova/tests/volume/test_HpSanISCSIDriver.py
+++ /dev/null
@@ -1,209 +0,0 @@
-# Copyright 2012 OpenStack LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-from nova import exception
-from nova.openstack.common import log as logging
-from nova import test
-from nova.volume import san
-
-LOG = logging.getLogger(__name__)
-
-
-class HpSanISCSITestCase(test.TestCase):
-
- def setUp(self):
- super(HpSanISCSITestCase, self).setUp()
- self.stubs.Set(san.HpSanISCSIDriver, "_cliq_run",
- self._fake_cliq_run)
- self.stubs.Set(san.HpSanISCSIDriver, "_get_iscsi_properties",
- self._fake_get_iscsi_properties)
- self.driver = san.HpSanISCSIDriver()
- self.volume_name = "fakevolume"
- self.connector = {'ip': '10.0.0.2',
- 'initiator': 'iqn.1993-08.org.debian:01:222',
- 'host': 'fakehost'}
- self.properties = {'target_discoverd': True,
- 'target_portal': '10.0.1.6:3260',
- 'target_iqn':
- 'iqn.2003-10.com.lefthandnetworks:group01:25366:fakev',
- 'volume_id': 1}
-
- def _fake_get_iscsi_properties(self, volume):
- return self.properties
-
- def _fake_cliq_run(self, verb, cliq_args):
- """Return fake results for the various methods."""
-
- def create_volume(cliq_args):
- """
- input = "createVolume description="fake description"
- clusterName=Cluster01 volumeName=fakevolume
- thinProvision=0 output=XML size=1GB"
- """
- output = """<gauche version="1.0">
- <response description="Operation succeeded."
- name="CliqSuccess" processingTime="181" result="0"/>
- </gauche>"""
- self.assertEqual(cliq_args['volumeName'], self.volume_name)
- self.assertEqual(cliq_args['thinProvision'], '1')
- self.assertEqual(cliq_args['size'], '1GB')
- return output, None
-
- def delete_volume(cliq_args):
- """
- input = "deleteVolume volumeName=fakevolume prompt=false
- output=XML"
- """
- output = """<gauche version="1.0">
- <response description="Operation succeeded."
- name="CliqSuccess" processingTime="164" result="0"/>
- </gauche>"""
- self.assertEqual(cliq_args['volumeName'], self.volume_name)
- self.assertEqual(cliq_args['prompt'], 'false')
- return output, None
-
- def assign_volume(cliq_args):
- """
- input = "assignVolumeToServer volumeName=fakevolume
- serverName=fakehost
- output=XML"
- """
- output = """<gauche version="1.0">
- <response description="Operation succeeded."
- name="CliqSuccess" processingTime="174" result="0"/>
- </gauche>"""
- self.assertEqual(cliq_args['volumeName'], self.volume_name)
- self.assertEqual(cliq_args['serverName'], self.connector['host'])
- return output, None
-
- def unassign_volume(cliq_args):
- """
- input = "unassignVolumeToServer volumeName=fakevolume
- serverName=fakehost output=XML
- """
- output = """<gauche version="1.0">
- <response description="Operation succeeded."
- name="CliqSuccess" processingTime="205" result="0"/>
- </gauche>"""
- self.assertEqual(cliq_args['volumeName'], self.volume_name)
- self.assertEqual(cliq_args['serverName'], self.connector['host'])
- return output, None
-
- def get_cluster_info(cliq_args):
- """
- input = "getClusterInfo clusterName=Cluster01 searchDepth=1
- verbose=0 output=XML"
- """
- output = """<gauche version="1.0">
- <response description="Operation succeeded." name="CliqSuccess"
- processingTime="1164" result="0">
- <cluster blockSize="1024" description=""
- maxVolumeSizeReplication1="622957690"
- maxVolumeSizeReplication2="311480287"
- minVolumeSize="262144" name="Cluster01"
- pageSize="262144" spaceTotal="633697992"
- storageNodeCount="2" unprovisionedSpace="622960574"
- useVip="true">
- <nsm ipAddress="10.0.1.7" name="111-vsa"/>
- <nsm ipAddress="10.0.1.8" name="112-vsa"/>
- <vip ipAddress="10.0.1.6" subnetMask="255.255.255.0"/>
- </cluster></response></gauche>"""
- return output, None
-
- def get_volume_info(cliq_args):
- """
- input = "getVolumeInfo volumeName=fakevolume output=XML"
- """
- output = """<gauche version="1.0">
- <response description="Operation succeeded." name="CliqSuccess"
- processingTime="87" result="0">
- <volume autogrowPages="4" availability="online"
- blockSize="1024" bytesWritten="0" checkSum="false"
- clusterName="Cluster01" created="2011-02-08T19:56:53Z"
- deleting="false" description="" groupName="Group01"
- initialQuota="536870912" isPrimary="true"
- iscsiIqn="iqn.2003-10.com.lefthandnetworks:group01:25366:fakev"
- maxSize="6865387257856" md5="9fa5c8b2cca54b2948a63d833097e1ca"
- minReplication="1" name="vol-b" parity="0" replication="2"
- reserveQuota="536870912" scratchQuota="4194304"
- serialNumber="9fa5c8b2cca54b2948a63d8"
- size="1073741824" stridePages="32" thinProvision="true">
- <status description="OK" value="2"/>
- <permission access="rw" authGroup="api-1"
- chapName="chapusername" chapRequired="true"
- id="25369" initiatorSecret="" iqn=""
- iscsiEnabled="true" loadBalance="true"
- targetSecret="supersecret"/>
- </volume></response></gauche>"""
- return output, None
-
- def test_error(cliq_args):
- output = """<gauche version="1.0">
- <response description="Volume '134234' not found."
- name="CliqVolumeNotFound" processingTime="1083"
- result="8000100c"/>
- </gauche>"""
- return output, None
-
- self.assertEqual(cliq_args['output'], 'XML')
- try:
- verbs = {'createVolume': create_volume,
- 'deleteVolume': delete_volume,
- 'assignVolumeToServer': assign_volume,
- 'unassignVolumeToServer': unassign_volume,
- 'getClusterInfo': get_cluster_info,
- 'getVolumeInfo': get_volume_info,
- 'testError': test_error}
- except KeyError:
- raise NotImplementedError()
-
- return verbs[verb](cliq_args)
-
- def test_create_volume(self):
- volume = {'name': self.volume_name, 'size': 1}
- model_update = self.driver.create_volume(volume)
- expected_iqn = "iqn.2003-10.com.lefthandnetworks:group01:25366:fakev 0"
- expected_location = "10.0.1.6:3260,1 %s" % expected_iqn
- self.assertEqual(model_update['provider_location'], expected_location)
-
- def test_delete_volume(self):
- volume = {'name': self.volume_name}
- self.driver.delete_volume(volume)
-
- def test_initialize_connection(self):
- volume = {'name': self.volume_name}
- result = self.driver.initialize_connection(volume, self.connector)
- self.assertEqual(result['driver_volume_type'], 'iscsi')
- self.assertDictMatch(result['data'], self.properties)
-
- def test_terminate_connection(self):
- volume = {'name': self.volume_name}
- self.driver.terminate_connection(volume, self.connector)
-
- def test_create_snapshot(self):
- try:
- self.driver.create_snapshot("")
- except NotImplementedError:
- pass
-
- def test_create_volume_from_snapshot(self):
- try:
- self.driver.create_volume_from_snapshot("", "")
- except NotImplementedError:
- pass
-
- def test_cliq_error(self):
- try:
- self.driver._cliq_run_xml("testError", {})
- except exception.NovaException:
- pass
diff --git a/nova/volume/api.py b/nova/volume/api.py
deleted file mode 100644
index 6beb771f3d..0000000000
--- a/nova/volume/api.py
+++ /dev/null
@@ -1,511 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-Handles all requests relating to volumes.
-"""
-
-import functools
-
-from nova.db import base
-from nova import exception
-from nova import flags
-from nova.image import glance
-from nova.openstack.common import cfg
-from nova.openstack.common import log as logging
-from nova.openstack.common import rpc
-from nova.openstack.common import timeutils
-import nova.policy
-from nova import quota
-from nova.scheduler import rpcapi as scheduler_rpcapi
-
-volume_host_opt = cfg.BoolOpt('snapshot_same_host',
- default=True,
- help='Create volume from snapshot at the host where snapshot resides')
-
-FLAGS = flags.FLAGS
-FLAGS.register_opt(volume_host_opt)
-flags.DECLARE('storage_availability_zone', 'nova.volume.manager')
-
-LOG = logging.getLogger(__name__)
-GB = 1048576 * 1024
-
-QUOTAS = quota.QUOTAS
-
-
-def wrap_check_policy(func):
- """Check policy corresponding to the wrapped methods prior to execution
-
- This decorator requires the first 3 args of the wrapped function
- to be (self, context, volume)
- """
- @functools.wraps(func)
- def wrapped(self, context, target_obj, *args, **kwargs):
- check_policy(context, func.__name__, target_obj)
- return func(self, context, target_obj, *args, **kwargs)
-
- return wrapped
-
-
-def check_policy(context, action, target_obj=None):
- target = {
- 'project_id': context.project_id,
- 'user_id': context.user_id,
- }
- target.update(target_obj or {})
- _action = 'volume:%s' % action
- nova.policy.enforce(context, _action, target)
-
-
-class API(base.Base):
- """API for interacting with the volume manager."""
-
- def __init__(self, image_service=None, **kwargs):
- self.image_service = (image_service or
- glance.get_default_image_service())
- self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI()
- super(API, self).__init__(**kwargs)
-
- def create(self, context, size, name, description, snapshot=None,
- image_id=None, volume_type=None, metadata=None,
- availability_zone=None):
- check_policy(context, 'create')
- if snapshot is not None:
- if snapshot['status'] != "available":
- msg = _("status must be available")
- raise exception.InvalidSnapshot(reason=msg)
- if not size:
- size = snapshot['volume_size']
-
- snapshot_id = snapshot['id']
- else:
- snapshot_id = None
-
- def as_int(s):
- try:
- return int(s)
- except ValueError:
- return s
-
- # tolerate size as stringified int
- size = as_int(size)
-
- if not isinstance(size, int) or size <= 0:
- msg = (_("Volume size '%s' must be an integer and greater than 0")
- % size)
- raise exception.InvalidInput(reason=msg)
-
- reservations = None
- try:
- reservations = QUOTAS.reserve(context, volumes=1, gigabytes=size)
- except exception.OverQuota as e:
- overs = e.kwargs['overs']
- usages = e.kwargs['usages']
- quotas = e.kwargs['quotas']
-
- def _consumed(name):
- return (usages[name]['reserved'] + usages[name]['in_use'])
-
- pid = context.project_id
- if 'gigabytes' in overs:
- consumed = _consumed('gigabytes')
- quota = quotas['gigabytes']
- LOG.warn(_("Quota exceeded for %(pid)s, tried to create "
- "%(size)sG volume (%(consumed)dG of %(quota)dG "
- "already consumed)") % locals())
- raise exception.VolumeSizeTooLarge()
- elif 'volumes' in overs:
- consumed = _consumed('volumes')
- LOG.warn(_("Quota exceeded for %(pid)s, tried to create "
- "volume (%(consumed)d volumes already consumed)")
- % locals())
- raise exception.VolumeLimitExceeded(allowed=quotas['volumes'])
-
- if image_id:
- # check image existence
- image_meta = self.image_service.show(context, image_id)
- image_size_in_gb = (int(image_meta['size']) + GB - 1) / GB
- #check image size is not larger than volume size.
- if image_size_in_gb > size:
- msg = _('Size of specified image is larger than volume size.')
- raise exception.InvalidInput(reason=msg)
-
- if availability_zone is None:
- availability_zone = FLAGS.storage_availability_zone
-
- if volume_type is None:
- volume_type_id = None
- else:
- volume_type_id = volume_type.get('id', None)
-
- options = {
- 'size': size,
- 'user_id': context.user_id,
- 'project_id': context.project_id,
- 'snapshot_id': snapshot_id,
- 'availability_zone': availability_zone,
- 'status': "creating",
- 'attach_status': "detached",
- 'display_name': name,
- 'display_description': description,
- 'volume_type_id': volume_type_id,
- 'metadata': metadata,
- }
- volume = self.db.volume_create(context, options)
-
- if reservations:
- QUOTAS.commit(context, reservations)
-
- self._cast_create_volume(context, volume['id'],
- snapshot_id, image_id)
- return volume
-
- def _cast_create_volume(self, context, volume_id,
- snapshot_id, image_id):
-
- # NOTE(Rongze Zhu): It is a simple solution for bug 1008866
- # If snapshot_id is set, make the call create volume directly to
- # the volume host where the snapshot resides instead of passing it
- # through the scheduer. So snapshot can be copy to new volume.
-
- if snapshot_id and FLAGS.snapshot_same_host:
- snapshot_ref = self.db.snapshot_get(context, snapshot_id)
- src_volume_ref = self.db.volume_get(context,
- snapshot_ref['volume_id'])
- topic = rpc.queue_get_for(context,
- FLAGS.volume_topic,
- src_volume_ref['host'])
- rpc.cast(context,
- topic,
- {"method": "create_volume",
- "args": {"volume_id": volume_id,
- "snapshot_id": snapshot_id,
- "image_id": image_id}})
-
- else:
- self.scheduler_rpcapi.create_volume(
- context, volume_id, snapshot_id, image_id)
-
- @wrap_check_policy
- def delete(self, context, volume, force=False):
- volume_id = volume['id']
- if not volume['host']:
- # NOTE(vish): scheduling failed, so delete it
- # Note(zhiteng): update volume quota reservation
- try:
- reservations = QUOTAS.reserve(context, volumes=-1,
- gigabytes=-volume['size'])
- except Exception:
- reservations = None
- LOG.exception(_("Failed to update quota for deleting volume."))
-
- self.db.volume_destroy(context, volume_id)
-
- if reservations:
- QUOTAS.commit(context, reservations)
- return
- if not force and volume['status'] not in ["available", "error"]:
- msg = _("Volume status must be available or error")
- raise exception.InvalidVolume(reason=msg)
-
- snapshots = self.db.snapshot_get_all_for_volume(context, volume_id)
- if len(snapshots):
- msg = _("Volume still has %d dependent snapshots") % len(snapshots)
- raise exception.InvalidVolume(reason=msg)
-
- now = timeutils.utcnow()
- self.db.volume_update(context, volume_id, {'status': 'deleting',
- 'terminated_at': now})
- host = volume['host']
- rpc.cast(context,
- rpc.queue_get_for(context, FLAGS.volume_topic, host),
- {"method": "delete_volume",
- "args": {"volume_id": volume_id}})
-
- @wrap_check_policy
- def update(self, context, volume, fields):
- self.db.volume_update(context, volume['id'], fields)
-
- def get(self, context, volume_id):
- rv = self.db.volume_get(context, volume_id)
- volume = dict(rv.iteritems())
- check_policy(context, 'get', volume)
- return volume
-
- def get_all(self, context, search_opts=None):
- check_policy(context, 'get_all')
-
- if search_opts is None:
- search_opts = {}
-
- if (context.is_admin and 'all_tenants' in search_opts):
- # Need to remove all_tenants to pass the filtering below.
- del search_opts['all_tenants']
- volumes = self.db.volume_get_all(context)
- else:
- volumes = self.db.volume_get_all_by_project(context,
- context.project_id)
- if search_opts:
- LOG.debug(_("Searching by: %s") % str(search_opts))
-
- def _check_metadata_match(volume, searchdict):
- volume_metadata = {}
- for i in volume.get('volume_metadata'):
- volume_metadata[i['key']] = i['value']
-
- for k, v in searchdict.iteritems():
- if (k not in volume_metadata.keys() or
- volume_metadata[k] != v):
- return False
- return True
-
- # search_option to filter_name mapping.
- filter_mapping = {'metadata': _check_metadata_match}
-
- result = []
- for volume in volumes:
- # go over all filters in the list
- for opt, values in search_opts.iteritems():
- try:
- filter_func = filter_mapping[opt]
- except KeyError:
- # no such filter - ignore it, go to next filter
- continue
- else:
- if filter_func(volume, values):
- result.append(volume)
- break
- volumes = result
- return volumes
-
- def get_snapshot(self, context, snapshot_id):
- check_policy(context, 'get_snapshot')
- rv = self.db.snapshot_get(context, snapshot_id)
- return dict(rv.iteritems())
-
- def get_all_snapshots(self, context, search_opts=None):
- check_policy(context, 'get_all_snapshots')
-
- search_opts = search_opts or {}
-
- if (context.is_admin and 'all_tenants' in search_opts):
- # Need to remove all_tenants to pass the filtering below.
- del search_opts['all_tenants']
- return self.db.snapshot_get_all(context)
- else:
- return self.db.snapshot_get_all_by_project(context,
- context.project_id)
-
- @wrap_check_policy
- def check_attach(self, context, volume):
- # TODO(vish): abstract status checking?
- if volume['status'] != "available":
- msg = _("status must be available")
- raise exception.InvalidVolume(reason=msg)
- if volume['attach_status'] == "attached":
- msg = _("already attached")
- raise exception.InvalidVolume(reason=msg)
-
- @wrap_check_policy
- def check_detach(self, context, volume):
- # TODO(vish): abstract status checking?
- if volume['status'] == "available":
- msg = _("already detached")
- raise exception.InvalidVolume(reason=msg)
-
- @wrap_check_policy
- def reserve_volume(self, context, volume):
- self.db.volume_update(context, volume['id'], {"status": "attaching"})
-
- @wrap_check_policy
- def unreserve_volume(self, context, volume):
- if volume['status'] == "attaching":
- self.db.volume_update(context,
- volume['id'],
- {"status": "available"})
-
- @wrap_check_policy
- def begin_detaching(self, context, volume):
- self.db.volume_update(context, volume['id'], {"status": "detaching"})
-
- @wrap_check_policy
- def roll_detaching(self, context, volume):
- if volume['status'] == "detaching":
- self.db.volume_update(context,
- volume['id'],
- {"status": "in-use"})
-
- @wrap_check_policy
- def attach(self, context, volume, instance_uuid, mountpoint):
- host = volume['host']
- queue = rpc.queue_get_for(context, FLAGS.volume_topic, host)
- return rpc.call(context, queue,
- {"method": "attach_volume",
- "args": {"volume_id": volume['id'],
- "instance_uuid": instance_uuid,
- "mountpoint": mountpoint}})
-
- @wrap_check_policy
- def detach(self, context, volume):
- host = volume['host']
- queue = rpc.queue_get_for(context, FLAGS.volume_topic, host)
- return rpc.call(context, queue,
- {"method": "detach_volume",
- "args": {"volume_id": volume['id']}})
-
- @wrap_check_policy
- def initialize_connection(self, context, volume, connector):
- host = volume['host']
- queue = rpc.queue_get_for(context, FLAGS.volume_topic, host)
- return rpc.call(context, queue,
- {"method": "initialize_connection",
- "args": {"volume_id": volume['id'],
- "connector": connector}})
-
- @wrap_check_policy
- def terminate_connection(self, context, volume, connector):
- self.unreserve_volume(context, volume)
- host = volume['host']
- queue = rpc.queue_get_for(context, FLAGS.volume_topic, host)
- return rpc.call(context, queue,
- {"method": "terminate_connection",
- "args": {"volume_id": volume['id'],
- "connector": connector}})
-
- def _create_snapshot(self, context, volume, name, description,
- force=False):
- check_policy(context, 'create_snapshot', volume)
-
- if ((not force) and (volume['status'] != "available")):
- msg = _("must be available")
- raise exception.InvalidVolume(reason=msg)
-
- options = {
- 'volume_id': volume['id'],
- 'user_id': context.user_id,
- 'project_id': context.project_id,
- 'status': "creating",
- 'progress': '0%',
- 'volume_size': volume['size'],
- 'display_name': name,
- 'display_description': description}
-
- snapshot = self.db.snapshot_create(context, options)
- host = volume['host']
- rpc.cast(context,
- rpc.queue_get_for(context, FLAGS.volume_topic, host),
- {"method": "create_snapshot",
- "args": {"volume_id": volume['id'],
- "snapshot_id": snapshot['id']}})
- return snapshot
-
- def create_snapshot(self, context, volume, name, description):
- return self._create_snapshot(context, volume, name, description,
- False)
-
- def create_snapshot_force(self, context, volume, name, description):
- return self._create_snapshot(context, volume, name, description,
- True)
-
- @wrap_check_policy
- def delete_snapshot(self, context, snapshot):
- if snapshot['status'] not in ["available", "error"]:
- msg = _("Volume Snapshot status must be available or error")
- raise exception.InvalidVolume(reason=msg)
- self.db.snapshot_update(context, snapshot['id'],
- {'status': 'deleting'})
- volume = self.db.volume_get(context, snapshot['volume_id'])
- host = volume['host']
- rpc.cast(context,
- rpc.queue_get_for(context, FLAGS.volume_topic, host),
- {"method": "delete_snapshot",
- "args": {"snapshot_id": snapshot['id']}})
-
- @wrap_check_policy
- def get_volume_metadata(self, context, volume):
- """Get all metadata associated with a volume."""
- rv = self.db.volume_metadata_get(context, volume['id'])
- return dict(rv.iteritems())
-
- @wrap_check_policy
- def delete_volume_metadata(self, context, volume, key):
- """Delete the given metadata item from a volume."""
- self.db.volume_metadata_delete(context, volume['id'], key)
-
- @wrap_check_policy
- def update_volume_metadata(self, context, volume, metadata, delete=False):
- """Updates or creates volume metadata.
-
- If delete is True, metadata items that are not specified in the
- `metadata` argument will be deleted.
-
- """
- if delete:
- _metadata = metadata
- else:
- _metadata = self.get_volume_metadata(context, volume['id'])
- _metadata.update(metadata)
-
- self.db.volume_metadata_update(context, volume['id'], _metadata, True)
- return _metadata
-
- def get_volume_metadata_value(self, volume, key):
- """Get value of particular metadata key."""
- metadata = volume.get('volume_metadata')
- if metadata:
- for i in volume['volume_metadata']:
- if i['key'] == key:
- return i['value']
- return None
-
- def _check_volume_availability(self, context, volume, force):
- """Check if the volume can be used."""
- if volume['status'] not in ['available', 'in-use']:
- msg = _('Volume status must be available/in-use.')
- raise exception.InvalidVolume(reason=msg)
- if not force and 'in-use' == volume['status']:
- msg = _('Volume status is in-use.')
- raise exception.InvalidVolume(reason=msg)
-
- @wrap_check_policy
- def copy_volume_to_image(self, context, volume, metadata, force):
- """Create a new image from the specified volume."""
- self._check_volume_availability(context, volume, force)
-
- recv_metadata = self.image_service.create(context, metadata)
- self.update(context, volume, {'status': 'uploading'})
- rpc.cast(context,
- rpc.queue_get_for(context,
- FLAGS.volume_topic,
- volume['host']),
- {"method": "copy_volume_to_image",
- "args": {"volume_id": volume['id'],
- "image_id": recv_metadata['id']}})
-
- response = {"id": volume['id'],
- "updated_at": volume['updated_at'],
- "status": 'uploading',
- "display_description": volume['display_description'],
- "size": volume['size'],
- "volume_type": volume['volume_type'],
- "image_id": recv_metadata['id'],
- "container_format": recv_metadata['container_format'],
- "disk_format": recv_metadata['disk_format'],
- "image_name": recv_metadata.get('name', None)
- }
- return response
diff --git a/nova/volume/manager.py b/nova/volume/manager.py
deleted file mode 100644
index fafdcd5be5..0000000000
--- a/nova/volume/manager.py
+++ /dev/null
@@ -1,452 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""
-Volume manager manages creating, attaching, detaching, and persistent storage.
-
-Persistent storage volumes keep their state independent of instances. You can
-attach to an instance, terminate the instance, spawn a new instance (even
-one from a different image) and re-attach the volume with the same data
-intact.
-
-**Related Flags**
-
-:volume_topic: What :mod:`rpc` topic to listen to (default: `volume`).
-:volume_manager: The module name of a class derived from
- :class:`manager.Manager` (default:
- :class:`nova.volume.manager.Manager`).
-:storage_availability_zone: Defaults to `nova`.
-:volume_driver: Used by :class:`Manager`. Defaults to
- :class:`nova.volume.driver.ISCSIDriver`.
-:volume_group: Name of the group that will contain exported volumes (default:
- `nova-volumes`)
-:num_shell_tries: Number of times to attempt to run commands (default: 3)
-
-"""
-
-from nova import context
-from nova import exception
-from nova import flags
-from nova.image import glance
-from nova import manager
-from nova.openstack.common import cfg
-from nova.openstack.common import excutils
-from nova.openstack.common import importutils
-from nova.openstack.common import log as logging
-from nova.openstack.common import timeutils
-from nova import quota
-from nova import utils
-from nova.volume import utils as volume_utils
-
-
-LOG = logging.getLogger(__name__)
-
-QUOTAS = quota.QUOTAS
-
-volume_manager_opts = [
- cfg.StrOpt('storage_availability_zone',
- default='nova',
- help='availability zone of this service'),
- cfg.StrOpt('volume_driver',
- default='nova.volume.driver.ISCSIDriver',
- help='Driver to use for volume creation'),
- cfg.BoolOpt('use_local_volumes',
- default=True,
- help='if True, will not discover local volumes'),
- cfg.BoolOpt('volume_force_update_capabilities',
- default=False,
- help='if True will force update capabilities on each check'),
- ]
-
-FLAGS = flags.FLAGS
-FLAGS.register_opts(volume_manager_opts)
-
-
-class VolumeManager(manager.SchedulerDependentManager):
- """Manages attachable block storage devices."""
- def __init__(self, volume_driver=None, *args, **kwargs):
- """Load the driver from the one specified in args, or from flags."""
- if not volume_driver:
- volume_driver = FLAGS.volume_driver
- self.driver = importutils.import_object(volume_driver)
- super(VolumeManager, self).__init__(service_name='volume',
- *args, **kwargs)
- # NOTE(vish): Implementation specific db handling is done
- # by the driver.
- self.driver.db = self.db
- self._last_volume_stats = []
-
- def init_host(self):
- """Do any initialization that needs to be run if this is a
- standalone service."""
-
- ctxt = context.get_admin_context()
- self.driver.do_setup(ctxt)
- self.driver.check_for_setup_error()
-
- volumes = self.db.volume_get_all_by_host(ctxt, self.host)
- LOG.debug(_("Re-exporting %s volumes"), len(volumes))
- for volume in volumes:
- if volume['status'] in ['available', 'in-use']:
- self.driver.ensure_export(ctxt, volume)
- else:
- LOG.info(_("volume %s: skipping export"), volume['name'])
-
- LOG.debug(_('Resuming any in progress delete operations'))
- for volume in volumes:
- if volume['status'] == 'deleting':
- LOG.info(_("Resuming delete on volume: %s"), volume['id'])
- self.delete_volume(ctxt, volume['id'])
-
- def create_volume(self, context, volume_id, snapshot_id=None,
- image_id=None, reservations=None):
- """Creates and exports the volume."""
- context = context.elevated()
- volume_ref = self.db.volume_get(context, volume_id)
- self._notify_about_volume_usage(context, volume_ref, "create.start")
- LOG.info(_("volume %s: creating"), volume_ref['name'])
-
- self.db.volume_update(context,
- volume_id,
- {'host': self.host})
- # NOTE(vish): so we don't have to get volume from db again
- # before passing it to the driver.
- volume_ref['host'] = self.host
-
- status = 'available'
- model_update = False
-
- try:
- vol_name = volume_ref['name']
- vol_size = volume_ref['size']
- LOG.debug(_("volume %(vol_name)s: creating lv of"
- " size %(vol_size)sG") % locals())
- if snapshot_id is None and image_id is None:
- model_update = self.driver.create_volume(volume_ref)
- elif snapshot_id is not None:
- snapshot_ref = self.db.snapshot_get(context, snapshot_id)
- model_update = self.driver.create_volume_from_snapshot(
- volume_ref,
- snapshot_ref)
- else:
- # create the volume from an image
- image_service, image_id = \
- glance.get_remote_image_service(context,
- image_id)
- image_location = image_service.get_location(context, image_id)
- cloned = self.driver.clone_image(volume_ref, image_location)
- if not cloned:
- model_update = self.driver.create_volume(volume_ref)
- status = 'downloading'
-
- if model_update:
- self.db.volume_update(context, volume_ref['id'], model_update)
-
- LOG.debug(_("volume %s: creating export"), volume_ref['name'])
- model_update = self.driver.create_export(context, volume_ref)
- if model_update:
- self.db.volume_update(context, volume_ref['id'], model_update)
- except Exception:
- with excutils.save_and_reraise_exception():
- self.db.volume_update(context,
- volume_ref['id'], {'status': 'error'})
-
- now = timeutils.utcnow()
- volume_ref = self.db.volume_update(context,
- volume_ref['id'], {'status': status,
- 'launched_at': now})
- LOG.debug(_("volume %s: created successfully"), volume_ref['name'])
- self._reset_stats()
- self._notify_about_volume_usage(context, volume_ref, "create.end")
-
- if image_id and not cloned:
- #copy the image onto the volume.
- self._copy_image_to_volume(context, volume_ref, image_id)
- return volume_id
-
- def delete_volume(self, context, volume_id):
- """Deletes and unexports volume."""
- context = context.elevated()
- volume_ref = self.db.volume_get(context, volume_id)
- if volume_ref['attach_status'] == "attached":
- # Volume is still attached, need to detach first
- raise exception.VolumeAttached(volume_id=volume_id)
- if volume_ref['host'] != self.host:
- raise exception.InvalidVolume(
- reason=_("Volume is not local to this node"))
-
- self._notify_about_volume_usage(context, volume_ref, "delete.start")
- self._reset_stats()
- try:
- LOG.debug(_("volume %s: removing export"), volume_ref['name'])
- self.driver.remove_export(context, volume_ref)
- LOG.debug(_("volume %s: deleting"), volume_ref['name'])
- self.driver.delete_volume(volume_ref)
- except exception.VolumeIsBusy:
- LOG.debug(_("volume %s: volume is busy"), volume_ref['name'])
- self.driver.ensure_export(context, volume_ref)
- self.db.volume_update(context, volume_ref['id'],
- {'status': 'available'})
- return True
- except Exception:
- with excutils.save_and_reraise_exception():
- self.db.volume_update(context,
- volume_ref['id'],
- {'status': 'error_deleting'})
-
- # Get reservations
- try:
- reservations = QUOTAS.reserve(context, volumes=-1,
- gigabytes=-volume_ref['size'])
- except Exception:
- reservations = None
- LOG.exception(_("Failed to update usages deleting volume"))
-
- volume_ref = self.db.volume_destroy(context, volume_id)
- LOG.debug(_("volume %s: deleted successfully"), volume_ref['name'])
- self._notify_about_volume_usage(context, volume_ref, "delete.end")
-
- # Commit the reservations
- if reservations:
- QUOTAS.commit(context, reservations)
-
- return True
-
- def create_snapshot(self, context, volume_id, snapshot_id):
- """Creates and exports the snapshot."""
- context = context.elevated()
- snapshot_ref = self.db.snapshot_get(context, snapshot_id)
- LOG.info(_("snapshot %s: creating"), snapshot_ref['name'])
-
- try:
- snap_name = snapshot_ref['name']
- LOG.debug(_("snapshot %(snap_name)s: creating") % locals())
- model_update = self.driver.create_snapshot(snapshot_ref)
- if model_update:
- self.db.snapshot_update(context, snapshot_ref['id'],
- model_update)
-
- except Exception:
- with excutils.save_and_reraise_exception():
- self.db.snapshot_update(context,
- snapshot_ref['id'],
- {'status': 'error'})
-
- self.db.snapshot_update(context,
- snapshot_ref['id'], {'status': 'available',
- 'progress': '100%'})
- LOG.debug(_("snapshot %s: created successfully"), snapshot_ref['name'])
- return snapshot_id
-
- def delete_snapshot(self, context, snapshot_id):
- """Deletes and unexports snapshot."""
- context = context.elevated()
- snapshot_ref = self.db.snapshot_get(context, snapshot_id)
-
- try:
- LOG.debug(_("snapshot %s: deleting"), snapshot_ref['name'])
- self.driver.delete_snapshot(snapshot_ref)
- except exception.SnapshotIsBusy:
- LOG.debug(_("snapshot %s: snapshot is busy"), snapshot_ref['name'])
- self.db.snapshot_update(context,
- snapshot_ref['id'],
- {'status': 'available'})
- return True
- except Exception:
- with excutils.save_and_reraise_exception():
- self.db.snapshot_update(context,
- snapshot_ref['id'],
- {'status': 'error_deleting'})
-
- self.db.snapshot_destroy(context, snapshot_id)
- LOG.debug(_("snapshot %s: deleted successfully"), snapshot_ref['name'])
- return True
-
- def attach_volume(self, context, volume_id, instance_uuid, mountpoint):
- """Updates db to show volume is attached"""
- # TODO(vish): refactor this into a more general "reserve"
- if not utils.is_uuid_like(instance_uuid):
- raise exception.InvalidUUID(instance_uuid)
-
- try:
- self.driver.attach_volume(context,
- volume_id,
- instance_uuid,
- mountpoint)
- except Exception:
- with excutils.save_and_reraise_exception():
- self.db.volume_update(context,
- volume_id,
- {'status': 'error_attaching'})
-
- self.db.volume_attached(context,
- volume_id,
- instance_uuid,
- mountpoint)
-
- def detach_volume(self, context, volume_id):
- """Updates db to show volume is detached"""
- # TODO(vish): refactor this into a more general "unreserve"
- try:
- self.driver.detach_volume(context, volume_id)
- except Exception:
- with excutils.save_and_reraise_exception():
- self.db.volume_update(context,
- volume_id,
- {'status': 'error_detaching'})
-
- self.db.volume_detached(context, volume_id)
-
- def _copy_image_to_volume(self, context, volume, image_id):
- """Downloads Glance image to the specified volume. """
- volume_id = volume['id']
- payload = {'volume_id': volume_id, 'image_id': image_id}
- try:
- image_service, image_id = glance.get_remote_image_service(context,
- image_id)
- self.driver.copy_image_to_volume(context, volume, image_service,
- image_id)
- LOG.debug(_("Downloaded image %(image_id)s to %(volume_id)s "
- "successfully") % locals())
- self.db.volume_update(context, volume_id,
- {'status': 'available'})
- except Exception, error:
- with excutils.save_and_reraise_exception():
- payload['message'] = unicode(error)
- self.db.volume_update(context, volume_id, {'status': 'error'})
-
- def copy_volume_to_image(self, context, volume_id, image_id):
- """Uploads the specified volume to Glance."""
- payload = {'volume_id': volume_id, 'image_id': image_id}
- try:
- volume = self.db.volume_get(context, volume_id)
- self.driver.ensure_export(context.elevated(), volume)
- image_service, image_id = glance.get_remote_image_service(context,
- image_id)
- self.driver.copy_volume_to_image(context, volume, image_service,
- image_id)
- LOG.debug(_("Uploaded volume %(volume_id)s to "
- "image (%(image_id)s) successfully") % locals())
- except Exception, error:
- with excutils.save_and_reraise_exception():
- payload['message'] = unicode(error)
- finally:
- if volume['instance_uuid'] is None:
- self.db.volume_update(context, volume_id,
- {'status': 'available'})
- else:
- self.db.volume_update(context, volume_id,
- {'status': 'in-use'})
-
- def initialize_connection(self, context, volume_id, connector):
- """Prepare volume for connection from host represented by connector.
-
- This method calls the driver initialize_connection and returns
- it to the caller. The connector parameter is a dictionary with
- information about the host that will connect to the volume in the
- following format::
-
- {
- 'ip': ip,
- 'initiator': initiator,
- }
-
- ip: the ip address of the connecting machine
-
- initiator: the iscsi initiator name of the connecting machine.
- This can be None if the connecting machine does not support iscsi
- connections.
-
- driver is responsible for doing any necessary security setup and
- returning a connection_info dictionary in the following format::
-
- {
- 'driver_volume_type': driver_volume_type,
- 'data': data,
- }
-
- driver_volume_type: a string to identify the type of volume. This
- can be used by the calling code to determine the
- strategy for connecting to the volume. This could
- be 'iscsi', 'rbd', 'sheepdog', etc.
-
- data: this is the data that the calling code will use to connect
- to the volume. Keep in mind that this will be serialized to
- json in various places, so it should not contain any non-json
- data types.
- """
- volume_ref = self.db.volume_get(context, volume_id)
- return self.driver.initialize_connection(volume_ref, connector)
-
- def terminate_connection(self, context, volume_id, connector):
- """Cleanup connection from host represented by connector.
-
- The format of connector is the same as for initialize_connection.
- """
- volume_ref = self.db.volume_get(context, volume_id)
- self.driver.terminate_connection(volume_ref, connector)
-
- def check_for_export(self, context, instance_id):
- """Make sure whether volume is exported."""
- instance_ref = self.db.instance_get(context, instance_id)
- volumes = self.db.volume_get_all_by_instance_uuid(context,
- instance_ref['uuid'])
-
- for volume in volumes:
- self.driver.check_for_export(context, volume['id'])
-
- def _volume_stats_changed(self, stat1, stat2):
- if FLAGS.volume_force_update_capabilities:
- return True
- if len(stat1) != len(stat2):
- return True
- for (k, v) in stat1.iteritems():
- if (k, v) not in stat2.iteritems():
- return True
- return False
-
- @manager.periodic_task
- def _report_driver_status(self, context):
- volume_stats = self.driver.get_volume_stats(refresh=True)
- if volume_stats:
- LOG.info(_("Checking volume capabilities"))
-
- if self._volume_stats_changed(self._last_volume_stats,
- volume_stats):
- LOG.info(_("New capabilities found: %s"), volume_stats)
- self._last_volume_stats = volume_stats
-
- # This will grab info about the host and queue it
- # to be sent to the Schedulers.
- self.update_service_capabilities(self._last_volume_stats)
- else:
- # avoid repeating fanouts
- self.update_service_capabilities(None)
-
- def _reset_stats(self):
- LOG.info(_("Clear capabilities"))
- self._last_volume_stats = []
-
- def notification(self, context, event):
- LOG.info(_("Notification {%s} received"), event)
- self._reset_stats()
-
- def _notify_about_volume_usage(self, context, volume, event_suffix,
- extra_usage_info=None):
- volume_utils.notify_about_volume_usage(
- context, volume, event_suffix,
- extra_usage_info=extra_usage_info, host=self.host)
diff --git a/nova/volume/netapp.py b/nova/volume/netapp.py
deleted file mode 100644
index b1b3915ab6..0000000000
--- a/nova/volume/netapp.py
+++ /dev/null
@@ -1,1291 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright (c) 2012 NetApp, Inc.
-# Copyright (c) 2012 OpenStack LLC.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""
-Volume driver for NetApp storage systems.
-
-This driver requires NetApp OnCommand 5.0 and one or more Data
-ONTAP 7-mode storage systems with installed iSCSI licenses.
-
-"""
-
-import time
-
-import suds
-from suds import client
-from suds.sax import text
-
-from nova import exception
-from nova import flags
-from nova.openstack.common import cfg
-from nova.openstack.common import log as logging
-from nova.volume import driver
-from nova.volume import volume_types
-
-LOG = logging.getLogger(__name__)
-
-netapp_opts = [
- cfg.StrOpt('netapp_wsdl_url',
- default=None,
- help='URL of the WSDL file for the DFM server'),
- cfg.StrOpt('netapp_login',
- default=None,
- help='User name for the DFM server'),
- cfg.StrOpt('netapp_password',
- default=None,
- help='Password for the DFM server'),
- cfg.StrOpt('netapp_server_hostname',
- default=None,
- help='Hostname for the DFM server'),
- cfg.IntOpt('netapp_server_port',
- default=8088,
- help='Port number for the DFM server'),
- cfg.StrOpt('netapp_storage_service',
- default=None,
- help=('Storage service to use for provisioning '
- '(when volume_type=None)')),
- cfg.StrOpt('netapp_storage_service_prefix',
- default=None,
- help=('Prefix of storage service name to use for '
- 'provisioning (volume_type name will be appended)')),
- cfg.StrOpt('netapp_vfiler',
- default=None,
- help='Vfiler to use for provisioning'),
- ]
-
-FLAGS = flags.FLAGS
-FLAGS.register_opts(netapp_opts)
-
-
-class DfmDataset(object):
- def __init__(self, id, name, project, type):
- self.id = id
- self.name = name
- self.project = project
- self.type = type
-
-
-class DfmLun(object):
- def __init__(self, dataset, lunpath, id):
- self.dataset = dataset
- self.lunpath = lunpath
- self.id = id
-
-
-class NetAppISCSIDriver(driver.ISCSIDriver):
- """NetApp iSCSI volume driver."""
-
- IGROUP_PREFIX = 'openstack-'
- DATASET_PREFIX = 'OpenStack_'
- DATASET_METADATA_PROJECT_KEY = 'OpenStackProject'
- DATASET_METADATA_VOL_TYPE_KEY = 'OpenStackVolType'
-
- def __init__(self, *args, **kwargs):
- super(NetAppISCSIDriver, self).__init__(*args, **kwargs)
- self.discovered_luns = []
- self.discovered_datasets = []
- self.lun_table = {}
-
- def _check_fail(self, request, response):
- """Utility routine to handle checking ZAPI failures."""
- if 'failed' == response.Status:
- name = request.Name
- reason = response.Reason
- msg = _('API %(name)s failed: %(reason)s')
- raise exception.VolumeBackendAPIException(data=msg % locals())
-
- def _create_client(self, **kwargs):
- """Instantiate a web services client.
-
- This method creates a "suds" client to make web services calls to the
- DFM server. Note that the WSDL file is quite large and may take
- a few seconds to parse.
- """
- wsdl_url = kwargs['wsdl_url']
- LOG.debug(_('Using WSDL: %s') % wsdl_url)
- if kwargs['cache']:
- self.client = client.Client(wsdl_url, username=kwargs['login'],
- password=kwargs['password'])
- else:
- self.client = client.Client(wsdl_url, username=kwargs['login'],
- password=kwargs['password'],
- cache=None)
- soap_url = 'http://%s:%s/apis/soap/v1' % (kwargs['hostname'],
- kwargs['port'])
- LOG.debug(_('Using DFM server: %s') % soap_url)
- self.client.set_options(location=soap_url)
-
- def _set_storage_service(self, storage_service):
- """Set the storage service to use for provisioning."""
- LOG.debug(_('Using storage service: %s') % storage_service)
- self.storage_service = storage_service
-
- def _set_storage_service_prefix(self, storage_service_prefix):
- """Set the storage service prefix to use for provisioning."""
- LOG.debug(_('Using storage service prefix: %s') %
- storage_service_prefix)
- self.storage_service_prefix = storage_service_prefix
-
- def _set_vfiler(self, vfiler):
- """Set the vfiler to use for provisioning."""
- LOG.debug(_('Using vfiler: %s') % vfiler)
- self.vfiler = vfiler
-
- def _check_flags(self):
- """Ensure that the flags we care about are set."""
- required_flags = ['netapp_wsdl_url', 'netapp_login', 'netapp_password',
- 'netapp_server_hostname', 'netapp_server_port']
- for flag in required_flags:
- if not getattr(FLAGS, flag, None):
- raise exception.InvalidInput(reason=_('%s is not set') % flag)
- if not (FLAGS.netapp_storage_service or
- FLAGS.netapp_storage_service_prefix):
- raise exception.InvalidInput(reason=_('Either '
- 'netapp_storage_service or netapp_storage_service_prefix must '
- 'be set'))
-
- def do_setup(self, context):
- """Setup the NetApp Volume driver.
-
- Called one time by the manager after the driver is loaded.
- Validate the flags we care about and setup the suds (web services)
- client.
- """
- self._check_flags()
- self._create_client(wsdl_url=FLAGS.netapp_wsdl_url,
- login=FLAGS.netapp_login, password=FLAGS.netapp_password,
- hostname=FLAGS.netapp_server_hostname,
- port=FLAGS.netapp_server_port, cache=True)
- self._set_storage_service(FLAGS.netapp_storage_service)
- self._set_storage_service_prefix(FLAGS.netapp_storage_service_prefix)
- self._set_vfiler(FLAGS.netapp_vfiler)
-
- def check_for_setup_error(self):
- """Check that the driver is working and can communicate.
-
- Invoke a web services API to make sure we can talk to the server.
- Also perform the discovery of datasets and LUNs from DFM.
- """
- self.client.service.DfmAbout()
- LOG.debug(_("Connected to DFM server"))
- self._discover_luns()
-
- def _get_datasets(self):
- """Get the list of datasets from DFM."""
- server = self.client.service
- res = server.DatasetListInfoIterStart(IncludeMetadata=True)
- tag = res.Tag
- datasets = []
- try:
- while True:
- res = server.DatasetListInfoIterNext(Tag=tag, Maximum=100)
- if not res.Datasets:
- break
- datasets.extend(res.Datasets.DatasetInfo)
- finally:
- server.DatasetListInfoIterEnd(Tag=tag)
- return datasets
-
- def _discover_dataset_luns(self, dataset, volume):
- """Discover all of the LUNs in a dataset."""
- server = self.client.service
- res = server.DatasetMemberListInfoIterStart(
- DatasetNameOrId=dataset.id,
- IncludeExportsInfo=True,
- IncludeIndirect=True,
- MemberType='lun_path')
- tag = res.Tag
- suffix = None
- if volume:
- suffix = '/' + volume
- try:
- while True:
- res = server.DatasetMemberListInfoIterNext(Tag=tag,
- Maximum=100)
- if (not hasattr(res, 'DatasetMembers') or
- not res.DatasetMembers):
- break
- for member in res.DatasetMembers.DatasetMemberInfo:
- if suffix and not member.MemberName.endswith(suffix):
- continue
- # MemberName is the full LUN path in this format:
- # host:/volume/qtree/lun
- lun = DfmLun(dataset, member.MemberName, member.MemberId)
- self.discovered_luns.append(lun)
- finally:
- server.DatasetMemberListInfoIterEnd(Tag=tag)
-
- def _discover_luns(self):
- """Discover the LUNs from DFM.
-
- Discover all of the OpenStack-created datasets and LUNs in the DFM
- database.
- """
- datasets = self._get_datasets()
- self.discovered_datasets = []
- self.discovered_luns = []
- for dataset in datasets:
- if not dataset.DatasetName.startswith(self.DATASET_PREFIX):
- continue
- if (not hasattr(dataset, 'DatasetMetadata') or
- not dataset.DatasetMetadata):
- continue
- project = None
- type = None
- for field in dataset.DatasetMetadata.DfmMetadataField:
- if field.FieldName == self.DATASET_METADATA_PROJECT_KEY:
- project = field.FieldValue
- elif field.FieldName == self.DATASET_METADATA_VOL_TYPE_KEY:
- type = field.FieldValue
- if not project:
- continue
- ds = DfmDataset(dataset.DatasetId, dataset.DatasetName,
- project, type)
- self.discovered_datasets.append(ds)
- self._discover_dataset_luns(ds, None)
- dataset_count = len(self.discovered_datasets)
- lun_count = len(self.discovered_luns)
- msg = _("Discovered %(dataset_count)s datasets and %(lun_count)s LUNs")
- LOG.debug(msg % locals())
- self.lun_table = {}
-
- def _get_job_progress(self, job_id):
- """Get progress of one running DFM job.
-
- Obtain the latest progress report for the job and return the
- list of progress events.
- """
- server = self.client.service
- res = server.DpJobProgressEventListIterStart(JobId=job_id)
- tag = res.Tag
- event_list = []
- try:
- while True:
- res = server.DpJobProgressEventListIterNext(Tag=tag,
- Maximum=100)
- if not hasattr(res, 'ProgressEvents'):
- break
- event_list += res.ProgressEvents.DpJobProgressEventInfo
- finally:
- server.DpJobProgressEventListIterEnd(Tag=tag)
- return event_list
-
- def _wait_for_job(self, job_id):
- """Wait until a job terminates.
-
- Poll the job until it completes or an error is detected. Return the
- final list of progress events if it completes successfully.
- """
- while True:
- events = self._get_job_progress(job_id)
- for event in events:
- if event.EventStatus == 'error':
- msg = _('Job failed: %s') % (event.ErrorMessage)
- raise exception.VolumeBackendAPIException(data=msg)
- if event.EventType == 'job-end':
- return events
- time.sleep(5)
-
- def _dataset_name(self, project, ss_type):
- """Return the dataset name for a given project and volume type."""
- _project = project.replace(' ', '_').replace('-', '_')
- dataset_name = self.DATASET_PREFIX + _project
- if not ss_type:
- return dataset_name
- _type = ss_type.replace(' ', '_').replace('-', '_')
- return dataset_name + '_' + _type
-
- def _get_dataset(self, dataset_name):
- """Lookup a dataset by name in the list of discovered datasets."""
- for dataset in self.discovered_datasets:
- if dataset.name == dataset_name:
- return dataset
- return None
-
- def _create_dataset(self, dataset_name, project, ss_type):
- """Create a new dataset using the storage service.
-
- The export settings are set to create iSCSI LUNs aligned for Linux.
- Returns the ID of the new dataset.
- """
- if ss_type and not self.storage_service_prefix:
- msg = _('Attempt to use volume_type without specifying '
- 'netapp_storage_service_prefix flag.')
- raise exception.VolumeBackendAPIException(data=msg)
- if not (ss_type or self.storage_service):
- msg = _('You must set the netapp_storage_service flag in order to '
- 'create volumes with no volume_type.')
- raise exception.VolumeBackendAPIException(data=msg)
- storage_service = self.storage_service
- if ss_type:
- storage_service = self.storage_service_prefix + ss_type
-
- factory = self.client.factory
-
- lunmap = factory.create('DatasetLunMappingInfo')
- lunmap.IgroupOsType = 'linux'
- export = factory.create('DatasetExportInfo')
- export.DatasetExportProtocol = 'iscsi'
- export.DatasetLunMappingInfo = lunmap
- detail = factory.create('StorageSetInfo')
- detail.DpNodeName = 'Primary data'
- detail.DatasetExportInfo = export
- if hasattr(self, 'vfiler') and self.vfiler:
- detail.ServerNameOrId = self.vfiler
- details = factory.create('ArrayOfStorageSetInfo')
- details.StorageSetInfo = [detail]
- field1 = factory.create('DfmMetadataField')
- field1.FieldName = self.DATASET_METADATA_PROJECT_KEY
- field1.FieldValue = project
- field2 = factory.create('DfmMetadataField')
- field2.FieldName = self.DATASET_METADATA_VOL_TYPE_KEY
- field2.FieldValue = ss_type
- metadata = factory.create('ArrayOfDfmMetadataField')
- metadata.DfmMetadataField = [field1, field2]
-
- res = self.client.service.StorageServiceDatasetProvision(
- StorageServiceNameOrId=storage_service,
- DatasetName=dataset_name,
- AssumeConfirmation=True,
- StorageSetDetails=details,
- DatasetMetadata=metadata)
-
- ds = DfmDataset(res.DatasetId, dataset_name, project, ss_type)
- self.discovered_datasets.append(ds)
- return ds
-
- def _provision(self, name, description, project, ss_type, size):
- """Provision a LUN through provisioning manager.
-
- The LUN will be created inside a dataset associated with the project.
- If the dataset doesn't already exist, we create it using the storage
- service specified in the nova conf.
- """
- dataset_name = self._dataset_name(project, ss_type)
- dataset = self._get_dataset(dataset_name)
- if not dataset:
- dataset = self._create_dataset(dataset_name, project, ss_type)
-
- info = self.client.factory.create('ProvisionMemberRequestInfo')
- info.Name = name
- if description:
- info.Description = description
- info.Size = size
- info.MaximumSnapshotSpace = 2 * long(size)
-
- server = self.client.service
- lock_id = server.DatasetEditBegin(DatasetNameOrId=dataset.id)
- try:
- server.DatasetProvisionMember(EditLockId=lock_id,
- ProvisionMemberRequestInfo=info)
- res = server.DatasetEditCommit(EditLockId=lock_id,
- AssumeConfirmation=True)
- except (suds.WebFault, Exception):
- server.DatasetEditRollback(EditLockId=lock_id)
- msg = _('Failed to provision dataset member')
- raise exception.VolumeBackendAPIException(data=msg)
-
- lun_id = None
- lunpath = None
-
- for info in res.JobIds.JobInfo:
- events = self._wait_for_job(info.JobId)
- for event in events:
- if event.EventType != 'lun-create':
- continue
- lunpath = event.ProgressLunInfo.LunName
- lun_id = event.ProgressLunInfo.LunPathId
-
- if not lun_id:
- msg = _('No LUN was created by the provision job')
- raise exception.VolumeBackendAPIException(data=msg)
-
- lun = DfmLun(dataset, lunpath, lun_id)
- self.discovered_luns.append(lun)
- self.lun_table[name] = lun
-
- def _get_ss_type(self, volume):
- """Get the storage service type for a volume."""
- id = volume['volume_type_id']
- if not id:
- return None
- volume_type = volume_types.get_volume_type(None, id)
- if not volume_type:
- return None
- return volume_type['name']
-
- def _remove_destroy(self, name, project):
- """Remove the LUN from the dataset, also destroying it.
-
- Remove the LUN from the dataset and destroy the actual LUN on the
- storage system.
- """
- lun = self._lookup_lun_for_volume(name, project)
- member = self.client.factory.create('DatasetMemberParameter')
- member.ObjectNameOrId = lun.id
- members = self.client.factory.create('ArrayOfDatasetMemberParameter')
- members.DatasetMemberParameter = [member]
-
- server = self.client.service
- lock_id = server.DatasetEditBegin(DatasetNameOrId=lun.dataset.id)
- try:
- server.DatasetRemoveMember(EditLockId=lock_id, Destroy=True,
- DatasetMemberParameters=members)
- server.DatasetEditCommit(EditLockId=lock_id,
- AssumeConfirmation=True)
- except (suds.WebFault, Exception):
- server.DatasetEditRollback(EditLockId=lock_id)
- msg = _('Failed to remove and delete dataset member')
- raise exception.VolumeBackendAPIException(data=msg)
-
- def create_volume(self, volume):
- """Driver entry point for creating a new volume."""
- default_size = '104857600' # 100 MB
- gigabytes = 1073741824L # 2^30
- name = volume['name']
- project = volume['project_id']
- display_name = volume['display_name']
- display_description = volume['display_description']
- description = None
- if display_name:
- if display_description:
- description = display_name + "\n" + display_description
- else:
- description = display_name
- elif display_description:
- description = display_description
- if int(volume['size']) == 0:
- size = default_size
- else:
- size = str(int(volume['size']) * gigabytes)
- ss_type = self._get_ss_type(volume)
- self._provision(name, description, project, ss_type, size)
-
- def _lookup_lun_for_volume(self, name, project):
- """Lookup the LUN that corresponds to the give volume.
-
- Initial lookups involve a table scan of all of the discovered LUNs,
- but later lookups are done instantly from the hashtable.
- """
- if name in self.lun_table:
- return self.lun_table[name]
- lunpath_suffix = '/' + name
- for lun in self.discovered_luns:
- if lun.dataset.project != project:
- continue
- if lun.lunpath.endswith(lunpath_suffix):
- self.lun_table[name] = lun
- return lun
- msg = _("No entry in LUN table for volume %s") % (name)
- raise exception.VolumeBackendAPIException(data=msg)
-
- def delete_volume(self, volume):
- """Driver entry point for destroying existing volumes."""
- name = volume['name']
- project = volume['project_id']
- self._remove_destroy(name, project)
-
- def _get_lun_details(self, lun_id):
- """Given the ID of a LUN, get the details about that LUN."""
- server = self.client.service
- res = server.LunListInfoIterStart(ObjectNameOrId=lun_id)
- tag = res.Tag
- try:
- res = server.LunListInfoIterNext(Tag=tag, Maximum=1)
- if hasattr(res, 'Luns') and res.Luns.LunInfo:
- return res.Luns.LunInfo[0]
- finally:
- server.LunListInfoIterEnd(Tag=tag)
- msg = _('Failed to get LUN details for LUN ID %s')
- raise exception.VolumeBackendAPIException(data=msg % lun_id)
-
- def _get_host_details(self, host_id):
- """Given the ID of a host, get the details about it.
-
- A "host" is a storage system here.
- """
- server = self.client.service
- res = server.HostListInfoIterStart(ObjectNameOrId=host_id)
- tag = res.Tag
- try:
- res = server.HostListInfoIterNext(Tag=tag, Maximum=1)
- if hasattr(res, 'Hosts') and res.Hosts.HostInfo:
- return res.Hosts.HostInfo[0]
- finally:
- server.HostListInfoIterEnd(Tag=tag)
- msg = _('Failed to get host details for host ID %s')
- raise exception.VolumeBackendAPIException(data=msg % host_id)
-
- def _get_iqn_for_host(self, host_id):
- """Get the iSCSI Target Name for a storage system."""
- request = self.client.factory.create('Request')
- request.Name = 'iscsi-node-get-name'
- response = self.client.service.ApiProxy(Target=host_id,
- Request=request)
- self._check_fail(request, response)
- return response.Results['node-name'][0]
-
- def _api_elem_is_empty(self, elem):
- """Return true if the API element should be considered empty.
-
- Helper routine to figure out if a list returned from a proxy API
- is empty. This is necessary because the API proxy produces nasty
- looking XML.
- """
- if not type(elem) is list:
- return True
- if 0 == len(elem):
- return True
- child = elem[0]
- if isinstance(child, text.Text):
- return True
- if type(child) is str:
- return True
- return False
-
- def _get_target_portal_for_host(self, host_id, host_address):
- """Get iSCSI target portal for a storage system.
-
- Get the iSCSI Target Portal details for a particular IP address
- on a storage system.
- """
- request = self.client.factory.create('Request')
- request.Name = 'iscsi-portal-list-info'
- response = self.client.service.ApiProxy(Target=host_id,
- Request=request)
- self._check_fail(request, response)
- portal = {}
- portals = response.Results['iscsi-portal-list-entries']
- if self._api_elem_is_empty(portals):
- return portal
- portal_infos = portals[0]['iscsi-portal-list-entry-info']
- for portal_info in portal_infos:
- portal['address'] = portal_info['ip-address'][0]
- portal['port'] = portal_info['ip-port'][0]
- portal['portal'] = portal_info['tpgroup-tag'][0]
- if host_address == portal['address']:
- break
- return portal
-
- def _get_export(self, volume):
- """Get the iSCSI export details for a volume.
-
- Looks up the LUN in DFM based on the volume and project name, then get
- the LUN's ID. We store that value in the database instead of the iSCSI
- details because we will not have the true iSCSI details until masking
- time (when initialize_connection() is called).
- """
- name = volume['name']
- project = volume['project_id']
- lun = self._lookup_lun_for_volume(name, project)
- return {'provider_location': lun.id}
-
- def ensure_export(self, context, volume):
- """Driver entry point to get the export info for an existing volume."""
- return self._get_export(volume)
-
- def create_export(self, context, volume):
- """Driver entry point to get the export info for a new volume."""
- return self._get_export(volume)
-
- def remove_export(self, context, volume):
- """Driver exntry point to remove an export for a volume.
-
- Since exporting is idempotent in this driver, we have nothing
- to do for unexporting.
- """
- pass
-
- def _find_igroup_for_initiator(self, host_id, initiator_name):
- """Get the igroup for an initiator.
-
- Look for an existing igroup (initiator group) on the storage system
- containing a given iSCSI initiator and return the name of the igroup.
- """
- request = self.client.factory.create('Request')
- request.Name = 'igroup-list-info'
- response = self.client.service.ApiProxy(Target=host_id,
- Request=request)
- self._check_fail(request, response)
- igroups = response.Results['initiator-groups']
- if self._api_elem_is_empty(igroups):
- return None
- igroup_infos = igroups[0]['initiator-group-info']
- for igroup_info in igroup_infos:
- if ('iscsi' != igroup_info['initiator-group-type'][0] or
- 'linux' != igroup_info['initiator-group-os-type'][0]):
- continue
- igroup_name = igroup_info['initiator-group-name'][0]
- if not igroup_name.startswith(self.IGROUP_PREFIX):
- continue
- initiators = igroup_info['initiators'][0]['initiator-info']
- for initiator in initiators:
- if initiator_name == initiator['initiator-name'][0]:
- return igroup_name
- return None
-
- def _create_igroup(self, host_id, initiator_name):
- """Create a new igroup.
-
- Create a new igroup (initiator group) on the storage system to hold
- the given iSCSI initiator. The group will only have 1 member and will
- be named "openstack-${initiator_name}".
- """
- igroup_name = self.IGROUP_PREFIX + initiator_name
- request = self.client.factory.create('Request')
- request.Name = 'igroup-create'
- igroup_create_xml = (
- '<initiator-group-name>%s</initiator-group-name>'
- '<initiator-group-type>iscsi</initiator-group-type>'
- '<os-type>linux</os-type><ostype>linux</ostype>')
- request.Args = text.Raw(igroup_create_xml % igroup_name)
- response = self.client.service.ApiProxy(Target=host_id,
- Request=request)
- self._check_fail(request, response)
- request = self.client.factory.create('Request')
- request.Name = 'igroup-add'
- igroup_add_xml = (
- '<initiator-group-name>%s</initiator-group-name>'
- '<initiator>%s</initiator>')
- request.Args = text.Raw(igroup_add_xml % (igroup_name, initiator_name))
- response = self.client.service.ApiProxy(Target=host_id,
- Request=request)
- self._check_fail(request, response)
- return igroup_name
-
- def _get_lun_mappping(self, host_id, lunpath, igroup_name):
- """Get the mapping between a LUN and an igroup.
-
- Check if a given LUN is already mapped to the given igroup (initiator
- group). If the LUN is mapped, also return the LUN number for the
- mapping.
- """
- request = self.client.factory.create('Request')
- request.Name = 'lun-map-list-info'
- request.Args = text.Raw('<path>%s</path>' % (lunpath))
- response = self.client.service.ApiProxy(Target=host_id,
- Request=request)
- self._check_fail(request, response)
- igroups = response.Results['initiator-groups']
- if self._api_elem_is_empty(igroups):
- return {'mapped': False}
- igroup_infos = igroups[0]['initiator-group-info']
- for igroup_info in igroup_infos:
- if igroup_name == igroup_info['initiator-group-name'][0]:
- return {'mapped': True, 'lun_num': igroup_info['lun-id'][0]}
- return {'mapped': False}
-
- def _map_initiator(self, host_id, lunpath, igroup_name):
- """Map a LUN to an igroup.
-
- Map the given LUN to the given igroup (initiator group). Return the LUN
- number that the LUN was mapped to (the filer will choose the lowest
- available number).
- """
- request = self.client.factory.create('Request')
- request.Name = 'lun-map'
- lun_map_xml = ('<initiator-group>%s</initiator-group>'
- '<path>%s</path>')
- request.Args = text.Raw(lun_map_xml % (igroup_name, lunpath))
- response = self.client.service.ApiProxy(Target=host_id,
- Request=request)
- self._check_fail(request, response)
- return response.Results['lun-id-assigned'][0]
-
- def _unmap_initiator(self, host_id, lunpath, igroup_name):
- """Unmap the given LUN from the given igroup (initiator group)."""
- request = self.client.factory.create('Request')
- request.Name = 'lun-unmap'
- lun_unmap_xml = ('<initiator-group>%s</initiator-group>'
- '<path>%s</path>')
- request.Args = text.Raw(lun_unmap_xml % (igroup_name, lunpath))
- response = self.client.service.ApiProxy(Target=host_id,
- Request=request)
- self._check_fail(request, response)
-
- def _ensure_initiator_mapped(self, host_id, lunpath, initiator_name):
- """Ensure that a LUN is mapped to a particular initiator.
-
- Check if a LUN is mapped to a given initiator already and create
- the mapping if it is not. A new igroup will be created if needed.
- Returns the LUN number for the mapping between the LUN and initiator
- in both cases.
- """
- lunpath = '/vol/' + lunpath
- igroup_name = self._find_igroup_for_initiator(host_id, initiator_name)
- if not igroup_name:
- igroup_name = self._create_igroup(host_id, initiator_name)
-
- mapping = self._get_lun_mappping(host_id, lunpath, igroup_name)
- if mapping['mapped']:
- return mapping['lun_num']
- return self._map_initiator(host_id, lunpath, igroup_name)
-
- def _ensure_initiator_unmapped(self, host_id, lunpath, initiator_name):
- """Ensure that a LUN is not mapped to a particular initiator.
-
- Check if a LUN is mapped to a given initiator and remove the
- mapping if it is. This does not destroy the igroup.
- """
- lunpath = '/vol/' + lunpath
- igroup_name = self._find_igroup_for_initiator(host_id, initiator_name)
- if not igroup_name:
- return
-
- mapping = self._get_lun_mappping(host_id, lunpath, igroup_name)
- if mapping['mapped']:
- self._unmap_initiator(host_id, lunpath, igroup_name)
-
- def initialize_connection(self, volume, connector):
- """Driver entry point to attach a volume to an instance.
-
- Do the LUN masking on the storage system so the initiator can access
- the LUN on the target. Also return the iSCSI properties so the
- initiator can find the LUN. This implementation does not call
- _get_iscsi_properties() to get the properties because cannot store the
- LUN number in the database. We only find out what the LUN number will
- be during this method call so we construct the properties dictionary
- ourselves.
- """
- initiator_name = connector['initiator']
- lun_id = volume['provider_location']
- if not lun_id:
- msg = _("No LUN ID for volume %s") % volume['name']
- raise exception.VolumeBackendAPIException(data=msg)
- lun = self._get_lun_details(lun_id)
- lun_num = self._ensure_initiator_mapped(lun.HostId, lun.LunPath,
- initiator_name)
- host = self._get_host_details(lun.HostId)
- portal = self._get_target_portal_for_host(host.HostId,
- host.HostAddress)
- if not portal:
- msg = _('Failed to get target portal for filer: %s')
- raise exception.VolumeBackendAPIException(data=msg % host.HostName)
-
- iqn = self._get_iqn_for_host(host.HostId)
- if not iqn:
- msg = _('Failed to get target IQN for filer: %s')
- raise exception.VolumeBackendAPIException(data=msg % host.HostName)
-
- properties = {}
- properties['target_discovered'] = False
- (address, port) = (portal['address'], portal['port'])
- properties['target_portal'] = '%s:%s' % (address, port)
- properties['target_iqn'] = iqn
- properties['target_lun'] = lun_num
- properties['volume_id'] = volume['id']
-
- auth = volume['provider_auth']
- if auth:
- (auth_method, auth_username, auth_secret) = auth.split()
-
- properties['auth_method'] = auth_method
- properties['auth_username'] = auth_username
- properties['auth_password'] = auth_secret
-
- return {
- 'driver_volume_type': 'iscsi',
- 'data': properties,
- }
-
- def terminate_connection(self, volume, connector):
- """Driver entry point to unattach a volume from an instance.
-
- Unmask the LUN on the storage system so the given intiator can no
- longer access it.
- """
- initiator_name = connector['initiator']
- lun_id = volume['provider_location']
- if not lun_id:
- msg = _('No LUN ID for volume %s') % volume['name']
- raise exception.VolumeBackendAPIException(data=msg)
- lun = self._get_lun_details(lun_id)
- self._ensure_initiator_unmapped(lun.HostId, lun.LunPath,
- initiator_name)
-
- def _is_clone_done(self, host_id, clone_op_id, volume_uuid):
- """Check the status of a clone operation.
-
- Return True if done, False otherwise.
- """
- request = self.client.factory.create('Request')
- request.Name = 'clone-list-status'
- clone_list_status_xml = (
- '<clone-id><clone-id-info>'
- '<clone-op-id>%s</clone-op-id>'
- '<volume-uuid>%s</volume-uuid>'
- '</clone-id-info></clone-id>')
- request.Args = text.Raw(clone_list_status_xml % (clone_op_id,
- volume_uuid))
- response = self.client.service.ApiProxy(Target=host_id,
- Request=request)
- self._check_fail(request, response)
- status = response.Results['status']
- if self._api_elem_is_empty(status):
- return False
- ops_info = status[0]['ops-info'][0]
- state = ops_info['clone-state'][0]
- return 'completed' == state
-
- def _clone_lun(self, host_id, src_path, dest_path, snap):
- """Create a clone of a NetApp LUN.
-
- The clone initially consumes no space and is not space reserved.
- """
- request = self.client.factory.create('Request')
- request.Name = 'clone-start'
- clone_start_xml = (
- '<source-path>%s</source-path><no-snap>%s</no-snap>'
- '<destination-path>%s</destination-path>')
- if snap:
- no_snap = 'false'
- else:
- no_snap = 'true'
- request.Args = text.Raw(clone_start_xml % (src_path, no_snap,
- dest_path))
- response = self.client.service.ApiProxy(Target=host_id,
- Request=request)
- self._check_fail(request, response)
- clone_id = response.Results['clone-id'][0]
- clone_id_info = clone_id['clone-id-info'][0]
- clone_op_id = clone_id_info['clone-op-id'][0]
- volume_uuid = clone_id_info['volume-uuid'][0]
- while not self._is_clone_done(host_id, clone_op_id, volume_uuid):
- time.sleep(5)
-
- def _refresh_dfm_luns(self, host_id):
- """Refresh the LUN list for one filer in DFM."""
- server = self.client.service
- server.DfmObjectRefresh(ObjectNameOrId=host_id, ChildType='lun_path')
- while True:
- time.sleep(15)
- res = server.DfmMonitorTimestampList(HostNameOrId=host_id)
- for timestamp in res.DfmMonitoringTimestamp:
- if 'lun' != timestamp.MonitorName:
- continue
- if timestamp.LastMonitoringTimestamp:
- return
-
- def _destroy_lun(self, host_id, lun_path):
- """Destroy a LUN on the filer."""
- request = self.client.factory.create('Request')
- request.Name = 'lun-offline'
- path_xml = '<path>%s</path>'
- request.Args = text.Raw(path_xml % lun_path)
- response = self.client.service.ApiProxy(Target=host_id,
- Request=request)
- self._check_fail(request, response)
- request = self.client.factory.create('Request')
- request.Name = 'lun-destroy'
- request.Args = text.Raw(path_xml % lun_path)
- response = self.client.service.ApiProxy(Target=host_id,
- Request=request)
- self._check_fail(request, response)
-
- def _resize_volume(self, host_id, vol_name, new_size):
- """Resize the volume by the amount requested."""
- request = self.client.factory.create('Request')
- request.Name = 'volume-size'
- volume_size_xml = (
- '<volume>%s</volume><new-size>%s</new-size>')
- request.Args = text.Raw(volume_size_xml % (vol_name, new_size))
- response = self.client.service.ApiProxy(Target=host_id,
- Request=request)
- self._check_fail(request, response)
-
- def _create_qtree(self, host_id, vol_name, qtree_name):
- """Create a qtree the filer."""
- request = self.client.factory.create('Request')
- request.Name = 'qtree-create'
- qtree_create_xml = (
- '<mode>0755</mode><volume>%s</volume><qtree>%s</qtree>')
- request.Args = text.Raw(qtree_create_xml % (vol_name, qtree_name))
- response = self.client.service.ApiProxy(Target=host_id,
- Request=request)
- self._check_fail(request, response)
-
- def create_snapshot(self, snapshot):
- """Driver entry point for creating a snapshot.
-
- This driver implements snapshots by using efficient single-file
- (LUN) cloning.
- """
- vol_name = snapshot['volume_name']
- snapshot_name = snapshot['name']
- project = snapshot['project_id']
- lun = self._lookup_lun_for_volume(vol_name, project)
- lun_id = lun.id
- lun = self._get_lun_details(lun_id)
- extra_gb = snapshot['volume_size']
- new_size = '+%dg' % extra_gb
- self._resize_volume(lun.HostId, lun.VolumeName, new_size)
- # LunPath is the partial LUN path in this format: volume/qtree/lun
- lun_path = str(lun.LunPath)
- lun_name = lun_path[lun_path.rfind('/') + 1:]
- qtree_path = '/vol/%s/%s' % (lun.VolumeName, lun.QtreeName)
- src_path = '%s/%s' % (qtree_path, lun_name)
- dest_path = '%s/%s' % (qtree_path, snapshot_name)
- self._clone_lun(lun.HostId, src_path, dest_path, True)
-
- def delete_snapshot(self, snapshot):
- """Driver entry point for deleting a snapshot."""
- vol_name = snapshot['volume_name']
- snapshot_name = snapshot['name']
- project = snapshot['project_id']
- lun = self._lookup_lun_for_volume(vol_name, project)
- lun_id = lun.id
- lun = self._get_lun_details(lun_id)
- lun_path = '/vol/%s/%s/%s' % (lun.VolumeName, lun.QtreeName,
- snapshot_name)
- self._destroy_lun(lun.HostId, lun_path)
- extra_gb = snapshot['volume_size']
- new_size = '-%dg' % extra_gb
- self._resize_volume(lun.HostId, lun.VolumeName, new_size)
-
- def create_volume_from_snapshot(self, volume, snapshot):
- """Driver entry point for creating a new volume from a snapshot.
-
- Many would call this "cloning" and in fact we use cloning to implement
- this feature.
- """
- vol_size = volume['size']
- snap_size = snapshot['volume_size']
- if vol_size != snap_size:
- msg = _('Cannot create volume of size %(vol_size)s from '
- 'snapshot of size %(snap_size)s')
- raise exception.VolumeBackendAPIException(data=msg % locals())
- vol_name = snapshot['volume_name']
- snapshot_name = snapshot['name']
- project = snapshot['project_id']
- lun = self._lookup_lun_for_volume(vol_name, project)
- lun_id = lun.id
- dataset = lun.dataset
- old_type = dataset.type
- new_type = self._get_ss_type(volume)
- if new_type != old_type:
- msg = _('Cannot create volume of type %(new_type)s from '
- 'snapshot of type %(old_type)s')
- raise exception.VolumeBackendAPIException(data=msg % locals())
- lun = self._get_lun_details(lun_id)
- extra_gb = vol_size
- new_size = '+%dg' % extra_gb
- self._resize_volume(lun.HostId, lun.VolumeName, new_size)
- clone_name = volume['name']
- self._create_qtree(lun.HostId, lun.VolumeName, clone_name)
- src_path = '/vol/%s/%s/%s' % (lun.VolumeName, lun.QtreeName,
- snapshot_name)
- dest_path = '/vol/%s/%s/%s' % (lun.VolumeName, clone_name, clone_name)
- self._clone_lun(lun.HostId, src_path, dest_path, False)
- self._refresh_dfm_luns(lun.HostId)
- self._discover_dataset_luns(dataset, clone_name)
-
- def check_for_export(self, context, volume_id):
- raise NotImplementedError()
-
-
-class NetAppLun(object):
- """Represents a LUN on NetApp storage."""
-
- def __init__(self, handle, name, size, metadata_dict):
- self.handle = handle
- self.name = name
- self.size = size
- self.metadata = metadata_dict
-
- def get_metadata_property(self, prop):
- """Get the metadata property of a LUN."""
- if prop in self.metadata:
- return self.metadata[prop]
- name = self.name
- msg = _("No metadata property %(prop)s defined for the LUN %(name)s")
- LOG.debug(msg % locals())
-
-
-class NetAppCmodeISCSIDriver(driver.ISCSIDriver):
- """NetApp C-mode iSCSI volume driver."""
-
- def __init__(self, *args, **kwargs):
- super(NetAppCmodeISCSIDriver, self).__init__(*args, **kwargs)
- self.lun_table = {}
-
- def _create_client(self, **kwargs):
- """Instantiate a web services client.
-
- This method creates a "suds" client to make web services calls to the
- DFM server. Note that the WSDL file is quite large and may take
- a few seconds to parse.
- """
- wsdl_url = kwargs['wsdl_url']
- LOG.debug(_('Using WSDL: %s') % wsdl_url)
- if kwargs['cache']:
- self.client = client.Client(wsdl_url, username=kwargs['login'],
- password=kwargs['password'])
- else:
- self.client = client.Client(wsdl_url, username=kwargs['login'],
- password=kwargs['password'],
- cache=None)
-
- def _check_flags(self):
- """Ensure that the flags we care about are set."""
- required_flags = ['netapp_wsdl_url', 'netapp_login', 'netapp_password',
- 'netapp_server_hostname', 'netapp_server_port']
- for flag in required_flags:
- if not getattr(FLAGS, flag, None):
- msg = _('%s is not set') % flag
- raise exception.InvalidInput(data=msg)
-
- def do_setup(self, context):
- """Setup the NetApp Volume driver.
-
- Called one time by the manager after the driver is loaded.
- Validate the flags we care about and setup the suds (web services)
- client.
- """
- self._check_flags()
- self._create_client(wsdl_url=FLAGS.netapp_wsdl_url,
- login=FLAGS.netapp_login, password=FLAGS.netapp_password,
- hostname=FLAGS.netapp_server_hostname,
- port=FLAGS.netapp_server_port, cache=True)
-
- def check_for_setup_error(self):
- """Check that the driver is working and can communicate.
-
- Discovers the LUNs on the NetApp server.
- """
- self.lun_table = {}
- luns = self.client.service.ListLuns()
- for lun in luns:
- meta_dict = {}
- if hasattr(lun, 'Metadata'):
- meta_dict = self._create_dict_from_meta(lun.Metadata)
- discovered_lun = NetAppLun(lun.Handle, lun.Name, lun.Size,
- meta_dict)
- self._add_lun_to_table(discovered_lun)
- LOG.debug(_("Success getting LUN list from server"))
-
- def create_volume(self, volume):
- """Driver entry point for creating a new volume."""
- default_size = '104857600' # 100 MB
- gigabytes = 1073741824L # 2^30
- name = volume['name']
- if int(volume['size']) == 0:
- size = default_size
- else:
- size = str(int(volume['size']) * gigabytes)
- extra_args = {}
- extra_args['OsType'] = 'linux'
- extra_args['QosType'] = self._get_qos_type(volume)
- extra_args['Container'] = volume['project_id']
- extra_args['Display'] = volume['display_name']
- extra_args['Description'] = volume['display_description']
- extra_args['SpaceReserved'] = True
- server = self.client.service
- metadata = self._create_metadata_list(extra_args)
- lun = server.ProvisionLun(Name=name, Size=size,
- Metadata=metadata)
- LOG.debug(_("Created LUN with name %s") % name)
- self._add_lun_to_table(NetAppLun(lun.Handle, lun.Name,
- lun.Size, self._create_dict_from_meta(lun.Metadata)))
-
- def delete_volume(self, volume):
- """Driver entry point for destroying existing volumes."""
- name = volume['name']
- handle = self._get_lun_handle(name)
- self.client.service.DestroyLun(Handle=handle)
- LOG.debug(_("Destroyed LUN %s") % handle)
- self.lun_table.pop(name)
-
- def ensure_export(self, context, volume):
- """Driver entry point to get the export info for an existing volume."""
- handle = self._get_lun_handle(volume['name'])
- return {'provider_location': handle}
-
- def create_export(self, context, volume):
- """Driver entry point to get the export info for a new volume."""
- handle = self._get_lun_handle(volume['name'])
- return {'provider_location': handle}
-
- def remove_export(self, context, volume):
- """Driver exntry point to remove an export for a volume.
-
- Since exporting is idempotent in this driver, we have nothing
- to do for unexporting.
- """
- pass
-
- def initialize_connection(self, volume, connector):
- """Driver entry point to attach a volume to an instance.
-
- Do the LUN masking on the storage system so the initiator can access
- the LUN on the target. Also return the iSCSI properties so the
- initiator can find the LUN. This implementation does not call
- _get_iscsi_properties() to get the properties because cannot store the
- LUN number in the database. We only find out what the LUN number will
- be during this method call so we construct the properties dictionary
- ourselves.
- """
- initiator_name = connector['initiator']
- handle = volume['provider_location']
- server = self.client.service
- server.MapLun(Handle=handle, InitiatorType="iscsi",
- InitiatorName=initiator_name)
- msg = _("Mapped LUN %(handle)s to the initiator %(initiator_name)s")
- LOG.debug(msg % locals())
-
- target_details_list = server.GetLunTargetDetails(Handle=handle,
- InitiatorType="iscsi", InitiatorName=initiator_name)
- msg = _("Succesfully fetched target details for LUN %(handle)s and "
- "initiator %(initiator_name)s")
- LOG.debug(msg % locals())
-
- if not target_details_list:
- msg = _('Failed to get LUN target details for the LUN %s')
- raise exception.VolumeBackendAPIException(msg % handle)
- target_details = target_details_list[0]
- if not target_details.Address and target_details.Port:
- msg = _('Failed to get target portal for the LUN %s')
- raise exception.VolumeBackendAPIException(msg % handle)
- iqn = target_details.Iqn
- if not iqn:
- msg = _('Failed to get target IQN for the LUN %s')
- raise exception.VolumeBackendAPIException(msg % handle)
-
- properties = {}
- properties['target_discovered'] = False
- (address, port) = (target_details.Address, target_details.Port)
- properties['target_portal'] = '%s:%s' % (address, port)
- properties['target_iqn'] = iqn
- properties['target_lun'] = target_details.LunNumber
- properties['volume_id'] = volume['id']
-
- auth = volume['provider_auth']
- if auth:
- (auth_method, auth_username, auth_secret) = auth.split()
- properties['auth_method'] = auth_method
- properties['auth_username'] = auth_username
- properties['auth_password'] = auth_secret
-
- return {
- 'driver_volume_type': 'iscsi',
- 'data': properties,
- }
-
- def terminate_connection(self, volume, connector):
- """Driver entry point to unattach a volume from an instance.
-
- Unmask the LUN on the storage system so the given intiator can no
- longer access it.
- """
- initiator_name = connector['initiator']
- handle = volume['provider_location']
- self.client.service.UnmapLun(Handle=handle, InitiatorType="iscsi",
- InitiatorName=initiator_name)
- msg = _("Unmapped LUN %(handle)s from the initiator "
- "%(initiator_name)s")
- LOG.debug(msg % locals())
-
- def create_snapshot(self, snapshot):
- """Driver entry point for creating a snapshot.
-
- This driver implements snapshots by using efficient single-file
- (LUN) cloning.
- """
- vol_name = snapshot['volume_name']
- snapshot_name = snapshot['name']
- lun = self.lun_table[vol_name]
- extra_args = {'SpaceReserved': False}
- self._clone_lun(lun.handle, snapshot_name, extra_args)
-
- def delete_snapshot(self, snapshot):
- """Driver entry point for deleting a snapshot."""
- handle = self._get_lun_handle(snapshot['name'])
- self.client.service.DestroyLun(Handle=handle)
- LOG.debug(_("Destroyed LUN %s") % handle)
-
- def create_volume_from_snapshot(self, volume, snapshot):
- """Driver entry point for creating a new volume from a snapshot.
-
- Many would call this "cloning" and in fact we use cloning to implement
- this feature.
- """
- snapshot_name = snapshot['name']
- lun = self.lun_table[snapshot_name]
- new_name = volume['name']
- extra_args = {}
- extra_args['OsType'] = 'linux'
- extra_args['QosType'] = self._get_qos_type(volume)
- extra_args['Container'] = volume['project_id']
- extra_args['Display'] = volume['display_name']
- extra_args['Description'] = volume['display_description']
- extra_args['SpaceReserved'] = True
- self._clone_lun(lun.handle, new_name, extra_args)
-
- def check_for_export(self, context, volume_id):
- raise NotImplementedError()
-
- def _get_qos_type(self, volume):
- """Get the storage service type for a volume."""
- type_id = volume['volume_type_id']
- if not type_id:
- return None
- volume_type = volume_types.get_volume_type(None, type_id)
- if not volume_type:
- return None
- return volume_type['name']
-
- def _add_lun_to_table(self, lun):
- """Adds LUN to cache table."""
- if not isinstance(lun, NetAppLun):
- msg = _("Object is not a NetApp LUN.")
- raise exception.VolumeBackendAPIException(data=msg)
- self.lun_table[lun.name] = lun
-
- def _clone_lun(self, handle, new_name, extra_args):
- """Clone LUN with the given handle to the new name."""
- server = self.client.service
- metadata = self._create_metadata_list(extra_args)
- lun = server.CloneLun(Handle=handle, NewName=new_name,
- Metadata=metadata)
- LOG.debug(_("Cloned LUN with new name %s") % new_name)
- self._add_lun_to_table(NetAppLun(lun.Handle, lun.Name,
- lun.Size, self._create_dict_from_meta(lun.Metadata)))
-
- def _create_metadata_list(self, extra_args):
- """Creates metadata from kwargs."""
- metadata = []
- for key in extra_args.keys():
- meta = self.client.factory.create("Metadata")
- meta.Key = key
- meta.Value = extra_args[key]
- metadata.append(meta)
- return metadata
-
- def _get_lun_handle(self, name):
- """Get the details for a LUN from our cache table."""
- if not name in self.lun_table:
- LOG.warn(_("Could not find handle for LUN named %s") % name)
- return None
- return self.lun_table[name].handle
-
- def _create_dict_from_meta(self, metadata):
- """Creates dictionary from metadata array."""
- meta_dict = {}
- if not metadata:
- return meta_dict
- for meta in metadata:
- meta_dict[meta.Key] = meta.Value
- return meta_dict
diff --git a/nova/volume/netapp_nfs.py b/nova/volume/netapp_nfs.py
deleted file mode 100644
index 27d278aa32..0000000000
--- a/nova/volume/netapp_nfs.py
+++ /dev/null
@@ -1,267 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright (c) 2012 NetApp, Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""
-Volume driver for NetApp NFS storage.
-"""
-
-import os
-import suds
-import time
-
-from nova import exception
-from nova import flags
-from nova.openstack.common import cfg
-from nova.openstack.common import log as logging
-from nova.volume.netapp import netapp_opts
-from nova.volume import nfs
-
-from suds.sax import text
-
-LOG = logging.getLogger(__name__)
-
-netapp_nfs_opts = [
- cfg.IntOpt('synchronous_snapshot_create',
- default=0,
- help='Does snapshot creation call returns immediately')
- ]
-
-FLAGS = flags.FLAGS
-FLAGS.register_opts(netapp_opts)
-FLAGS.register_opts(netapp_nfs_opts)
-
-
-class NetAppNFSDriver(nfs.NfsDriver):
- """Executes commands relating to Volumes."""
- def __init__(self, *args, **kwargs):
- # NOTE(vish): db is set by Manager
- self._execute = None
- self._context = None
- super(NetAppNFSDriver, self).__init__(*args, **kwargs)
-
- def set_execute(self, execute):
- self._execute = execute
-
- def do_setup(self, context):
- self._context = context
- self.check_for_setup_error()
- self._client = NetAppNFSDriver._get_client()
-
- def check_for_setup_error(self):
- """Returns an error if prerequisites aren't met"""
- NetAppNFSDriver._check_dfm_flags()
- super(NetAppNFSDriver, self).check_for_setup_error()
-
- def create_volume_from_snapshot(self, volume, snapshot):
- """Creates a volume from a snapshot."""
- vol_size = volume.size
- snap_size = snapshot.volume_size
-
- if vol_size != snap_size:
- msg = _('Cannot create volume of size %(vol_size)s from '
- 'snapshot of size %(snap_size)s')
- raise exception.NovaException(msg % locals())
-
- self._clone_volume(snapshot.name, volume.name, snapshot.volume_id)
- share = self._get_volume_location(snapshot.volume_id)
-
- return {'provider_location': share}
-
- def create_snapshot(self, snapshot):
- """Creates a snapshot."""
- self._clone_volume(snapshot['volume_name'],
- snapshot['name'],
- snapshot['volume_id'])
-
- def delete_snapshot(self, snapshot):
- """Deletes a snapshot."""
- nfs_mount = self._get_provider_location(snapshot.volume_id)
-
- if self._volume_not_present(nfs_mount, snapshot.name):
- return True
-
- self._execute('rm', self._get_volume_path(nfs_mount, snapshot.name),
- run_as_root=True)
-
- @staticmethod
- def _check_dfm_flags():
- """Raises error if any required configuration flag for OnCommand proxy
- is missing."""
- required_flags = ['netapp_wsdl_url',
- 'netapp_login',
- 'netapp_password',
- 'netapp_server_hostname',
- 'netapp_server_port']
- for flag in required_flags:
- if not getattr(FLAGS, flag, None):
- raise exception.NovaException(_('%s is not set') % flag)
-
- @staticmethod
- def _get_client():
- """Creates SOAP _client for ONTAP-7 DataFabric Service."""
- client = suds.client.Client(FLAGS.netapp_wsdl_url,
- username=FLAGS.netapp_login,
- password=FLAGS.netapp_password)
- soap_url = 'http://%s:%s/apis/soap/v1' % (
- FLAGS.netapp_server_hostname,
- FLAGS.netapp_server_port)
- client.set_options(location=soap_url)
-
- return client
-
- def _get_volume_location(self, volume_id):
- """Returns NFS mount address as <nfs_ip_address>:<nfs_mount_dir>"""
- nfs_server_ip = self._get_host_ip(volume_id)
- export_path = self._get_export_path(volume_id)
- return (nfs_server_ip + ':' + export_path)
-
- def _clone_volume(self, volume_name, clone_name, volume_id):
- """Clones mounted volume with OnCommand proxy API"""
- host_id = self._get_host_id(volume_id)
- export_path = self._get_full_export_path(volume_id, host_id)
-
- request = self._client.factory.create('Request')
- request.Name = 'clone-start'
-
- clone_start_args = ('<source-path>%s/%s</source-path>'
- '<destination-path>%s/%s</destination-path>')
-
- request.Args = text.Raw(clone_start_args % (export_path,
- volume_name,
- export_path,
- clone_name))
-
- resp = self._client.service.ApiProxy(Target=host_id,
- Request=request)
-
- if resp.Status == 'passed' and FLAGS.synchronous_snapshot_create:
- clone_id = resp.Results['clone-id'][0]
- clone_id_info = clone_id['clone-id-info'][0]
- clone_operation_id = int(clone_id_info['clone-op-id'][0])
-
- self._wait_for_clone_finished(clone_operation_id, host_id)
- elif resp.Status == 'failed':
- raise exception.NovaException(resp.Reason)
-
- def _wait_for_clone_finished(self, clone_operation_id, host_id):
- """
- Polls ONTAP7 for clone status. Returns once clone is finished.
- :param clone_operation_id: Identifier of ONTAP clone operation
- """
- clone_list_options = ('<clone-id>'
- '<clone-id-info>'
- '<clone-op-id>%d</clone-op-id>'
- '<volume-uuid></volume-uuid>'
- '</clone-id>'
- '</clone-id-info>')
-
- request = self._client.factory.create('Request')
- request.Name = 'clone-list-status'
- request.Args = text.Raw(clone_list_options % clone_operation_id)
-
- resp = self._client.service.ApiProxy(Target=host_id, Request=request)
-
- while resp.Status != 'passed':
- time.sleep(1)
- resp = self._client.service.ApiProxy(Target=host_id,
- Request=request)
-
- def _get_provider_location(self, volume_id):
- """
- Returns provider location for given volume
- :param volume_id:
- """
- volume = self.db.volume_get(self._context, volume_id)
- return volume.provider_location
-
- def _get_host_ip(self, volume_id):
- """Returns IP address for the given volume"""
- return self._get_provider_location(volume_id).split(':')[0]
-
- def _get_export_path(self, volume_id):
- """Returns NFS export path for the given volume"""
- return self._get_provider_location(volume_id).split(':')[1]
-
- def _get_host_id(self, volume_id):
- """Returns ID of the ONTAP-7 host"""
- host_ip = self._get_host_ip(volume_id)
- server = self._client.service
-
- resp = server.HostListInfoIterStart(ObjectNameOrId=host_ip)
- tag = resp.Tag
-
- try:
- res = server.HostListInfoIterNext(Tag=tag, Maximum=1)
- if hasattr(res, 'Hosts') and res.Hosts.HostInfo:
- return res.Hosts.HostInfo[0].HostId
- finally:
- server.HostListInfoIterEnd(Tag=tag)
-
- def _get_full_export_path(self, volume_id, host_id):
- """Returns full path to the NFS share, e.g. /vol/vol0/home"""
- export_path = self._get_export_path(volume_id)
- command_args = '<pathname>%s</pathname>'
-
- request = self._client.factory.create('Request')
- request.Name = 'nfs-exportfs-storage-path'
- request.Args = text.Raw(command_args % export_path)
-
- resp = self._client.service.ApiProxy(Target=host_id,
- Request=request)
-
- if resp.Status == 'passed':
- return resp.Results['actual-pathname'][0]
- elif resp.Status == 'failed':
- raise exception.NovaException(resp.Reason)
-
- def _volume_not_present(self, nfs_mount, volume_name):
- """
- Check if volume exists
- """
- try:
- self._try_execute('ls', self._get_volume_path(nfs_mount,
- volume_name))
- except exception.ProcessExecutionError:
- # If the volume isn't present
- return True
- return False
-
- def _try_execute(self, *command, **kwargs):
- # NOTE(vish): Volume commands can partially fail due to timing, but
- # running them a second time on failure will usually
- # recover nicely.
- tries = 0
- while True:
- try:
- self._execute(*command, **kwargs)
- return True
- except exception.ProcessExecutionError:
- tries = tries + 1
- if tries >= FLAGS.num_shell_tries:
- raise
- LOG.exception(_("Recovering from a failed execute. "
- "Try number %s"), tries)
- time.sleep(tries ** 2)
-
- def _get_volume_path(self, nfs_share, volume_name):
- """Get volume path (local fs path) for given volume name on given nfs
- share
- @param nfs_share string, example 172.18.194.100:/var/nfs
- @param volume_name string,
- example volume-91ee65ec-c473-4391-8c09-162b00c68a8c
- """
- return os.path.join(self._get_mount_point_for_share(nfs_share),
- volume_name)
diff --git a/nova/volume/nexenta/__init__.py b/nova/volume/nexenta/__init__.py
deleted file mode 100644
index 3050df8f66..0000000000
--- a/nova/volume/nexenta/__init__.py
+++ /dev/null
@@ -1,33 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-#
-# Copyright 2011 Nexenta Systems, Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""
-:mod:`nexenta` -- Package contains Nexenta-specific modules
-=====================================================================
-
-.. automodule:: nexenta
-.. moduleauthor:: Yuriy Taraday <yorik.sar@gmail.com>
-"""
-
-
-class NexentaException(Exception):
- MESSAGE = _('Nexenta SA returned the error')
-
- def __init__(self, error=None):
- super(NexentaException, self).__init__(self.message, error)
-
- def __str__(self):
- return '%s: %s' % self.args
diff --git a/nova/volume/nexenta/jsonrpc.py b/nova/volume/nexenta/jsonrpc.py
deleted file mode 100644
index e0d9c810a9..0000000000
--- a/nova/volume/nexenta/jsonrpc.py
+++ /dev/null
@@ -1,84 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-#
-# Copyright 2011 Nexenta Systems, Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""
-:mod:`nexenta.jsonrpc` -- Nexenta-specific JSON RPC client
-=====================================================================
-
-.. automodule:: nexenta.jsonrpc
-.. moduleauthor:: Yuriy Taraday <yorik.sar@gmail.com>
-"""
-
-import urllib2
-
-from nova.openstack.common import jsonutils
-from nova.openstack.common import log as logging
-from nova.volume import nexenta
-
-LOG = logging.getLogger(__name__)
-
-
-class NexentaJSONException(nexenta.NexentaException):
- pass
-
-
-class NexentaJSONProxy(object):
- def __init__(self, url, user, password, auto=False, obj=None, method=None):
- self.url = url
- self.user = user
- self.password = password
- self.auto = auto
- self.obj = obj
- self.method = method
-
- def __getattr__(self, name):
- if not self.obj:
- obj, method = name, None
- elif not self.method:
- obj, method = self.obj, name
- else:
- obj, method = '%s.%s' % (self.obj, self.method), name
- return NexentaJSONProxy(self.url, self.user, self.password, self.auto,
- obj, method)
-
- def __call__(self, *args):
- data = jsonutils.dumps({'object': self.obj,
- 'method': self.method,
- 'params': args})
- auth = ('%s:%s' % (self.user, self.password)).encode('base64')[:-1]
- headers = {'Content-Type': 'application/json',
- 'Authorization': 'Basic %s' % (auth,)}
- LOG.debug(_('Sending JSON data: %s'), data)
- request = urllib2.Request(self.url, data, headers)
- response_obj = urllib2.urlopen(request)
- if response_obj.info().status == 'EOF in headers':
- if self.auto and self.url.startswith('http://'):
- LOG.info(_('Auto switching to HTTPS connection to %s'),
- self.url)
- self.url = 'https' + self.url[4:]
- request = urllib2.Request(self.url, data, headers)
- response_obj = urllib2.urlopen(request)
- else:
- LOG.error(_('No headers in server response'))
- raise NexentaJSONException(_('Bad response from server'))
-
- response_data = response_obj.read()
- LOG.debug(_('Got response: %s'), response_data)
- response = jsonutils.loads(response_data)
- if response.get('error') is not None:
- raise NexentaJSONException(response['error'].get('message', ''))
- else:
- return response.get('result')
diff --git a/nova/volume/nexenta/volume.py b/nova/volume/nexenta/volume.py
deleted file mode 100644
index 9bb6364a90..0000000000
--- a/nova/volume/nexenta/volume.py
+++ /dev/null
@@ -1,282 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-#
-# Copyright 2011 Nexenta Systems, Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""
-:mod:`nexenta.volume` -- Driver to store volumes on Nexenta Appliance
-=====================================================================
-
-.. automodule:: nexenta.volume
-.. moduleauthor:: Yuriy Taraday <yorik.sar@gmail.com>
-"""
-
-from nova import exception
-from nova import flags
-from nova.openstack.common import cfg
-from nova.openstack.common import log as logging
-from nova.volume import driver
-from nova.volume import nexenta
-from nova.volume.nexenta import jsonrpc
-
-LOG = logging.getLogger(__name__)
-FLAGS = flags.FLAGS
-
-nexenta_opts = [
- cfg.StrOpt('nexenta_host',
- default='',
- help='IP address of Nexenta SA'),
- cfg.IntOpt('nexenta_rest_port',
- default=2000,
- help='HTTP port to connect to Nexenta REST API server'),
- cfg.StrOpt('nexenta_rest_protocol',
- default='auto',
- help='Use http or https for REST connection (default auto)'),
- cfg.StrOpt('nexenta_user',
- default='admin',
- help='User name to connect to Nexenta SA'),
- cfg.StrOpt('nexenta_password',
- default='nexenta',
- help='Password to connect to Nexenta SA'),
- cfg.IntOpt('nexenta_iscsi_target_portal_port',
- default=3260,
- help='Nexenta target portal port'),
- cfg.StrOpt('nexenta_volume',
- default='nova',
- help='pool on SA that will hold all volumes'),
- cfg.StrOpt('nexenta_target_prefix',
- default='iqn.1986-03.com.sun:02:nova-',
- help='IQN prefix for iSCSI targets'),
- cfg.StrOpt('nexenta_target_group_prefix',
- default='nova/',
- help='prefix for iSCSI target groups on SA'),
- cfg.StrOpt('nexenta_blocksize',
- default='',
- help='block size for volumes (blank=default,8KB)'),
- cfg.BoolOpt('nexenta_sparse',
- default=False,
- help='flag to create sparse volumes'),
-]
-FLAGS.register_opts(nexenta_opts)
-
-
-class NexentaDriver(driver.ISCSIDriver): # pylint: disable=R0921
- """Executes volume driver commands on Nexenta Appliance."""
-
- def __init__(self):
- super(NexentaDriver, self).__init__()
-
- def do_setup(self, context):
- protocol = FLAGS.nexenta_rest_protocol
- auto = protocol == 'auto'
- if auto:
- protocol = 'http'
- self.nms = jsonrpc.NexentaJSONProxy(
- '%s://%s:%s/rest/nms/' % (protocol, FLAGS.nexenta_host,
- FLAGS.nexenta_rest_port),
- FLAGS.nexenta_user, FLAGS.nexenta_password, auto=auto)
-
- def check_for_setup_error(self):
- """Verify that the volume for our zvols exists.
-
- :raise: :py:exc:`LookupError`
- """
- if not self.nms.volume.object_exists(FLAGS.nexenta_volume):
- raise LookupError(_("Volume %s does not exist in Nexenta SA"),
- FLAGS.nexenta_volume)
-
- @staticmethod
- def _get_zvol_name(volume_name):
- """Return zvol name that corresponds given volume name."""
- return '%s/%s' % (FLAGS.nexenta_volume, volume_name)
-
- @staticmethod
- def _get_target_name(volume_name):
- """Return iSCSI target name to access volume."""
- return '%s%s' % (FLAGS.nexenta_target_prefix, volume_name)
-
- @staticmethod
- def _get_target_group_name(volume_name):
- """Return Nexenta iSCSI target group name for volume."""
- return '%s%s' % (FLAGS.nexenta_target_group_prefix, volume_name)
-
- def create_volume(self, volume):
- """Create a zvol on appliance.
-
- :param volume: volume reference
- """
- self.nms.zvol.create(
- self._get_zvol_name(volume['name']),
- '%sG' % (volume['size'],),
- FLAGS.nexenta_blocksize, FLAGS.nexenta_sparse)
-
- def delete_volume(self, volume):
- """Destroy a zvol on appliance.
-
- :param volume: volume reference
- """
- try:
- self.nms.zvol.destroy(self._get_zvol_name(volume['name']), '')
- except nexenta.NexentaException as exc:
- if "zvol has children" in exc.args[1]:
- raise exception.VolumeIsBusy
- else:
- raise
-
- def create_snapshot(self, snapshot):
- """Create snapshot of existing zvol on appliance.
-
- :param snapshot: shapshot reference
- """
- self.nms.zvol.create_snapshot(
- self._get_zvol_name(snapshot['volume_name']),
- snapshot['name'], '')
-
- def create_volume_from_snapshot(self, volume, snapshot):
- """Create new volume from other's snapshot on appliance.
-
- :param volume: reference of volume to be created
- :param snapshot: reference of source snapshot
- """
- self.nms.zvol.clone(
- '%s@%s' % (self._get_zvol_name(snapshot['volume_name']),
- snapshot['name']),
- self._get_zvol_name(volume['name']))
-
- def delete_snapshot(self, snapshot):
- """Delete volume's snapshot on appliance.
-
- :param snapshot: shapshot reference
- """
- try:
- self.nms.snapshot.destroy(
- '%s@%s' % (self._get_zvol_name(snapshot['volume_name']),
- snapshot['name']),
- '')
- except nexenta.NexentaException as exc:
- if "snapshot has dependent clones" in exc.args[1]:
- raise exception.SnapshotIsBusy
- else:
- raise
-
- def local_path(self, volume):
- """Return local path to existing local volume.
-
- We never have local volumes, so it raises NotImplementedError.
-
- :raise: :py:exc:`NotImplementedError`
- """
- LOG.error(_("Call to local_path should not happen."
- " Verify that use_local_volumes flag is turned off."))
- raise NotImplementedError
-
- def _do_export(self, _ctx, volume, ensure=False):
- """Do all steps to get zvol exported as LUN 0 at separate target.
-
- :param volume: reference of volume to be exported
- :param ensure: if True, ignore errors caused by already existing
- resources
- :return: iscsiadm-formatted provider location string
- """
- zvol_name = self._get_zvol_name(volume['name'])
- target_name = self._get_target_name(volume['name'])
- target_group_name = self._get_target_group_name(volume['name'])
-
- try:
- self.nms.iscsitarget.create_target({'target_name': target_name})
- except nexenta.NexentaException as exc:
- if not ensure or 'already configured' not in exc.args[1]:
- raise
- else:
- LOG.info(_('Ignored target creation error "%s"'
- ' while ensuring export'), exc)
- try:
- self.nms.stmf.create_targetgroup(target_group_name)
- except nexenta.NexentaException as exc:
- if not ensure or 'already exists' not in exc.args[1]:
- raise
- else:
- LOG.info(_('Ignored target group creation error "%s"'
- ' while ensuring export'), exc)
- try:
- self.nms.stmf.add_targetgroup_member(target_group_name,
- target_name)
- except nexenta.NexentaException as exc:
- if not ensure or 'already exists' not in exc.args[1]:
- raise
- else:
- LOG.info(_('Ignored target group member addition error "%s"'
- ' while ensuring export'), exc)
- try:
- self.nms.scsidisk.create_lu(zvol_name, {})
- except nexenta.NexentaException as exc:
- if not ensure or 'in use' not in exc.args[1]:
- raise
- else:
- LOG.info(_('Ignored LU creation error "%s"'
- ' while ensuring export'), exc)
- try:
- self.nms.scsidisk.add_lun_mapping_entry(zvol_name, {
- 'target_group': target_group_name,
- 'lun': '0'})
- except nexenta.NexentaException as exc:
- if not ensure or 'view entry exists' not in exc.args[1]:
- raise
- else:
- LOG.info(_('Ignored LUN mapping entry addition error "%s"'
- ' while ensuring export'), exc)
- return '%s:%s,1 %s' % (FLAGS.nexenta_host,
- FLAGS.nexenta_iscsi_target_portal_port,
- target_name)
-
- def create_export(self, _ctx, volume):
- """Create new export for zvol.
-
- :param volume: reference of volume to be exported
- :return: iscsiadm-formatted provider location string
- """
- loc = self._do_export(_ctx, volume, ensure=False)
- return {'provider_location': loc}
-
- def ensure_export(self, _ctx, volume):
- """Recreate parts of export if necessary.
-
- :param volume: reference of volume to be exported
- """
- self._do_export(_ctx, volume, ensure=True)
-
- def remove_export(self, _ctx, volume):
- """Destroy all resources created to export zvol.
-
- :param volume: reference of volume to be unexported
- """
- zvol_name = self._get_zvol_name(volume['name'])
- target_name = self._get_target_name(volume['name'])
- target_group_name = self._get_target_group_name(volume['name'])
- self.nms.scsidisk.delete_lu(zvol_name)
-
- try:
- self.nms.stmf.destroy_targetgroup(target_group_name)
- except nexenta.NexentaException as exc:
- # We assume that target group is already gone
- LOG.warn(_('Got error trying to destroy target group'
- ' %(target_group)s, assuming it is already gone: %(exc)s'),
- {'target_group': target_group_name, 'exc': exc})
- try:
- self.nms.iscsitarget.delete_target(target_name)
- except nexenta.NexentaException as exc:
- # We assume that target is gone as well
- LOG.warn(_('Got error trying to delete target %(target)s,'
- ' assuming it is already gone: %(exc)s'),
- {'target': target_name, 'exc': exc})
diff --git a/nova/volume/nfs.py b/nova/volume/nfs.py
deleted file mode 100644
index 7a6ca83071..0000000000
--- a/nova/volume/nfs.py
+++ /dev/null
@@ -1,293 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright (c) 2012 NetApp, Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import errno
-import hashlib
-import os
-
-from nova import exception
-from nova import flags
-from nova.openstack.common import cfg
-from nova.openstack.common import log as logging
-from nova.virt.libvirt import volume_nfs
-from nova.volume import driver
-
-LOG = logging.getLogger(__name__)
-
-volume_opts = [
- cfg.StrOpt('nfs_shares_config',
- default=None,
- help='File with the list of available nfs shares'),
- cfg.StrOpt('nfs_disk_util',
- default='df',
- help='Use du or df for free space calculation'),
- cfg.BoolOpt('nfs_sparsed_volumes',
- default=True,
- help=('Create volumes as sparsed files which take no space.'
- 'If set to False volume is created as regular file.'
- 'In such case volume creation takes a lot of time.'))
-]
-
-FLAGS = flags.FLAGS
-FLAGS.register_opts(volume_opts)
-FLAGS.register_opts(volume_nfs.volume_opts)
-
-
-class NfsDriver(driver.VolumeDriver):
- """NFS based volume driver. Creates file on NFS share for using it
- as block device on hypervisor."""
-
- def do_setup(self, context):
- """Any initialization the volume driver does while starting"""
- super(NfsDriver, self).do_setup(context)
-
- config = FLAGS.nfs_shares_config
- if not config:
- LOG.warn(_("There's no NFS config file configured "))
- if not config or not os.path.exists(config):
- msg = _("NFS config file doesn't exist")
- LOG.warn(msg)
- raise exception.NfsException(msg)
-
- try:
- self._execute('mount.nfs', check_exit_code=False)
- except OSError as exc:
- if exc.errno == errno.ENOENT:
- raise exception.NfsException('mount.nfs is not installed')
- else:
- raise
-
- def check_for_setup_error(self):
- """Just to override parent behavior"""
- pass
-
- def create_volume(self, volume):
- """Creates a volume"""
-
- self._ensure_shares_mounted()
-
- volume['provider_location'] = self._find_share(volume['size'])
-
- LOG.info(_('casted to %s') % volume['provider_location'])
-
- self._do_create_volume(volume)
-
- return {'provider_location': volume['provider_location']}
-
- def delete_volume(self, volume):
- """Deletes a logical volume."""
-
- if not volume['provider_location']:
- LOG.warn(_('Volume %s does not have provider_location specified, '
- 'skipping'), volume['name'])
- return
-
- self._ensure_share_mounted(volume['provider_location'])
-
- mounted_path = self.local_path(volume)
-
- if not self._path_exists(mounted_path):
- volume = volume['name']
-
- LOG.warn(_('Trying to delete non-existing volume %(volume)s at '
- 'path %(mounted_path)s') % locals())
- return
-
- self._execute('rm', '-f', mounted_path, run_as_root=True)
-
- def ensure_export(self, ctx, volume):
- """Synchronously recreates an export for a logical volume."""
- self._ensure_share_mounted(volume['provider_location'])
-
- def create_export(self, ctx, volume):
- """Exports the volume. Can optionally return a Dictionary of changes
- to the volume object to be persisted."""
- pass
-
- def remove_export(self, ctx, volume):
- """Removes an export for a logical volume."""
- pass
-
- def check_for_export(self, context, volume_id):
- """Make sure volume is exported."""
- pass
-
- def initialize_connection(self, volume, connector):
- """Allow connection to connector and return connection info."""
- data = {'export': volume['provider_location'],
- 'name': volume['name']}
- return {
- 'driver_volume_type': 'nfs',
- 'data': data
- }
-
- def terminate_connection(self, volume, connector):
- """Disallow connection from connector"""
- pass
-
- def local_path(self, volume):
- """Get volume path (mounted locally fs path) for given volume
- :param volume: volume reference
- """
- nfs_share = volume['provider_location']
- return os.path.join(self._get_mount_point_for_share(nfs_share),
- volume['name'])
-
- def _create_sparsed_file(self, path, size):
- """Creates file with 0 disk usage"""
- self._execute('truncate', '-s', self._sizestr(size),
- path, run_as_root=True)
-
- def _create_regular_file(self, path, size):
- """Creates regular file of given size. Takes a lot of time for large
- files"""
- KB = 1024
- MB = KB * 1024
- GB = MB * 1024
-
- block_size_mb = 1
- block_count = size * GB / (block_size_mb * MB)
-
- self._execute('dd', 'if=/dev/zero', 'of=%s' % path,
- 'bs=%dM' % block_size_mb,
- 'count=%d' % block_count,
- run_as_root=True)
-
- def _set_rw_permissions_for_all(self, path):
- """Sets 666 permissions for the path"""
- self._execute('chmod', 'ugo+rw', path, run_as_root=True)
-
- def _do_create_volume(self, volume):
- """Create a volume on given nfs_share
- :param volume: volume reference
- """
- volume_path = self.local_path(volume)
- volume_size = volume['size']
-
- if FLAGS.nfs_sparsed_volumes:
- self._create_sparsed_file(volume_path, volume_size)
- else:
- self._create_regular_file(volume_path, volume_size)
-
- self._set_rw_permissions_for_all(volume_path)
-
- def _ensure_shares_mounted(self):
- """Look for NFS shares in the flags and tries to mount them locally"""
- self._mounted_shares = []
-
- for share in self._load_shares_config():
- try:
- self._ensure_share_mounted(share)
- self._mounted_shares.append(share)
- except Exception, exc:
- LOG.warning('Exception during mounting %s' % (exc,))
-
- LOG.debug('Available shares %s' % str(self._mounted_shares))
-
- def _load_shares_config(self):
- return [share.strip() for share in open(FLAGS.nfs_shares_config)
- if share and not share.startswith('#')]
-
- def _ensure_share_mounted(self, nfs_share):
- """Mount NFS share
- :param nfs_share:
- """
- mount_path = self._get_mount_point_for_share(nfs_share)
- self._mount_nfs(nfs_share, mount_path, ensure=True)
-
- def _find_share(self, volume_size_for):
- """Choose NFS share among available ones for given volume size. Current
- implementation looks for greatest capacity
- :param volume_size_for: int size in Gb
- """
-
- if not self._mounted_shares:
- raise exception.NfsNoSharesMounted()
-
- greatest_size = 0
- greatest_share = None
-
- for nfs_share in self._mounted_shares:
- capacity = self._get_available_capacity(nfs_share)
- if capacity > greatest_size:
- greatest_share = nfs_share
- greatest_size = capacity
-
- if volume_size_for * 1024 * 1024 * 1024 > greatest_size:
- raise exception.NfsNoSuitableShareFound(
- volume_size=volume_size_for)
- return greatest_share
-
- def _get_mount_point_for_share(self, nfs_share):
- """
- :param nfs_share: example 172.18.194.100:/var/nfs
- """
- return os.path.join(FLAGS.nfs_mount_point_base,
- self._get_hash_str(nfs_share))
-
- def _get_available_capacity(self, nfs_share):
- """Calculate available space on the NFS share
- :param nfs_share: example 172.18.194.100:/var/nfs
- """
- mount_point = self._get_mount_point_for_share(nfs_share)
-
- out, _ = self._execute('df', '-P', '-B', '1', mount_point,
- run_as_root=True)
- out = out.splitlines()[1]
-
- available = 0
-
- if FLAGS.nfs_disk_util == 'df':
- available = int(out.split()[3])
- else:
- size = int(out.split()[1])
- out, _ = self._execute('du', '-sb', '--apparent-size',
- '--exclude', '*snapshot*', mount_point,
- run_as_root=True)
- used = int(out.split()[0])
- available = size - used
-
- return available
-
- def _mount_nfs(self, nfs_share, mount_path, ensure=False):
- """Mount NFS share to mount path"""
- if not self._path_exists(mount_path):
- self._execute('mkdir', '-p', mount_path)
-
- try:
- self._execute('mount', '-t', 'nfs', nfs_share, mount_path,
- run_as_root=True)
- except exception.ProcessExecutionError as exc:
- if ensure and 'already mounted' in exc.stderr:
- LOG.warn(_("%s is already mounted"), nfs_share)
- else:
- raise
-
- def _path_exists(self, path):
- """Check given path """
- try:
- self._execute('stat', path, run_as_root=True)
- return True
- except exception.ProcessExecutionError as exc:
- if 'No such file or directory' in exc.stderr:
- return False
- else:
- raise
-
- def _get_hash_str(self, base_str):
- """returns string that represents hash of base_str (in a hex format)"""
- return hashlib.md5(base_str).hexdigest()
diff --git a/nova/volume/san.py b/nova/volume/san.py
deleted file mode 100644
index cf4507f313..0000000000
--- a/nova/volume/san.py
+++ /dev/null
@@ -1,651 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2011 Justin Santa Barbara
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""
-Drivers for san-stored volumes.
-
-The unique thing about a SAN is that we don't expect that we can run the volume
-controller on the SAN hardware. We expect to access it over SSH or some API.
-"""
-
-import base64
-import httplib
-import os
-import paramiko
-import random
-import socket
-import string
-import uuid
-
-from lxml import etree
-
-from nova import exception
-from nova import flags
-from nova.openstack.common import cfg
-from nova.openstack.common import jsonutils
-from nova.openstack.common import log as logging
-from nova import utils
-import nova.volume.driver
-
-
-LOG = logging.getLogger(__name__)
-
-san_opts = [
- cfg.BoolOpt('san_thin_provision',
- default=True,
- help='Use thin provisioning for SAN volumes?'),
- cfg.StrOpt('san_ip',
- default='',
- help='IP address of SAN controller'),
- cfg.StrOpt('san_login',
- default='admin',
- help='Username for SAN controller'),
- cfg.StrOpt('san_password',
- default='',
- help='Password for SAN controller'),
- cfg.StrOpt('san_private_key',
- default='',
- help='Filename of private key to use for SSH authentication'),
- cfg.StrOpt('san_clustername',
- default='',
- help='Cluster name to use for creating volumes'),
- cfg.IntOpt('san_ssh_port',
- default=22,
- help='SSH port to use with SAN'),
- cfg.BoolOpt('san_is_local',
- default=False,
- help='Execute commands locally instead of over SSH; '
- 'use if the volume service is running on the SAN device'),
- cfg.StrOpt('san_zfs_volume_base',
- default='rpool/',
- help='The ZFS path under which to create zvols for volumes.'),
- ]
-
-FLAGS = flags.FLAGS
-FLAGS.register_opts(san_opts)
-
-
-class SanISCSIDriver(nova.volume.driver.ISCSIDriver):
- """Base class for SAN-style storage volumes
-
- A SAN-style storage value is 'different' because the volume controller
- probably won't run on it, so we need to access is over SSH or another
- remote protocol.
- """
-
- def __init__(self, execute=None, *args, **kwargs):
- if execute is None:
- execute = self._execute
- super(SanISCSIDriver, self).__init__(execute, *args, **kwargs)
- self.run_local = FLAGS.san_is_local
-
- def _build_iscsi_target_name(self, volume):
- return "%s%s" % (FLAGS.iscsi_target_prefix, volume['name'])
-
- def _connect_to_ssh(self):
- ssh = paramiko.SSHClient()
- #TODO(justinsb): We need a better SSH key policy
- ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
- if FLAGS.san_password:
- ssh.connect(FLAGS.san_ip,
- port=FLAGS.san_ssh_port,
- username=FLAGS.san_login,
- password=FLAGS.san_password)
- elif FLAGS.san_private_key:
- privatekeyfile = os.path.expanduser(FLAGS.san_private_key)
- # It sucks that paramiko doesn't support DSA keys
- privatekey = paramiko.RSAKey.from_private_key_file(privatekeyfile)
- ssh.connect(FLAGS.san_ip,
- port=FLAGS.san_ssh_port,
- username=FLAGS.san_login,
- pkey=privatekey)
- else:
- msg = _("Specify san_password or san_private_key")
- raise exception.InvalidInput(reason=msg)
- return ssh
-
- def _execute(self, *cmd, **kwargs):
- if self.run_local:
- return utils.execute(*cmd, **kwargs)
- else:
- check_exit_code = kwargs.pop('check_exit_code', True)
- command = ' '.join(cmd)
- return self._run_ssh(command, check_exit_code)
-
- def _run_ssh(self, command, check_exit_code=True):
- #TODO(justinsb): SSH connection caching (?)
- ssh = self._connect_to_ssh()
-
- #TODO(justinsb): Reintroduce the retry hack
- ret = utils.ssh_execute(ssh, command, check_exit_code=check_exit_code)
-
- ssh.close()
-
- return ret
-
- def ensure_export(self, context, volume):
- """Synchronously recreates an export for a logical volume."""
- pass
-
- def create_export(self, context, volume):
- """Exports the volume."""
- pass
-
- def remove_export(self, context, volume):
- """Removes an export for a logical volume."""
- pass
-
- def check_for_setup_error(self):
- """Returns an error if prerequisites aren't met."""
- if not self.run_local:
- if not (FLAGS.san_password or FLAGS.san_private_key):
- raise exception.InvalidInput(
- reason=_('Specify san_password or san_private_key'))
-
- # The san_ip must always be set, because we use it for the target
- if not (FLAGS.san_ip):
- raise exception.InvalidInput(reason=_("san_ip must be set"))
-
-
-def _collect_lines(data):
- """Split lines from data into an array, trimming them """
- matches = []
- for line in data.splitlines():
- match = line.strip()
- matches.append(match)
-
- return matches
-
-
-def _get_prefixed_values(data, prefix):
- """Collect lines which start with prefix; with trimming"""
- matches = []
- for line in data.splitlines():
- line = line.strip()
- if line.startswith(prefix):
- match = line[len(prefix):]
- match = match.strip()
- matches.append(match)
-
- return matches
-
-
-class SolarisISCSIDriver(SanISCSIDriver):
- """Executes commands relating to Solaris-hosted ISCSI volumes.
-
- Basic setup for a Solaris iSCSI server:
-
- pkg install storage-server SUNWiscsit
-
- svcadm enable stmf
-
- svcadm enable -r svc:/network/iscsi/target:default
-
- pfexec itadm create-tpg e1000g0 ${MYIP}
-
- pfexec itadm create-target -t e1000g0
-
-
- Then grant the user that will be logging on lots of permissions.
- I'm not sure exactly which though:
-
- zfs allow justinsb create,mount,destroy rpool
-
- usermod -P'File System Management' justinsb
-
- usermod -P'Primary Administrator' justinsb
-
- Also make sure you can login using san_login & san_password/san_private_key
- """
-
- def _execute(self, *cmd, **kwargs):
- new_cmd = ['pfexec']
- new_cmd.extend(cmd)
- return super(SolarisISCSIDriver, self)._execute(*new_cmd,
- **kwargs)
-
- def _view_exists(self, luid):
- (out, _err) = self._execute('/usr/sbin/stmfadm',
- 'list-view', '-l', luid,
- check_exit_code=False)
- if "no views found" in out:
- return False
-
- if "View Entry:" in out:
- return True
-
- msg = _("Cannot parse list-view output: %s") % out
- raise exception.VolumeBackendAPIException(data=msg)
-
- def _get_target_groups(self):
- """Gets list of target groups from host."""
- (out, _err) = self._execute('/usr/sbin/stmfadm', 'list-tg')
- matches = _get_prefixed_values(out, 'Target group: ')
- LOG.debug("target_groups=%s" % matches)
- return matches
-
- def _target_group_exists(self, target_group_name):
- return target_group_name not in self._get_target_groups()
-
- def _get_target_group_members(self, target_group_name):
- (out, _err) = self._execute('/usr/sbin/stmfadm',
- 'list-tg', '-v', target_group_name)
- matches = _get_prefixed_values(out, 'Member: ')
- LOG.debug("members of %s=%s" % (target_group_name, matches))
- return matches
-
- def _is_target_group_member(self, target_group_name, iscsi_target_name):
- return iscsi_target_name in (
- self._get_target_group_members(target_group_name))
-
- def _get_iscsi_targets(self):
- (out, _err) = self._execute('/usr/sbin/itadm', 'list-target')
- matches = _collect_lines(out)
-
- # Skip header
- if len(matches) != 0:
- assert 'TARGET NAME' in matches[0]
- matches = matches[1:]
-
- targets = []
- for line in matches:
- items = line.split()
- assert len(items) == 3
- targets.append(items[0])
-
- LOG.debug("_get_iscsi_targets=%s" % (targets))
- return targets
-
- def _iscsi_target_exists(self, iscsi_target_name):
- return iscsi_target_name in self._get_iscsi_targets()
-
- def _build_zfs_poolname(self, volume):
- zfs_poolname = '%s%s' % (FLAGS.san_zfs_volume_base, volume['name'])
- return zfs_poolname
-
- def create_volume(self, volume):
- """Creates a volume."""
- if int(volume['size']) == 0:
- sizestr = '100M'
- else:
- sizestr = '%sG' % volume['size']
-
- zfs_poolname = self._build_zfs_poolname(volume)
-
- # Create a zfs volume
- cmd = ['/usr/sbin/zfs', 'create']
- if FLAGS.san_thin_provision:
- cmd.append('-s')
- cmd.extend(['-V', sizestr])
- cmd.append(zfs_poolname)
- self._execute(*cmd)
-
- def _get_luid(self, volume):
- zfs_poolname = self._build_zfs_poolname(volume)
- zvol_name = '/dev/zvol/rdsk/%s' % zfs_poolname
-
- (out, _err) = self._execute('/usr/sbin/sbdadm', 'list-lu')
-
- lines = _collect_lines(out)
-
- # Strip headers
- if len(lines) >= 1:
- if lines[0] == '':
- lines = lines[1:]
-
- if len(lines) >= 4:
- assert 'Found' in lines[0]
- assert '' == lines[1]
- assert 'GUID' in lines[2]
- assert '------------------' in lines[3]
-
- lines = lines[4:]
-
- for line in lines:
- items = line.split()
- assert len(items) == 3
- if items[2] == zvol_name:
- luid = items[0].strip()
- return luid
-
- msg = _('LUID not found for %(zfs_poolname)s. '
- 'Output=%(out)s') % locals()
- raise exception.VolumeBackendAPIException(data=msg)
-
- def _is_lu_created(self, volume):
- luid = self._get_luid(volume)
- return luid
-
- def delete_volume(self, volume):
- """Deletes a volume."""
- zfs_poolname = self._build_zfs_poolname(volume)
- self._execute('/usr/sbin/zfs', 'destroy', zfs_poolname)
-
- def local_path(self, volume):
- # TODO(justinsb): Is this needed here?
- escaped_group = FLAGS.volume_group.replace('-', '--')
- escaped_name = volume['name'].replace('-', '--')
- return "/dev/mapper/%s-%s" % (escaped_group, escaped_name)
-
- def ensure_export(self, context, volume):
- """Synchronously recreates an export for a logical volume."""
- #TODO(justinsb): On bootup, this is called for every volume.
- # It then runs ~5 SSH commands for each volume,
- # most of which fetch the same info each time
- # This makes initial start stupid-slow
- return self._do_export(volume, force_create=False)
-
- def create_export(self, context, volume):
- return self._do_export(volume, force_create=True)
-
- def _do_export(self, volume, force_create):
- # Create a Logical Unit (LU) backed by the zfs volume
- zfs_poolname = self._build_zfs_poolname(volume)
-
- if force_create or not self._is_lu_created(volume):
- zvol_name = '/dev/zvol/rdsk/%s' % zfs_poolname
- self._execute('/usr/sbin/sbdadm', 'create-lu', zvol_name)
-
- luid = self._get_luid(volume)
- iscsi_name = self._build_iscsi_target_name(volume)
- target_group_name = 'tg-%s' % volume['name']
-
- # Create an iSCSI target, mapped to just this volume
- if force_create or not self._target_group_exists(target_group_name):
- self._execute('/usr/sbin/stmfadm', 'create-tg', target_group_name)
-
- # Yes, we add the initiatior before we create it!
- # Otherwise, it complains that the target is already active
- if force_create or not self._is_target_group_member(target_group_name,
- iscsi_name):
- self._execute('/usr/sbin/stmfadm',
- 'add-tg-member', '-g', target_group_name, iscsi_name)
-
- if force_create or not self._iscsi_target_exists(iscsi_name):
- self._execute('/usr/sbin/itadm', 'create-target', '-n', iscsi_name)
-
- if force_create or not self._view_exists(luid):
- self._execute('/usr/sbin/stmfadm',
- 'add-view', '-t', target_group_name, luid)
-
- #TODO(justinsb): Is this always 1? Does it matter?
- iscsi_portal_interface = '1'
- iscsi_portal = FLAGS.san_ip + ":3260," + iscsi_portal_interface
-
- db_update = {}
- db_update['provider_location'] = ("%s %s" %
- (iscsi_portal,
- iscsi_name))
-
- return db_update
-
- def remove_export(self, context, volume):
- """Removes an export for a logical volume."""
-
- # This is the reverse of _do_export
- luid = self._get_luid(volume)
- iscsi_name = self._build_iscsi_target_name(volume)
- target_group_name = 'tg-%s' % volume['name']
-
- if self._view_exists(luid):
- self._execute('/usr/sbin/stmfadm', 'remove-view', '-l', luid, '-a')
-
- if self._iscsi_target_exists(iscsi_name):
- self._execute('/usr/sbin/stmfadm', 'offline-target', iscsi_name)
- self._execute('/usr/sbin/itadm', 'delete-target', iscsi_name)
-
- # We don't delete the tg-member; we delete the whole tg!
-
- if self._target_group_exists(target_group_name):
- self._execute('/usr/sbin/stmfadm', 'delete-tg', target_group_name)
-
- if self._is_lu_created(volume):
- self._execute('/usr/sbin/sbdadm', 'delete-lu', luid)
-
-
-class HpSanISCSIDriver(SanISCSIDriver):
- """Executes commands relating to HP/Lefthand SAN ISCSI volumes.
-
- We use the CLIQ interface, over SSH.
-
- Rough overview of CLIQ commands used:
-
- :createVolume: (creates the volume)
-
- :getVolumeInfo: (to discover the IQN etc)
-
- :getClusterInfo: (to discover the iSCSI target IP address)
-
- :assignVolumeChap: (exports it with CHAP security)
-
- The 'trick' here is that the HP SAN enforces security by default, so
- normally a volume mount would need both to configure the SAN in the volume
- layer and do the mount on the compute layer. Multi-layer operations are
- not catered for at the moment in the nova architecture, so instead we
- share the volume using CHAP at volume creation time. Then the mount need
- only use those CHAP credentials, so can take place exclusively in the
- compute layer.
- """
-
- def _cliq_run(self, verb, cliq_args):
- """Runs a CLIQ command over SSH, without doing any result parsing"""
- cliq_arg_strings = []
- for k, v in cliq_args.items():
- cliq_arg_strings.append(" %s=%s" % (k, v))
- cmd = verb + ''.join(cliq_arg_strings)
-
- return self._run_ssh(cmd)
-
- def _cliq_run_xml(self, verb, cliq_args, check_cliq_result=True):
- """Runs a CLIQ command over SSH, parsing and checking the output"""
- cliq_args['output'] = 'XML'
- (out, _err) = self._cliq_run(verb, cliq_args)
-
- LOG.debug(_("CLIQ command returned %s"), out)
-
- result_xml = etree.fromstring(out)
- if check_cliq_result:
- response_node = result_xml.find("response")
- if response_node is None:
- msg = (_("Malformed response to CLIQ command "
- "%(verb)s %(cliq_args)s. Result=%(out)s") %
- locals())
- raise exception.VolumeBackendAPIException(data=msg)
-
- result_code = response_node.attrib.get("result")
-
- if result_code != "0":
- msg = (_("Error running CLIQ command %(verb)s %(cliq_args)s. "
- " Result=%(out)s") %
- locals())
- raise exception.VolumeBackendAPIException(data=msg)
-
- return result_xml
-
- def _cliq_get_cluster_info(self, cluster_name):
- """Queries for info about the cluster (including IP)"""
- cliq_args = {}
- cliq_args['clusterName'] = cluster_name
- cliq_args['searchDepth'] = '1'
- cliq_args['verbose'] = '0'
-
- result_xml = self._cliq_run_xml("getClusterInfo", cliq_args)
-
- return result_xml
-
- def _cliq_get_cluster_vip(self, cluster_name):
- """Gets the IP on which a cluster shares iSCSI volumes"""
- cluster_xml = self._cliq_get_cluster_info(cluster_name)
-
- vips = []
- for vip in cluster_xml.findall("response/cluster/vip"):
- vips.append(vip.attrib.get('ipAddress'))
-
- if len(vips) == 1:
- return vips[0]
-
- _xml = etree.tostring(cluster_xml)
- msg = (_("Unexpected number of virtual ips for cluster "
- " %(cluster_name)s. Result=%(_xml)s") %
- locals())
- raise exception.VolumeBackendAPIException(data=msg)
-
- def _cliq_get_volume_info(self, volume_name):
- """Gets the volume info, including IQN"""
- cliq_args = {}
- cliq_args['volumeName'] = volume_name
- result_xml = self._cliq_run_xml("getVolumeInfo", cliq_args)
-
- # Result looks like this:
- #<gauche version="1.0">
- # <response description="Operation succeeded." name="CliqSuccess"
- # processingTime="87" result="0">
- # <volume autogrowPages="4" availability="online" blockSize="1024"
- # bytesWritten="0" checkSum="false" clusterName="Cluster01"
- # created="2011-02-08T19:56:53Z" deleting="false" description=""
- # groupName="Group01" initialQuota="536870912" isPrimary="true"
- # iscsiIqn="iqn.2003-10.com.lefthandnetworks:group01:25366:vol-b"
- # maxSize="6865387257856" md5="9fa5c8b2cca54b2948a63d833097e1ca"
- # minReplication="1" name="vol-b" parity="0" replication="2"
- # reserveQuota="536870912" scratchQuota="4194304"
- # serialNumber="9fa5c8b2cca54b2948a63d833097e1ca0000000000006316"
- # size="1073741824" stridePages="32" thinProvision="true">
- # <status description="OK" value="2"/>
- # <permission access="rw"
- # authGroup="api-34281B815713B78-(trimmed)51ADD4B7030853AA7"
- # chapName="chapusername" chapRequired="true" id="25369"
- # initiatorSecret="" iqn="" iscsiEnabled="true"
- # loadBalance="true" targetSecret="supersecret"/>
- # </volume>
- # </response>
- #</gauche>
-
- # Flatten the nodes into a dictionary; use prefixes to avoid collisions
- volume_attributes = {}
-
- volume_node = result_xml.find("response/volume")
- for k, v in volume_node.attrib.items():
- volume_attributes["volume." + k] = v
-
- status_node = volume_node.find("status")
- if not status_node is None:
- for k, v in status_node.attrib.items():
- volume_attributes["status." + k] = v
-
- # We only consider the first permission node
- permission_node = volume_node.find("permission")
- if not permission_node is None:
- for k, v in status_node.attrib.items():
- volume_attributes["permission." + k] = v
-
- LOG.debug(_("Volume info: %(volume_name)s => %(volume_attributes)s") %
- locals())
- return volume_attributes
-
- def create_volume(self, volume):
- """Creates a volume."""
- cliq_args = {}
- cliq_args['clusterName'] = FLAGS.san_clustername
- #TODO(justinsb): Should we default to inheriting thinProvision?
- cliq_args['thinProvision'] = '1' if FLAGS.san_thin_provision else '0'
- cliq_args['volumeName'] = volume['name']
- if int(volume['size']) == 0:
- cliq_args['size'] = '100MB'
- else:
- cliq_args['size'] = '%sGB' % volume['size']
-
- self._cliq_run_xml("createVolume", cliq_args)
-
- volume_info = self._cliq_get_volume_info(volume['name'])
- cluster_name = volume_info['volume.clusterName']
- iscsi_iqn = volume_info['volume.iscsiIqn']
-
- #TODO(justinsb): Is this always 1? Does it matter?
- cluster_interface = '1'
-
- cluster_vip = self._cliq_get_cluster_vip(cluster_name)
- iscsi_portal = cluster_vip + ":3260," + cluster_interface
-
- model_update = {}
-
- # NOTE(jdg): LH volumes always at lun 0 ?
- model_update['provider_location'] = ("%s %s %s" %
- (iscsi_portal,
- iscsi_iqn,
- 0))
-
- return model_update
-
- def create_volume_from_snapshot(self, volume, snapshot):
- """Creates a volume from a snapshot."""
- raise NotImplementedError()
-
- def create_snapshot(self, snapshot):
- """Creates a snapshot."""
- raise NotImplementedError()
-
- def delete_volume(self, volume):
- """Deletes a volume."""
- cliq_args = {}
- cliq_args['volumeName'] = volume['name']
- cliq_args['prompt'] = 'false' # Don't confirm
-
- self._cliq_run_xml("deleteVolume", cliq_args)
-
- def local_path(self, volume):
- # TODO(justinsb): Is this needed here?
- msg = _("local_path not supported")
- raise exception.VolumeBackendAPIException(data=msg)
-
- def initialize_connection(self, volume, connector):
- """Assigns the volume to a server.
-
- Assign any created volume to a compute node/host so that it can be
- used from that host. HP VSA requires a volume to be assigned
- to a server.
-
- This driver returns a driver_volume_type of 'iscsi'.
- The format of the driver data is defined in _get_iscsi_properties.
- Example return value::
-
- {
- 'driver_volume_type': 'iscsi'
- 'data': {
- 'target_discovered': True,
- 'target_iqn': 'iqn.2010-10.org.openstack:volume-00000001',
- 'target_portal': '127.0.0.1:3260',
- 'volume_id': 1,
- }
- }
-
- """
- cliq_args = {}
- cliq_args['volumeName'] = volume['name']
- cliq_args['serverName'] = connector['host']
- self._cliq_run_xml("assignVolumeToServer", cliq_args)
-
- iscsi_properties = self._get_iscsi_properties(volume)
- return {
- 'driver_volume_type': 'iscsi',
- 'data': iscsi_properties
- }
-
- def terminate_connection(self, volume, connector):
- """Unassign the volume from the host."""
- cliq_args = {}
- cliq_args['volumeName'] = volume['name']
- cliq_args['serverName'] = connector['host']
- self._cliq_run_xml("unassignVolumeToServer", cliq_args)
diff --git a/nova/volume/solidfire.py b/nova/volume/solidfire.py
deleted file mode 100644
index 8c4ca66845..0000000000
--- a/nova/volume/solidfire.py
+++ /dev/null
@@ -1,424 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2011 Justin Santa Barbara
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""
-Drivers for san-stored volumes.
-
-The unique thing about a SAN is that we don't expect that we can run the volume
-controller on the SAN hardware. We expect to access it over SSH or some API.
-"""
-
-import base64
-import httplib
-import json
-import random
-import socket
-import string
-import uuid
-
-from nova import exception
-from nova import flags
-from nova.openstack.common import cfg
-from nova.openstack.common import log as logging
-from nova.volume.san import SanISCSIDriver
-
-
-LOG = logging.getLogger(__name__)
-
-sf_opts = [
- cfg.BoolOpt('sf_emulate_512',
- default=True,
- help='Set 512 byte emulation on volume creation; '),
-
- cfg.StrOpt('sf_mvip',
- default='',
- help='IP address of SolidFire MVIP'),
-
- cfg.StrOpt('sf_login',
- default='admin',
- help='Username for SF Cluster Admin'),
-
- cfg.StrOpt('sf_password',
- default='',
- help='Password for SF Cluster Admin'),
-
- cfg.BoolOpt('sf_allow_tenant_qos',
- default=True,
- help='Allow tenants to specify QOS on create'), ]
-
-FLAGS = flags.FLAGS
-FLAGS.register_opts(sf_opts)
-
-
-class SolidFire(SanISCSIDriver):
-
- sf_qos_dict = {'slow': {'minIOPS': 100,
- 'maxIOPS': 200,
- 'burstIOPS': 200},
- 'medium': {'minIOPS': 200,
- 'maxIOPS': 400,
- 'burstIOPS': 400},
- 'fast': {'minIOPS': 500,
- 'maxIOPS': 1000,
- 'burstIOPS': 1000},
- 'performant': {'minIOPS': 2000,
- 'maxIOPS': 4000,
- 'burstIOPS': 4000},
- 'off': None}
-
- def __init__(self, *args, **kwargs):
- super(SolidFire, self).__init__(*args, **kwargs)
-
- def _issue_api_request(self, method_name, params):
- """All API requests to SolidFire device go through this method
-
- Simple json-rpc web based API calls.
- each call takes a set of paramaters (dict)
- and returns results in a dict as well.
- """
-
- host = FLAGS.san_ip
- # For now 443 is the only port our server accepts requests on
- port = 443
-
- # NOTE(john-griffith): Probably don't need this, but the idea is
- # we provide a request_id so we can correlate
- # responses with requests
- request_id = int(uuid.uuid4()) # just generate a random number
-
- cluster_admin = FLAGS.san_login
- cluster_password = FLAGS.san_password
-
- command = {'method': method_name,
- 'id': request_id}
-
- if params is not None:
- command['params'] = params
-
- payload = json.dumps(command, ensure_ascii=False)
- payload.encode('utf-8')
- # we use json-rpc, webserver needs to see json-rpc in header
- header = {'Content-Type': 'application/json-rpc; charset=utf-8'}
-
- if cluster_password is not None:
- # base64.encodestring includes a newline character
- # in the result, make sure we strip it off
- auth_key = base64.encodestring('%s:%s' % (cluster_admin,
- cluster_password))[:-1]
- header['Authorization'] = 'Basic %s' % auth_key
-
- LOG.debug(_("Payload for SolidFire API call: %s"), payload)
- connection = httplib.HTTPSConnection(host, port)
- connection.request('POST', '/json-rpc/1.0', payload, header)
- response = connection.getresponse()
- data = {}
-
- if response.status != 200:
- connection.close()
- raise exception.SolidFireAPIException(status=response.status)
-
- else:
- data = response.read()
- try:
- data = json.loads(data)
-
- except (TypeError, ValueError), exc:
- connection.close()
- msg = _("Call to json.loads() raised an exception: %s") % exc
- raise exception.SfJsonEncodeFailure(msg)
-
- connection.close()
-
- LOG.debug(_("Results of SolidFire API call: %s"), data)
- return data
-
- def _get_volumes_by_sfaccount(self, account_id):
- params = {'accountID': account_id}
- data = self._issue_api_request('ListVolumesForAccount', params)
- if 'result' in data:
- return data['result']['volumes']
-
- def _get_sfaccount_by_name(self, sf_account_name):
- sfaccount = None
- params = {'username': sf_account_name}
- data = self._issue_api_request('GetAccountByName', params)
- if 'result' in data and 'account' in data['result']:
- LOG.debug(_('Found solidfire account: %s'), sf_account_name)
- sfaccount = data['result']['account']
- return sfaccount
-
- def _create_sfaccount(self, nova_project_id):
- """Create account on SolidFire device if it doesn't already exist.
-
- We're first going to check if the account already exits, if it does
- just return it. If not, then create it.
- """
-
- sf_account_name = socket.getfqdn() + '-' + nova_project_id
- sfaccount = self._get_sfaccount_by_name(sf_account_name)
- if sfaccount is None:
- LOG.debug(_('solidfire account: %s does not exist, create it...'),
- sf_account_name)
- chap_secret = self._generate_random_string(12)
- params = {'username': sf_account_name,
- 'initiatorSecret': chap_secret,
- 'targetSecret': chap_secret,
- 'attributes': {}}
- data = self._issue_api_request('AddAccount', params)
- if 'result' in data:
- sfaccount = self._get_sfaccount_by_name(sf_account_name)
-
- return sfaccount
-
- def _get_cluster_info(self):
- params = {}
- data = self._issue_api_request('GetClusterInfo', params)
- if 'result' not in data:
- raise exception.SolidFireAPIDataException(data=data)
-
- return data['result']
-
- def _do_export(self, volume):
- """Gets the associated account, retrieves CHAP info and updates."""
-
- sfaccount_name = '%s-%s' % (socket.getfqdn(), volume['project_id'])
- sfaccount = self._get_sfaccount_by_name(sfaccount_name)
-
- model_update = {}
- model_update['provider_auth'] = ('CHAP %s %s'
- % (sfaccount['username'],
- sfaccount['targetSecret']))
-
- return model_update
-
- def _generate_random_string(self, length):
- """Generates random_string to use for CHAP password."""
-
- char_set = string.ascii_uppercase + string.digits
- return ''.join(random.sample(char_set, length))
-
- def _do_volume_create(self, project_id, params):
- cluster_info = self._get_cluster_info()
- iscsi_portal = cluster_info['clusterInfo']['svip'] + ':3260'
- sfaccount = self._create_sfaccount(project_id)
- chap_secret = sfaccount['targetSecret']
-
- params['accountID'] = sfaccount['accountID']
- data = self._issue_api_request('CreateVolume', params)
-
- if 'result' not in data or 'volumeID' not in data['result']:
- raise exception.SolidFireAPIDataException(data=data)
-
- volume_id = data['result']['volumeID']
-
- volume_list = self._get_volumes_by_sfaccount(sfaccount['accountID'])
-
- iqn = None
- for v in volume_list:
- if v['volumeID'] == volume_id:
- iqn = v['iqn']
- break
-
- model_update = {}
-
- # NOTE(john-griffith): SF volumes are always at lun 0
- model_update['provider_location'] = ('%s %s %s'
- % (iscsi_portal, iqn, 0))
- model_update['provider_auth'] = ('CHAP %s %s'
- % (sfaccount['username'],
- chap_secret))
-
- return model_update
-
- def create_volume(self, volume):
- """Create volume on SolidFire device.
-
- The account is where CHAP settings are derived from, volume is
- created and exported. Note that the new volume is immediately ready
- for use.
-
- One caveat here is that an existing user account must be specified
- in the API call to create a new volume. We use a set algorithm to
- determine account info based on passed in nova volume object. First
- we check to see if the account already exists (and use it), or if it
- does not already exist, we'll go ahead and create it.
-
- For now, we're just using very basic settings, QOS is
- turned off, 512 byte emulation is off etc. Will be
- looking at extensions for these things later, or
- this module can be hacked to suit needs.
- """
- GB = 1048576 * 1024
- slice_count = 1
- attributes = {}
- qos = {}
- qos_keys = ['minIOPS', 'maxIOPS', 'burstIOPS']
- valid_presets = self.sf_qos_dict.keys()
-
- if FLAGS.sf_allow_tenant_qos and \
- volume.get('volume_metadata')is not None:
-
- #First look to see if they included a preset
- presets = [i.value for i in volume.get('volume_metadata')
- if i.key == 'sf-qos' and i.value in valid_presets]
- if len(presets) > 0:
- if len(presets) > 1:
- LOG.warning(_('More than one valid preset was '
- 'detected, using %s') % presets[0])
- qos = self.sf_qos_dict[presets[0]]
- else:
- #if there was no preset, look for explicit settings
- for i in volume.get('volume_metadata'):
- if i.key in qos_keys:
- qos[i.key] = int(i.value)
-
- params = {'name': 'OS-VOLID-%s' % volume['id'],
- 'accountID': None,
- 'sliceCount': slice_count,
- 'totalSize': volume['size'] * GB,
- 'enable512e': FLAGS.sf_emulate_512,
- 'attributes': attributes,
- 'qos': qos}
-
- return self._do_volume_create(volume['project_id'], params)
-
- def delete_volume(self, volume, is_snapshot=False):
- """Delete SolidFire Volume from device.
-
- SolidFire allows multipe volumes with same name,
- volumeID is what's guaranteed unique.
-
- """
-
- LOG.debug(_("Enter SolidFire delete_volume..."))
- sf_account_name = socket.getfqdn() + '-' + volume['project_id']
- sfaccount = self._get_sfaccount_by_name(sf_account_name)
- if sfaccount is None:
- raise exception.SfAccountNotFound(account_name=sf_account_name)
-
- params = {'accountID': sfaccount['accountID']}
- data = self._issue_api_request('ListVolumesForAccount', params)
- if 'result' not in data:
- raise exception.SolidFireAPIDataException(data=data)
-
- if is_snapshot:
- seek = 'OS-SNAPID-%s' % (volume['id'])
- else:
- seek = 'OS-VOLID-%s' % volume['id']
- #params = {'name': 'OS-VOLID-:%s' % volume['id'],
-
- found_count = 0
- volid = -1
- for v in data['result']['volumes']:
- if v['name'] == seek:
- found_count += 1
- volid = v['volumeID']
-
- if found_count == 0:
- raise exception.VolumeNotFound(volume_id=volume['id'])
-
- if found_count > 1:
- LOG.debug(_("Deleting volumeID: %s"), volid)
- raise exception.DuplicateSfVolumeNames(vol_name=volume['id'])
-
- params = {'volumeID': volid}
- data = self._issue_api_request('DeleteVolume', params)
- if 'result' not in data:
- raise exception.SolidFireAPIDataException(data=data)
-
- LOG.debug(_("Leaving SolidFire delete_volume"))
-
- def ensure_export(self, context, volume):
- LOG.debug(_("Executing SolidFire ensure_export..."))
- return self._do_export(volume)
-
- def create_export(self, context, volume):
- LOG.debug(_("Executing SolidFire create_export..."))
- return self._do_export(volume)
-
- def _do_create_snapshot(self, snapshot, snapshot_name):
- """Creates a snapshot."""
- LOG.debug(_("Enter SolidFire create_snapshot..."))
- sf_account_name = socket.getfqdn() + '-' + snapshot['project_id']
- sfaccount = self._get_sfaccount_by_name(sf_account_name)
- if sfaccount is None:
- raise exception.SfAccountNotFound(account_name=sf_account_name)
-
- params = {'accountID': sfaccount['accountID']}
- data = self._issue_api_request('ListVolumesForAccount', params)
- if 'result' not in data:
- raise exception.SolidFireAPIDataException(data=data)
-
- found_count = 0
- volid = -1
- for v in data['result']['volumes']:
- if v['name'] == 'OS-VOLID-%s' % snapshot['volume_id']:
- found_count += 1
- volid = v['volumeID']
-
- if found_count == 0:
- raise exception.VolumeNotFound(volume_id=snapshot['volume_id'])
- if found_count != 1:
- raise exception.DuplicateSfVolumeNames(
- vol_name='OS-VOLID-%s' % snapshot['volume_id'])
-
- params = {'volumeID': int(volid),
- 'name': snapshot_name,
- 'attributes': {'OriginatingVolume': volid}}
-
- data = self._issue_api_request('CloneVolume', params)
- if 'result' not in data:
- raise exception.SolidFireAPIDataException(data=data)
-
- return (data, sfaccount)
-
- def delete_snapshot(self, snapshot):
- self.delete_volume(snapshot, True)
-
- def create_snapshot(self, snapshot):
- snapshot_name = 'OS-SNAPID-%s' % (
- snapshot['id'])
- (data, sf_account) = self._do_create_snapshot(snapshot, snapshot_name)
-
- def create_volume_from_snapshot(self, volume, snapshot):
- cluster_info = self._get_cluster_info()
- iscsi_portal = cluster_info['clusterInfo']['svip'] + ':3260'
- sfaccount = self._create_sfaccount(snapshot['project_id'])
- chap_secret = sfaccount['targetSecret']
- snapshot_name = 'OS-VOLID-%s' % volume['id']
-
- (data, sf_account) = self._do_create_snapshot(snapshot, snapshot_name)
-
- if 'result' not in data or 'volumeID' not in data['result']:
- raise exception.SolidFireAPIDataException(data=data)
-
- volume_id = data['result']['volumeID']
- volume_list = self._get_volumes_by_sfaccount(sf_account['accountID'])
- iqn = None
- for v in volume_list:
- if v['volumeID'] == volume_id:
- iqn = v['iqn']
- break
-
- model_update = {}
-
- # NOTE(john-griffith): SF volumes are always at lun 0
- model_update['provider_location'] = ('%s %s %s'
- % (iscsi_portal, iqn, 0))
- model_update['provider_auth'] = ('CHAP %s %s'
- % (sfaccount['username'],
- chap_secret))
- return model_update
diff --git a/nova/volume/storwize_svc.py b/nova/volume/storwize_svc.py
deleted file mode 100644
index 52a7350184..0000000000
--- a/nova/volume/storwize_svc.py
+++ /dev/null
@@ -1,1233 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright (c) 2012 IBM, Inc.
-# Copyright (c) 2012 OpenStack LLC.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# Authors:
-# Ronen Kat <ronenkat@il.ibm.com>
-# Avishay Traeger <avishay@il.ibm.com>
-
-"""
-Volume driver for IBM Storwize V7000 and SVC storage systems.
-
-Notes:
-1. If you specify both a password and a key file, this driver will use the
- key file only.
-2. When using a key file for authentication, it is up to the user or
- system administrator to store the private key in a safe manner.
-3. The defaults for creating volumes are "-rsize 2% -autoexpand
- -grainsize 256 -warning 0". These can be changed in the configuration
- file or by using volume types(recommended only for advanced users).
-
-Limitations:
-1. The driver was not tested with SVC or clustered configurations of Storwize
- V7000.
-2. The driver expects CLI output in English, error messages may be in a
- localized format.
-"""
-
-import random
-import re
-import string
-import time
-
-from nova import exception
-from nova import flags
-from nova.openstack.common import cfg
-from nova.openstack.common import excutils
-from nova.openstack.common import log as logging
-from nova.volume import san
-
-LOG = logging.getLogger(__name__)
-
-storwize_svc_opts = [
- cfg.StrOpt('storwize_svc_volpool_name',
- default='volpool',
- help='Storage system storage pool for volumes'),
- cfg.StrOpt('storwize_svc_vol_rsize',
- default='2%',
- help='Storage system space-efficiency parameter for volumes'),
- cfg.StrOpt('storwize_svc_vol_warning',
- default='0',
- help='Storage system threshold for volume capacity warnings'),
- cfg.BoolOpt('storwize_svc_vol_autoexpand',
- default=True,
- help='Storage system autoexpand parameter for volumes '
- '(True/False)'),
- cfg.StrOpt('storwize_svc_vol_grainsize',
- default='256',
- help='Storage system grain size parameter for volumes '
- '(32/64/128/256)'),
- cfg.BoolOpt('storwize_svc_vol_compression',
- default=False,
- help='Storage system compression option for volumes'),
- cfg.BoolOpt('storwize_svc_vol_easytier',
- default=True,
- help='Enable Easy Tier for volumes'),
- cfg.StrOpt('storwize_svc_flashcopy_timeout',
- default='120',
- help='Maximum number of seconds to wait for FlashCopy to be '
- 'prepared. Maximum value is 600 seconds (10 minutes).'),
-]
-
-FLAGS = flags.FLAGS
-FLAGS.register_opts(storwize_svc_opts)
-
-
-class StorwizeSVCDriver(san.SanISCSIDriver):
- """IBM Storwize V7000 and SVC iSCSI volume driver."""
-
- def __init__(self, *args, **kwargs):
- super(StorwizeSVCDriver, self).__init__(*args, **kwargs)
- self.iscsi_ipv4_conf = None
- self.iscsi_ipv6_conf = None
-
- # Build cleanup transaltion tables for hosts names to follow valid
- # host names for Storwizew V7000 and SVC storage systems.
- invalid_ch_in_host = ''
- for num in range(0, 128):
- ch = chr(num)
- if ((not ch.isalnum()) and (ch != ' ') and (ch != '.')
- and (ch != '-') and (ch != '_')):
- invalid_ch_in_host = invalid_ch_in_host + ch
- self._string_host_name_filter = string.maketrans(invalid_ch_in_host,
- '-' * len(invalid_ch_in_host))
-
- self._unicode_host_name_filter = dict((ord(unicode(char)), u'-')
- for char in invalid_ch_in_host)
-
- def _get_hdr_dic(self, header, row, delim):
- """Return CLI row data as a dictionary indexed by names from header.
-
- Create a dictionary object from the data row string using the header
- string. The strings are converted to columns using the delimiter in
- delim.
- """
-
- attributes = header.split(delim)
- values = row.split(delim)
- self._driver_assert(len(values) == len(attributes),
- _('_get_hdr_dic: attribute headers and values do not match.\n '
- 'Headers: %(header)s\n Values: %(row)s')
- % {'header': str(header),
- 'row': str(row)})
- dic = {}
- for attribute, value in map(None, attributes, values):
- dic[attribute] = value
- return dic
-
- def _driver_assert(self, assert_condition, exception_message):
- """Internal assertion mechanism for CLI output."""
- if not assert_condition:
- LOG.error(exception_message)
- raise exception.VolumeBackendAPIException(data=exception_message)
-
- def check_for_setup_error(self):
- """Check that we have all configuration details from the storage."""
-
- LOG.debug(_('enter: check_for_setup_error'))
-
- # Validate that the pool exists
- ssh_cmd = 'lsmdiskgrp -delim ! -nohdr'
- out, err = self._run_ssh(ssh_cmd)
- self._driver_assert(len(out) > 0,
- _('check_for_setup_error: failed with unexpected CLI output.\n '
- 'Command: %(cmd)s\n stdout: %(out)s\n stderr: %(err)s')
- % {'cmd': ssh_cmd,
- 'out': str(out),
- 'err': str(err)})
- search_text = '!%s!' % FLAGS.storwize_svc_volpool_name
- if search_text not in out:
- raise exception.InvalidInput(
- reason=(_('pool %s doesn\'t exist')
- % FLAGS.storwize_svc_volpool_name))
-
- storage_nodes = {}
- # Get the iSCSI names of the Storwize/SVC nodes
- ssh_cmd = 'svcinfo lsnode -delim !'
- out, err = self._run_ssh(ssh_cmd)
- self._driver_assert(len(out) > 0,
- _('check_for_setup_error: failed with unexpected CLI output.\n '
- 'Command: %(cmd)s\n stdout: %(out)s\n stderr: %(err)s')
- % {'cmd': ssh_cmd,
- 'out': str(out),
- 'err': str(err)})
-
- nodes = out.strip().split('\n')
- self._driver_assert(len(nodes) > 0,
- _('check_for_setup_error: failed with unexpected CLI output.\n '
- 'Command: %(cmd)s\n stdout: %(out)s\n stderr: %(err)s')
- % {'cmd': ssh_cmd,
- 'out': str(out),
- 'err': str(err)})
- header = nodes.pop(0)
- for node_line in nodes:
- try:
- node_data = self._get_hdr_dic(header, node_line, '!')
- except exception.VolumeBackendAPIException as e:
- with excutils.save_and_reraise_exception():
- LOG.error(_('check_for_setup_error: '
- 'failed with unexpected CLI output.\n '
- 'Command: %(cmd)s\n '
- 'stdout: %(out)s\n stderr: %(err)s\n')
- % {'cmd': ssh_cmd,
- 'out': str(out),
- 'err': str(err)})
- node = {}
- try:
- node['id'] = node_data['id']
- node['name'] = node_data['name']
- node['iscsi_name'] = node_data['iscsi_name']
- node['status'] = node_data['status']
- node['ipv4'] = []
- node['ipv6'] = []
- if node['iscsi_name'] != '':
- storage_nodes[node['id']] = node
- except KeyError as e:
- LOG.error(_('Did not find expected column name in '
- 'svcinfo lsnode: %s') % str(e))
- exception_message = (
- _('check_for_setup_error: Unexpected CLI output.\n '
- 'Details: %(msg)s\n'
- 'Command: %(cmd)s\n '
- 'stdout: %(out)s\n stderr: %(err)s')
- % {'msg': str(e),
- 'cmd': ssh_cmd,
- 'out': str(out),
- 'err': str(err)})
- raise exception.VolumeBackendAPIException(
- data=exception_message)
-
- # Get the iSCSI IP addresses of the Storwize/SVC nodes
- ssh_cmd = 'lsportip -delim !'
- out, err = self._run_ssh(ssh_cmd)
- self._driver_assert(len(out) > 0,
- _('check_for_setup_error: failed with unexpected CLI output.\n '
- 'Command: %(cmd)s\n '
- 'stdout: %(out)s\n stderr: %(err)s')
- % {'cmd': ssh_cmd,
- 'out': str(out),
- 'err': str(err)})
-
- portips = out.strip().split('\n')
- self._driver_assert(len(portips) > 0,
- _('check_for_setup_error: failed with unexpected CLI output.\n '
- 'Command: %(cmd)s\n stdout: %(out)s\n stderr: %(err)s')
- % {'cmd': ssh_cmd,
- 'out': str(out),
- 'err': str(err)})
- header = portips.pop(0)
- for portip_line in portips:
- try:
- port_data = self._get_hdr_dic(header, portip_line, '!')
- except exception.VolumeBackendAPIException as e:
- with excutils.save_and_reraise_exception():
- LOG.error(_('check_for_setup_error: '
- 'failed with unexpected CLI output.\n '
- 'Command: %(cmd)s\n '
- 'stdout: %(out)s\n stderr: %(err)s\n')
- % {'cmd': ssh_cmd,
- 'out': str(out),
- 'err': str(err)})
- try:
- port_node_id = port_data['node_id']
- port_ipv4 = port_data['IP_address']
- port_ipv6 = port_data['IP_address_6']
- except KeyError as e:
- LOG.error(_('Did not find expected column name in '
- 'lsportip: %s') % str(e))
- exception_message = (
- _('check_for_setup_error: Unexpected CLI output.\n '
- 'Details: %(msg)s\n'
- 'Command: %(cmd)s\n '
- 'stdout: %(out)s\n stderr: %(err)s')
- % {'msg': str(e),
- 'cmd': ssh_cmd,
- 'out': str(out),
- 'err': str(err)})
- raise exception.VolumeBackendAPIException(
- data=exception_message)
-
- if port_node_id in storage_nodes:
- node = storage_nodes[port_node_id]
- if len(port_ipv4) > 0:
- node['ipv4'].append(port_ipv4)
- if len(port_ipv6) > 0:
- node['ipv6'].append(port_ipv6)
- else:
- raise exception.VolumeBackendAPIException(
- data=_('check_for_setup_error: '
- 'fail to storage configuration: unknown '
- 'storage node %(node_id)s from CLI output.\n '
- 'stdout: %(out)s\n stderr: %(err)s\n')
- % {'node_id': port_node_id,
- 'out': str(out),
- 'err': str(err)})
-
- iscsi_ipv4_conf = []
- iscsi_ipv6_conf = []
- for node_key in storage_nodes:
- node = storage_nodes[node_key]
- if 'ipv4' in node and len(node['iscsi_name']) > 0:
- iscsi_ipv4_conf.append({'iscsi_name': node['iscsi_name'],
- 'ip': node['ipv4'],
- 'node_id': node['id']})
- if 'ipv6' in node and len(node['iscsi_name']) > 0:
- iscsi_ipv6_conf.append({'iscsi_name': node['iscsi_name'],
- 'ip': node['ipv6'],
- 'node_id': node['id']})
- if (len(node['ipv4']) == 0) and (len(node['ipv6']) == 0):
- raise exception.VolumeBackendAPIException(
- data=_('check_for_setup_error: '
- 'fail to storage configuration: storage '
- 'node %s has no IP addresses configured')
- % node['id'])
-
- # Make sure we have at least one IPv4 address with a iSCSI name
- # TODO(ronenkat) need to expand this to support IPv6
- self._driver_assert(len(iscsi_ipv4_conf) > 0,
- _('could not obtain IP address and iSCSI name from the storage. '
- 'Please verify that the storage is configured for iSCSI.\n '
- 'Storage nodes: %(nodes)s\n portips: %(portips)s')
- % {'nodes': nodes, 'portips': portips})
-
- self.iscsi_ipv4_conf = iscsi_ipv4_conf
- self.iscsi_ipv6_conf = iscsi_ipv6_conf
-
- LOG.debug(_('leave: check_for_setup_error'))
-
- def _check_num_perc(self, value):
- """Return True if value is either a number or a percentage."""
- if value.endswith('%'):
- value = value[0:-1]
- return value.isdigit()
-
- def _check_flags(self):
- """Ensure that the flags are set properly."""
-
- required_flags = ['san_ip', 'san_ssh_port', 'san_login',
- 'storwize_svc_volpool_name']
- for flag in required_flags:
- if not getattr(FLAGS, flag, None):
- raise exception.InvalidInput(
- reason=_('%s is not set') % flag)
-
- # Ensure that either password or keyfile were set
- if not (FLAGS.san_password or FLAGS.san_private_key):
- raise exception.InvalidInput(
- reason=_('Password or SSH private key is required for '
- 'authentication: set either san_password or '
- 'san_private_key option'))
-
- # Check that rsize is a number or percentage
- rsize = FLAGS.storwize_svc_vol_rsize
- if not self._check_num_perc(rsize) and (rsize != '-1'):
- raise exception.InvalidInput(
- reason=_('Illegal value specified for storwize_svc_vol_rsize: '
- 'set to either a number or a percentage'))
-
- # Check that warning is a number or percentage
- warning = FLAGS.storwize_svc_vol_warning
- if not self._check_num_perc(warning):
- raise exception.InvalidInput(
- reason=_('Illegal value specified for '
- 'storwize_svc_vol_warning: '
- 'set to either a number or a percentage'))
-
- # Check that grainsize is 32/64/128/256
- grainsize = FLAGS.storwize_svc_vol_grainsize
- if grainsize not in ['32', '64', '128', '256']:
- raise exception.InvalidInput(
- reason=_('Illegal value specified for '
- 'storwize_svc_vol_grainsize: set to either '
- '\'32\', \'64\', \'128\', or \'256\''))
-
- # Check that flashcopy_timeout is numeric and 32/64/128/256
- flashcopy_timeout = FLAGS.storwize_svc_flashcopy_timeout
- if not (flashcopy_timeout.isdigit() and int(flashcopy_timeout) > 0 and
- int(flashcopy_timeout) <= 600):
- raise exception.InvalidInput(
- reason=_('Illegal value %s specified for '
- 'storwize_svc_flashcopy_timeout: '
- 'valid values are between 0 and 600')
- % flashcopy_timeout)
-
- # Check that rsize is set
- volume_compression = FLAGS.storwize_svc_vol_compression
- if ((volume_compression == True) and
- (FLAGS.storwize_svc_vol_rsize == '-1')):
- raise exception.InvalidInput(
- reason=_('If compression is set to True, rsize must '
- 'also be set (not equal to -1)'))
-
- def do_setup(self, context):
- """Validate the flags."""
- LOG.debug(_('enter: do_setup'))
- self._check_flags()
- LOG.debug(_('leave: do_setup'))
-
- def create_volume(self, volume):
- """Create a new volume - uses the internal method."""
- return self._create_volume(volume, units='gb')
-
- def _create_volume(self, volume, units='gb'):
- """Create a new volume."""
-
- name = volume['name']
- model_update = None
-
- LOG.debug(_('enter: create_volume: volume %s ') % name)
-
- size = int(volume['size'])
-
- if FLAGS.storwize_svc_vol_autoexpand == True:
- autoex = '-autoexpand'
- else:
- autoex = ''
-
- if FLAGS.storwize_svc_vol_easytier == True:
- easytier = '-easytier on'
- else:
- easytier = '-easytier off'
-
- # Set space-efficient options
- if FLAGS.storwize_svc_vol_rsize.strip() == '-1':
- ssh_cmd_se_opt = ''
- else:
- ssh_cmd_se_opt = ('-rsize %(rsize)s %(autoex)s -warning %(warn)s' %
- {'rsize': FLAGS.storwize_svc_vol_rsize,
- 'autoex': autoex,
- 'warn': FLAGS.storwize_svc_vol_warning})
- if FLAGS.storwize_svc_vol_compression:
- ssh_cmd_se_opt = ssh_cmd_se_opt + ' -compressed'
- else:
- ssh_cmd_se_opt = ssh_cmd_se_opt + (' -grainsize %(grain)s' %
- {'grain': FLAGS.storwize_svc_vol_grainsize})
-
- ssh_cmd = ('mkvdisk -name %(name)s -mdiskgrp %(mdiskgrp)s '
- '-iogrp 0 -size %(size)s -unit '
- '%(unit)s %(easytier)s %(ssh_cmd_se_opt)s'
- % {'name': name,
- 'mdiskgrp': FLAGS.storwize_svc_volpool_name,
- 'size': size, 'unit': units, 'easytier': easytier,
- 'ssh_cmd_se_opt': ssh_cmd_se_opt})
- out, err = self._run_ssh(ssh_cmd)
- self._driver_assert(len(out.strip()) > 0,
- _('create volume %(name)s - did not find '
- 'success message in CLI output.\n '
- 'stdout: %(out)s\n stderr: %(err)s')
- % {'name': name, 'out': str(out), 'err': str(err)})
-
- # Ensure that the output is as expected
- match_obj = re.search('Virtual Disk, id \[([0-9]+)\], '
- 'successfully created', out)
- # Make sure we got a "successfully created" message with vdisk id
- self._driver_assert(match_obj is not None,
- _('create volume %(name)s - did not find '
- 'success message in CLI output.\n '
- 'stdout: %(out)s\n stderr: %(err)s')
- % {'name': name, 'out': str(out), 'err': str(err)})
-
- LOG.debug(_('leave: create_volume: volume %(name)s ') % {'name': name})
-
- def delete_volume(self, volume):
- self._delete_volume(volume, False)
-
- def _delete_volume(self, volume, force_opt):
- """Driver entry point for destroying existing volumes."""
-
- name = volume['name']
- LOG.debug(_('enter: delete_volume: volume %(name)s ') % {'name': name})
-
- if force_opt:
- force_flag = '-force'
- else:
- force_flag = ''
-
- volume_defined = self._is_volume_defined(name)
- # Try to delete volume only if found on the storage
- if volume_defined:
- out, err = self._run_ssh('rmvdisk %(force)s %(name)s'
- % {'force': force_flag,
- 'name': name})
- # No output should be returned from rmvdisk
- self._driver_assert(len(out.strip()) == 0,
- _('delete volume %(name)s - non empty output from CLI.\n '
- 'stdout: %(out)s\n stderr: %(err)s')
- % {'name': name,
- 'out': str(out),
- 'err': str(err)})
- else:
- # Log that volume does not exist
- LOG.info(_('warning: tried to delete volume %(name)s but '
- 'it does not exist.') % {'name': name})
-
- LOG.debug(_('leave: delete_volume: volume %(name)s ') % {'name': name})
-
- def ensure_export(self, context, volume):
- """Check that the volume exists on the storage.
-
- The system does not "export" volumes as a Linux iSCSI target does,
- and therefore we just check that the volume exists on the storage.
- """
- volume_defined = self._is_volume_defined(volume['name'])
- if not volume_defined:
- LOG.error(_('ensure_export: volume %s not found on storage')
- % volume['name'])
-
- def create_export(self, context, volume):
- model_update = None
- return model_update
-
- def remove_export(self, context, volume):
- pass
-
- def check_for_export(self, context, volume_id):
- raise NotImplementedError()
-
    def initialize_connection(self, volume, connector):
        """Perform the necessary work so that an iSCSI connection can be made.

        To be able to create an iSCSI connection from a given iSCSI name to a
        volume, we must:
        1. Translate the given iSCSI name to a host name
        2. Create new host on the storage system if it does not yet exist
        3. Map the volume to the host if it is not already done
        4. Return iSCSI properties, including the IP address of the preferred
           node for this volume and the LUN number.

        :param volume: volume dict; 'name' and 'id' are read here
        :param connector: connection info dict; 'initiator' (the iSCSI
                          initiator name) is read here
        :returns: dict with 'driver_volume_type' == 'iscsi' and a 'data'
                  dict describing portal, IQN and LUN
        """
        LOG.debug(_('enter: initialize_connection: volume %(vol)s with '
                    'connector %(conn)s') % {'vol': str(volume),
                                             'conn': str(connector)})

        initiator_name = connector['initiator']
        volume_name = volume['name']

        host_name = self._get_host_from_iscsiname(initiator_name)
        # Check if a host is defined for the iSCSI initiator name
        if host_name is None:
            # Host does not exist - add a new host to Storwize/SVC
            host_name = self._create_new_host('host%s' % initiator_name,
                                              initiator_name)
            # Verify that create_new_host succeeded
            self._driver_assert(host_name is not None,
                _('_create_new_host failed to return the host name.'))

        lun_id = self._map_vol_to_host(volume_name, host_name)

        # Get preferred path
        # Only IPv4 for now because lack of OpenStack support
        # TODO(ronenkat): Add support for IPv6
        volume_attributes = self._get_volume_attributes(volume_name)
        if (volume_attributes is not None and
                'preferred_node_id' in volume_attributes):
            preferred_node = volume_attributes['preferred_node_id']
            preferred_node_entry = None
            for node in self.iscsi_ipv4_conf:
                if node['node_id'] == preferred_node:
                    preferred_node_entry = node
                    break
            if preferred_node_entry is None:
                # Preferred node not in our iSCSI config - fall back to
                # the first configured node but log loudly.
                preferred_node_entry = self.iscsi_ipv4_conf[0]
                LOG.error(_('initialize_connection: did not find preferred '
                            'node %(node)s for volume %(vol)s in iSCSI '
                            'configuration') % {'node': preferred_node,
                                                'vol': volume_name})
        else:
            # Get 1st node
            preferred_node_entry = self.iscsi_ipv4_conf[0]
            LOG.error(
                _('initialize_connection: did not find a preferred node '
                  'for volume %s in iSCSI configuration') % volume_name)

        properties = {}
        # We didn't use iSCSI discover, as in server-based iSCSI
        properties['target_discovered'] = False
        # We take the first IP address for now. Ideally, OpenStack will
        # support multipath for improved performance.
        # 3260 is the default iSCSI port.
        properties['target_portal'] = ('%s:%s' %
                (preferred_node_entry['ip'][0], '3260'))
        properties['target_iqn'] = preferred_node_entry['iscsi_name']
        properties['target_lun'] = lun_id
        properties['volume_id'] = volume['id']

        LOG.debug(_('leave: initialize_connection:\n volume: %(vol)s\n '
                    'connector %(conn)s\n properties: %(prop)s')
                  % {'vol': str(volume),
                     'conn': str(connector),
                     'prop': str(properties)})

        return {'driver_volume_type': 'iscsi', 'data': properties, }
-
    def terminate_connection(self, volume, connector):
        """Cleanup after an iSCSI connection has been terminated.

        When we clean up a terminated connection between a given iSCSI name
        and volume, we:
        1. Translate the given iSCSI name to a host name
        2. Remove the volume-to-host mapping if it exists
        3. Delete the host if it has no more mappings (hosts are created
           automatically by this driver when mappings are created)

        :param volume: volume dict; only 'name' is read here
        :param connector: connection info dict; only 'initiator' is read
        """
        LOG.debug(_('enter: terminate_connection: volume %(vol)s with '
                    'connector %(conn)s') % {'vol': str(volume),
                                             'conn': str(connector)})

        vol_name = volume['name']
        initiator_name = connector['initiator']
        host_name = self._get_host_from_iscsiname(initiator_name)
        # Verify that _get_host_from_iscsiname returned the host.
        # This should always succeed as we terminate an existing connection.
        self._driver_assert(host_name is not None,
            _('_get_host_from_iscsiname failed to return the host name '
              'for iscsi name %s') % initiator_name)

        # Check if vdisk-host mapping exists, remove if it does
        mapping_data = self._get_hostvdisk_mappings(host_name)
        if vol_name in mapping_data:
            out, err = self._run_ssh('rmvdiskhostmap -host %s %s'
                                     % (host_name, vol_name))
            # Verify CLI behaviour - no output is returned from
            # rmvdiskhostmap
            self._driver_assert(len(out.strip()) == 0,
                _('delete mapping of volume %(vol)s to host %(host)s '
                  '- non empty output from CLI.\n '
                  'stdout: %(out)s\n stderr: %(err)s')
                % {'vol': vol_name,
                   'host': host_name,
                   'out': str(out),
                   'err': str(err)})
            # Keep the local view in sync so the emptiness check below
            # reflects the mapping we just removed.
            del mapping_data[vol_name]
        else:
            LOG.error(_('terminate_connection: no mapping of volume '
                        '%(vol)s to host %(host)s found') %
                      {'vol': vol_name, 'host': host_name})

        # If this host has no more mappings, delete it
        if not mapping_data:
            self._delete_host(host_name)

        LOG.debug(_('leave: terminate_connection: volume %(vol)s with '
                    'connector %(conn)s') % {'vol': str(volume),
                                             'conn': str(connector)})
-
- def _flashcopy_cleanup(self, fc_map_id, source, target):
- """Clean up a failed FlashCopy operation."""
-
- try:
- out, err = self._run_ssh('stopfcmap -force %s' % fc_map_id)
- out, err = self._run_ssh('rmfcmap -force %s' % fc_map_id)
- except exception.ProcessExecutionError as e:
- LOG.error(_('_run_flashcopy: fail to cleanup failed FlashCopy '
- 'mapping %(fc_map_id)% '
- 'from %(source)s to %(target)s.\n'
- 'stdout: %(out)s\n stderr: %(err)s')
- % {'fc_map_id': fc_map_id,
- 'source': source,
- 'target': target,
- 'out': e.stdout,
- 'err': e.stderr})
-
- def _run_flashcopy(self, source, target):
- """Create a FlashCopy mapping from the source to the target."""
-
- LOG.debug(
- _('enter: _run_flashcopy: execute FlashCopy from source '
- '%(source)s to target %(target)s') % {'source': source,
- 'target': target})
-
- fc_map_cli_cmd = ('mkfcmap -source %s -target %s -autodelete '
- '-cleanrate 0' % (source, target))
- out, err = self._run_ssh(fc_map_cli_cmd)
- self._driver_assert(len(out.strip()) > 0,
- _('create FC mapping from %(source)s to %(target)s - '
- 'did not find success message in CLI output.\n'
- ' stdout: %(out)s\n stderr: %(err)s\n')
- % {'source': source,
- 'target': target,
- 'out': str(out),
- 'err': str(err)})
-
- # Ensure that the output is as expected
- match_obj = re.search('FlashCopy Mapping, id \[([0-9]+)\], '
- 'successfully created', out)
- # Make sure we got a "successfully created" message with vdisk id
- self._driver_assert(match_obj is not None,
- _('create FC mapping from %(source)s to %(target)s - '
- 'did not find success message in CLI output.\n'
- ' stdout: %(out)s\n stderr: %(err)s\n')
- % {'source': source,
- 'target': target,
- 'out': str(out),
- 'err': str(err)})
-
- try:
- fc_map_id = match_obj.group(1)
- self._driver_assert(fc_map_id is not None,
- _('create FC mapping from %(source)s to %(target)s - '
- 'did not find mapping id in CLI output.\n'
- ' stdout: %(out)s\n stderr: %(err)s\n')
- % {'source': source,
- 'target': target,
- 'out': str(out),
- 'err': str(err)})
- except IndexError:
- self._driver_assert(False,
- _('create FC mapping from %(source)s to %(target)s - '
- 'did not find mapping id in CLI output.\n'
- ' stdout: %(out)s\n stderr: %(err)s\n')
- % {'source': source,
- 'target': target,
- 'out': str(out),
- 'err': str(err)})
- try:
- out, err = self._run_ssh('prestartfcmap %s' % fc_map_id)
- except exception.ProcessExecutionError as e:
- with excutils.save_and_reraise_exception():
- LOG.error(_('_run_flashcopy: fail to prepare FlashCopy '
- 'from %(source)s to %(target)s.\n'
- 'stdout: %(out)s\n stderr: %(err)s')
- % {'source': source,
- 'target': target,
- 'out': e.stdout,
- 'err': e.stderr})
- self._flashcopy_cleanup(fc_map_id, source, target)
-
- mapping_ready = False
- wait_time = 5
- # Allow waiting of up to timeout (set as parameter)
- max_retries = (int(FLAGS.storwize_svc_flashcopy_timeout)
- / wait_time) + 1
- for try_number in range(1, max_retries):
- mapping_attributes = self._get_flashcopy_mapping_attributes(
- fc_map_id)
- if (mapping_attributes is None or
- 'status' not in mapping_attributes):
- break
- if mapping_attributes['status'] == 'prepared':
- mapping_ready = True
- break
- elif mapping_attributes['status'] != 'preparing':
- # Unexpected mapping status
- exception_msg = (_('unexecpted mapping status %(status)s '
- 'for mapping %(id)s. Attributes: '
- '%(attr)s')
- % {'status': mapping_attributes['status'],
- 'id': fc_map_id,
- 'attr': mapping_attributes})
- raise exception.VolumeBackendAPIException(
- data=exception_msg)
- # Need to wait for mapping to be prepared, wait a few seconds
- time.sleep(wait_time)
-
- if not mapping_ready:
- exception_msg = (_('mapping %(id)s prepare failed to complete '
- 'within the alloted %(to)s seconds timeout. '
- 'Terminating') % {'id': fc_map_id,
- 'to': FLAGS.storwize_svc_flashcopy_timeout})
- LOG.error(_('_run_flashcopy: fail to start FlashCopy '
- 'from %(source)s to %(target)s with '
- 'exception %(ex)s')
- % {'source': source,
- 'target': target,
- 'ex': exception_msg})
- self._flashcopy_cleanup(fc_map_id, source, target)
- raise exception.InvalidSnapshot(
- reason=_('_run_flashcopy: %s') % exception_msg)
-
- try:
- out, err = self._run_ssh('startfcmap %s' % fc_map_id)
- except exception.ProcessExecutionError as e:
- with excutils.save_and_reraise_exception():
- LOG.error(_('_run_flashcopy: fail to start FlashCopy '
- 'from %(source)s to %(target)s.\n'
- 'stdout: %(out)s\n stderr: %(err)s')
- % {'source': source,
- 'target': target,
- 'out': e.stdout,
- 'err': e.stderr})
- self._flashcopy_cleanup(fc_map_id, source, target)
-
- LOG.debug(_('leave: _run_flashcopy: FlashCopy started from '
- '%(source)s to %(target)s') % {'source': source,
- 'target': target})
-
    def create_volume_from_snapshot(self, volume, snapshot):
        """Create a new volume from a snapshot, using FlashCopy.

        (The original docstring said "Create a new snapshot from volume",
        which had source and target reversed.)

        :param volume: target volume dict; 'name' is read here
        :param snapshot: source snapshot dict; 'name' is read here
        :raises SnapshotNotFound: if the source snapshot volume is missing
        :raises InvalidSnapshot: if the target volume already exists
        """

        source_volume = snapshot['name']
        tgt_volume = volume['name']

        LOG.debug(_('enter: create_volume_from_snapshot: snapshot %(tgt)s '
                    'from volume %(src)s') % {'tgt': tgt_volume,
                                              'src': source_volume})

        src_volume_attributes = self._get_volume_attributes(source_volume)
        if src_volume_attributes is None:
            exception_msg = (_('create_volume_from_snapshot: source volume %s '
                               'does not exist') % source_volume)
            LOG.error(exception_msg)
            raise exception.SnapshotNotFound(exception_msg,
                                             volume_id=source_volume)

        self._driver_assert('capacity' in src_volume_attributes,
            _('create_volume_from_snapshot: cannot get source '
              'volume %(src)s capacity from volume attributes '
              '%(attr)s') % {'src': source_volume,
                             'attr': src_volume_attributes})
        src_volume_size = src_volume_attributes['capacity']

        tgt_volume_attributes = self._get_volume_attributes(tgt_volume)
        # Does the snapshot target exist?
        if tgt_volume_attributes is not None:
            exception_msg = (_('create_volume_from_snapshot: target volume %s '
                               'already exists, cannot create') % tgt_volume)
            LOG.error(exception_msg)
            raise exception.InvalidSnapshot(reason=exception_msg)

        # 'capacity' comes from 'lsvdisk -bytes', hence units='b' below.
        snapshot_volume = {}
        snapshot_volume['name'] = tgt_volume
        snapshot_volume['size'] = src_volume_size

        self._create_volume(snapshot_volume, units='b')

        try:
            self._run_flashcopy(source_volume, tgt_volume)
        except Exception:
            with excutils.save_and_reraise_exception():
                # Clean up newly-created snapshot if the FlashCopy failed
                self._delete_volume(snapshot_volume, True)

        LOG.debug(
            _('leave: create_volume_from_snapshot: %s created successfully')
            % tgt_volume)
-
- def create_snapshot(self, snapshot):
- """Create a new snapshot using FlashCopy."""
-
- src_volume = snapshot['volume_name']
- tgt_volume = snapshot['name']
-
- # Flag to keep track of created volumes in case FlashCopy
- tgt_volume_created = False
-
- LOG.debug(_('enter: create_snapshot: snapshot %(tgt)s from '
- 'volume %(src)s') % {'tgt': tgt_volume,
- 'src': src_volume})
-
- src_volume_attributes = self._get_volume_attributes(src_volume)
- if src_volume_attributes is None:
- exception_msg = (
- _('create_snapshot: source volume %s does not exist')
- % src_volume)
- LOG.error(exception_msg)
- raise exception.VolumeNotFound(exception_msg,
- volume_id=src_volume)
-
- self._driver_assert('capacity' in src_volume_attributes,
- _('create_volume_from_snapshot: cannot get source '
- 'volume %(src)s capacity from volume attributes '
- '%(attr)s') % {'src': src_volume,
- 'attr': src_volume_attributes})
-
- source_volume_size = src_volume_attributes['capacity']
-
- tgt_volume_attributes = self._get_volume_attributes(tgt_volume)
- # Does the snapshot target exist?
- snapshot_volume = {}
- if tgt_volume_attributes is None:
- # No, create a new snapshot volume
- snapshot_volume['name'] = tgt_volume
- snapshot_volume['size'] = source_volume_size
- self._create_volume(snapshot_volume, units='b')
- tgt_volume_created = True
- else:
- # Yes, target exists, verify exact same size as source
- self._driver_assert('capacity' in tgt_volume_attributes,
- _('create_volume_from_snapshot: cannot get source '
- 'volume %(src)s capacity from volume attributes '
- '%(attr)s') % {'src': tgt_volume,
- 'attr': tgt_volume_attributes})
- target_volume_size = tgt_volume_attributes['capacity']
- if target_volume_size != source_volume_size:
- exception_msg = (
- _('create_snapshot: source %(src)s and target '
- 'volume %(tgt)s have different capacities '
- '(source:%(ssize)s target:%(tsize)s)') %
- {'src': src_volume,
- 'tgt': tgt_volume,
- 'ssize': source_volume_size,
- 'tsize': target_volume_size})
- LOG.error(exception_msg)
- raise exception.InvalidSnapshot(reason=exception_msg)
-
- try:
- self._run_flashcopy(src_volume, tgt_volume)
- except exception.InvalidSnapshot:
- with excutils.save_and_reraise_exception():
- # Clean up newly-created snapshot if the FlashCopy failed
- if tgt_volume_created:
- self._delete_volume(snapshot_volume, True)
-
- LOG.debug(_('leave: create_snapshot: %s created successfully')
- % tgt_volume)
-
    def delete_snapshot(self, snapshot):
        """Delete a snapshot (non-forced path)."""
        self._delete_snapshot(snapshot, False)
-
- def _delete_snapshot(self, snapshot, force_opt):
- """Delete a snapshot from the storage."""
- LOG.debug(_('enter: delete_snapshot: snapshot %s') % snapshot)
-
- snapshot_defined = self._is_volume_defined(snapshot['name'])
- if snapshot_defined:
- if force_opt:
- self._delete_volume(snapshot, force_opt)
- else:
- self.delete_volume(snapshot)
-
- LOG.debug(_('leave: delete_snapshot: snapshot %s') % snapshot)
-
    def _get_host_from_iscsiname(self, iscsi_name):
        """List the hosts defined in the storage.

        Return the host name with the given iSCSI name, or None if there is
        no host name with that iSCSI name.

        :param iscsi_name: iSCSI initiator (IQN) to look up
        :returns: matching host name or None
        """

        LOG.debug(_('enter: _get_host_from_iscsiname: iSCSI initiator %s')
                  % iscsi_name)

        # Get list of host in the storage
        ssh_cmd = 'lshost -delim !'
        out, err = self._run_ssh(ssh_cmd)

        # No hosts defined at all
        if (len(out.strip()) == 0):
            return None

        err_msg = _('_get_host_from_iscsiname: '
                    'failed with unexpected CLI output.\n'
                    ' command: %(cmd)s\n stdout: %(out)s\n '
                    'stderr: %(err)s') % {'cmd': ssh_cmd,
                                          'out': str(out),
                                          'err': str(err)}
        host_lines = out.strip().split('\n')
        self._driver_assert(len(host_lines) > 0, err_msg)
        # First line is the '!'-delimited column header; find 'name'.
        header = host_lines.pop(0).split('!')
        self._driver_assert('name' in header, err_msg)
        name_index = header.index('name')

        hosts = map(lambda x: x.split('!')[name_index], host_lines)
        hostname = None

        # For each host, get its details and check for its iSCSI name
        for host in hosts:
            ssh_cmd = 'lshost -delim ! %s' % host
            out, err = self._run_ssh(ssh_cmd)
            self._driver_assert(len(out) > 0,
                _('_get_host_from_iscsiname: '
                  'Unexpected response from CLI output. '
                  'Command: %(cmd)s\n stdout: %(out)s\n stderr: %(err)s')
                % {'cmd': ssh_cmd,
                   'out': str(out),
                   'err': str(err)})
            for attrib_line in out.split('\n'):
                # If '!' not found, return the string and two empty strings
                attrib_name, foo, attrib_value = attrib_line.partition('!')
                if attrib_name == 'iscsi_name':
                    if iscsi_name == attrib_value:
                        hostname = host
                    # Stop scanning attributes once iscsi_name was seen,
                    # whether or not it matched.
                    break
            if hostname is not None:
                break

        LOG.debug(_('leave: _get_host_from_iscsiname: iSCSI initiator %s')
                  % iscsi_name)

        return hostname
-
    def _create_new_host(self, host_name, initiator_name):
        """Create a new host on the storage system.

        We modify the given host name, replace any invalid characters and
        adding a random suffix to avoid conflicts due to the translation. The
        host is associated with the given iSCSI initiator name.

        :param host_name: suggested host name (sanitized; a random numeric
                          suffix is appended)
        :param initiator_name: iSCSI initiator (IQN) to associate
        :returns: the actual host name created on the storage
        :raises NoValidHost: if host_name is neither unicode nor str
        """

        LOG.debug(_('enter: _create_new_host: host %(name)s with iSCSI '
                    'initiator %(init)s') % {'name': host_name,
                                             'init': initiator_name})

        # NOTE(review): Python 2 only - 'unicode' does not exist on
        # Python 3.  The translation tables are class attributes.
        if isinstance(host_name, unicode):
            host_name = host_name.translate(self._unicode_host_name_filter)
        elif isinstance(host_name, str):
            host_name = host_name.translate(self._string_host_name_filter)
        else:
            msg = _('_create_new_host: cannot clean host name. Host name '
                    'is not unicode or string')
            LOG.error(msg)
            raise exception.NoValidHost(reason=msg)

        # Add 5 digit random suffix to the host name to avoid
        # conflicts in host names after removing invalid characters
        # for Storwize/SVC names
        host_name = '%s_%s' % (host_name, random.randint(10000, 99999))
        out, err = self._run_ssh('mkhost -name "%s" -iscsiname "%s"'
                                 % (host_name, initiator_name))
        self._driver_assert(len(out.strip()) > 0 and
                            'successfully created' in out,
            _('create host %(name)s with iSCSI initiator %(init)s - '
              'did not find success message in CLI output.\n '
              'stdout: %(out)s\n stderr: %(err)s\n')
            % {'name': host_name,
               'init': initiator_name,
               'out': str(out),
               'err': str(err)})

        LOG.debug(_('leave: _create_new_host: host %(host)s with iSCSI '
                    'initiator %(init)s') % {'host': host_name,
                                             'init': initiator_name})

        return host_name
-
- def _delete_host(self, host_name):
- """Delete a host and associated iSCSI initiator name."""
-
- LOG.debug(_('enter: _delete_host: host %s ') % host_name)
-
- # Check if host exists on system, expect to find the host
- is_defined = self._is_host_defined(host_name)
- if is_defined:
- # Delete host
- out, err = self._run_ssh('rmhost %s ' % host_name)
- else:
- LOG.info(_('warning: tried to delete host %(name)s but '
- 'it does not exist.') % {'name': host_name})
-
- LOG.debug(_('leave: _delete_host: host %s ') % host_name)
-
- def _is_volume_defined(self, volume_name):
- """Check if volume is defined."""
- LOG.debug(_('enter: _is_volume_defined: volume %s ') % volume_name)
- volume_attributes = self._get_volume_attributes(volume_name)
- LOG.debug(_('leave: _is_volume_defined: volume %(vol)s with %(str)s ')
- % {'vol': volume_name,
- 'str': volume_attributes is not None})
- if volume_attributes is None:
- return False
- else:
- return True
-
    def _is_host_defined(self, host_name):
        """Check if a host is defined on the storage.

        :returns: True when a host with exactly this name exists
        """

        LOG.debug(_('enter: _is_host_defined: host %s ') % host_name)

        # Get list of hosts with the name %host_name%
        # We expect zero or one line if host does not exist,
        # two lines if it does exist, otherwise error
        out, err = self._run_ssh('lshost -filtervalue name=%s -delim !'
                                 % host_name)
        if len(out.strip()) == 0:
            return False

        lines = out.strip().split('\n')
        self._driver_assert(len(lines) <= 2,
            _('_is_host_defined: Unexpected response from CLI output.\n '
              'stdout: %(out)s\n stderr: %(err)s\n')
            % {'out': str(out),
               'err': str(err)})

        if len(lines) == 2:
            # Header line plus one data line: parse and sanity-check.
            host_info = self._get_hdr_dic(lines[0], lines[1], '!')
            host_name_from_storage = host_info['name']
            # Make sure we got the data for the right host
            self._driver_assert(host_name_from_storage == host_name,
                _('Data received for host %(host1)s instead of host '
                  '%(host2)s.\n '
                  'stdout: %(out)s\n stderr: %(err)s\n')
                % {'host1': host_name_from_storage,
                   'host2': host_name,
                   'out': str(out),
                   'err': str(err)})
        else:  # 0 or 1 lines
            host_name_from_storage = None

        LOG.debug(_('leave: _is_host_defined: host %(host)s with %(str)s ') % {
            'host': host_name,
            'str': host_name_from_storage is not None})

        if host_name_from_storage is None:
            return False
        else:
            return True
-
- def _get_hostvdisk_mappings(self, host_name):
- """Return the defined storage mappings for a host."""
-
- return_data = {}
- ssh_cmd = 'lshostvdiskmap -delim ! %s' % host_name
- out, err = self._run_ssh(ssh_cmd)
-
- mappings = out.strip().split('\n')
- if len(mappings) > 0:
- header = mappings.pop(0)
- for mapping_line in mappings:
- mapping_data = self._get_hdr_dic(header, mapping_line, '!')
- return_data[mapping_data['vdisk_name']] = mapping_data
-
- return return_data
-
- def _map_vol_to_host(self, volume_name, host_name):
- """Create a mapping between a volume to a host."""
-
- LOG.debug(_('enter: _map_vol_to_host: volume %(vol)s to '
- 'host %(host)s') % {'vol': volume_name,
- 'host': host_name})
-
- # Check if this volume is already mapped to this host
- mapping_data = self._get_hostvdisk_mappings(host_name)
-
- mapped_flag = False
- result_lun = '-1'
- if volume_name in mapping_data:
- mapped_flag = True
- result_lun = mapping_data[volume_name]['SCSI_id']
- else:
- lun_used = []
- for k, v in mapping_data.iteritems():
- lun_used.append(int(v['SCSI_id']))
- lun_used.sort()
- # Assume all luns are taken to this point, and then try to find
- # an unused one
- result_lun = str(len(lun_used))
- for index, n in enumerate(lun_used):
- if n > index:
- result_lun = str(index)
-
- # Volume is not mapped to host, create a new LUN
- if not mapped_flag:
- out, err = self._run_ssh('mkvdiskhostmap -host %s -scsi %s %s'
- % (host_name, result_lun, volume_name))
- self._driver_assert(len(out.strip()) > 0 and
- 'successfully created' in out,
- _('_map_vol_to_host: mapping host %(host)s to '
- 'volume %(vol)s with LUN '
- '%(lun)s - did not find success message in CLI output. '
- 'stdout: %(out)s\n stderr: %(err)s\n')
- % {'host': host_name,
- 'vol': volume_name,
- 'lun': result_lun,
- 'out': str(out),
- 'err': str(err)})
-
- LOG.debug(_('leave: _map_vol_to_host: LUN %(lun)s, volume %(vol)s, '
- 'host %(host)s') % {'lun': result_lun, 'vol': volume_name,
- 'host': host_name})
-
- return result_lun
-
    def _get_flashcopy_mapping_attributes(self, fc_map_id):
        """Return the attributes of a FlashCopy mapping.

        Returns the attributes for the specified FlashCopy mapping, or
        None if the mapping does not exist.
        An exception is raised if the information from system can not
        be parsed or matched to a single FlashCopy mapping (this case
        should not happen under normal conditions).
        """

        LOG.debug(_('enter: _get_flashcopy_mapping_attributes: mapping %s')
                  % fc_map_id)

        fc_ls_map_cmd = ('lsfcmap -filtervalue id=%s -delim !' % fc_map_id)
        out, err = self._run_ssh(fc_ls_map_cmd)
        self._driver_assert(len(out) > 0,
            _('_get_flashcopy_mapping_attributes: '
              'Unexpected response from CLI output. '
              'Command: %(cmd)s\n stdout: %(out)s\n stderr: %(err)s')
            % {'cmd': fc_ls_map_cmd,
               'out': str(out),
               'err': str(err)})

        # Get list of FlashCopy mappings
        # We expect zero or one line if mapping does not exist,
        # two lines if it does exist, otherwise error
        lines = out.strip().split('\n')
        self._driver_assert(len(lines) <= 2,
            _('_get_flashcopy_mapping_attributes: '
              'Unexpected response from CLI output. '
              'Command: %(cmd)s\n stdout: %(out)s\n stderr: %(err)s')
            % {'cmd': fc_ls_map_cmd,
               'out': str(out),
               'err': str(err)})

        if len(lines) == 2:
            # Header line plus one data line -> parse into a dict
            attributes = self._get_hdr_dic(lines[0], lines[1], '!')
        else:  # 0 or 1 lines
            attributes = None

        LOG.debug(_('leave: _get_flashcopy_mapping_attributes: mapping '
                    '%(id)s, attributes %(attr)s') %
                  {'id': fc_map_id,
                   'attr': attributes})

        return attributes
-
- def _get_volume_attributes(self, volume_name):
- """Return volume attributes, or None if volume does not exist
-
- Exception is raised if the information from system can not be
- parsed/matched to a single volume.
- """
-
- LOG.debug(_('enter: _get_volume_attributes: volume %s')
- % volume_name)
- # Get the lunid to be used
-
- try:
- ssh_cmd = 'lsvdisk -bytes -delim ! %s ' % volume_name
- out, err = self._run_ssh(ssh_cmd)
- except exception.ProcessExecutionError as e:
- # Didn't get details from the storage, return None
- LOG.error(_('CLI Exception output:\n command: %(cmd)s\n '
- 'stdout: %(out)s\n stderr: %(err)s') %
- {'cmd': ssh_cmd,
- 'out': e.stdout,
- 'err': e.stderr})
- return None
-
- self._driver_assert(len(out) > 0,
- ('_get_volume_attributes: '
- 'Unexpected response from CLI output. '
- 'Command: %(cmd)s\n stdout: %(out)s\n stderr: %(err)s')
- % {'cmd': ssh_cmd,
- 'out': str(out),
- 'err': str(err)})
- attributes = {}
- for attrib_line in out.split('\n'):
- # If '!' not found, return the string and two empty strings
- attrib_name, foo, attrib_value = attrib_line.partition('!')
- if attrib_name is not None and attrib_name.strip() > 0:
- attributes[attrib_name] = attrib_value
-
- LOG.debug(_('leave: _get_volume_attributes:\n volume %(vol)s\n '
- 'attributes: %(attr)s')
- % {'vol': volume_name,
- 'attr': str(attributes)})
-
- return attributes
diff --git a/nova/volume/utils.py b/nova/volume/utils.py
deleted file mode 100644
index c1367114df..0000000000
--- a/nova/volume/utils.py
+++ /dev/null
@@ -1,84 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright (c) 2012 OpenStack, LLC.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""Volume-related Utilities and helpers."""
-
-from nova import flags
-from nova.openstack.common import log as logging
-from nova.openstack.common.notifier import api as notifier_api
-from nova.openstack.common import timeutils
-from nova import utils
-
-
-FLAGS = flags.FLAGS
-LOG = logging.getLogger(__name__)
-
-
def notify_usage_exists(context, volume_ref, current_period=False):
    """Generate an 'exists' notification for a volume for usage auditing.

    Generates usage for the last completed audit period, unless
    'current_period' is True, in which case the period runs from the end
    of the last completed period until now.
    """
    begin, end = utils.last_completed_audit_period()
    if current_period:
        audit_start = end
        audit_end = timeutils.utcnow()
    else:
        audit_start = begin
        audit_end = end

    extra_usage_info = dict(audit_period_beginning=str(audit_start),
                            audit_period_ending=str(audit_end))

    notify_about_volume_usage(
        context, volume_ref, 'exists', extra_usage_info=extra_usage_info)
-
-
-def _usage_from_volume(context, volume_ref, **kw):
- def null_safe_str(s):
- return str(s) if s else ''
-
- usage_info = dict(
- tenant_id=volume_ref['project_id'],
- user_id=volume_ref['user_id'],
- volume_id=volume_ref['id'],
- volume_type=volume_ref['volume_type'],
- display_name=volume_ref['display_name'],
- launched_at=null_safe_str(volume_ref['launched_at']),
- created_at=null_safe_str(volume_ref['created_at']),
- status=volume_ref['status'],
- snapshot_id=volume_ref['snapshot_id'],
- size=volume_ref['size'])
-
- usage_info.update(kw)
- return usage_info
-
-
def notify_about_volume_usage(context, volume, event_suffix,
                              extra_usage_info=None, host=None):
    """Publish a 'volume.<event_suffix>' INFO notification for a volume.

    The publisher id defaults to this node's FLAGS.host when no host is
    given; extra_usage_info entries are merged into the payload.
    """
    publisher_host = host or FLAGS.host
    extra = extra_usage_info or {}

    usage_info = _usage_from_volume(context, volume, **extra)

    notifier_api.notify(context, 'volume.%s' % publisher_host,
                        'volume.%s' % event_suffix,
                        notifier_api.INFO, usage_info)
diff --git a/nova/volume/volume_types.py b/nova/volume/volume_types.py
deleted file mode 100644
index 67d8244659..0000000000
--- a/nova/volume/volume_types.py
+++ /dev/null
@@ -1,125 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright (c) 2011 Zadara Storage Inc.
-# Copyright (c) 2011 OpenStack LLC.
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# Copyright (c) 2010 Citrix Systems, Inc.
-# Copyright 2011 Ken Pepple
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""Built-in volume type properties."""
-
-from nova import context
-from nova import db
-from nova import exception
-from nova import flags
-from nova.openstack.common import log as logging
-
-FLAGS = flags.FLAGS
-LOG = logging.getLogger(__name__)
-
-
def create(context, name, extra_specs=None):
    """Creates volume types.

    :param extra_specs: optional dict of extra specs for the new type
    :raises VolumeTypeCreateFailed: if the DB insert fails
    """
    # BUG FIX: a mutable default argument ({}) is shared across calls;
    # use None and substitute a fresh dict per call.  Passing {} or
    # nothing behaves exactly as before.
    if extra_specs is None:
        extra_specs = {}
    try:
        db.volume_type_create(context,
                              dict(name=name,
                                   extra_specs=extra_specs))
    except exception.DBError as e:
        # 'except X, e' (the original spelling) is Python-2-only syntax;
        # 'as' works on 2.6+ and 3.x.
        LOG.exception(_('DB error: %s') % e)
        raise exception.VolumeTypeCreateFailed(name=name,
                                               extra_specs=extra_specs)
-
-
def destroy(context, name):
    """Marks volume types as deleted."""
    # Guard clause: a None name is rejected up front.
    if name is None:
        raise exception.InvalidVolumeType(reason=_("name cannot be None"))
    db.volume_type_destroy(context, name)
-
-
def get_all_types(context, inactive=0, search_opts=None):
    """Get all non-deleted volume_types.

    Pass true as argument if you want deleted volume types returned also.

    :param search_opts: optional dict of filters; only the 'extra_specs'
                        filter is recognised, matching types whose
                        extra_specs contain every given key/value pair
    """
    # BUG FIX: the mutable default argument ({}) is shared across calls;
    # None plus a falsy check preserves the original behaviour.
    vol_types = db.volume_type_get_all(context, inactive)

    if search_opts:
        LOG.debug(_("Searching by: %s") % str(search_opts))

        def _check_extra_specs_match(vol_type, searchdict):
            # Every key/value in searchdict must be present and equal.
            for k, v in searchdict.items():
                if (k not in vol_type['extra_specs']
                        or vol_type['extra_specs'][k] != v):
                    return False
            return True

        # search_option to filter_name mapping.
        filter_mapping = {'extra_specs': _check_extra_specs_match}

        result = {}
        for type_name, type_args in vol_types.items():
            # go over all filters in the list
            for opt, values in search_opts.items():
                try:
                    filter_func = filter_mapping[opt]
                except KeyError:
                    # no such filter - ignore it, go to next filter
                    continue
                if filter_func(type_args, values):
                    result[type_name] = type_args
                    break
        vol_types = result
    return vol_types
-
-
def get_volume_type(ctxt, id):
    """Retrieves single volume type by id.

    :param ctxt: request context; an admin context is substituted when None
    :param id: volume type id (note: shadows the builtin 'id'; kept for
               interface compatibility)
    :raises InvalidVolumeType: if id is None
    """
    if id is None:
        msg = _("id cannot be None")
        raise exception.InvalidVolumeType(reason=msg)

    if ctxt is None:
        ctxt = context.get_admin_context()

    return db.volume_type_get(ctxt, id)
-
-
def get_volume_type_by_name(context, name):
    """Retrieves single volume type by name."""
    # Guard clause: reject a None name before touching the DB.
    if name is None:
        raise exception.InvalidVolumeType(reason=_("name cannot be None"))

    return db.volume_type_get_by_name(context, name)
-
-
def is_key_value_present(volume_type_id, key, value, volume_type=None):
    """Return True when the type's extra_specs map key to exactly value.

    When volume_type is not supplied it is fetched by id using an admin
    context.  A None volume_type_id always yields False.
    """
    if volume_type_id is None:
        return False

    if volume_type is None:
        volume_type = get_volume_type(context.get_admin_context(),
                                      volume_type_id)

    specs = volume_type.get('extra_specs')
    return specs is not None and specs.get(key) == value
diff --git a/nova/volume/xensm.py b/nova/volume/xensm.py
deleted file mode 100644
index 7e9a4b0ee4..0000000000
--- a/nova/volume/xensm.py
+++ /dev/null
@@ -1,250 +0,0 @@
-# Copyright (c) 2011 Citrix Systems, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from nova import exception
-from nova import flags
-from nova.openstack.common import log as logging
-from nova import utils
-from nova.virt.xenapi import driver as xenapi_conn
-from nova.virt.xenapi import volumeops
-import nova.volume.driver
-
-LOG = logging.getLogger(__name__)
-FLAGS = flags.FLAGS
-
-
-class XenSMDriver(nova.volume.driver.VolumeDriver):
-
- def _convert_config_params(self, conf_str):
- params = dict([item.split("=") for item in conf_str.split()])
- return params
-
- def _get_introduce_sr_keys(self, params):
- if 'name_label' in params:
- del params['name_label']
- keys = params.keys()
- keys.append('sr_type')
- return keys
-
- def _create_storage_repo(self, context, backend_ref):
- """Either creates or introduces SR on host
- depending on whether it exists in xapi db."""
- params = self._convert_config_params(backend_ref['config_params'])
- if 'name_label' in params:
- label = params['name_label']
- del params['name_label']
- else:
- label = 'SR-' + str(backend_ref['id'])
-
- params['sr_type'] = backend_ref['sr_type']
-
- if backend_ref['sr_uuid'] is None:
- # run the sr create command
- try:
- LOG.debug(_('SR name = %s') % label)
- LOG.debug(_('Params: %s') % str(params))
- sr_uuid = self._volumeops.create_sr(label, params)
- # update sr_uuid and created in db
- except Exception as ex:
- LOG.debug(_("Failed to create sr %s...continuing") %
- str(backend_ref['id']))
- msg = _('Create failed')
- raise exception.VolumeBackendAPIException(data=msg)
-
- LOG.debug(_('SR UUID of new SR is: %s') % sr_uuid)
- try:
- self.db.sm_backend_conf_update(context,
- backend_ref['id'],
- dict(sr_uuid=sr_uuid))
- except Exception as ex:
- LOG.exception(ex)
- msg = _("Failed to update db")
- raise exception.VolumeBackendAPIException(data=msg)
-
- else:
- # sr introduce, if not already done
- try:
- self._volumeops.introduce_sr(backend_ref['sr_uuid'], label,
- params)
- except Exception as ex:
- LOG.exception(ex)
- LOG.debug(_("Failed to introduce sr %s...continuing")
- % str(backend_ref['id']))
-
- def _create_storage_repos(self, context):
- """Create/Introduce storage repositories at start."""
- backends = self.db.sm_backend_conf_get_all(context)
- for backend in backends:
- try:
- self._create_storage_repo(context, backend)
- except Exception as ex:
- LOG.exception(ex)
- msg = _('Failed to reach backend %d') % backend['id']
- raise exception.VolumeBackendAPIException(data=msg)
-
- def __init__(self, *args, **kwargs):
- """Connect to the hypervisor."""
-
- # This driver leverages Xen storage manager, and hence requires
- # hypervisor to be Xen
- if not FLAGS.compute_driver.endswith('XenAPIDriver'):
- msg = (_('XenSMDriver requires xenapi connection, using %s') %
- FLAGS.compute_driver)
- raise exception.VolumeBackendAPIException(data=msg)
-
- url = FLAGS.xenapi_connection_url
- username = FLAGS.xenapi_connection_username
- password = FLAGS.xenapi_connection_password
- try:
- session = xenapi_conn.XenAPISession(url, username, password)
- self._volumeops = volumeops.VolumeOps(session)
- except Exception as ex:
- LOG.exception(ex)
- msg = _("Failed to initiate session")
- raise exception.VolumeBackendAPIException(data=msg)
-
- super(XenSMDriver, self).__init__(execute=utils.execute,
- sync_exec=utils.execute,
- *args, **kwargs)
-
- def do_setup(self, ctxt):
- """Setup includes creating or introducing storage repos
- existing in the database and destroying deleted ones."""
-
- # TODO(renukaapte) purge storage repos
- self.ctxt = ctxt
- self._create_storage_repos(ctxt)
-
- def create_volume(self, volume):
- """Creates a logical volume. Can optionally return a Dictionary of
- changes to the volume object to be persisted."""
-
- # For now the scheduling logic will be to try to fit the volume in
- # the first available backend.
- # TODO(renukaapte) better scheduling once APIs are in place
- sm_vol_rec = None
- backends = self.db.sm_backend_conf_get_all(self.ctxt)
- for backend in backends:
- # Ensure that storage repo exists, if not create.
- # This needs to be done because if nova compute and
- # volume are both running on this host, then, as a
- # part of detach_volume, compute could potentially forget SR
- self._create_storage_repo(self.ctxt, backend)
- sm_vol_rec = self._volumeops.create_volume_for_sm(volume,
- backend['sr_uuid'])
- if sm_vol_rec:
- LOG.debug(_('Volume will be created in backend - %d')
- % backend['id'])
- break
-
- if sm_vol_rec:
- # Update db
- sm_vol_rec['id'] = volume['id']
- sm_vol_rec['backend_id'] = backend['id']
- try:
- self.db.sm_volume_create(self.ctxt, sm_vol_rec)
- except Exception as ex:
- LOG.exception(ex)
- msg = _("Failed to update volume in db")
- raise exception.VolumeBackendAPIException(data=msg)
-
- else:
- msg = _('Unable to create volume')
- raise exception.VolumeBackendAPIException(data=msg)
-
- def delete_volume(self, volume):
-
- vol_rec = self.db.sm_volume_get(self.ctxt, volume['id'])
- if not vol_rec:
- raise exception.NotFound(_("Volume %s does not exist"),
- volume['id'])
- try:
- # If compute runs on this node, detach could have disconnected SR
- backend_ref = self.db.sm_backend_conf_get(self.ctxt,
- vol_rec['backend_id'])
- self._create_storage_repo(self.ctxt, backend_ref)
- self._volumeops.delete_volume_for_sm(vol_rec['vdi_uuid'])
- except Exception as ex:
- LOG.exception(ex)
- msg = _("Failed to delete vdi")
- raise exception.VolumeBackendAPIException(data=msg)
-
- try:
- self.db.sm_volume_delete(self.ctxt, volume['id'])
- except Exception as ex:
- LOG.exception(ex)
- msg = _("Failed to delete volume in db")
- raise exception.VolumeBackendAPIException(data=msg)
-
- def local_path(self, volume):
- return str(volume['id'])
-
- def undiscover_volume(self, volume):
- """Undiscover volume on a remote host."""
- pass
-
- def discover_volume(self, context, volume):
- return str(volume['id'])
-
- def check_for_setup_error(self):
- pass
-
- def create_export(self, context, volume):
- """Exports the volume."""
- pass
-
- def remove_export(self, context, volume):
- """Removes an export for a logical volume."""
- pass
-
- def ensure_export(self, context, volume):
- """Safely, synchronously recreates an export for a logical volume."""
- pass
-
- def initialize_connection(self, volume, connector):
- try:
- xensm_properties = dict(self.db.sm_volume_get(self.ctxt,
- volume['id']))
- except Exception as ex:
- LOG.exception(ex)
- msg = _("Failed to find volume in db")
- raise exception.VolumeBackendAPIException(data=msg)
-
- # Keep the volume id key consistent with what ISCSI driver calls it
- xensm_properties['volume_id'] = xensm_properties['id']
- del xensm_properties['id']
-
- try:
- backend_conf = self.db.sm_backend_conf_get(self.ctxt,
- xensm_properties['backend_id'])
- except Exception as ex:
- LOG.exception(ex)
- msg = _("Failed to find backend in db")
- raise exception.VolumeBackendAPIException(data=msg)
-
- params = self._convert_config_params(backend_conf['config_params'])
-
- xensm_properties['flavor_id'] = backend_conf['flavor_id']
- xensm_properties['sr_uuid'] = backend_conf['sr_uuid']
- xensm_properties['sr_type'] = backend_conf['sr_type']
- xensm_properties.update(params)
- _introduce_sr_keys = self._get_introduce_sr_keys(params)
- xensm_properties['introduce_sr_keys'] = _introduce_sr_keys
- return {
- 'driver_volume_type': 'xensm',
- 'data': xensm_properties
- }
-
- def terminate_connection(self, volume, connector):
- pass
diff --git a/setup.py b/setup.py
index 93ef7853ef..2d6201a12b 100644
--- a/setup.py
+++ b/setup.py
@@ -49,7 +49,6 @@ setuptools.setup(name='nova',
'bin/nova-api-ec2',
'bin/nova-api-metadata',
'bin/nova-api-os-compute',
- 'bin/nova-api-os-volume',
'bin/nova-rpc-zmq-receiver',
'bin/nova-cert',
'bin/nova-clear-rabbit-queues',
@@ -63,8 +62,6 @@ setuptools.setup(name='nova',
'bin/nova-objectstore',
'bin/nova-rootwrap',
'bin/nova-scheduler',
- 'bin/nova-volume',
- 'bin/nova-volume-usage-audit',
'bin/nova-xvpvncproxy',
],
py_modules=[])