author    esberglu <esberglu@us.ibm.com>  2018-02-09 13:59:08 -0600
committer esberglu <esberglu@us.ibm.com>  2018-04-25 14:35:12 -0500
commit    3bb59e393f51eeef7299e331a033bd9385867e31 (patch)
tree      16a983509ded4063068f63cfcc5e46587f9fe616 /nova/virt/powervm
parent    2c5da2212c3fa3e589c4af171486a2097fd8c54e (diff)
download  nova-3bb59e393f51eeef7299e331a033bd9385867e31.tar.gz
PowerVM Driver: Snapshot
Add instance snapshot support for the PowerVM virt driver.

Blueprint: powervm-snapshot
Change-Id: I2691b09d95691915dc1065284d25ad22db41d32b
Diffstat (limited to 'nova/virt/powervm')
-rw-r--r--  nova/virt/powervm/disk/ssp.py      | 130
-rw-r--r--  nova/virt/powervm/driver.py        |  47
-rw-r--r--  nova/virt/powervm/image.py         |  61
-rw-r--r--  nova/virt/powervm/mgmt.py          | 175
-rw-r--r--  nova/virt/powervm/tasks/image.py   |  81
-rw-r--r--  nova/virt/powervm/tasks/storage.py | 142
6 files changed, 632 insertions(+), 4 deletions(-)
diff --git a/nova/virt/powervm/disk/ssp.py b/nova/virt/powervm/disk/ssp.py
index 047795dd0c..ec4dae418d 100644
--- a/nova/virt/powervm/disk/ssp.py
+++ b/nova/virt/powervm/disk/ssp.py
@@ -1,4 +1,4 @@
-# Copyright 2015, 2017 IBM Corp.
+# Copyright 2015, 2018 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
@@ -24,9 +24,11 @@ from pypowervm.tasks import storage as tsk_stg
import pypowervm.util as pvm_u
import pypowervm.wrappers.cluster as pvm_clust
import pypowervm.wrappers.storage as pvm_stg
+import pypowervm.wrappers.virtual_io_server as pvm_vios
from nova import exception
from nova import image
+from nova.virt.powervm import mgmt
from nova.virt.powervm import vm
@@ -77,6 +79,7 @@ class SSPDiskAdapter(object):
capabilities = {
'shared_storage': True,
+ 'snapshot': True,
}
def __init__(self, adapter, host_uuid):
@@ -87,6 +90,7 @@ class SSPDiskAdapter(object):
"""
self._adapter = adapter
self._host_uuid = host_uuid
+ self.mp_uuid = mgmt.mgmt_uuid(self._adapter)
try:
self._clust = pvm_clust.Cluster.get(self._adapter)[0]
self._ssp = pvm_stg.SSP.get_by_href(
@@ -135,7 +139,7 @@ class SSPDiskAdapter(object):
match_func=match_func)
# Remove the mapping from *each* VIOS on the LPAR's host.
- # The LPAR's host has to be self.host_uuid, else the PowerVM API will
+ # The LPAR's host has to be self._host_uuid, else the PowerVM API will
# fail.
#
# Note - this may not be all the VIOSes on the system...just the ones
@@ -225,7 +229,7 @@ class SSPDiskAdapter(object):
return tsk_map.add_map(vios_w, mapping)
# Add the mapping to *each* VIOS on the LPAR's host.
- # The LPAR's host has to be self.host_uuid, else the PowerVM API will
+ # The LPAR's host has to be self._host_uuid, else the PowerVM API will
# fail.
#
# Note: this may not be all the VIOSes on the system - just the ones
@@ -245,10 +249,128 @@ class SSPDiskAdapter(object):
"""
ret = []
for n in self._clust.nodes:
- # Skip any nodes that we don't have the vios uuid or uri
+ # Skip any nodes for which we don't have the VIOS uuid or uri
if not (n.vios_uuid and n.vios_uri):
continue
if self._host_uuid == pvm_u.get_req_path_uuid(
n.vios_uri, preserve_case=True, root=True):
ret.append(n.vios_uuid)
return ret
+
+ def get_bootdisk_path(self, instance, vios_uuid):
+ """Get the local path for an instance's boot disk.
+
+ :param instance: nova.objects.instance.Instance object owning the
+ requested disk.
+ :param vios_uuid: PowerVM UUID of the VIOS to search for mappings.
+ :return: Local path for instance's boot disk.
+ """
+ vm_uuid = vm.get_pvm_uuid(instance)
+ match_func = self._disk_match_func(DiskType.BOOT, instance)
+ vios_wrap = pvm_vios.VIOS.get(self._adapter, uuid=vios_uuid,
+ xag=[pvm_const.XAG.VIO_SMAP])
+ maps = tsk_map.find_maps(vios_wrap.scsi_mappings,
+ client_lpar_id=vm_uuid, match_func=match_func)
+ if maps:
+ return maps[0].server_adapter.backing_dev_name
+ return None
+
+ def connect_instance_disk_to_mgmt(self, instance):
+ """Connect an instance's boot disk to the management partition.
+
+ :param instance: The instance whose boot disk is to be mapped.
+ :return stg_elem: The storage element (LU, VDisk, etc.) that was mapped.
+ :return vios: The EntryWrapper of the VIOS from which the mapping was
+ made.
+ :raise InstanceDiskMappingFailed: If the mapping could not be done.
+ """
+ for stg_elem, vios in self._get_bootdisk_iter(instance):
+ msg_args = {'disk_name': stg_elem.name, 'vios_name': vios.name}
+
+ # Create a new mapping. NOTE: If there's an existing mapping on
+ # the other VIOS but not this one, we'll create a second mapping
+ # here. It would take an extreme sequence of events to get to that
+ # point, and the second mapping would be harmless anyway. The
+ # alternative would be always checking all VIOSes for existing
+ # mappings, which increases the response time of the common case by
+ # an entire GET of VIOS+VIO_SMAP.
+ LOG.debug("Mapping boot disk %(disk_name)s to the management "
+ "partition from Virtual I/O Server %(vios_name)s.",
+ msg_args, instance=instance)
+ try:
+ tsk_map.add_vscsi_mapping(self._host_uuid, vios, self.mp_uuid,
+ stg_elem)
+ # If that worked, we're done; add_vscsi_mapping already logged success.
+ return stg_elem, vios
+ except pvm_exc.Error:
+ LOG.exception("Failed to map boot disk %(disk_name)s to the "
+ "management partition from Virtual I/O Server "
+ "%(vios_name)s.", msg_args, instance=instance)
+ # Try the next hit, if available.
+ # We either didn't find the boot dev, or failed all attempts to map it.
+ raise exception.InstanceDiskMappingFailed(instance_name=instance.name)
+
+ def disconnect_disk_from_mgmt(self, vios_uuid, disk_name):
+ """Disconnect a disk from the management partition.
+
+ :param vios_uuid: The UUID of the Virtual I/O Server serving the
+ mapping.
+ :param disk_name: The name of the disk to unmap.
+ """
+ tsk_map.remove_lu_mapping(self._adapter, vios_uuid, self.mp_uuid,
+ disk_names=[disk_name])
+ LOG.info("Unmapped boot disk %(disk_name)s from the management "
+ "partition via Virtual I/O Server %(vios_uuid)s.",
+ {'disk_name': disk_name, 'vios_uuid': vios_uuid})
+
+ @staticmethod
+ def _disk_match_func(disk_type, instance):
+ """Return a matching function to locate the disk for an instance.
+
+ :param disk_type: One of the DiskType enum values.
+ :param instance: The instance whose disk is to be found.
+ :return: Callable suitable for the match_func parameter of the
+ pypowervm.tasks.scsi_mapper.find_maps method.
+ """
+ disk_name = SSPDiskAdapter._get_disk_name(disk_type, instance)
+ return tsk_map.gen_match_func(pvm_stg.LU, names=[disk_name])
+
+ @staticmethod
+ def _get_disk_name(disk_type, instance, short=False):
+ """Generate a name for a virtual disk associated with an instance.
+
+ :param disk_type: One of the DiskType enum values.
+ :param instance: The instance for which the disk is to be created.
+ :param short: If True, the generated name will be limited to 15
+ characters (the name length limit for virtual disks). If False, it
+ will be limited by the API (79 characters currently).
+ :return: The sanitized file name for the disk.
+ """
+ prefix = '%s_' % (disk_type[0] if short else disk_type)
+ base = ('%s_%s' % (instance.name[:8], instance.uuid[:4]) if short
+ else instance.name)
+ return pvm_u.sanitize_file_name_for_api(
+ base, prefix=prefix, max_len=pvm_const.MaxLen.VDISK_NAME if short
+ else pvm_const.MaxLen.FILENAME_DEFAULT)
+
+ def _get_bootdisk_iter(self, instance):
+ """Return an iterator of (storage_elem, VIOS) tuples for the instance.
+
+ storage_elem is a pypowervm storage element wrapper associated with
+ the instance's boot disk and VIOS is the wrapper of the Virtual I/O
+ Server owning that storage element.
+
+ :param instance: nova.objects.instance.Instance object owning the
+ requested disk.
+ :return: Iterator of tuples of (storage_elem, VIOS).
+ """
+ lpar_wrap = vm.get_instance_wrapper(self._adapter, instance)
+ match_func = self._disk_match_func(DiskType.BOOT, instance)
+ for vios_uuid in self._vios_uuids:
+ vios_wrap = pvm_vios.VIOS.get(
+ self._adapter, uuid=vios_uuid, xag=[pvm_const.XAG.VIO_SMAP])
+ for scsi_map in tsk_map.find_maps(
+ vios_wrap.scsi_mappings, client_lpar_id=lpar_wrap.id,
+ match_func=match_func):
+ yield scsi_map.backing_storage, vios_wrap
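
The short/long naming scheme in _get_disk_name above is easiest to see with concrete values. A minimal sketch, using a made-up instance name and UUID, and a truncating stand-in for pvm_u.sanitize_file_name_for_api (the real helper also strips characters the API rejects):

    # Sketch of the short-name scheme; VDISK_NAME_MAX mirrors
    # pvm_const.MaxLen.VDISK_NAME. All values below are hypothetical.
    VDISK_NAME_MAX = 15

    def short_disk_name(disk_type, inst_name, inst_uuid):
        prefix = '%s_' % disk_type[0]               # 'b_' for 'boot'
        base = '%s_%s' % (inst_name[:8], inst_uuid[:4])
        return (prefix + base)[:VDISK_NAME_MAX]     # truncating stand-in

    # -> 'b_snap-tes_49d6' (exactly 15 chars) for this made-up instance:
    print(short_disk_name('boot', 'snap-test-instance', '49d69ca0'))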
diff --git a/nova/virt/powervm/driver.py b/nova/virt/powervm/driver.py
index f2b40061ba..7aa247ad3b 100644
--- a/nova/virt/powervm/driver.py
+++ b/nova/virt/powervm/driver.py
@@ -27,6 +27,7 @@ from pypowervm.wrappers import managed_system as pvm_ms
import six
from taskflow.patterns import linear_flow as tf_lf
+from nova.compute import task_states
from nova import conf as cfg
from nova.console import type as console_type
from nova import exception as exc
@@ -37,6 +38,7 @@ from nova.virt import driver
from nova.virt.powervm.disk import ssp
from nova.virt.powervm import host as pvm_host
from nova.virt.powervm.tasks import base as tf_base
+from nova.virt.powervm.tasks import image as tf_img
from nova.virt.powervm.tasks import network as tf_net
from nova.virt.powervm.tasks import storage as tf_stg
from nova.virt.powervm.tasks import vm as tf_vm
@@ -296,6 +298,51 @@ class PowerVMDriver(driver.ComputeDriver):
# Convert to a Nova exception
raise exc.InstanceTerminationFailure(reason=six.text_type(e))
+ def snapshot(self, context, instance, image_id, update_task_state):
+ """Snapshots the specified instance.
+
+ :param context: security context
+ :param instance: nova.objects.instance.Instance
+ :param image_id: Reference to a pre-created image that will hold the
+ snapshot.
+ :param update_task_state: Callback function to update the task_state
+ on the instance while the snapshot operation progresses. The
+ function takes a task_state argument and an optional
+ expected_state kwarg which defaults to
+ nova.compute.task_states.IMAGE_SNAPSHOT. See
+ nova.objects.instance.Instance.save for expected_task_state usage.
+ """
+ # TODO(esberglu) Add check for disk driver snapshot capability when
+ # additional disk drivers are implemented.
+ self._log_operation('snapshot', instance)
+
+ # Define the flow.
+ flow = tf_lf.Flow("snapshot")
+
+ # Notify that we're starting the process.
+ flow.add(tf_img.UpdateTaskState(update_task_state,
+ task_states.IMAGE_PENDING_UPLOAD))
+
+ # Connect the instance's boot disk to the management partition, then
+ # scan the SCSI bus so the device appears there.
+ flow.add(tf_stg.InstanceDiskToMgmt(self.disk_dvr, instance))
+
+ # Notify that the upload is in progress.
+ flow.add(tf_img.UpdateTaskState(
+ update_task_state, task_states.IMAGE_UPLOADING,
+ expected_state=task_states.IMAGE_PENDING_UPLOAD))
+
+ # Stream the disk to glance.
+ flow.add(tf_img.StreamToGlance(context, self.image_api, image_id,
+ instance))
+
+ # Disconnect the boot disk from the management partition and delete the
+ # device.
+ flow.add(tf_stg.RemoveInstanceDiskFromMgmt(self.disk_dvr, instance))
+
+ # Run the flow.
+ tf_base.run(flow, instance=instance)
+
def power_off(self, instance, timeout=0, retry_interval=0):
"""Power off the specified instance.
diff --git a/nova/virt/powervm/image.py b/nova/virt/powervm/image.py
new file mode 100644
index 0000000000..929cec7b04
--- /dev/null
+++ b/nova/virt/powervm/image.py
@@ -0,0 +1,61 @@
+# Copyright 2015, 2018 IBM Corp.
+#
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Utilities related to glance image management for the PowerVM driver."""
+
+from nova import utils
+
+
+def stream_blockdev_to_glance(context, image_api, image_id, metadata, devpath):
+ """Stream the entire contents of a block device to a glance image.
+
+ :param context: Nova security context.
+ :param image_api: Handle to the glance image API.
+ :param image_id: UUID of the prepared glance image.
+ :param metadata: Dictionary of metadata for the image.
+ :param devpath: String path to device file of block device to be uploaded,
+ e.g. "/dev/sde".
+ """
+ # Make the device file owned by the current user for the duration of the
+ # operation.
+ with utils.temporary_chown(devpath), open(devpath, 'rb') as stream:
+ # Stream it. This is synchronous.
+ image_api.update(context, image_id, metadata, stream)
+
+
+def generate_snapshot_metadata(context, image_api, image_id, instance):
+ """Generate a metadata dictionary for an instance snapshot.
+
+ :param context: Nova security context.
+ :param image_api: Handle to the glance image API.
+ :param image_id: UUID of the prepared glance image.
+ :param instance: The Nova instance whose disk is to be snapshotted.
+ :return: A dict of metadata suitable for image_api.upload.
+ """
+ image = image_api.get(context, image_id)
+ metadata = {
+ 'name': image['name'],
+ 'is_public': False,
+ 'status': 'active',
+ 'disk_format': 'raw',
+ 'container_format': 'bare',
+ 'properties': {
+ 'image_location': 'snapshot',
+ 'image_state': 'available',
+ 'owner_id': instance.project_id,
+ }
+ }
+ return metadata
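
A hedged usage sketch chaining the two helpers above the way the StreamToGlance task does; upload_boot_disk and its arguments are hypothetical:

    from nova import image as nova_image
    from nova.virt.powervm import image as pvm_image

    def upload_boot_disk(context, instance, image_id, devpath):
        # Hypothetical wrapper: build the snapshot metadata, then stream
        # the raw device contents to the prepared glance image (blocking).
        image_api = nova_image.API()
        metadata = pvm_image.generate_snapshot_metadata(
            context, image_api, image_id, instance)
        pvm_image.stream_blockdev_to_glance(
            context, image_api, image_id, metadata, devpath)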
diff --git a/nova/virt/powervm/mgmt.py b/nova/virt/powervm/mgmt.py
new file mode 100644
index 0000000000..dc295f534f
--- /dev/null
+++ b/nova/virt/powervm/mgmt.py
@@ -0,0 +1,175 @@
+# Copyright 2015, 2018 IBM Corp.
+#
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Utilities related to the PowerVM management partition.
+
+The management partition is a special LPAR that runs the PowerVM REST API
+service. It itself appears through the REST API as a LogicalPartition of type
+aixlinux, but with the is_mgmt_partition property set to True.
+The PowerVM Nova Compute service runs on the management partition.
+"""
+import glob
+import os
+from os import path
+
+from oslo_concurrency import lockutils
+from oslo_log import log as logging
+from pypowervm.tasks import partition as pvm_par
+import retrying
+
+from nova import exception
+from nova.privsep import path as priv_path
+
+
+LOG = logging.getLogger(__name__)
+
+_MP_UUID = None
+
+
+@lockutils.synchronized("mgmt_lpar_uuid")
+def mgmt_uuid(adapter):
+ """Returns the management partitions UUID."""
+ global _MP_UUID
+ if not _MP_UUID:
+ _MP_UUID = pvm_par.get_this_partition(adapter).uuid
+ return _MP_UUID
+
+
+def discover_vscsi_disk(mapping, scan_timeout=300):
+ """Bring a mapped device into the management partition and find its name.
+
+ Based on a VSCSIMapping, scan the appropriate virtual SCSI host bus,
+ causing the operating system to discover the mapped device. Find and
+ return the path of the newly-discovered device based on its UDID in the
+ mapping.
+
+ Note: scanning the bus will cause the operating system to discover *all*
+ devices on that bus. However, this method will only return the path for
+ the specific device from the input mapping, based on its UDID.
+
+ :param mapping: The pypowervm.wrappers.virtual_io_server.VSCSIMapping
+ representing the mapping of the desired disk to the
+ management partition.
+ :param scan_timeout: The maximum number of seconds after scanning to wait
+ for the specified device to appear.
+ :return: The udev-generated ("/dev/sdX") name of the discovered disk.
+ :raise NoDiskDiscoveryException: If the disk did not appear after the
+ specified timeout.
+ :raise UniqueDiskDiscoveryException: If more than one disk appears with the
+ expected UDID.
+ """
+ # Calculate the Linux slot number from the client adapter slot number.
+ lslot = 0x30000000 | mapping.client_adapter.lpar_slot_num
+ # We'll match the device ID based on the UDID, which is actually the last
+ # 32 chars of the field we get from PowerVM.
+ udid = mapping.backing_storage.udid[-32:]
+
+ LOG.debug("Trying to discover VSCSI disk with UDID %(udid)s on slot "
+ "%(slot)x.", {'udid': udid, 'slot': lslot})
+
+ # Find the special file to scan the bus, and scan it.
+ # This glob should yield exactly one result, but use the loop just in case.
+ for scanpath in glob.glob(
+ '/sys/bus/vio/devices/%x/host*/scsi_host/host*/scan' % lslot):
+ # Writing '- - -' to this sysfs file triggers bus rescan
+ priv_path.writefile(scanpath, 'a', '- - -')
+
+ # Now see if our device showed up. If so, we can reliably match it based
+ # on its Linux ID, which ends with the disk's UDID.
+ dpathpat = '/dev/disk/by-id/*%s' % udid
+
+ # The bus scan is asynchronous. Need to poll, waiting for the device to
+ # spring into existence. Stop when glob finds at least one device, or
+ # after the specified timeout. Sleep 1/4 second between polls.
+ @retrying.retry(retry_on_result=lambda result: not result, wait_fixed=250,
+ stop_max_delay=scan_timeout * 1000)
+ def _poll_for_dev(globpat):
+ return glob.glob(globpat)
+ try:
+ disks = _poll_for_dev(dpathpat)
+ except retrying.RetryError as re:
+ raise exception.NoDiskDiscoveryException(
+ bus=lslot, udid=udid, polls=re.last_attempt.attempt_number,
+ timeout=scan_timeout)
+ # If we get here, _poll_for_dev returned a nonempty list. If not exactly
+ # one entry, this is an error.
+ if len(disks) != 1:
+ raise exception.UniqueDiskDiscoveryException(path_pattern=dpathpat,
+ count=len(disks))
+
+ # The by-id path is a symlink. Resolve to the /dev/sdX path
+ dpath = path.realpath(disks[0])
+ LOG.debug("Discovered VSCSI disk with UDID %(udid)s on slot %(slot)x at "
+ "path %(devname)s.",
+ {'udid': udid, 'slot': lslot, 'devname': dpath})
+ return dpath
+
+
+def remove_block_dev(devpath, scan_timeout=10):
+ """Remove a block device from the management partition.
+
+ This method causes the operating system of the management partition to
+ delete the device special files associated with the specified block device.
+
+ :param devpath: Any path to the block special file associated with the
+ device to be removed.
+ :param scan_timeout: The maximum number of seconds after scanning to wait
+ for the specified device to disappear.
+ :raise InvalidDevicePath: If the specified device or its 'delete' special
+ file cannot be found.
+ :raise DeviceDeletionException: If the deletion was attempted, but the
+ device special file is still present
+ afterward.
+ """
+ # Resolve symlinks, if any, to get to the /dev/sdX path
+ devpath = path.realpath(devpath)
+ try:
+ os.stat(devpath)
+ except OSError:
+ raise exception.InvalidDevicePath(path=devpath)
+ devname = devpath.rsplit('/', 1)[-1]
+ delpath = '/sys/block/%s/device/delete' % devname
+ try:
+ os.stat(delpath)
+ except OSError:
+ raise exception.InvalidDevicePath(path=delpath)
+ LOG.debug("Deleting block device %(devpath)s from the management "
+ "partition via special file %(delpath)s.",
+ {'devpath': devpath, 'delpath': delpath})
+ # Writing '1' to this sysfs file deletes the block device and rescans.
+ priv_path.writefile(delpath, 'a', '1')
+
+ # The bus scan is asynchronous. Need to poll, waiting for the device to
+ # disappear. Stop when stat raises OSError (dev file not found) - which is
+ # success - or after the specified timeout (which is failure). Sleep 1/4
+ # second between polls.
+ @retrying.retry(retry_on_result=lambda result: result, wait_fixed=250,
+ stop_max_delay=scan_timeout * 1000)
+ def _poll_for_del(statpath):
+ try:
+ os.stat(statpath)
+ return True
+ except OSError:
+ # Device special file is absent, as expected
+ return False
+ try:
+ _poll_for_del(devpath)
+ except retrying.RetryError as re:
+ # stat just kept returning (dev file continued to exist).
+ raise exception.DeviceDeletionException(
+ devpath=devpath, polls=re.last_attempt.attempt_number,
+ timeout=scan_timeout)
+ # Else stat raised - the device disappeared - all done.
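
A worked example of the slot and path arithmetic in discover_vscsi_disk above; the client slot number and UDID are made up:

    # Linux vio slot numbers are the client adapter slot OR'd into
    # 0x30000000, and disks are matched on the last 32 chars of the UDID.
    lpar_slot_num = 2                   # mapping.client_adapter.lpar_slot_num
    lslot = 0x30000000 | lpar_slot_num  # 0x30000002
    scan_glob = '/sys/bus/vio/devices/%x/host*/scsi_host/host*/scan' % lslot
    # -> '/sys/bus/vio/devices/30000002/host*/scsi_host/host*/scan'

    full_udid = 'XXXXXXXX' + 32 * 'f'   # placeholder PowerVM UDID
    dpathpat = '/dev/disk/by-id/*%s' % full_udid[-32:]
    # -> '/dev/disk/by-id/*ffffffffffffffffffffffffffffffff'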
diff --git a/nova/virt/powervm/tasks/image.py b/nova/virt/powervm/tasks/image.py
new file mode 100644
index 0000000000..4f8fe4ba18
--- /dev/null
+++ b/nova/virt/powervm/tasks/image.py
@@ -0,0 +1,81 @@
+# Copyright 2015, 2018 IBM Corp.
+#
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_log import log as logging
+from taskflow import task
+
+from nova.virt.powervm import image
+
+
+LOG = logging.getLogger(__name__)
+
+
+class UpdateTaskState(task.Task):
+
+ def __init__(self, update_task_state, task_state, expected_state=None):
+ """Invoke the update_task_state callback with the desired arguments.
+
+ :param update_task_state: update_task_state callable passed into
+ snapshot.
+ :param task_state: The new task state (from nova.compute.task_states)
+ to set.
+ :param expected_state: Optional. The expected state of the task prior
+ to this request.
+ """
+ self.update_task_state = update_task_state
+ self.task_state = task_state
+ self.kwargs = {}
+ if expected_state is not None:
+ # We only want to pass expected state if it's not None! That's so
+ # we take the update_task_state method's default.
+ self.kwargs['expected_state'] = expected_state
+ super(UpdateTaskState, self).__init__(
+ name='update_task_state_%s' % task_state)
+
+ def execute(self):
+ self.update_task_state(self.task_state, **self.kwargs)
+
+
+class StreamToGlance(task.Task):
+
+ """Task around streaming a block device to glance."""
+
+ def __init__(self, context, image_api, image_id, instance):
+ """Initialize the flow for streaming a block device to glance.
+
+ Requires: disk_path: Path to the block device file for the instance's
+ boot disk.
+ :param context: Nova security context.
+ :param image_api: Handle to the glance API.
+ :param image_id: UUID of the prepared glance image.
+ :param instance: Instance whose backing device is being captured.
+ """
+ self.context = context
+ self.image_api = image_api
+ self.image_id = image_id
+ self.instance = instance
+ super(StreamToGlance, self).__init__(name='stream_to_glance',
+ requires='disk_path')
+
+ def execute(self, disk_path):
+ metadata = image.generate_snapshot_metadata(
+ self.context, self.image_api, self.image_id, self.instance)
+ LOG.info("Starting stream of boot device (local blockdev %(devpath)s) "
+ "to glance image %(img_id)s.",
+ {'devpath': disk_path, 'img_id': self.image_id},
+ instance=self.instance)
+ image.stream_blockdev_to_glance(self.context, self.image_api,
+ self.image_id, metadata, disk_path)
diff --git a/nova/virt/powervm/tasks/storage.py b/nova/virt/powervm/tasks/storage.py
index 5b72ac010c..839046be54 100644
--- a/nova/virt/powervm/tasks/storage.py
+++ b/nova/virt/powervm/tasks/storage.py
@@ -14,10 +14,13 @@
from oslo_log import log as logging
from pypowervm import exceptions as pvm_exc
+from pypowervm.tasks import scsi_mapper as pvm_smap
from taskflow import task
from taskflow.types import failure as task_fail
+from nova import exception
from nova.virt.powervm import media
+from nova.virt.powervm import mgmt
LOG = logging.getLogger(__name__)
@@ -205,3 +208,142 @@ class DeleteVOpt(task.Task):
def execute(self):
media_builder = media.ConfigDrivePowerVM(self.adapter)
media_builder.dlt_vopt(self.instance, stg_ftsk=self.stg_ftsk)
+
+
+class InstanceDiskToMgmt(task.Task):
+
+ """The task to connect an instance's disk to the management partition."
+
+ This task will connect the instance's disk to the management partition and
+ discover it. We do these two pieces together because their reversion
+ happens in the same order.
+ """
+
+ def __init__(self, disk_dvr, instance):
+ """Create the Task for connecting boot disk to mgmt partition.
+
+ Provides:
+ stg_elem: The storage element wrapper (pypowervm LU, PV, etc.) that was
+ connected.
+ vios_wrap: The Virtual I/O Server wrapper from which the storage
+ element was mapped.
+ disk_path: The local path to the mapped-and-discovered device, e.g.
+ '/dev/sde'.
+
+ :param disk_dvr: The disk driver.
+ :param instance: The nova instance whose boot disk is to be connected.
+ """
+ super(InstanceDiskToMgmt, self).__init__(
+ name='instance_disk_to_mgmt',
+ provides=['stg_elem', 'vios_wrap', 'disk_path'])
+ self.disk_dvr = disk_dvr
+ self.instance = instance
+ self.stg_elem = None
+ self.vios_wrap = None
+ self.disk_path = None
+
+ def execute(self):
+ """Map the instance's boot disk and discover it."""
+
+ # Search for the boot disk on the NovaLink (management) partition.
+ if self.disk_dvr.mp_uuid in self.disk_dvr._vios_uuids:
+ dev_name = self.disk_dvr.get_bootdisk_path(
+ self.instance, self.disk_dvr.mp_uuid)
+ if dev_name is not None:
+ return None, None, dev_name
+
+ self.stg_elem, self.vios_wrap = (
+ self.disk_dvr.connect_instance_disk_to_mgmt(self.instance))
+ new_maps = pvm_smap.find_maps(
+ self.vios_wrap.scsi_mappings, client_lpar_id=self.disk_dvr.mp_uuid,
+ stg_elem=self.stg_elem)
+ if not new_maps:
+ raise exception.NewMgmtMappingNotFoundException(
+ stg_name=self.stg_elem.name, vios_name=self.vios_wrap.name)
+
+ # new_maps should be length 1, but even if it's not - i.e. we somehow
+ # matched more than one mapping of the same dev to the management
+ # partition from the same VIOS - it is safe to use the first one.
+ mapping = new_maps[0]
+ # Scan the SCSI bus, discover the disk, find its canonical path.
+ LOG.info("Discovering device and path for mapping of %(dev_name)s "
+ "on the management partition.",
+ {'dev_name': self.stg_elem.name}, instance=self.instance)
+ self.disk_path = mgmt.discover_vscsi_disk(mapping)
+ return self.stg_elem, self.vios_wrap, self.disk_path
+
+ def revert(self, result, flow_failures):
+ """Unmap the disk and then remove it from the management partition.
+
+ We use this order to avoid rediscovering the device in case some other
+ thread scans the SCSI bus between when we remove and when we unmap.
+ """
+ if self.vios_wrap is None or self.stg_elem is None:
+ # We never even got connected - nothing to do.
+ return
+ LOG.warning("Unmapping boot disk %(disk_name)s from the management "
+ "partition via Virtual I/O Server %(vioname)s.",
+ {'disk_name': self.stg_elem.name,
+ 'vioname': self.vios_wrap.name}, instance=self.instance)
+ self.disk_dvr.disconnect_disk_from_mgmt(self.vios_wrap.uuid,
+ self.stg_elem.name)
+
+ if self.disk_path is None:
+ # We did not discover the disk - nothing else to do.
+ return
+ LOG.warning("Removing disk %(dpath)s from the management partition.",
+ {'dpath': self.disk_path}, instance=self.instance)
+ try:
+ mgmt.remove_block_dev(self.disk_path)
+ except pvm_exc.Error:
+ # Don't allow revert exceptions to interrupt the revert flow.
+ LOG.exception("Remove disk failed during revert. Ignoring.",
+ instance=self.instance)
+
+
+class RemoveInstanceDiskFromMgmt(task.Task):
+
+ """Unmap and remove an instance's boot disk from the mgmt partition."""
+
+ def __init__(self, disk_dvr, instance):
+ """Create task to unmap and remove an instance's boot disk from mgmt.
+
+ Requires (from InstanceDiskToMgmt):
+ stg_elem: The storage element wrapper (pypowervm LU, PV, etc.) that was
+ connected.
+ vios_wrap: The Virtual I/O Server wrapper
+ (pypowervm.wrappers.virtual_io_server.VIOS) from which the
+ storage element was mapped.
+ disk_path: The local path to the mapped-and-discovered device, e.g.
+ '/dev/sde'.
+ :param disk_dvr: The disk driver.
+ :param instance: The nova instance whose boot disk is to be removed.
+ """
+ self.disk_dvr = disk_dvr
+ self.instance = instance
+ super(RemoveInstanceDiskFromMgmt, self).__init__(
+ name='remove_inst_disk_from_mgmt',
+ requires=['stg_elem', 'vios_wrap', 'disk_path'])
+
+ def execute(self, stg_elem, vios_wrap, disk_path):
+ """Unmap and remove an instance's boot disk from the mgmt partition.
+
+ Input parameters ('requires') provided by InstanceDiskToMgmt task.
+ :param stg_elem: The storage element wrapper (pypowervm LU, PV, etc.)
+ to be disconnected.
+ :param vios_wrap: The Virtual I/O Server wrapper from which the
+ mapping is to be removed.
+ :param disk_path: The local path to the disk device to be removed, e.g.
+ '/dev/sde'.
+ """
+ # stg_elem is None if boot disk was not mapped to management partition.
+ if stg_elem is None:
+ return
+ LOG.info("Unmapping boot disk %(disk_name)s from the management "
+ "partition via Virtual I/O Server %(vios_name)s.",
+ {'disk_name': stg_elem.name, 'vios_name': vios_wrap.name},
+ instance=self.instance)
+ self.disk_dvr.disconnect_disk_from_mgmt(vios_wrap.uuid, stg_elem.name)
+ LOG.info("Removing disk %(disk_path)s from the management partition.",
+ {'disk_path': disk_path}, instance=self.instance)
+ mgmt.remove_block_dev(disk_path)
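
Taken together, the provides/requires names are what wire these tasks into the snapshot flow in driver.py. A minimal, self-contained taskflow sketch of that wiring, with stub tasks standing in for the real ones:

    from taskflow import engines, task
    from taskflow.patterns import linear_flow as tf_lf

    class Map(task.Task):
        # Stands in for InstanceDiskToMgmt.
        default_provides = ('stg_elem', 'vios_wrap', 'disk_path')

        def execute(self):
            return 'lu1', 'vios1', '/dev/sde'   # stand-in values

    class Upload(task.Task):
        # Stands in for StreamToGlance; disk_path is injected by name.
        def execute(self, disk_path):
            print('uploading', disk_path)

    class Unmap(task.Task):
        # Stands in for RemoveInstanceDiskFromMgmt.
        def execute(self, stg_elem, vios_wrap, disk_path):
            print('unmapping', stg_elem, 'from', vios_wrap)

    flow = tf_lf.Flow('snapshot_sketch')
    flow.add(Map(), Upload(), Unmap())
    engines.run(flow)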