diff options
35 files changed, 767 insertions, 147 deletions
diff --git a/.zuul.yaml b/.zuul.yaml index f47655f87..04836e2fe 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -1,22 +1,4 @@ - job: - name: glance-code-constants-check - parent: tox - description: | - Tests to catch when code constants have gotten out of sync. - vars: - tox_envlist: gateonly - irrelevant-files: - - ^(test-|)requirements.txt$ - - ^.*\.rst$ - - ^api-ref/.*$ - - ^doc/.*$ - - ^etc/.*$ - - ^releasenotes/.*$ - - ^setup.cfg$ - - ^tox.ini$ - - ^\.zuul\.yaml$ - -- job: name: glance-tox-oslo-tips-base parent: tox abstract: true @@ -321,7 +303,6 @@ - openstack-tox-functional-py38-fips - openstack-tox-functional-py39 - glance-tox-functional-py39-rbac-defaults - - glance-code-constants-check - glance-ceph-thin-provisioning: voting: false irrelevant-files: &tempest-irrelevant-files diff --git a/README.rst b/README.rst index 57d520321..a9ea03b3a 100644 --- a/README.rst +++ b/README.rst @@ -2,23 +2,6 @@ OpenStack Glance ================ -.. image:: https://governance.openstack.org/tc/badges/glance.svg - :target: https://governance.openstack.org/tc/reference/tags/index.html - :alt: The following tags have been asserted for the Glance project: - "project:official", - "tc:approved-release", - "stable:follows-policy", - "tc:starter-kit:compute", - "vulnerability:managed", - "assert:supports-upgrade", - "assert:follows-standard-deprecation". - Follow the link for an explanation of these tags. -.. NOTE(rosmaita): the alt text above will have to be updated when - additional tags are asserted for Glance. (The SVG in the - governance repo is updated automatically.) - -.. 
Change things from this point on - Glance is an OpenStack project that provides services and associated libraries to store, browse, share, distribute and manage bootable disk images, other data closely associated with initializing compute resources, diff --git a/api-ref/source/v2/metadefs-namespaces-tags.inc b/api-ref/source/v2/metadefs-namespaces-tags.inc index 176cbb415..1aba3a328 100644 --- a/api-ref/source/v2/metadefs-namespaces-tags.inc +++ b/api-ref/source/v2/metadefs-namespaces-tags.inc @@ -212,7 +212,7 @@ Response Parameters Response Example ---------------- -.. literalinclude:: samples/metadef-tag-create-response.json +.. literalinclude:: samples/metadef-tags-create-response.json :language: json diff --git a/doc/source/admin/index.rst b/doc/source/admin/index.rst index 4766e1aea..9d13ac492 100644 --- a/doc/source/admin/index.rst +++ b/doc/source/admin/index.rst @@ -25,3 +25,4 @@ Glance Administration Guide useful-image-properties requirements quotas + os_hash_algo diff --git a/doc/source/admin/os_hash_algo.rst b/doc/source/admin/os_hash_algo.rst new file mode 100644 index 000000000..04a8c1deb --- /dev/null +++ b/doc/source/admin/os_hash_algo.rst @@ -0,0 +1,38 @@ +.. + This work is licensed under a Creative Commons Attribution 3.0 Unported + License. + + http://creativecommons.org/licenses/by/3.0/legalcode + +========================================= +Secure Hash Algorithm Support (Multihash) +========================================= + +The Secure Hash Algorithm feature supplements the current ‘checksum’ +image property with a self-describing secure hash. 
+ +The self-description consists of two new image properties: + +``os_hash_algo`` + Contains the name of the secure hash algorithm used to generate the value on + the image + +``os_hash_value`` + The hexdigest computed by applying the secure hash algorithm named in the + ``os_hash_algo`` property to the image data + +Hash Algorithm Configuration +============================ + +``os_hash_algo`` will be populated by the value of the configuration option +``hashing_algorithm`` in the ``glance.conf`` file. The ``os_hash_value`` value +will be populated by the hexdigest computed when the algorithm is applied to +the uploaded or imported image data. + +These are read-only image properties and are not user-modifiable. + +The default secure hash algorithm is SHA-512. It should be suitable for most +applications. + +The multihash is computed only for new images. There is no provision for +computing the multihash for existing images. diff --git a/doc/source/admin/useful-image-properties.rst b/doc/source/admin/useful-image-properties.rst index 934e1f1a1..6c9c2643e 100644 --- a/doc/source/admin/useful-image-properties.rst +++ b/doc/source/admin/useful-image-properties.rst @@ -637,7 +637,7 @@ Here is a list of useful image properties and the values they expect. The valid options depend on the configured hypervisor. * ``KVM`` and ``QEMU``: ``e1000``, ``e1000e``, ``ne2k_pci``, ``pcnet``, - ``rtl8139``, ``virtio`` and vmxnet3``. + ``rtl8139``, ``virtio`` and ``vmxnet3``. * VMware: ``e1000``, ``e1000e``, ``VirtualE1000``, ``VirtualE1000e``, ``VirtualPCNet32``, ``VirtualVmxnet`` and ``VirtualVmxnet3``. * Xen: ``e1000``, ``netfront``, ``ne2k_pci``, ``pcnet``, and diff --git a/doc/source/configuration/configuring.rst b/doc/source/configuration/configuring.rst index 5298fcd3b..80917d423 100644 --- a/doc/source/configuration/configuring.rst +++ b/doc/source/configuration/configuring.rst @@ -735,9 +735,8 @@ Debian-based distributions. 
`This option is specific to the RBD storage backend.` Sets the RADOS user to authenticate as. This is only needed - when `RADOS authentication <https://docs.ceph.com/docs/emperor/rados/operations/authentication/>`_ - is `enabled. - <https://docs.ceph.com/docs/emperor/rados/operations/authentication/#enabling-cephx>`_ + when `RADOS authentication <https://docs.ceph.com/en/latest/rados/configuration/auth-config-ref/>`_ + is enabled. A keyring must be set for this user in the Ceph configuration file, e.g. with a user ``glance``:: diff --git a/doc/source/user/index.rst b/doc/source/user/index.rst index a8c186d26..f5ddaabdb 100644 --- a/doc/source/user/index.rst +++ b/doc/source/user/index.rst @@ -14,3 +14,4 @@ Glance User Guide glanceclient glancemetadefcatalogapi signature + os_hash_algo diff --git a/doc/source/user/os_hash_algo.rst b/doc/source/user/os_hash_algo.rst new file mode 100644 index 000000000..8e5c44dde --- /dev/null +++ b/doc/source/user/os_hash_algo.rst @@ -0,0 +1,78 @@ +.. + This work is licensed under a Creative Commons Attribution 3.0 Unported + License. + + http://creativecommons.org/licenses/by/3.0/legalcode + +========================================= +Secure Hash Algorithm Support (Multihash) +========================================= + +The Secure Hash Algorithm feature adds image properties that may be used to +verify image integrity based on its hash. + +The Secure Hash consists of two new image properties: + +``os_hash_algo`` + Contains the name of the secure hash algorithm + used to generate the value on the image + +``os_hash_value`` + The hexdigest computed by applying the + secure hash algorithm named in the ``os_hash_algo`` property to + the image data + +Image Verification +================== + +When Secure Hash is used, the Glance image properties will include the two +fields ``os_hash_algo`` and ``os_hash_value``. 
These two fields provide the +hashing algorithm used to calculate the secure hash, along with the hash value +calculated for the image. + +These values can be used to verify the image integrity when used. For example, +an image and its properties may be viewed with the following:: + + $ glance image-show fa33e3cd-5fe4-46df-a604-1e9b9438b420 + +------------------+----------------------------------------------------------------------------------+ + | Property | Value | + +------------------+----------------------------------------------------------------------------------+ + | checksum | ffa3dd42fae539dcd8fe72d429bc677b | + | container_format | bare | + | created_at | 2019-06-05T13:39:46Z | + | disk_format | qcow2 | + | id | fa33e3cd-5fe4-46df-a604-1e9b9438b420 | + | min_disk | 10 | + | min_ram | 1024 | + | name | fedora-30 | + | os_hash_algo | sha512 | + | os_hash_value | d9f99d22a6b6ea1e8b93379dd2080f51a7ed6885aa7d4c2f2262ea1054935e02c47b45f9b56aa7f5 | + | | 5e61d149d06f4ff6de03efde24f9d6774baf35f08c5e9d92 | + | os_hidden | False | + | owner | 0e82e8f863a4485fabfbed1b5b856cd7 | + | protected | False | + | size | 332267520 | + | status | active | + | tags | [] | + | updated_at | 2019-06-07T11:41:12Z | + | virtual_size | Not available | + | visibility | public | + +------------------+----------------------------------------------------------------------------------+ + +From that output, we can see the ``os_hash_algo`` property shows that +**sha512** was used to generate the multihash. The ``os_hash_value`` then shows +the generated hash value is:: + + d9f99d22a6b6ea1e8b93379dd2080f51a7ed6885aa7d4c2f2262ea1054935e02c47b45f9b56aa7f55e61d149d06f4ff6de03efde24f9d6774baf35f08c5e9d92 + +When downloading the image, you may now use these values to be able to verify +the integrity of the image. 
For example:: + + $ glance image-download fa33e3cd-5fe4-46df-a604-1e9b9438b420 --file fedora-30 + $ sha512sum fedora-30 + d9f99d22a6b6ea1e8b93379dd2080f51a7ed6885aa7d4c2f2262ea1054935e02c47b45f9b56aa7f55e61d149d06f4ff6de03efde24f9d6774baf35f08c5e9d92 + +Using the ``sha512sum`` command, we are able to calculate the hash locally on +the image and verify it matches what was expected. If the output were not to +match, that would indicate the image has somehow been modified or corrupted +since being uploaded to Glance, and should likely not be used. diff --git a/glance/async_/flows/plugins/image_conversion.py b/glance/async_/flows/plugins/image_conversion.py index 32c7b7fe0..e977764fa 100644 --- a/glance/async_/flows/plugins/image_conversion.py +++ b/glance/async_/flows/plugins/image_conversion.py @@ -116,6 +116,29 @@ class _ConvertImage(task.Task): virtual_size = metadata.get('virtual-size', 0) action.set_image_attribute(virtual_size=virtual_size) + if 'backing-filename' in metadata: + LOG.warning('Refusing to process QCOW image with a backing file') + raise RuntimeError( + 'QCOW images with backing files are not allowed') + + if metadata.get('format') == 'vmdk': + create_type = metadata.get( + 'format-specific', {}).get( + 'data', {}).get('create-type') + allowed = CONF.image_format.vmdk_allowed_types + if not create_type: + raise RuntimeError(_('Unable to determine VMDK create-type')) + if not len(allowed): + LOG.warning(_('Refusing to process VMDK file as ' + 'vmdk_allowed_types is empty')) + raise RuntimeError(_('Image is a VMDK, but no VMDK createType ' + 'is specified')) + if create_type not in allowed: + LOG.warning(_('Refusing to process VMDK file with create-type ' + 'of %r which is not in allowed set of: %s'), + create_type, ','.join(allowed)) + raise RuntimeError(_('Invalid VMDK create-type specified')) + if source_format == target_format: LOG.debug("Source is already in target format, " "not doing conversion for %s", self.image_id) diff --git 
a/glance/common/config.py b/glance/common/config.py index dd7e1b6e9..7891daccf 100644 --- a/glance/common/config.py +++ b/glance/common/config.py @@ -99,6 +99,18 @@ image_format_opts = [ "image attribute"), deprecated_opts=[cfg.DeprecatedOpt('disk_formats', group='DEFAULT')]), + cfg.ListOpt('vmdk_allowed_types', + default=['streamOptimized', 'monolithicSparse'], + help=_("A list of strings describing allowed VMDK " + "'create-type' subformats that will be allowed. " + "This is recommended to only include " + "single-file-with-sparse-header variants to avoid " + "potential host file exposure due to processing named " + "extents. If this list is empty, then no VMDK image " + "types are allowed. Note that this is currently only " + "checked during image conversion (if enabled), and " + "limits the types of VMDK images we will convert " + "from.")), ] task_opts = [ cfg.IntOpt('task_time_to_live', diff --git a/glance/common/format_inspector.py b/glance/common/format_inspector.py index 351c300dd..550cceadb 100755 --- a/glance/common/format_inspector.py +++ b/glance/common/format_inspector.py @@ -345,6 +345,7 @@ class VHDXInspector(FileInspector): """ METAREGION = '8B7CA206-4790-4B9A-B8FE-575F050F886E' VIRTUAL_DISK_SIZE = '2FA54224-CD1B-4876-B211-5DBED83BF4B8' + VHDX_METADATA_TABLE_MAX_SIZE = 32 * 2048 # From qemu def __init__(self, *a, **k): super(VHDXInspector, self).__init__(*a, **k) @@ -459,6 +460,8 @@ class VHDXInspector(FileInspector): item_offset, item_length, _reserved = struct.unpack( '<III', meta_buffer[entry_offset + 16:entry_offset + 28]) + item_length = min(item_length, + self.VHDX_METADATA_TABLE_MAX_SIZE) self.region('metadata').length = len(meta_buffer) self._log.debug('Found entry at offset %x', item_offset) # Metadata item offset is from the beginning of the metadata @@ -516,6 +519,12 @@ class VMDKInspector(FileInspector): variable number of 512 byte sectors, but is just text defining the layout of the disk. 
""" + + # The beginning and max size of the descriptor is also hardcoded in Qemu + # at 0x200 and 1MB - 1 + DESC_OFFSET = 0x200 + DESC_MAX_SIZE = (1 << 20) - 1 + def __init__(self, *a, **k): super(VMDKInspector, self).__init__(*a, **k) self.new_region('header', CaptureRegion(0, 512)) @@ -532,15 +541,22 @@ class VMDKInspector(FileInspector): if sig != b'KDMV': raise ImageFormatError('Signature KDMV not found: %r' % sig) - return if ver not in (1, 2, 3): raise ImageFormatError('Unsupported format version %i' % ver) - return + + # Since we parse both desc_sec and desc_num (the location of the + # VMDK's descriptor, expressed in 512 bytes sectors) we enforce a + # check on the bounds to create a reasonable CaptureRegion. This + # is similar to how it's done in qemu. + desc_offset = desc_sec * 512 + desc_size = min(desc_num * 512, self.DESC_MAX_SIZE) + if desc_offset != self.DESC_OFFSET: + raise ImageFormatError("Wrong descriptor location") if not self.has_region('descriptor'): self.new_region('descriptor', CaptureRegion( - desc_sec * 512, desc_num * 512)) + desc_offset, desc_size)) @property def format_match(self): diff --git a/glance/db/migration.py b/glance/db/migration.py index f6fdd8d08..1977de469 100644 --- a/glance/db/migration.py +++ b/glance/db/migration.py @@ -29,5 +29,5 @@ db_options.set_defaults(cfg.CONF) # Migration-related constants EXPAND_BRANCH = 'expand' CONTRACT_BRANCH = 'contract' -CURRENT_RELEASE = 'zed' +CURRENT_RELEASE = '2023_1' ALEMBIC_INIT_VERSION = 'liberty' diff --git a/glance/db/sqlalchemy/alembic_migrations/data_migrations/2023_1_migrate01_empty.py b/glance/db/sqlalchemy/alembic_migrations/data_migrations/2023_1_migrate01_empty.py new file mode 100644 index 000000000..893d66db9 --- /dev/null +++ b/glance/db/sqlalchemy/alembic_migrations/data_migrations/2023_1_migrate01_empty.py @@ -0,0 +1,26 @@ +# Copyright (C) 2021 RedHat Inc. +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +def has_migrations(engine): + """Returns true if at least one data row can be migrated.""" + + return False + + +def migrate(engine): + """Return the number of rows migrated.""" + + return 0 diff --git a/glance/db/sqlalchemy/alembic_migrations/data_migrations/xena_migrate01_empty.py b/glance/db/sqlalchemy/alembic_migrations/data_migrations/xena_migrate01_empty.py new file mode 100644 index 000000000..893d66db9 --- /dev/null +++ b/glance/db/sqlalchemy/alembic_migrations/data_migrations/xena_migrate01_empty.py @@ -0,0 +1,26 @@ +# Copyright (C) 2021 RedHat Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ + +def has_migrations(engine): + """Returns true if at least one data row can be migrated.""" + + return False + + +def migrate(engine): + """Return the number of rows migrated.""" + + return 0 diff --git a/glance/db/sqlalchemy/alembic_migrations/data_migrations/yoga_migrate01_empty.py b/glance/db/sqlalchemy/alembic_migrations/data_migrations/yoga_migrate01_empty.py new file mode 100644 index 000000000..893d66db9 --- /dev/null +++ b/glance/db/sqlalchemy/alembic_migrations/data_migrations/yoga_migrate01_empty.py @@ -0,0 +1,26 @@ +# Copyright (C) 2021 RedHat Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +def has_migrations(engine): + """Returns true if at least one data row can be migrated.""" + + return False + + +def migrate(engine): + """Return the number of rows migrated.""" + + return 0 diff --git a/glance/db/sqlalchemy/alembic_migrations/data_migrations/zed_migrate01_empty.py b/glance/db/sqlalchemy/alembic_migrations/data_migrations/zed_migrate01_empty.py new file mode 100644 index 000000000..893d66db9 --- /dev/null +++ b/glance/db/sqlalchemy/alembic_migrations/data_migrations/zed_migrate01_empty.py @@ -0,0 +1,26 @@ +# Copyright (C) 2021 RedHat Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +def has_migrations(engine): + """Returns true if at least one data row can be migrated.""" + + return False + + +def migrate(engine): + """Return the number of rows migrated.""" + + return 0 diff --git a/glance/db/sqlalchemy/alembic_migrations/versions/2023_1_contract01_empty.py b/glance/db/sqlalchemy/alembic_migrations/versions/2023_1_contract01_empty.py new file mode 100644 index 000000000..b9afc5e76 --- /dev/null +++ b/glance/db/sqlalchemy/alembic_migrations/versions/2023_1_contract01_empty.py @@ -0,0 +1,25 @@ +# Copyright (C) 2020 RedHat Inc +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +# revision identifiers, used by Alembic. 
+revision = '2023_1_contract01' +down_revision = 'zed_contract01' +branch_labels = None +depends_on = '2023_1_expand01' + + +def upgrade(): + pass diff --git a/glance/db/sqlalchemy/alembic_migrations/versions/2023_1_expand01_empty.py b/glance/db/sqlalchemy/alembic_migrations/versions/2023_1_expand01_empty.py new file mode 100644 index 000000000..334766c2f --- /dev/null +++ b/glance/db/sqlalchemy/alembic_migrations/versions/2023_1_expand01_empty.py @@ -0,0 +1,30 @@ +# Copyright (C) 2020 RedHat Inc +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""empty expand for symmetry with 2023_1_contract01 + +Revision ID: 2023_1_expand01 +Revises: zed_expand01 +Create Date: 2020-01-03 11:55:16.657499 + +""" + +# revision identifiers, used by Alembic. +revision = '2023_1_expand01' +down_revision = 'zed_expand01' +branch_labels = None +depends_on = None + + +def upgrade(): + pass diff --git a/glance/db/sqlalchemy/alembic_migrations/versions/xena_contract01_empty.py b/glance/db/sqlalchemy/alembic_migrations/versions/xena_contract01_empty.py new file mode 100644 index 000000000..ecbb75cfd --- /dev/null +++ b/glance/db/sqlalchemy/alembic_migrations/versions/xena_contract01_empty.py @@ -0,0 +1,25 @@ +# Copyright (C) 2020 RedHat Inc +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +# revision identifiers, used by Alembic. +revision = 'xena_contract01' +down_revision = 'wallaby_contract01' +branch_labels = None +depends_on = 'xena_expand01' + + +def upgrade(): + pass diff --git a/glance/db/sqlalchemy/alembic_migrations/versions/xena_expand01_empty.py b/glance/db/sqlalchemy/alembic_migrations/versions/xena_expand01_empty.py new file mode 100644 index 000000000..42306928d --- /dev/null +++ b/glance/db/sqlalchemy/alembic_migrations/versions/xena_expand01_empty.py @@ -0,0 +1,30 @@ +# Copyright (C) 2020 RedHat Inc +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""empty expand for symmetry with xena_contract01 + +Revision ID: xena_expand01 +Revises: wallaby_expand01 +Create Date: 2020-01-03 11:55:16.657499 + +""" + +# revision identifiers, used by Alembic. 
+revision = 'xena_expand01' +down_revision = 'wallaby_expand01' +branch_labels = None +depends_on = None + + +def upgrade(): + pass diff --git a/glance/db/sqlalchemy/alembic_migrations/versions/yoga_contract01_empty.py b/glance/db/sqlalchemy/alembic_migrations/versions/yoga_contract01_empty.py new file mode 100644 index 000000000..082be9994 --- /dev/null +++ b/glance/db/sqlalchemy/alembic_migrations/versions/yoga_contract01_empty.py @@ -0,0 +1,25 @@ +# Copyright (C) 2020 RedHat Inc +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +# revision identifiers, used by Alembic. +revision = 'yoga_contract01' +down_revision = 'xena_contract01' +branch_labels = None +depends_on = 'yoga_expand01' + + +def upgrade(): + pass diff --git a/glance/db/sqlalchemy/alembic_migrations/versions/yoga_expand01_empty.py b/glance/db/sqlalchemy/alembic_migrations/versions/yoga_expand01_empty.py new file mode 100644 index 000000000..f984a2fa5 --- /dev/null +++ b/glance/db/sqlalchemy/alembic_migrations/versions/yoga_expand01_empty.py @@ -0,0 +1,30 @@ +# Copyright (C) 2020 RedHat Inc +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""empty expand for symmetry with yoga_contract01 + +Revision ID: yoga_expand01 +Revises: xena_expand01 +Create Date: 2020-01-03 11:55:16.657499 + +""" + +# revision identifiers, used by Alembic. +revision = 'yoga_expand01' +down_revision = 'xena_expand01' +branch_labels = None +depends_on = None + + +def upgrade(): + pass diff --git a/glance/db/sqlalchemy/alembic_migrations/versions/zed_contract01_empty.py b/glance/db/sqlalchemy/alembic_migrations/versions/zed_contract01_empty.py new file mode 100644 index 000000000..32d36ddfb --- /dev/null +++ b/glance/db/sqlalchemy/alembic_migrations/versions/zed_contract01_empty.py @@ -0,0 +1,25 @@ +# Copyright (C) 2020 RedHat Inc +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +# revision identifiers, used by Alembic. 
+revision = 'zed_contract01' +down_revision = 'yoga_contract01' +branch_labels = None +depends_on = 'zed_expand01' + + +def upgrade(): + pass diff --git a/glance/db/sqlalchemy/alembic_migrations/versions/zed_expand01_empty.py b/glance/db/sqlalchemy/alembic_migrations/versions/zed_expand01_empty.py new file mode 100644 index 000000000..14e6e081d --- /dev/null +++ b/glance/db/sqlalchemy/alembic_migrations/versions/zed_expand01_empty.py @@ -0,0 +1,30 @@ +# Copyright (C) 2020 RedHat Inc +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""empty expand for symmetry with zed_contract01 + +Revision ID: zed_expand01 +Revises: yoga_expand01 +Create Date: 2020-01-03 11:55:16.657499 + +""" + +# revision identifiers, used by Alembic. 
+revision = 'zed_expand01' +down_revision = 'yoga_expand01' +branch_labels = None +depends_on = None + + +def upgrade(): + pass diff --git a/glance/location.py b/glance/location.py index 13e73e21a..cf05fcdb2 100644 --- a/glance/location.py +++ b/glance/location.py @@ -584,15 +584,23 @@ class ImageProxy(glance.domain.proxy.Image): self._upload_to_store(data, verifier, backend, size) - if fmt and fmt.format_match and fmt.virtual_size: - self.image.virtual_size = fmt.virtual_size - LOG.info('Image format matched and virtual size computed: %i', - self.image.virtual_size) + virtual_size = 0 + if fmt and fmt.format_match: + try: + virtual_size = fmt.virtual_size + LOG.info('Image format matched and virtual size computed: %i', + virtual_size) + except Exception as e: + LOG.error(_LE('Unable to determine virtual_size because: %s'), + e) elif fmt: LOG.warning('Image format %s did not match; ' 'unable to calculate virtual size', self.image.disk_format) + if virtual_size: + self.image.virtual_size = fmt.virtual_size + if set_active and self.image.status != 'active': self.image.status = 'active' diff --git a/glance/tests/functional/db/test_migrations.py b/glance/tests/functional/db/test_migrations.py index 0f7f0b4df..150af726d 100644 --- a/glance/tests/functional/db/test_migrations.py +++ b/glance/tests/functional/db/test_migrations.py @@ -63,21 +63,44 @@ class TestVersions(test_utils.BaseTestCase): for prefix in exception_releases]): continue - # File format should be release_phaseNN_description.py - try: - _rest = '' # noqa - release, phasever, _rest = version_file.split('_', 2) - except ValueError: - release = phasever = '' - phase = ''.join(x for x in phasever if x.isalpha()) - # Grab the non-numeric part of phaseNN - if phase not in required_phases: - # Help make sure that going forward developers stick to the - # consistent format. 
- self.fail('Migration files should be in the form of: ' - 'release_phaseNN_some_description.py ' - '(while processing %r)' % version_file) - releases[release].add(phase) + # For legacy database scripts does not starts with + # YYYY i.e. pre Antelope + if not version_file.split('_', 2)[0].isnumeric(): + # File format should be release_phaseNN_description.py + try: + _rest = '' # noqa + release, phasever, _rest = version_file.split('_', 2) + except ValueError: + release = phasever = '' + phase = ''.join(x for x in phasever if x.isalpha()) + # Grab the non-numeric part of phaseNN + if phase not in required_phases: + # Help make sure that going forward developers stick to the + # consistent format. + self.fail('Migration files should be in the form of: ' + 'release_phaseNN_some_description.py ' + '(while processing %r)' % version_file) + releases[release].add(phase) + else: + # For new database scripts i.e. Antelope onwards + # File format should be + # releaseYear_releaseN_phaseNN_description.py + # For example 2023_1_expand01_empty.py + try: + _rest = '' # noqa + release_y, release_n, phasever, _rest = version_file.split( + '_', 3) + except ValueError: + release_y = phasever = '' + phase = ''.join(x for x in phasever if x.isalpha()) + # Grab the non-numeric part of phaseNN + if phase not in required_phases: + # Help make sure that going forward developers stick to the + # consistent format. 
+ self.fail('Migration files should be in the form of: ' + 'releaseYear_releaseN_phaseNN_description.py ' + '(while processing %r)' % version_file) + releases[release_y].add(phase) for release, phases in releases.items(): missing = required_phases - phases diff --git a/glance/tests/functional/v2/test_legacy_update_cinder_store.py b/glance/tests/functional/v2/test_legacy_update_cinder_store.py index 3911711ab..d42ae12c1 100644 --- a/glance/tests/functional/v2/test_legacy_update_cinder_store.py +++ b/glance/tests/functional/v2/test_legacy_update_cinder_store.py @@ -19,7 +19,6 @@ import uuid from cinderclient.v3 import client as cinderclient import glance_store -from glance_store._drivers import cinder from oslo_config import cfg from oslo_log import log as logging from oslo_utils import strutils @@ -27,6 +26,16 @@ from oslo_utils import strutils from glance.common import wsgi from glance.tests import functional +# Keeping backward compatibility to support importing from old +# path +try: + from glance_store._drivers.cinder import base + from glance_store._drivers.cinder import store as cinder +except ImportError: + from glance_store._drivers import cinder + base = mock.Mock() + + LOG = logging.getLogger(__name__) CONF = cfg.CONF @@ -135,6 +144,7 @@ class TestLegacyUpdateCinderStore(functional.SynchronousAPIBase): volume.status = status_expected return volume + @mock.patch.object(base, 'connector') @mock.patch.object(cinderclient, 'Client') @mock.patch.object(cinder.Store, 'temporary_chown') @mock.patch.object(cinder, 'connector') @@ -143,7 +153,8 @@ class TestLegacyUpdateCinderStore(functional.SynchronousAPIBase): @mock.patch.object(strutils, 'mask_dict_password') @mock.patch.object(socket, 'getaddrinfo') def test_create_image(self, mock_host_addr, mock_mask_pass, mock_wait, - mock_open, mock_connector, mock_chown, mocked_cc): + mock_open, mock_connector, mock_chown, mocked_cc, + mock_base): # setup multiple cinder stores self.setup_multiple_stores() 
self.start_server() @@ -165,6 +176,7 @@ class TestLegacyUpdateCinderStore(functional.SynchronousAPIBase): mock_chown.assert_called() mock_connector.get_connector_properties.assert_called() + @mock.patch.object(base, 'connector') @mock.patch.object(cinderclient, 'Client') @mock.patch.object(cinder.Store, 'temporary_chown') @mock.patch.object(cinder, 'connector') @@ -174,7 +186,7 @@ class TestLegacyUpdateCinderStore(functional.SynchronousAPIBase): @mock.patch.object(socket, 'getaddrinfo') def test_migrate_image_after_upgrade(self, mock_host_addr, mock_mask_pass, mock_wait, mock_open, mock_connector, - mock_chown, mocked_cc): + mock_chown, mocked_cc, mock_base): """Test to check if an image is successfully migrated when we upgrade from a single cinder store to multiple cinder stores. @@ -213,6 +225,7 @@ class TestLegacyUpdateCinderStore(functional.SynchronousAPIBase): mock_chown.assert_called() mock_connector.get_connector_properties.assert_called() + @mock.patch.object(base, 'connector') @mock.patch.object(cinderclient, 'Client') @mock.patch.object(cinder.Store, 'temporary_chown') @mock.patch.object(cinder, 'connector') @@ -224,7 +237,8 @@ class TestLegacyUpdateCinderStore(functional.SynchronousAPIBase): mock_mask_pass, mock_wait, mock_open, mock_connector, - mock_chown, mocked_cc): + mock_chown, mocked_cc, + mock_base): """Test to check if an image is successfully migrated when we upgrade from a single cinder store to multiple cinder stores, and that GETs from non-owners in the meantime are not interrupted. diff --git a/glance/tests/gate/README b/glance/tests/gate/README deleted file mode 100644 index 05636e9ac..000000000 --- a/glance/tests/gate/README +++ /dev/null @@ -1,11 +0,0 @@ -=============== -Gate-only tests -=============== - -These tests catch configuration problems for some code constants that -must be maintained manually. 
We have them separated out from the other -tests so that they can easily be run in their own gate job and don't -affect local development. - -It would be nice if someone with some free time could figure out how -to make these changes automatic (or unnecessary) ... diff --git a/glance/tests/gate/__init__.py b/glance/tests/gate/__init__.py deleted file mode 100644 index e69de29bb..000000000 --- a/glance/tests/gate/__init__.py +++ /dev/null diff --git a/glance/tests/gate/test_data_migration_version.py b/glance/tests/gate/test_data_migration_version.py deleted file mode 100644 index 992e89001..000000000 --- a/glance/tests/gate/test_data_migration_version.py +++ /dev/null @@ -1,57 +0,0 @@ -# Copyright 2019 Red Hat, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import testtools - -from glance.db.migration import CURRENT_RELEASE -from glance.version import version_info - - -class TestDataMigrationVersion(testtools.TestCase): - - def test_migration_version(self): - """Make sure the data migration version info has been updated.""" - - release_number = int(version_info.version_string().split('.', 1)[0]) - - # by rule, release names must be composed of the 26 letters of the - # ISO Latin alphabet (ord('A')==65, ord('Z')==90) - release_letter = str(CURRENT_RELEASE[:1].upper()).encode('ascii') - - # Convert release letter into an int in [1:26]. The first - # glance release was 'Bexar'. 
- converted_release_letter = (ord(release_letter) - - ord(u'B'.encode('ascii')) + 1) - - # Project the release number into [1:26] - converted_release_number = release_number % 26 - - # Prepare for the worst with a super-informative message - msg = ('\n\n' - 'EMERGENCY!\n' - 'glance.db.migration.CURRENT_RELEASE is out of sync ' - 'with the glance version.\n' - ' CURRENT_RELEASE: %s\n' - ' glance version: %s\n' - 'glance.db.migration.CURRENT_RELEASE needs to be ' - 'updated IMMEDIATELY.\n' - 'The gate will be wedged until the update is made.\n' - 'EMERGENCY!\n' - '\n') % (CURRENT_RELEASE, - version_info.version_string()) - - self.assertEqual(converted_release_letter, - converted_release_number, - msg) diff --git a/glance/tests/unit/async_/flows/plugins/test_image_conversion.py b/glance/tests/unit/async_/flows/plugins/test_image_conversion.py index 77d68acf8..a60e2e1a5 100644 --- a/glance/tests/unit/async_/flows/plugins/test_image_conversion.py +++ b/glance/tests/unit/async_/flows/plugins/test_image_conversion.py @@ -172,6 +172,53 @@ class TestConvertImageTask(test_utils.BaseTestCase): # Make sure we did not update the image self.img_repo.save.assert_not_called() + def test_image_convert_invalid_qcow(self): + data = {'format': 'qcow2', + 'backing-filename': '/etc/hosts'} + + convert = self._setup_image_convert_info_fail() + with mock.patch.object(processutils, 'execute') as exc_mock: + exc_mock.return_value = json.dumps(data), '' + e = self.assertRaises(RuntimeError, + convert.execute, 'file:///test/path.qcow') + self.assertEqual('QCOW images with backing files are not allowed', + str(e)) + + def _test_image_convert_invalid_vmdk(self): + data = {'format': 'vmdk', + 'format-specific': { + 'data': { + 'create-type': 'monolithicFlat', + }}} + + convert = self._setup_image_convert_info_fail() + with mock.patch.object(processutils, 'execute') as exc_mock: + exc_mock.return_value = json.dumps(data), '' + convert.execute('file:///test/path.vmdk') + + def 
test_image_convert_invalid_vmdk(self): + e = self.assertRaises(RuntimeError, + self._test_image_convert_invalid_vmdk) + self.assertEqual('Invalid VMDK create-type specified', str(e)) + + def test_image_convert_valid_vmdk_no_types(self): + with mock.patch.object(CONF.image_format, 'vmdk_allowed_types', + new=[]): + # We make it past the VMDK check and fail because our file + # does not exist + e = self.assertRaises(RuntimeError, + self._test_image_convert_invalid_vmdk) + self.assertEqual('Image is a VMDK, but no VMDK createType is ' + 'specified', str(e)) + + def test_image_convert_valid_vmdk(self): + with mock.patch.object(CONF.image_format, 'vmdk_allowed_types', + new=['monolithicSparse', 'monolithicFlat']): + # We make it past the VMDK check and fail because our file + # does not exist + self.assertRaises(FileNotFoundError, + self._test_image_convert_invalid_vmdk) + def test_image_convert_fails(self): convert = self._setup_image_convert_info_fail() with mock.patch.object(processutils, 'execute') as exc_mock: diff --git a/glance/tests/unit/common/test_format_inspector.py b/glance/tests/unit/common/test_format_inspector.py index d229d094f..db6a9830b 100644 --- a/glance/tests/unit/common/test_format_inspector.py +++ b/glance/tests/unit/common/test_format_inspector.py @@ -16,6 +16,7 @@ import io import os import re +import struct import subprocess import tempfile from unittest import mock @@ -63,6 +64,28 @@ class TestFormatInspectors(test_utils.BaseTestCase): shell=True) return fn + def _create_allocated_vmdk(self, size_mb): + # We need a "big" VMDK file to exercise some parts of the code of the + # format_inspector. A way to create one is to first create an empty + # file, and then to convert it with the -S 0 option. 
+ fn = tempfile.mktemp(prefix='glance-unittest-formatinspector-', + suffix='.vmdk') + self._created_files.append(fn) + zeroes = tempfile.mktemp(prefix='glance-unittest-formatinspector-', + suffix='.zero') + self._created_files.append(zeroes) + + # Create an empty file + subprocess.check_output( + 'dd if=/dev/zero of=%s bs=1M count=%i' % (zeroes, size_mb), + shell=True) + + # Convert it to VMDK + subprocess.check_output( + 'qemu-img convert -f raw -O vmdk -S 0 %s %s' % (zeroes, fn), + shell=True) + return fn + def _test_format_at_block_size(self, format_name, img, block_size): fmt = format_inspector.get_inspector(format_name)() self.assertIsNotNone(fmt, @@ -119,6 +142,64 @@ class TestFormatInspectors(test_utils.BaseTestCase): def test_vmdk(self): self._test_format('vmdk') + def test_vmdk_bad_descriptor_offset(self): + format_name = 'vmdk' + image_size = 10 * units.Mi + descriptorOffsetAddr = 0x1c + BAD_ADDRESS = 0x400 + img = self._create_img(format_name, image_size) + + # Corrupt the header + fd = open(img, 'r+b') + fd.seek(descriptorOffsetAddr) + fd.write(struct.pack('<Q', BAD_ADDRESS // 512)) + fd.close() + + # Read the format in various sizes, some of which will read whole + # sections in a single read, others will be completely unaligned, etc. 
+ for block_size in (64 * units.Ki, 512, 17, 1 * units.Mi): + fmt = self._test_format_at_block_size(format_name, img, block_size) + self.assertTrue(fmt.format_match, + 'Failed to match %s at size %i block %i' % ( + format_name, image_size, block_size)) + self.assertEqual(0, fmt.virtual_size, + ('Calculated a virtual size for a corrupt %s at ' + 'size %i block %i') % (format_name, image_size, + block_size)) + + def test_vmdk_bad_descriptor_mem_limit(self): + format_name = 'vmdk' + image_size = 5 * units.Mi + virtual_size = 5 * units.Mi + descriptorOffsetAddr = 0x1c + descriptorSizeAddr = descriptorOffsetAddr + 8 + twoMBInSectors = (2 << 20) // 512 + # We need a big VMDK because otherwise we will not have enough data to + # fill-up the CaptureRegion. + img = self._create_allocated_vmdk(image_size // units.Mi) + + # Corrupt the end of descriptor address so it "ends" at 2MB + fd = open(img, 'r+b') + fd.seek(descriptorSizeAddr) + fd.write(struct.pack('<Q', twoMBInSectors)) + fd.close() + + # Read the format in various sizes, some of which will read whole + # sections in a single read, others will be completely unaligned, etc. 
+ for block_size in (64 * units.Ki, 512, 17, 1 * units.Mi): + fmt = self._test_format_at_block_size(format_name, img, block_size) + self.assertTrue(fmt.format_match, + 'Failed to match %s at size %i block %i' % ( + format_name, image_size, block_size)) + self.assertEqual(virtual_size, fmt.virtual_size, + ('Failed to calculate size for %s at size %i ' + 'block %i') % (format_name, image_size, + block_size)) + memory = sum(fmt.context_info.values()) + self.assertLess(memory, 1.5 * units.Mi, + 'Format used more than 1.5MiB of memory: %s' % ( + fmt.context_info)) + def test_vdi(self): self._test_format('vdi') @@ -275,3 +356,42 @@ class TestFormatInspectorInfra(test_utils.BaseTestCase): self.assertEqual(format_inspector.QcowInspector, format_inspector.get_inspector('qcow2')) self.assertIsNone(format_inspector.get_inspector('foo')) + + +class TestFormatInspectorsTargeted(test_utils.BaseTestCase): + def _make_vhd_meta(self, guid_raw, item_length): + # Meta region header, padded to 32 bytes + data = struct.pack('<8sHH', b'metadata', 0, 1) + data += b'0' * 20 + + # Metadata table entry, 16-byte GUID, 12-byte information, + # padded to 32-bytes + data += guid_raw + data += struct.pack('<III', 256, item_length, 0) + data += b'0' * 6 + + return data + + def test_vhd_table_over_limit(self): + ins = format_inspector.VHDXInspector() + meta = format_inspector.CaptureRegion(0, 0) + desired = b'012345678ABCDEF0' + # This is a poorly-crafted image that specifies a larger table size + # than is allowed + meta.data = self._make_vhd_meta(desired, 33 * 2048) + ins.new_region('metadata', meta) + new_region = ins._find_meta_entry(ins._guid(desired)) + # Make sure we clamp to our limit of 32 * 2048 + self.assertEqual( + format_inspector.VHDXInspector.VHDX_METADATA_TABLE_MAX_SIZE, + new_region.length) + + def test_vhd_table_under_limit(self): + ins = format_inspector.VHDXInspector() + meta = format_inspector.CaptureRegion(0, 0) + desired = b'012345678ABCDEF0' + meta.data = 
self._make_vhd_meta(desired, 16 * 2048) + ins.new_region('metadata', meta) + new_region = ins._find_meta_entry(ins._guid(desired)) + # Table size was under the limit, make sure we get it back + self.assertEqual(16 * 2048, new_region.length) diff --git a/glance/tests/unit/test_store_image.py b/glance/tests/unit/test_store_image.py index 50bda54a2..01b1fc4ff 100644 --- a/glance/tests/unit/test_store_image.py +++ b/glance/tests/unit/test_store_image.py @@ -283,6 +283,37 @@ class TestStoreImage(utils.BaseTestCase): self.assertEqual('active', image.status) self.assertEqual(0, image.virtual_size) + @mock.patch('glance.common.format_inspector.QcowInspector.virtual_size', + new_callable=mock.PropertyMock) + @mock.patch('glance.common.format_inspector.QcowInspector.format_match', + new_callable=mock.PropertyMock) + def test_image_set_data_inspector_virtual_size_failure(self, mock_fm, + mock_vs): + # Force our format to match + mock_fm.return_value = True + + # Make virtual_size fail in some unexpected way + mock_vs.side_effect = ValueError('some error') + + context = glance.context.RequestContext(user=USER1) + image_stub = ImageStub(UUID2, status='queued', locations=[]) + image_stub.disk_format = 'qcow2' + # We are going to pass an iterable data source, so use the + # FakeStoreAPIReader that actually reads from that data + store_api = unit_test_utils.FakeStoreAPIReader() + image = glance.location.ImageProxy(image_stub, context, + store_api, self.store_utils) + + # Make sure set_data proceeds even though the format clearly + # does not match + image.set_data(iter(['YYYY']), 4) + self.assertEqual(4, image.size) + # NOTE(markwash): FakeStore returns image_id for location + self.assertEqual(UUID2, image.locations[0]['url']) + self.assertEqual('Z', image.checksum) + self.assertEqual('active', image.status) + self.assertEqual(0, image.virtual_size) + @mock.patch('glance.common.format_inspector.get_inspector') def test_image_set_data_inspector_not_needed(self, mock_gi): context = 
glance.context.RequestContext(user=USER1) @@ -67,17 +67,6 @@ basepython = python3 commands = oslopolicy-sample-generator --config-file=etc/glance-policy-generator.conf -[testenv:gateonly] -# NOTE(rosmaita): these tests catch configuration problems for some code -# constants that must be maintained manually; we have them separated out -# so they don't affect local development -# TODO(someone other than me): figure out how to make these changes either -# automatic or unnecessary -setenv = - TEST_PATH = ./glance/tests/gate -commands = - stestr run {posargs} - [testenv:pep8] commands = flake8 {posargs} |