summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorTim Burke <tim.burke@gmail.com>2020-06-18 09:41:46 -0700
committerTim Burke <tim.burke@gmail.com>2020-06-18 09:41:46 -0700
commit481f126e6b59689599f438e5d27f7328f5b3e813 (patch)
tree14212db13aee782e95ffd36993d74c6bf35df0cb
parentb3fd0bd9d82160305a821e742b2cd968036911b2 (diff)
parent51a587ed8dd5700b558ad26d70dcb7facc0f91e4 (diff)
downloadswift-feature/losf.tar.gz
Merge remote-tracking branch 'gerrit/master' into feature/losffeature/losf
Change-Id: If9d7c63f3c4c15fbccff31e2b77a6911bb95972a
-rw-r--r--.zuul.yaml149
-rw-r--r--CHANGELOG11
-rw-r--r--CONTRIBUTING.rst10
-rw-r--r--REVIEW_GUIDELINES.rst2
-rw-r--r--api-ref/source/conf.py44
-rwxr-xr-xbin/swift-get-nodes2
-rwxr-xr-xbin/swift-object-relinker2
-rw-r--r--doc/requirements.txt7
-rw-r--r--doc/saio/swift/proxy-server.conf3
-rw-r--r--doc/source/conf.py25
-rw-r--r--doc/source/contributor/contributing.rst79
-rw-r--r--doc/source/contributor/review_guidelines.rst1
-rw-r--r--doc/source/index.rst9
-rw-r--r--doc/source/s3_compat.rst4
-rw-r--r--etc/proxy-server.conf-sample5
-rw-r--r--lower-constraints.txt5
-rw-r--r--releasenotes/source/conf.py14
-rw-r--r--releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po882
-rw-r--r--requirements.txt11
-rw-r--r--roles/additional-keystone-users/tasks/main.yaml134
-rw-r--r--roles/additional-tempauth-users/tasks/main.yaml47
-rw-r--r--roles/dsvm-additional-middlewares/tasks/main.yaml26
-rw-r--r--setup.cfg1
-rw-r--r--swift/cli/info.py17
-rw-r--r--swift/cli/relinker.py181
-rw-r--r--swift/common/db.py53
-rw-r--r--swift/common/memcached.py21
-rw-r--r--swift/common/middleware/memcache.py5
-rw-r--r--swift/common/middleware/ratelimit.py4
-rw-r--r--swift/common/middleware/s3api/controllers/obj.py24
-rw-r--r--swift/common/middleware/symlink.py31
-rw-r--r--swift/common/middleware/versioned_writes/object_versioning.py6
-rw-r--r--swift/common/swob.py27
-rw-r--r--swift/common/utils.py86
-rw-r--r--swift/container/backend.py70
-rw-r--r--swift/container/server.py44
-rw-r--r--swift/container/sharder.py17
-rw-r--r--swift/obj/updater.py13
-rw-r--r--test/functional/__init__.py16
-rw-r--r--test/functional/s3api/__init__.py9
-rw-r--r--test/functional/s3api/s3_test_client.py40
-rw-r--r--test/functional/s3api/test_acl.py6
-rw-r--r--test/functional/s3api/test_bucket.py16
-rw-r--r--test/functional/s3api/test_multi_delete.py2
-rw-r--r--test/functional/s3api/test_multi_upload.py19
-rw-r--r--test/functional/s3api/test_object.py10
-rw-r--r--test/functional/s3api/test_service.py2
-rw-r--r--test/functional/test_object.py9
-rw-r--r--test/functional/test_object_versioning.py106
-rw-r--r--test/functional/test_slo.py31
-rwxr-xr-xtest/functional/test_symlink.py79
-rw-r--r--test/functional/test_versioned_writes.py11
-rw-r--r--test/functional/tests.py75
-rw-r--r--test/probe/test_object_async_update.py7
-rw-r--r--test/probe/test_object_metadata_replication.py3
-rw-r--r--test/sample.conf4
-rw-r--r--test/unit/account/test_backend.py66
-rw-r--r--test/unit/cli/test_info.py148
-rw-r--r--test/unit/cli/test_manage_shard_ranges.py2
-rw-r--r--test/unit/cli/test_relinker.py218
-rw-r--r--test/unit/common/middleware/s3api/test_obj.py91
-rw-r--r--test/unit/common/middleware/s3api/test_s3_acl.py16
-rw-r--r--test/unit/common/middleware/test_ratelimit.py114
-rw-r--r--test/unit/common/middleware/test_symlink.py50
-rw-r--r--test/unit/common/test_db.py2
-rw-r--r--test/unit/common/test_memcached.py79
-rw-r--r--test/unit/common/test_swob.py16
-rw-r--r--test/unit/common/test_utils.py154
-rw-r--r--test/unit/container/test_backend.py259
-rw-r--r--test/unit/container/test_server.py110
-rw-r--r--test/unit/container/test_sharder.py28
-rw-r--r--test/unit/proxy/controllers/test_obj.py6
-rw-r--r--test/unit/proxy/test_server.py2
-rw-r--r--tools/playbooks/common/install_dependencies.yaml3
-rw-r--r--tools/playbooks/dsvm/pre.yaml2
-rw-r--r--tools/playbooks/saio_single_node_setup/setup_saio.yaml10
-rw-r--r--tox.ini26
77 files changed, 3174 insertions, 745 deletions
diff --git a/.zuul.yaml b/.zuul.yaml
index 1a5813f12..4ae44ec20 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -6,7 +6,7 @@
It sets TMPDIR to an XFS mount point created via
tools/test-setup.sh.
- timeout: 2400
+ timeout: 3600
vars:
tox_environment:
TMPDIR: '{{ ansible_env.HOME }}/xfstmp'
@@ -140,30 +140,6 @@
tox_envlist: func-ec-py3
- job:
- name: swift-tox-func-domain-remap-staticweb-py37
- parent: swift-tox-func-py37
- description: |
- Run functional tests for swift under cPython version 3.7.
-
- Uses tox with the ``func-domain-remap-staticweb-py3`` environment.
- It sets TMPDIR to an XFS mount point created via
- tools/test-setup.sh.
- vars:
- tox_envlist: func-domain-remap-staticweb-py3
-
-- job:
- name: swift-tox-func-s3api-py37
- parent: swift-tox-func-py37
- description: |
- Run functional tests for swift under cPython version 3.7.
-
- Uses tox with the ``func-s3api`` environment.
- It sets TMPDIR to an XFS mount point created via
- tools/test-setup.sh.
- vars:
- tox_envlist: func-s3api-py3
-
-- job:
name: swift-tox-func-py27-centos-7
parent: swift-tox-func-py27
nodeset: centos-7
@@ -220,30 +196,6 @@
nodeset: centos-7
- job:
- name: swift-tox-func-domain-remap-staticweb-py27
- parent: swift-tox-base
- description: |
- Run functional tests for swift under cPython version 2.7.
-
- Uses tox with the ``func-domain-remap-staticweb`` environment.
- It sets TMPDIR to an XFS mount point created via
- tools/test-setup.sh.
- vars:
- tox_envlist: func-domain-remap-staticweb
-
-- job:
- name: swift-tox-func-s3api-py27
- parent: swift-tox-base
- description: |
- Run functional tests for swift under cPython version 2.7.
-
- Uses tox with the ``func-s3api`` environment.
- It sets TMPDIR to an XFS mount point created via
- tools/test-setup.sh.
- vars:
- tox_envlist: func-s3api
-
-- job:
name: swift-dsvm-functional
parent: devstack-minimal
description: |
@@ -256,16 +208,12 @@
override-checkout: master
- name: opendev.org/openstack/devstack
override-checkout: master
- timeout: 2700
+ timeout: 3600
vars:
tox_constraints_file: '{{ ansible_user_dir }}/src/opendev.org/openstack/requirements/upper-constraints.txt'
# This tox env get run twice; once for Keystone and once for tempauth
tox_envlist: func
devstack_localrc:
- # Other services are fine to run py3
- USE_PYTHON3: true
- # explicitly state that we want to test swift under py2
- DISABLED_PYTHON3_PACKAGES: 'swift'
SWIFT_HASH: changeme
# We don't need multiple replicas to run purely functional tests.
# In fact, devstack special cases some things when there's only
@@ -279,26 +227,13 @@
devstack_services:
keystone: true
swift: true
+ s3api: true
zuul_work_dir: src/opendev.org/openstack/swift
pre-run: tools/playbooks/dsvm/pre.yaml
run: tools/playbooks/dsvm/run.yaml
post-run: tools/playbooks/dsvm/post.yaml
- job:
- name: swift-dsvm-functional-py3
- parent: swift-dsvm-functional
- description: |
- Setup a Swift/Keystone environment under py3 and run Swift's func tests
- (also under py3).
- vars:
- # This tox env get run twice; once for Keystone and once for tempauth
- tox_envlist: func-py3
- devstack_localrc:
- USE_PYTHON3: true
- # explicitly clear swift's default-disabled status
- DISABLED_PYTHON3_PACKAGES: ''
-
-- job:
name: swift-dsvm-functional-ipv6
parent: swift-dsvm-functional
vars:
@@ -313,7 +248,7 @@
nodeset: centos-7
description: |
Setup a SAIO dev environment and run ceph-s3tests
- timeout: 2400
+ timeout: 3600
pre-run:
- tools/playbooks/common/install_dependencies.yaml
- tools/playbooks/saio_single_node_setup/setup_saio.yaml
@@ -559,59 +494,39 @@
irrelevant-files:
- ^(api-ref|doc|releasenotes)/.*$
- ^test/probe/.*$
- - ^(.gitreview|.mailmap|AUTHORS|CHANGELOG)$
+ - ^(.gitreview|.mailmap|AUTHORS|CHANGELOG|.*\.rst)$
- swift-tox-func-encryption-py27:
irrelevant-files:
- ^(api-ref|doc|releasenotes)/.*$
- ^test/probe/.*$
- - ^(.gitreview|.mailmap|AUTHORS|CHANGELOG)$
- - swift-tox-func-domain-remap-staticweb-py27:
- irrelevant-files:
- - ^(api-ref|doc|releasenotes)/.*$
- - ^test/probe/.*$
- - ^(.gitreview|.mailmap|AUTHORS|CHANGELOG)$
+ - ^(.gitreview|.mailmap|AUTHORS|CHANGELOG|.*\.rst)$
- swift-tox-func-ec-py27:
irrelevant-files:
- ^(api-ref|doc|releasenotes)/.*$
- ^test/probe/.*$
- - ^(.gitreview|.mailmap|AUTHORS|CHANGELOG)$
- - swift-tox-func-s3api-py27:
- irrelevant-files:
- - ^(api-ref|doc|releasenotes)/.*$
- - ^test/probe/.*$
- - ^(.gitreview|.mailmap|AUTHORS|CHANGELOG)$
+ - ^(.gitreview|.mailmap|AUTHORS|CHANGELOG|.*\.rst)$
- swift-tox-func-losf-py27:
irrelevant-files:
- ^(api-ref|doc|releasenotes)/.*$
- ^test/probe/.*$
- - ^(.gitreview|.mailmap|AUTHORS|CHANGELOG)$
+ - ^(.gitreview|.mailmap|AUTHORS|CHANGELOG|.*\.rst)$
# py3 functional tests
- swift-tox-func-py37:
irrelevant-files:
- ^(api-ref|doc|releasenotes)/.*$
- ^test/probe/.*$
- - ^(.gitreview|.mailmap|AUTHORS|CHANGELOG)$
+ - ^(.gitreview|.mailmap|AUTHORS|CHANGELOG|.*\.rst)$
- swift-tox-func-encryption-py37:
irrelevant-files:
- ^(api-ref|doc|releasenotes)/.*$
- ^test/probe/.*$
- - ^(.gitreview|.mailmap|AUTHORS|CHANGELOG)$
- - swift-tox-func-domain-remap-staticweb-py37:
- irrelevant-files:
- - ^(api-ref|doc|releasenotes)/.*$
- - ^test/probe/.*$
- - ^(.gitreview|.mailmap|AUTHORS|CHANGELOG)$
+ - ^(.gitreview|.mailmap|AUTHORS|CHANGELOG|.*\.rst)$
- swift-tox-func-ec-py37:
irrelevant-files:
- ^(api-ref|doc|releasenotes)/.*$
- ^test/probe/.*$
- - ^(.gitreview|.mailmap|AUTHORS|CHANGELOG)$
- - swift-tox-func-s3api-py37:
- irrelevant-files:
- - ^(api-ref|doc|releasenotes)/.*$
- - ^test/probe/.*$
- - ^(.gitreview|.mailmap|AUTHORS|CHANGELOG)$
+ - ^(.gitreview|.mailmap|AUTHORS|CHANGELOG|.*\.rst)$
# Other tests
- swift-tox-func-s3api-ceph-s3tests-tempauth:
@@ -621,29 +536,24 @@
# Also keep doc/s3api -- it holds known failures for these tests
- ^doc/(requirements.txt|(manpages|source)/.*)$
- ^test/(unit|probe)/.*$
- - ^(.gitreview|.mailmap|AUTHORS|CHANGELOG)$
+ - ^(.gitreview|.mailmap|AUTHORS|CHANGELOG|.*\.rst)$
- swift-probetests-centos-7:
irrelevant-files:
- ^(api-ref|releasenotes)/.*$
# Keep doc/saio -- we use those sample configs in the saio playbooks
- ^doc/(requirements.txt|(manpages|s3api|source)/.*)$
- ^test/(unit|functional)/.*$
- - ^(.gitreview|.mailmap|AUTHORS|CHANGELOG)$
+ - ^(.gitreview|.mailmap|AUTHORS|CHANGELOG|.*\.rst)$
- swift-dsvm-functional:
irrelevant-files:
- ^(api-ref|doc|releasenotes)/.*$
- ^test/probe/.*$
- - ^(.gitreview|.mailmap|AUTHORS|CHANGELOG)$
- - swift-dsvm-functional-py3:
- irrelevant-files:
- - ^(api-ref|doc|releasenotes)/.*$
- - ^test/probe/.*$
- - ^(.gitreview|.mailmap|AUTHORS|CHANGELOG)$
+ - ^(.gitreview|.mailmap|AUTHORS|CHANGELOG|.*\.rst)$
- swift-dsvm-functional-ipv6:
irrelevant-files:
- ^(api-ref|doc|releasenotes)/.*$
- ^test/probe/.*$
- - ^(.gitreview|.mailmap|AUTHORS|CHANGELOG)$
+ - ^(.gitreview|.mailmap|AUTHORS|CHANGELOG|.*\.rst)$
- swift-tox-lower-constraints:
irrelevant-files:
- ^(api-ref|doc|releasenotes)/.*$
@@ -657,24 +567,24 @@
irrelevant-files:
- ^(api-ref|doc|releasenotes)/.*$
- ^test/probe/.*$
- - ^(.gitreview|.mailmap|AUTHORS|CHANGELOG)$
+ - ^(.gitreview|.mailmap|AUTHORS|CHANGELOG|.*\.rst)$
# 2.25.0 had a test issue; see https://review.opendev.org/#/c/721518/
voting: false
- tempest-integrated-object-storage:
irrelevant-files:
- ^(api-ref|doc|releasenotes)/.*$
- ^test/.*$
- - ^(.gitreview|.mailmap|AUTHORS|CHANGELOG)$
+ - ^(.gitreview|.mailmap|AUTHORS|CHANGELOG|.*\.rst)$
- tempest-ipv6-only:
irrelevant-files:
- ^(api-ref|doc|releasenotes)/.*$
- ^test/.*$
- - ^(.gitreview|.mailmap|AUTHORS|CHANGELOG)$
+ - ^(.gitreview|.mailmap|AUTHORS|CHANGELOG|.*\.rst)$
- grenade:
irrelevant-files:
- ^(api-ref|doc|releasenotes)/.*$
- ^test/.*$
- - ^(.gitreview|.mailmap|AUTHORS|CHANGELOG)$
+ - ^(.gitreview|.mailmap|AUTHORS|CHANGELOG|.*\.rst)$
gate:
jobs:
# For gate jobs, err towards running more jobs (so, generally avoid
@@ -689,37 +599,28 @@
- swift-tox-py38
- swift-tox-func-py27
- swift-tox-func-encryption-py27
- - swift-tox-func-domain-remap-staticweb-py27
- swift-tox-func-ec-py27
- - swift-tox-func-s3api-py27
- swift-tox-func-losf-py27
- swift-tox-func-py37
- swift-tox-func-encryption
- - swift-tox-func-domain-remap-staticweb-py37
- swift-tox-func-ec-py37
- - swift-tox-func-s3api-py37
- swift-probetests-centos-7:
irrelevant-files:
- ^(api-ref|releasenotes)/.*$
# Keep doc/saio -- we use those sample configs in the saio playbooks
- ^doc/(requirements.txt|(manpages|s3api|source)/.*)$
- ^test/(unit|functional)/.*$
- - ^(.gitreview|.mailmap|AUTHORS|CHANGELOG)$
+ - ^(.gitreview|.mailmap|AUTHORS|CHANGELOG|.*\.rst)$
- swift-dsvm-functional:
irrelevant-files:
- ^(api-ref|doc|releasenotes)/.*$
- ^test/probe/.*$
- - ^(.gitreview|.mailmap|AUTHORS|CHANGELOG)$
- - swift-dsvm-functional-py3:
- irrelevant-files:
- - ^(api-ref|doc|releasenotes)/.*$
- - ^test/probe/.*$
- - ^(.gitreview|.mailmap|AUTHORS|CHANGELOG)$
+ - ^(.gitreview|.mailmap|AUTHORS|CHANGELOG|.*\.rst)$
- swift-dsvm-functional-ipv6:
irrelevant-files:
- ^(api-ref|doc|releasenotes)/.*$
- ^test/probe/.*$
- - ^(.gitreview|.mailmap|AUTHORS|CHANGELOG)$
+ - ^(.gitreview|.mailmap|AUTHORS|CHANGELOG|.*\.rst)$
- swift-tox-lower-constraints:
irrelevant-files:
- ^(api-ref|doc|releasenotes)/.*$
@@ -733,17 +634,17 @@
irrelevant-files:
- ^(api-ref|doc|releasenotes)/.*$
- ^test/.*$
- - ^(.gitreview|.mailmap|AUTHORS|CHANGELOG)$
+ - ^(.gitreview|.mailmap|AUTHORS|CHANGELOG|.*\.rst)$
- tempest-ipv6-only:
irrelevant-files:
- ^(api-ref|doc|releasenotes)/.*$
- ^test/.*$
- - ^(.gitreview|.mailmap|AUTHORS|CHANGELOG)$
+ - ^(.gitreview|.mailmap|AUTHORS|CHANGELOG|.*\.rst)$
- grenade:
irrelevant-files:
- ^(api-ref|doc|releasenotes)/.*$
- ^test/.*$
- - ^(.gitreview|.mailmap|AUTHORS|CHANGELOG)$
+ - ^(.gitreview|.mailmap|AUTHORS|CHANGELOG|.*\.rst)$
experimental:
jobs:
- swift-tox-py27-centos-7
diff --git a/CHANGELOG b/CHANGELOG
index 53ea4d45e..2db3fa8b4 100644
--- a/CHANGELOG
+++ b/CHANGELOG
@@ -2,12 +2,6 @@ swift (2.25.0, OpenStack Ussuri)
* WSGI server processes can now notify systemd when they are ready.
- * Added a new middleware that allows users and operators to configure
- accounts and containers to use RFC-compliant (i.e., double-quoted)
- ETags. This may be useful when using Swift as an origin for some
- content delivery networks. For more information, see
- https://docs.openstack.org/swift/latest/middleware.html#etag-quoter
-
* Added `ttfb` (Time to First Byte) and `pid` (Process ID) to the set
of available proxy-server log fields. For more information, see
https://docs.openstack.org/swift/latest/logs.html
@@ -58,8 +52,9 @@ swift (2.24.0)
* Added support for S3 versioning using the above new mode.
* Added a new middleware to allow accounts and containers to opt-in to
- RFC-compliant ETags. For more information, see the documentation at
- https://docs.openstack.org/swift/latest/middleware.html#module-swift.common.middleware.etag_quoter
+ RFC-compliant ETags. This may be useful when using Swift as an origin
+ for some content delivery networks. For more information, see the
+ documentation at https://docs.openstack.org/swift/latest/middleware.html#module-swift.common.middleware.etag_quoter
Clients should be aware of the fact that ETags may be quoted for RFC
compliance; this may become the default behavior in some future release.
diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst
index 3aef4845f..245843071 100644
--- a/CONTRIBUTING.rst
+++ b/CONTRIBUTING.rst
@@ -21,6 +21,16 @@ Swift is nothing without the community behind it. We'd love to welcome you to
our community. Come find us in #openstack-swift on freenode IRC or on the
OpenStack dev mailing list.
+For general information on contributing to OpenStack, please check out the
+`contributor guide <https://docs.openstack.org/contributors/>`_ to get started.
+It covers all the basics that are common to all OpenStack projects: the accounts
+you need, the basics of interacting with our Gerrit review system, how we
+communicate as a community, etc.
+
+If you want more Swift related project documentation make sure you checkout
+the Swift developer (contributor) documentation at
+https://docs.openstack.org/swift/latest/
+
Filing a Bug
~~~~~~~~~~~~
diff --git a/REVIEW_GUIDELINES.rst b/REVIEW_GUIDELINES.rst
index a61a29869..e2bc4c6bb 100644
--- a/REVIEW_GUIDELINES.rst
+++ b/REVIEW_GUIDELINES.rst
@@ -369,7 +369,7 @@ Endeavor to leave a positive or negative score on every change you review.
Use your best judgment.
A note on Swift Core Maintainers
-================================
+--------------------------------
Swift Core maintainers may provide positive reviews scores that *look*
different from your reviews - a "+2" instead of a "+1".
diff --git a/api-ref/source/conf.py b/api-ref/source/conf.py
index cb2f07428..dea25e9c7 100644
--- a/api-ref/source/conf.py
+++ b/api-ref/source/conf.py
@@ -25,21 +25,18 @@
import datetime
import os
-from swift import __version__
-import subprocess
import sys
import warnings
-import openstackdocstheme
html_theme = 'openstackdocs'
-html_theme_path = [openstackdocstheme.get_html_theme_path()]
html_theme_options = {
"sidebar_mode": "toc",
}
extensions = [
'os_api_ref',
+ 'openstackdocstheme'
]
# If extensions (or modules to document with autodoc) are in another directory,
@@ -68,19 +65,6 @@ master_doc = 'index'
project = u'Object Storage API Reference'
copyright = u'2010-present, OpenStack Foundation'
-# The version info for the project you're documenting, acts as replacement for
-# |version| and |release|, also used in various other places throughout the
-# built documents.
-#
-# The short X.Y version.
-version = __version__.rsplit('.', 1)[0]
-# The full version, including alpha/beta/rc tags.
-release = __version__
-
-# html_context allows us to pass arbitrary values into the html template
-html_context = {'bug_tag': 'api-ref',
- 'bug_project': 'swift'}
-
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
@@ -108,7 +92,12 @@ add_module_names = False
show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
-pygments_style = 'sphinx'
+pygments_style = 'native'
+
+# openstackdocstheme options
+openstackdocs_repo_name = 'openstack/swift'
+openstackdocs_bug_project = 'swift'
+openstackdocs_bug_tag = 'api-ref'
# -- Options for man page output ----------------------------------------------
@@ -152,25 +141,6 @@ pygments_style = 'sphinx'
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
-# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
-# using the given strftime format.
-# html_last_updated_fmt = '%b %d, %Y'
-if 'SOURCE_DATE_EPOCH' in os.environ:
- now = float(os.environ.get('SOURCE_DATE_EPOCH'))
- html_last_updated_fmt = datetime.datetime.utcfromtimestamp(now).isoformat()
-else:
- git_cmd = ["git", "log", "--pretty=format:'%ad, commit %h'",
- "--date=local", "-n1"]
- try:
- html_last_updated_fmt = subprocess.Popen(
- git_cmd, stdout=subprocess.PIPE).communicate()[0]
- except OSError:
- warnings.warn('Cannot get last updated time from git repository. '
- 'Not setting "html_last_updated_fmt".')
- else:
- if not isinstance(html_last_updated_fmt, str):
- # for py3
- html_last_updated_fmt = html_last_updated_fmt.decode('ascii')
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
diff --git a/bin/swift-get-nodes b/bin/swift-get-nodes
index 3d944bf56..5bc21c945 100755
--- a/bin/swift-get-nodes
+++ b/bin/swift-get-nodes
@@ -52,6 +52,8 @@ if __name__ == '__main__':
help='Specify which policy to use')
parser.add_option('-d', '--swift-dir', default='/etc/swift',
dest='swift_dir', help='Path to swift directory')
+ parser.add_option('-Q', '--quoted', action='store_true',
+ help='Assume swift paths are quoted')
options, args = parser.parse_args()
if set_swift_dir(options.swift_dir):
diff --git a/bin/swift-object-relinker b/bin/swift-object-relinker
index 0712ea769..7afd7b873 100755
--- a/bin/swift-object-relinker
+++ b/bin/swift-object-relinker
@@ -28,6 +28,8 @@ if __name__ == '__main__':
dest='swift_dir', help='Path to swift directory')
parser.add_argument('--devices', default='/srv/node',
dest='devices', help='Path to swift device directory')
+ parser.add_argument('--device', default=None, dest='device',
+ help='Device name to relink (default: all)')
parser.add_argument('--skip-mount-check', default=False,
help='Don\'t test if disk is mounted',
action="store_true", dest='skip_mount_check')
diff --git a/doc/requirements.txt b/doc/requirements.txt
index 88f49c663..46c4f5d8b 100644
--- a/doc/requirements.txt
+++ b/doc/requirements.txt
@@ -2,10 +2,9 @@
# of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later.
# this is required for the docs build jobs
-sphinx>=1.6.2,<2.0.0;python_version=='2.7' # BSD
-sphinx>=1.6.2;python_version>='3.4' # BSD
-openstackdocstheme>=1.30.0 # Apache-2.0
-reno>=1.8.0 # Apache-2.0
+sphinx>=2.0.0,!=2.1.0 # BSD
+openstackdocstheme>=2.2.1 # Apache-2.0
+reno>=3.1.0 # Apache-2.0
os-api-ref>=1.0.0 # Apache-2.0
python-keystoneclient!=2.1.0,>=2.0.0 # Apache-2.0
sphinxcontrib-svg2pdfconverter>=0.1.0 # BSD
diff --git a/doc/saio/swift/proxy-server.conf b/doc/saio/swift/proxy-server.conf
index ca9431b24..8b64a6c09 100644
--- a/doc/saio/swift/proxy-server.conf
+++ b/doc/saio/swift/proxy-server.conf
@@ -80,6 +80,9 @@ use = egg:swift#copy
[filter:listing_formats]
use = egg:swift#listing_formats
+[filter:domain_remap]
+use = egg:swift#domain_remap
+
[filter:symlink]
use = egg:swift#symlink
diff --git a/doc/source/conf.py b/doc/source/conf.py
index 3f048d68f..2ca5d8477 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -29,7 +29,6 @@
import datetime
import logging
import os
-from swift import __version__
import sys
# NOTE(amotoki): Our current doc build job uses an older version of
@@ -80,15 +79,6 @@ else:
now = datetime.date.today()
copyright = u'%d, OpenStack Foundation' % now.year
-# The version info for the project you're documenting, acts as replacement for
-# |version| and |release|, also used in various other places throughout the
-# built documents.
-#
-# The short X.Y version.
-version = __version__.rsplit('.', 1)[0]
-# The full version, including alpha/beta/rc tags.
-release = __version__
-
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
@@ -122,7 +112,7 @@ exclude_trees = []
show_authors = True
# The name of the Pygments (syntax highlighting) style to use.
-pygments_style = 'sphinx'
+pygments_style = 'native'
# A list of ignored prefixes for module index sorting.
modindex_common_prefix = ['swift.']
@@ -172,11 +162,6 @@ html_theme_options = {
# robots.txt.
html_extra_path = ['_extra']
-# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
-# using the given strftime format.
-# html_last_updated_fmt = '%b %d, %Y'
-html_last_updated_fmt = '%Y-%m-%d %H:%M'
-
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
@@ -248,6 +233,8 @@ latex_documents = [
latex_use_xindy = False
# -- Options for openstackdocstheme -------------------------------------------
-repository_name = 'openstack/swift'
-bug_project = 'swift'
-bug_tag = ''
+openstackdocs_repo_name = 'openstack/swift'
+openstackdocs_pdf_link = True
+openstackdocs_auto_name = False
+openstackdocs_bug_project = 'swift'
+openstackdocs_bug_tag = ''
diff --git a/doc/source/contributor/contributing.rst b/doc/source/contributor/contributing.rst
new file mode 100644
index 000000000..592cf90c8
--- /dev/null
+++ b/doc/source/contributor/contributing.rst
@@ -0,0 +1,79 @@
+.. include:: ../../../CONTRIBUTING.rst
+
+Community
+=========
+
+Communication
+-------------
+IRC
+ People working on the Swift project may be found in the
+ ``#openstack-swift`` channel on Freenode during working hours
+ in their timezone. The channel is logged, so if you ask a question
+ when no one is around, you can check the log to see if it's been
+ answered: http://eavesdrop.openstack.org/irclogs/%23openstack-swift/
+
+weekly meeting
+ This is a Swift team meeting. The discussion in this meeting is about
+ all things related to the Swift project:
+
+- time: http://eavesdrop.openstack.org/#Swift_Team_Meeting
+- agenda: https://wiki.openstack.org/wiki/Meetings/Swift
+
+mailing list
+ We use the openstack-discuss@lists.openstack.org mailing list for
+ asynchronous discussions or to communicate with other OpenStack teams.
+ Use the prefix ``[swift]`` in your subject line (it's a high-volume
+ list, so most people use email filters).
+
+ More information about the mailing list, including how to subscribe
+ and read the archives, can be found at:
+ http://lists.openstack.org/cgi-bin/mailman/listinfo/openstack-discuss
+
+Contacting the Core Team
+------------------------
+
+The swift-core team is an active group of contributors who are responsible
+for directing and maintaining the Swift project. As a new contributor, your
+interaction with this group will be mostly through code reviews, because
+only members of swift-core can approve a code change to be merged into the
+code repository. But the swift-core team also spend time on IRC so feel
+free to drop in to ask questions or just to meet us.
+
+.. note::
+ Although your contribution will require reviews by members of
+ swift-core, these aren't the only people whose reviews matter.
+ Anyone with a gerrit account can post reviews, so you can ask
+ other developers you know to review your code ... and you can
+ review theirs. (A good way to learn your way around the codebase
+ is to review other people's patches.)
+
+ If you're thinking, "I'm new at this, how can I possibly provide
+ a helpful review?", take a look at `How to Review Changes the
+ OpenStack Way
+ <https://docs.openstack.org/project-team-guide/review-the-openstack-way.html>`_.
+
+ Or for more specifically in a Swift context read :doc:`review_guidelines`
+
+You can learn more about the role of core reviewers in the OpenStack
+governance documentation:
+https://docs.openstack.org/contributors/common/governance.html#core-reviewer
+
+The membership list of swift-core is maintained in gerrit:
+https://review.opendev.org/#/admin/groups/24,members
+
+You can also find the members of the swift-core team at the Swift weekly
+meetings.
+
+Getting Your Patch Merged
+-------------------------
+Understanding how reviewers review and what they look for will help getting
+your code merged. See `Swift Review Guidelines <contributor/review_guidelines>`_
+for how we review code.
+
+Keep in mind that reviewers are also human; if something feels stalled, then
+come and poke us on IRC or add it to our meeting agenda.
+
+Project Team Lead Duties
+------------------------
+All common PTL duties are enumerated in the `PTL guide
+<https://docs.openstack.org/project-team-guide/ptl.html>`_.
diff --git a/doc/source/contributor/review_guidelines.rst b/doc/source/contributor/review_guidelines.rst
new file mode 100644
index 000000000..314e764f4
--- /dev/null
+++ b/doc/source/contributor/review_guidelines.rst
@@ -0,0 +1 @@
+.. include:: ../../../REVIEW_GUIDELINES.rst
diff --git a/doc/source/index.rst b/doc/source/index.rst
index 9d3023aac..7aeb82e70 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -66,6 +66,15 @@ Overview and Concepts
ring_partpower
associated_projects
+Contributor Documentation
+=========================
+
+.. toctree::
+ :maxdepth: 2
+
+ contributor/contributing
+ contributor/review_guidelines
+
Developer Documentation
=======================
diff --git a/doc/source/s3_compat.rst b/doc/source/s3_compat.rst
index 7ad655cee..04010a7c1 100644
--- a/doc/source/s3_compat.rst
+++ b/doc/source/s3_compat.rst
@@ -60,8 +60,6 @@ Amazon S3 operations
+------------------------------------------------+------------------+--------------+
| `PUT Bucket acl`_ | Core-API | Yes |
+------------------------------------------------+------------------+--------------+
-| `Object tagging`_ | Core-API | Yes |
-+------------------------------------------------+------------------+--------------+
| `Versioning`_ | Versioning | Yes |
+------------------------------------------------+------------------+--------------+
| `Bucket notification`_ | Notifications | No |
@@ -78,6 +76,8 @@ Amazon S3 operations
+------------------------------------------------+------------------+--------------+
| `Delete Multiple Objects`_ | Advanced Feature | Yes |
+------------------------------------------------+------------------+--------------+
+| `Object tagging`_ | Advanced Feature | No |
++------------------------------------------------+------------------+--------------+
| `GET Object torrent`_ | Advanced Feature | No |
+------------------------------------------------+------------------+--------------+
| `Bucket inventory`_ | Advanced Feature | No |
diff --git a/etc/proxy-server.conf-sample b/etc/proxy-server.conf-sample
index 2958347d1..0387fde20 100644
--- a/etc/proxy-server.conf-sample
+++ b/etc/proxy-server.conf-sample
@@ -470,7 +470,10 @@ use = egg:swift#s3api
# With either tempauth or your custom auth:
# - Put s3api just before your auth filter(s) in the pipeline
# With keystone:
-# - Put s3api and s3token before keystoneauth in the pipeline
+# - Put s3api and s3token before keystoneauth in the pipeline, but after
+# auth_token
+# If you have ratelimit enabled for Swift requests, you may want to place a
+# second copy after auth to also ratelimit S3 requests.
#
# Swift has no concept of the S3's resource owner; the resources
# (i.e. containers and objects) created via the Swift API have no owner
diff --git a/lower-constraints.txt b/lower-constraints.txt
index 2c5c894fc..0928e7533 100644
--- a/lower-constraints.txt
+++ b/lower-constraints.txt
@@ -44,8 +44,6 @@ netifaces==0.8
nose==1.3.7
nosehtmloutput==0.0.3
nosexcover==1.0.10
-openstackdocstheme==1.30.0
-os-api-ref==1.0.0
os-testr==0.8.0
oslo.config==4.0.0
oslo.i18n==3.20.0
@@ -68,15 +66,12 @@ python-swiftclient==3.2.0
python-openstackclient==3.12.0
pytz==2018.3
PyYAML==3.12
-reno==1.8.0
requests==2.14.2
requests-mock==1.2.0
rfc3986==1.1.0
six==1.10.0
smmap2==2.0.3
snowballstemmer==1.2.1
-Sphinx==1.6.2
-sphinxcontrib-websupport==1.0.1
stestr==2.0.0
stevedore==1.28.0
testtools==2.3.0
diff --git a/releasenotes/source/conf.py b/releasenotes/source/conf.py
index 1dea1af1d..932a4cbbd 100644
--- a/releasenotes/source/conf.py
+++ b/releasenotes/source/conf.py
@@ -116,7 +116,7 @@ exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
-pygments_style = 'sphinx'
+pygments_style = 'native'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
@@ -174,11 +174,6 @@ html_theme = 'openstackdocs'
#
# html_extra_path = []
-# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
-# using the given strftime format.
-# html_last_updated_fmt = '%b %d, %Y'
-html_last_updated_fmt = '%Y-%m-%d %H:%M'
-
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
@@ -352,6 +347,7 @@ htmlhelp_basename = 'SwiftReleaseNotesdoc'
locale_dirs = ['locale/']
# -- Options for openstackdocstheme -------------------------------------------
-repository_name = 'openstack/swift'
-bug_project = 'swift'
-bug_tag = ''
+openstackdocs_repo_name = 'openstack/swift'
+openstackdocs_auto_name = False
+openstackdocs_bug_project = 'swift'
+openstackdocs_bug_tag = ''
diff --git a/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po b/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po
index f7ce67a0b..5f16d6290 100644
--- a/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po
+++ b/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po
@@ -5,11 +5,11 @@ msgid ""
msgstr ""
"Project-Id-Version: swift\n"
"Report-Msgid-Bugs-To: \n"
-"POT-Creation-Date: 2020-04-14 13:23+0000\n"
+"POT-Creation-Date: 2020-05-27 04:38+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
-"PO-Revision-Date: 2020-04-16 12:48+0000\n"
+"PO-Revision-Date: 2020-05-26 11:24+0000\n"
"Last-Translator: Andi Chandler <andi@gowling.com>\n"
"Language-Team: English (United Kingdom)\n"
"Language: en_GB\n"
@@ -112,6 +112,9 @@ msgstr "2.23.1"
msgid "2.24.0"
msgstr "2.24.0"
+msgid "2.25.0"
+msgstr "2.25.0"
+
msgid ""
"A PUT or POST to a container will now update the container's Last-Modified "
"time, and that value will be included in a GET/HEAD response."
@@ -156,6 +159,9 @@ msgstr ""
"Account and container replication stats logs now include ``remote_merges``, "
"the number of times a whole database was sent to another node."
+msgid "Account quotas are now enforced even on empty accounts."
+msgstr "Account quotas are now enforced even on empty accounts."
+
msgid "Add Composite Ring Functionality"
msgstr "Add Composite Ring Functionality"
@@ -239,6 +245,19 @@ msgstr ""
"setting to enable or disable this functionality."
msgid ""
+"Add support for multiple root encryption secrets for the trivial and KMIP "
+"keymasters. This allows operators to rotate encryption keys over time "
+"without needing to re-encrypt all existing data in the cluster. Please see "
+"the included sample config files for instructions on how to multiple "
+"encryption keys."
+msgstr ""
+"Add support for multiple root encryption secrets for the trivial and KMIP "
+"keymasters. This allows operators to rotate encryption keys over time "
+"without needing to re-encrypt all existing data in the cluster. Please see "
+"the included sample config files for instructions on how to multiple "
+"encryption keys."
+
+msgid ""
"Add support to increase object ring partition power transparently to end "
"users and with no cluster downtime. Increasing the ring part power allows "
"for incremental adjustment to the upper bound of the cluster size. Please "
@@ -261,6 +280,19 @@ msgstr ""
"locations more quickly. This helps when adding capacity to a ring."
msgid ""
+"Added \"static symlinks\", which perform some validation as they follow "
+"redirects and include more information about their target in container "
+"listings. For more information, see the `symlink middleware <https://docs."
+"openstack.org/swift/latest/middleware.html#symlink>`__ section of the "
+"documentation."
+msgstr ""
+"Added \"static symlinks\", which perform some validation as they follow "
+"redirects and include more information about their target in container "
+"listings. For more information, see the `symlink middleware <https://docs."
+"openstack.org/swift/latest/middleware.html#symlink>`__ section of the "
+"documentation."
+
+msgid ""
"Added ``--swift-versions`` to ``swift-recon`` CLI to compare installed "
"versions in the cluster."
msgstr ""
@@ -271,6 +303,15 @@ msgid "Added ``-d <devs>`` and ``-p <partitions>`` command line options."
msgstr "Added ``-d <devs>`` and ``-p <partitions>`` command line options."
msgid ""
+"Added ``ttfb`` (Time to First Byte) and ``pid`` (Process ID) to the set of "
+"available proxy-server log fields. For more information, see `the "
+"documentation <https://docs.openstack.org/swift/latest/logs.html>`__."
+msgstr ""
+"Added ``ttfb`` (Time to First Byte) and ``pid`` (Process ID) to the set of "
+"available proxy-server log fields. For more information, see `the "
+"documentation <https://docs.openstack.org/swift/latest/logs.html>`__."
+
+msgid ""
"Added a \"user\" option to the drive-audit config file. Its value is used to "
"set the owner of the drive-audit recon cache."
msgstr ""
@@ -278,6 +319,13 @@ msgstr ""
"set the owner of the drive-audit recon cache."
msgid ""
+"Added a ``keep_idle`` config option to configure KEEPIDLE time for TCP "
+"sockets. The default value is the old constant of 600."
+msgstr ""
+"Added a ``keep_idle`` config option to configure KEEPIDLE time for TCP "
+"sockets. The default value is the old constant of 600."
+
+msgid ""
"Added a configurable URL base to staticweb, fixing issues when the "
"accessible endpoint isn't known to the Swift cluster (eg http vs https)."
msgstr ""
@@ -291,6 +339,66 @@ msgid "Added a handoffs-only mode."
msgstr "Added a handoffs-only mode."
msgid ""
+"Added a new middleware that allows users and operators to configure accounts "
+"and containers to use RFC-compliant (i.e., double-quoted) ETags. This may be "
+"useful when using Swift as an origin for some content delivery networks. For "
+"more information, see `the middleware documentation <https://docs.openstack."
+"org/swift/latest/middleware.html#etag-quoter>`__."
+msgstr ""
+"Added a new middleware that allows users and operators to configure accounts "
+"and containers to use RFC-compliant (i.e., double-quoted) ETags. This may be "
+"useful when using Swift as an origin for some content delivery networks. For "
+"more information, see `the middleware documentation <https://docs.openstack."
+"org/swift/latest/middleware.html#etag-quoter>`__."
+
+msgid ""
+"Added a new middleware to allow accounts and containers to opt-in to RFC-"
+"compliant ETags. For more information, see `the documentation <https://docs."
+"openstack.org/swift/latest/middleware.html#module-swift.common.middleware."
+"etag_quoter>`__. Clients should be aware of the fact that ETags may be "
+"quoted for RFC compliance; this may become the default behavior in some "
+"future release."
+msgstr ""
+"Added a new middleware to allow accounts and containers to opt-in to RFC-"
+"compliant ETags. For more information, see `the documentation <https://docs."
+"openstack.org/swift/latest/middleware.html#module-swift.common.middleware."
+"etag_quoter>`__. Clients should be aware of the fact that ETags may be "
+"quoted for RFC compliance; this may become the default behaviour in some "
+"future release."
+
+msgid ""
+"Added a new object versioning mode, with APIs for querying and accessing old "
+"versions. For more information, see `the documentation <https://docs."
+"openstack.org/swift/latest/middleware.html#module-swift.common.middleware."
+"versioned_writes.object_versioning>`__."
+msgstr ""
+"Added a new object versioning mode, with APIs for querying and accessing old "
+"versions. For more information, see `the documentation <https://docs."
+"openstack.org/swift/latest/middleware.html#module-swift.common.middleware."
+"versioned_writes.object_versioning>`__."
+
+msgid ""
+"Added an experimental ``swift-ring-composer`` CLI tool to build composite "
+"rings."
+msgstr ""
+"Added an experimental ``swift-ring-composer`` CLI tool to build composite "
+"rings."
+
+msgid ""
+"Added an operator tool, ``swift-container-deleter``, to asynchronously "
+"delete some or all objects in a container using the object expirers."
+msgstr ""
+"Added an operator tool, ``swift-container-deleter``, to asynchronously "
+"delete some or all objects in a container using the object expirers."
+
+msgid ""
+"Added an optional ``read_only`` middleware to make an entire cluster or "
+"individual accounts read only."
+msgstr ""
+"Added an optional ``read_only`` middleware to make an entire cluster or "
+"individual accounts read only."
+
+msgid ""
"Added container sharding, an operator controlled feature that may be used to "
"shard very large container databases into a number of smaller shard "
"containers. This mitigates the issues with one large DB by distributing the "
@@ -308,6 +416,12 @@ msgstr ""
msgid "Added container/object listing with prefix to InternalClient."
msgstr "Added container/object listing with prefix to InternalClient."
+msgid "Added support for Python 3.8."
+msgstr "Added support for Python 3.8."
+
+msgid "Added support for S3 versioning using the above new mode."
+msgstr "Added support for S3 versioning using the above new mode."
+
msgid "Added support for inline data segments in SLO manifests."
msgstr "Added support for inline data segments in SLO manifests."
@@ -347,19 +461,46 @@ msgstr ""
"unsatisfied-range value. This allows the caller to know the valid range "
"request value for an object."
+msgid "Allow ``fallocate_reserve`` to be specified as a percentage."
+msgstr "Allow ``fallocate_reserve`` to be specified as a percentage."
+
+msgid "Allow direct_client users to overwrite the ``X-Timestamp`` header."
+msgstr "Allow direct_client users to overwrite the ``X-Timestamp`` header."
+
msgid "Allow the expirer to gracefully move past updating stale work items."
msgstr "Allow the expirer to gracefully move past updating stale work items."
msgid "Always set Swift processes to use UTC."
msgstr "Always set Swift processes to use UTC."
+msgid "Background corruption-detection improvements"
+msgstr "Background corruption-detection improvements"
+
msgid "Bug Fixes"
msgstr "Bug Fixes"
+msgid "COPY now works with unicode account names."
+msgstr "COPY now works with Unicode account names."
+
msgid "Cache all answers from nameservers in cname_lookup."
msgstr "Cache all answers from nameservers in cname_lookup."
msgid ""
+"Change the behavior of the EC reconstructor to perform a fragment rebuild to "
+"a handoff node when a primary peer responds with 507 to the REPLICATE "
+"request. This changes EC to match the existing behavior of replication when "
+"drives fail. After a rebalance of EC rings (potentially removing unmounted/"
+"failed devices), it's most IO efficient to run in handoffs_only mode to "
+"avoid unnecessary rebuilds."
+msgstr ""
+"Change the behaviour of the EC reconstructor to perform a fragment rebuild "
+"to a handoff node when a primary peer responds with 507 to the REPLICATE "
+"request. This changes EC to match the existing behaviour of replication when "
+"drives fail. After a rebalance of EC rings (potentially removing unmounted/"
+"failed devices), it's most IO efficient to run in handoffs_only mode to "
+"avoid unnecessary rebuilds."
+
+msgid ""
"Changed where liberasurecode-devel for CentOS 7 is referenced and installed "
"as a dependency."
msgstr ""
@@ -395,6 +536,9 @@ msgstr ""
"fragment metadata will now be validated when read and, if bad data is found, "
"the fragment will be quarantined."
+msgid "CompleteMultipartUpload requests with a ``Content-MD5`` now work."
+msgstr "CompleteMultipartUpload requests with a ``Content-MD5`` now work."
+
msgid ""
"Composite rings can be used for explicit replica placement and \"replicated "
"EC\" for global erasure codes policies."
@@ -414,6 +558,11 @@ msgstr ""
"moved in multiple components."
msgid ""
+"Container metadata related to sharding are now removed when no longer needed."
+msgstr ""
+"Container metadata related to sharding are now removed when no longer needed."
+
+msgid ""
"Container sync can now copy SLOs more efficiently by allowing the manifest "
"to be synced before all of the referenced segments. This fixes a bug where "
"container sync would not copy SLO manifests."
@@ -422,6 +571,13 @@ msgstr ""
"to be synced before all of the referenced segments. This fixes a bug where "
"container sync would not copy SLO manifests."
+msgid ""
+"Container sync now synchronizes static symlinks in a way similar to static "
+"large objects."
+msgstr ""
+"Container sync now synchronizes static symlinks in a way similar to static "
+"large objects."
+
msgid "Correctly handle deleted files with if-none-match requests."
msgstr "Correctly handle deleted files with if-none-match requests."
@@ -432,9 +588,19 @@ msgstr ""
"Correctly send 412 Precondition Failed if a user sends an invalid copy "
"destination. Previously Swift would send a 500 Internal Server Error."
+msgid "Correctness improvements"
+msgstr "Correctness improvements"
+
msgid "Critical Issues"
msgstr "Critical Issues"
+msgid ""
+"Cross-account symlinks now store correct account information in container "
+"listings. This was previously fixed in 2.22.0."
+msgstr ""
+"Cross-account symlinks now store correct account information in container "
+"listings. This was previously fixed in 2.22.0."
+
msgid "Current (Unreleased) Release Notes"
msgstr "Current (Unreleased) Release Notes"
@@ -452,6 +618,9 @@ msgstr ""
msgid "Daemons using InternalClient can now be properly killed with SIGTERM."
msgstr "Daemons using InternalClient can now be properly killed with SIGTERM."
+msgid "Data encryption updates"
+msgstr "Data encryption updates"
+
msgid ""
"Deleting an expiring object will now cause less work in the system. The "
"number of async pending files written has been reduced for all objects and "
@@ -464,6 +633,15 @@ msgstr ""
"burden on container servers."
msgid ""
+"Deployers with clusters that relied on the old implicit default location of "
+"\"US\" should explicitly set ``location = US`` in the ``[filter:s3api]`` "
+"section of proxy-server.conf before upgrading."
+msgstr ""
+"Deployers with clusters that relied on the old implicit default location of "
+"\"US\" should explicitly set ``location = US`` in the ``[filter:s3api]`` "
+"section of proxy-server.conf before upgrading."
+
+msgid ""
"Deprecate swift-temp-url and call python-swiftclient's implementation "
"instead. This adds python-swiftclient as an optional dependency of Swift."
msgstr ""
@@ -473,9 +651,24 @@ msgstr ""
msgid "Deprecation Notes"
msgstr "Deprecation Notes"
+msgid "Detect and remove invalid entries from ``hashes.pkl``"
+msgstr "Detect and remove invalid entries from ``hashes.pkl``"
+
+msgid ""
+"Device region and zone can now be changed via ``swift-ring-builder``. Note "
+"that this may cause a lot of data movement on the next rebalance as the "
+"builder tries to reach full dispersion."
+msgstr ""
+"Device region and zone can now be changed via ``swift-ring-builder``. Note "
+"that this may cause a lot of data movement on the next rebalance as the "
+"builder tries to reach full dispersion."
+
msgid "Disallow X-Delete-At header values equal to the X-Timestamp header."
msgstr "Disallow X-Delete-At header values equal to the X-Timestamp header."
+msgid "Display crypto data/metadata details in swift-object-info."
+msgstr "Display crypto data/metadata details in swift-object-info."
+
msgid "Display more info on empty rings."
msgstr "Display more info on empty rings."
@@ -563,6 +756,15 @@ msgid "Fix SLO delete for accounts with non-ASCII names."
msgstr "Fix SLO delete for accounts with non-ASCII names."
msgid ""
+"Fixed 500 from cname_lookup middleware. Previously, if the looked-up domain "
+"was used by domain_remap to update the request path, the server would "
+"respond Internal Error."
+msgstr ""
+"Fixed 500 from cname_lookup middleware. Previously, if the looked-up domain "
+"was used by domain_remap to update the request path, the server would "
+"respond Internal Error."
+
+msgid ""
"Fixed UnicodeDecodeError in the object reconstructor that would prevent "
"objects with non-ascii names from being reconstructed and caused the "
"reconstructor process to hang."
@@ -603,6 +805,13 @@ msgstr ""
"only requested if the sync request was successful."
msgid ""
+"Fixed a bug in the new object versioning API that would cause more than "
+"``limit`` results to be returned when listing."
+msgstr ""
+"Fixed a bug in the new object versioning API that would cause more than "
+"``limit`` results to be returned when listing."
+
+msgid ""
"Fixed a bug introduced in 2.15.0 where the object reconstructor would exit "
"with a traceback if no EC policy was configured."
msgstr ""
@@ -627,6 +836,13 @@ msgstr ""
"a 5xx series response."
msgid ""
+"Fixed a bug where encryption would store the incorrect key metadata if the "
+"object name starts with a slash."
+msgstr ""
+"Fixed a bug where encryption would store the incorrect key metadata if the "
+"object name starts with a slash."
+
+msgid ""
"Fixed a bug where some headers weren't being copied correctly in a COPY "
"request."
msgstr ""
@@ -643,6 +859,22 @@ msgstr ""
"Fixed a bug where the ring builder would not allow removal of a device when "
"min_part_seconds_left was greater than zero."
+msgid ""
+"Fixed a bug where zero-byte PUTs would not work properly with \"If-None-"
+"Match: \\*\" conditional requests."
+msgstr ""
+"Fixed a bug where zero-byte PUTs would not work properly with \"If-None-"
+"Match: \\*\" conditional requests."
+
+msgid ""
+"Fixed a cache invalidation issue related to GET and PUT requests to "
+"containers that would occasionally cause object PUTs to a container to 404 "
+"after the container had been successfully created."
+msgstr ""
+"Fixed a cache invalidation issue related to GET and PUT requests to "
+"containers that would occasionally cause object PUTs to a container to 404 "
+"after the container had been successfully created."
+
msgid "Fixed a few areas where the ``swiftdir`` option was not respected."
msgstr "Fixed a few areas where the ``swiftdir`` option was not respected."
@@ -668,16 +900,114 @@ msgid "Fixed a socket leak in copy middleware when a large object was copied."
msgstr "Fixed a socket leak in copy middleware when a large object was copied."
msgid ""
+"Fixed an issue in COPY where concurrent requests may have copied the wrong "
+"data."
+msgstr ""
+"Fixed an issue in COPY where concurrent requests may have copied the wrong "
+"data."
+
+msgid ""
+"Fixed an issue that caused Delete Multiple Objects requests with large "
+"bodies to 400. This was previously fixed in 2.20.0."
+msgstr ""
+"Fixed an issue that caused Delete Multiple Objects requests with large "
+"bodies to 400. This was previously fixed in 2.20.0."
+
+msgid ""
+"Fixed an issue where S3 API v4 signatures would not be validated against the "
+"body of the request, allowing a replay attack if request headers were "
+"captured by a malicious third party."
+msgstr ""
+"Fixed an issue where S3 API v4 signatures would not be validated against the "
+"body of the request, allowing a replay attack if request headers were "
+"captured by a malicious third party."
+
+msgid ""
+"Fixed an issue where a failed drive could prevent the container sharder from "
+"making progress."
+msgstr ""
+"Fixed an issue where a failed drive could prevent the container sharder from "
+"making progress."
+
+msgid ""
+"Fixed an issue where an object server failure during a client download could "
+"leave an open socket between the proxy and client."
+msgstr ""
+"Fixed an issue where an object server failure during a client download could "
+"leave an open socket between the proxy and client."
+
+msgid ""
"Fixed an issue where background consistency daemon child processes would "
"deadlock waiting on the same file descriptor."
msgstr ""
"Fixed an issue where background consistency daemon child processes would "
"deadlock waiting on the same file descriptor."
+msgid ""
+"Fixed an issue where deleted EC objects didn't have their on-disk "
+"directories cleaned up. This would cause extra resource usage on the object "
+"servers."
+msgstr ""
+"Fixed an issue where deleted EC objects didn't have their on-disk "
+"directories cleaned up. This would cause extra resource usage on the object "
+"servers."
+
+msgid ""
+"Fixed an issue where multipart uploads with the S3 API would sometimes "
+"report an error despite all segments being upload successfully."
+msgstr ""
+"Fixed an issue where multipart uploads with the S3 API would sometimes "
+"report an error despite all segments being upload successfully."
+
+msgid ""
+"Fixed an issue where non-ASCII Keystone EC2 credentials would not get mapped "
+"to the correct account. This was previously fixed in 2.20.0."
+msgstr ""
+"Fixed an issue where non-ASCII Keystone EC2 credentials would not get mapped "
+"to the correct account. This was previously fixed in 2.20.0."
+
+msgid ""
+"Fixed an issue where v4 signatures would not be validated against the body "
+"of the request, allowing a replay attack if request headers were captured by "
+"a malicious third party. Note that unsigned payloads still function normally."
+msgstr ""
+"Fixed an issue where v4 signatures would not be validated against the body "
+"of the request, allowing a replay attack if request headers were captured by "
+"a malicious third party. Note that unsigned payloads still function normally."
+
+msgid ""
+"Fixed an issue with SSYNC requests to ensure that only one request can be "
+"running on a partition at a time."
+msgstr ""
+"Fixed an issue with SSYNC requests to ensure that only one request can be "
+"running on a partition at a time."
+
+msgid ""
+"Fixed an issue with multi-region EC policies that caused the EC "
+"reconstructor to constantly attempt cross-region rebuild traffic."
+msgstr ""
+"Fixed an issue with multi-region EC policies that caused the EC "
+"reconstructor to constantly attempt cross-region rebuild traffic."
+
msgid "Fixed deadlock when logging from a tpool thread."
msgstr "Fixed deadlock when logging from a tpool thread."
msgid ""
+"Fixed deadlock when logging from a tpool thread. The object server runs "
+"certain IO-intensive methods outside the main pthread for performance. "
+"Previously, if one of those methods tried to log, this can cause a crash "
+"that eventually leads to an object server with hundreds or thousands of "
+"greenthreads, all deadlocked. The fix is to use a mutex that works across "
+"different greenlets and different pthreads."
+msgstr ""
+"Fixed deadlock when logging from a tpool thread. The object server runs "
+"certain IO-intensive methods outside the main pthread for performance. "
+"Previously, if one of those methods tried to log, this can cause a crash "
+"that eventually leads to an object server with hundreds or thousands of "
+"greenthreads, all deadlocked. The fix is to use a mutex that works across "
+"different greenlets and different pthreads."
+
+msgid ""
"Fixed encoding issue in ssync where a mix of ascii and non-ascii metadata "
"values would cause an error."
msgstr ""
@@ -696,6 +1026,19 @@ msgstr ""
"drive space used and filling the cluster."
msgid ""
+"Fixed issue where bulk requests using xml and expect 100-continue would "
+"return a malformed HTTP response."
+msgstr ""
+"Fixed issue where bulk requests using XML and expect 100-continue would "
+"return a malformed HTTP response."
+
+msgid "Fixed listings for sharded containers."
+msgstr "Fixed listings for sharded containers."
+
+msgid "Fixed non-ASCII account metadata handling."
+msgstr "Fixed non-ASCII account metadata handling."
+
+msgid ""
"Fixed non-deterministic suffix updates in hashes.pkl where a partition may "
"be updated much less often than expected."
msgstr ""
@@ -730,17 +1073,26 @@ msgstr ""
msgid "Fixed some minor test compatibility issues."
msgstr "Fixed some minor test compatibility issues."
+msgid "Fixed some title-casing of headers."
+msgstr "Fixed some title-casing of headers."
+
msgid "Fixed the KeyError message when auditor finds an expired object."
msgstr "Fixed the KeyError message when auditor finds an expired object."
msgid "Fixed the stats calculation in the erasure code reconstructor."
msgstr "Fixed the stats calculation in the erasure code reconstructor."
+msgid "Fixed time skew when using X-Delete-After."
+msgstr "Fixed time skew when using X-Delete-After."
+
msgid ""
"Fixed using ``swift-ring-builder set_weight`` with more than one device."
msgstr ""
"Fixed using ``swift-ring-builder set_weight`` with more than one device."
+msgid "Fixed v1 listings that end with a non-ASCII object name."
+msgstr "Fixed v1 listings that end with a non-ASCII object name."
+
msgid ""
"For further information see the `docs <https://docs.openstack.org/swift/"
"latest/overview_ring.html#module-swift.common.ring.composite_builder>`__"
@@ -748,6 +1100,23 @@ msgstr ""
"For further information see the `docs <https://docs.openstack.org/swift/"
"latest/overview_ring.html#module-swift.common.ring.composite_builder>`__"
+msgid ""
+"For new multipart-uploads via the S3 API, the ETag that is stored will be "
+"calculated in the same way that AWS uses. This ETag will be used in GET/HEAD "
+"responses, bucket listings, and conditional requests via the S3 API. "
+"Accessing the same object via the Swift API will use the SLO Etag; however, "
+"in JSON container listings the multipart upload etag will be exposed in a "
+"new \"s3_etag\" key. Previously, some S3 clients would complain about "
+"download corruption when the ETag did not have a '-'."
+msgstr ""
+"For new multipart-uploads via the S3 API, the ETag that is stored will be "
+"calculated in the same way that AWS uses. This ETag will be used in GET/HEAD "
+"responses, bucket listings, and conditional requests via the S3 API. "
+"Accessing the same object via the Swift API will use the SLO Etag; however, "
+"in JSON container listings the multipart upload etag will be exposed in a "
+"new \"s3_etag\" key. Previously, some S3 clients would complain about "
+"download corruption when the ETag did not have a '-'."
+
msgid "Fractional replicas are no longer allowed for erasure code policies."
msgstr "Fractional replicas are no longer allowed for erasure code policies."
@@ -764,6 +1133,15 @@ msgstr ""
"forwarded to the referenced object. POST requests sent to a symlink will "
"result in a 307 Temporary Redirect response."
+msgid ""
+"Getting an SLO manifest with ``?format=raw`` now responds with an ETag that "
+"matches the MD5 of the generated body rather than the MD5 of the manifest "
+"stored on disk."
+msgstr ""
+"Getting an SLO manifest with ``?format=raw`` now responds with an ETag that "
+"matches the MD5 of the generated body rather than the MD5 of the manifest "
+"stored on disk."
+
msgid "I/O priority is now supported on AArch64 architecture."
msgstr "I/O priority is now supported on AArch64 architecture."
@@ -776,6 +1154,9 @@ msgstr ""
"create fails, it will now return a server error (500) instead of Not Found "
"(404)."
+msgid "If running Swift under Python 3, ``eventlet`` must be at least 0.25.0."
+msgstr "If running Swift under Python 3, ``eventlet`` must be at least 0.25.0."
+
msgid ""
"If using erasure coding with ISA-L in rs_vand mode and 5 or more parity "
"fragments, Swift will emit a warning. This is a configuration that is known "
@@ -793,12 +1174,28 @@ msgstr ""
"be migrated as soon as possible. Please see https://bugs.launchpad.net/swift/"
"+bug/1639691 for more information."
+msgid "If you have a config file like this::"
+msgstr "If you have a config file like this::"
+
msgid "If you upgrade and roll back, you must delete all `hashes.pkl` files."
msgstr "If you upgrade and roll back, you must delete all `hashes.pkl` files."
+msgid "If you want updates to be processed exactly as before, do this::"
+msgstr "If you want updates to be processed exactly as before, do this::"
+
+msgid ""
+"If you've been testing Swift on Python 3, upgrade at your earliest "
+"convenience."
+msgstr ""
+"If you've been testing Swift on Python 3, upgrade at your earliest "
+"convenience."
+
msgid "Imported docs content from openstack-manuals project."
msgstr "Imported docs content from openstack-manuals project."
+msgid "Improved S3 API compatibility."
+msgstr "Improved S3 API compatibility."
+
msgid ""
"Improved ``object-updater`` stats logging. It now tells you all of its stats "
"(successes, failures, quarantines due to bad pickles, unlinks, and errors), "
@@ -811,11 +1208,35 @@ msgstr ""
"end of a pass remains and has been expanded to also include all stats."
msgid ""
+"Improved container-sync performance when data has already been deleted or "
+"overwritten."
+msgstr ""
+"Improved container-sync performance when data has already been deleted or "
+"overwritten."
+
+msgid ""
"Improved performance by eliminating an unneeded directory structure hash."
msgstr ""
"Improved performance by eliminating an unneeded directory structure hash."
msgid ""
+"Improved performance of sharded container listings when performing prefix "
+"listings."
+msgstr ""
+"Improved performance of sharded container listings when performing prefix "
+"listings."
+
+msgid ""
+"Improved proxy-server performance by reducing unnecessary locking, memory "
+"copies, and eventlet scheduling."
+msgstr ""
+"Improved proxy-server performance by reducing unnecessary locking, memory "
+"copies, and eventlet scheduling."
+
+msgid "Improved proxy-to-backend requests to be more RFC-compliant."
+msgstr "Improved proxy-to-backend requests to be more RFC-compliant."
+
+msgid ""
"Improved the granularity of the ring dispersion metric so that small "
"improvements after a rebalance can show changes in the dispersion number. "
"Dispersion in existing and new rings can be recalculated using the new ``--"
@@ -878,6 +1299,9 @@ msgstr ""
"the filename. This saves one inode for every EC .data file. Existing ."
"durable files will not be removed, and they will continue to work just fine."
+msgid "Internal client no longer logs object DELETEs as status 499."
+msgstr "Internal client no longer logs object DELETEs as status 499."
+
msgid ""
"Let clients request heartbeats during SLO PUTs by including the query "
"parameter ``heartbeat=on``."
@@ -896,6 +1320,13 @@ msgstr ""
"exposes the value to offer consistency with the object listings on "
"containers."
+msgid ""
+"Lock timeouts in the container updater are now logged at INFO level, not "
+"ERROR."
+msgstr ""
+"Lock timeouts in the container updater are now logged at INFO level, not "
+"ERROR."
+
msgid "Log correct status code for conditional requests."
msgstr "Log correct status code for conditional requests."
@@ -908,10 +1339,37 @@ msgstr ""
"config. Configure the ``versioned_writes`` middleware in the proxy server "
"instead. This option will be ignored in a future release."
+msgid ""
+"Log deprecation warnings for ``run_pause``. This setting was deprecated in "
+"Swift 2.4.0 and is replaced by ``interval``. It may be removed in a future "
+"release."
+msgstr ""
+"Log deprecation warnings for ``run_pause``. This setting was deprecated in "
+"Swift 2.4.0 and is replaced by ``interval``. It may be removed in a future "
+"release."
+
+msgid ""
+"Log formats are now more configurable and include support for anonymization. "
+"See the ``log_msg_template`` option in ``proxy-server.conf`` and `the Swift "
+"documentation <https://docs.openstack.org/swift/latest/logs.html#proxy-"
+"logs>`__ for more information."
+msgstr ""
+"Log formats are now more configurable and include support for anonymization. "
+"See the ``log_msg_template`` option in ``proxy-server.conf`` and `the Swift "
+"documentation <https://docs.openstack.org/swift/latest/logs.html#proxy-"
+"logs>`__ for more information."
+
msgid "Log the correct request type of a subrequest downstream of copy."
msgstr "Log the correct request type of a sub-request downstream of copy."
msgid ""
+"Lower bounds of dependencies have been updated to reflect what is actually "
+"tested."
+msgstr ""
+"Lower bounds of dependencies have been updated to reflect what is actually "
+"tested."
+
+msgid ""
"Make mount_check option usable in containerized environments by adding a "
"check for an \".ismount\" file at the root directory of a device."
msgstr ""
@@ -942,10 +1400,43 @@ msgstr ""
"Moved other-requirements.txt to bindep.txt. bindep.txt lists non-Python "
"dependencies of Swift."
+msgid ""
+"Multi-character strings may now be used as delimiters in account and "
+"container listings."
+msgstr ""
+"Multi-character strings may now be used as delimiters in account and "
+"container listings."
+
+msgid ""
+"Multipart object segments are now actually deleted when the multipart object "
+"is deleted via the S3 API."
+msgstr ""
+"Multipart object segments are now actually deleted when the multipart object "
+"is deleted via the S3 API."
+
+msgid "Multipart upload parts may now be copied from other multipart uploads."
+msgstr "Multipart upload parts may now be copied from other multipart uploads."
+
+msgid ""
+"Multiple keymaster middlewares are now supported. This allows migration from "
+"one key provider to another."
+msgstr ""
+"Multiple keymaster middlewares are now supported. This allows migration from "
+"one key provider to another."
+
msgid "New Features"
msgstr "New Features"
msgid ""
+"New buckets created via the S3 API will now store multi-part upload data in "
+"the same storage policy as other data rather than the cluster's default "
+"storage policy."
+msgstr ""
+"New buckets created via the S3 API will now store multi-part upload data in "
+"the same storage policy as other data rather than the cluster's default "
+"storage policy."
+
+msgid ""
"New config variables to change the schedule priority and I/O scheduling "
"class. Servers and daemons now understand `nice_priority`, `ionice_class`, "
"and `ionice_priority` to schedule their relative importance. Please read "
@@ -962,6 +1453,15 @@ msgid "Newton Series Release Notes"
msgstr "Newton Series Release Notes"
msgid ""
+"Note that ``secret_id`` values must remain unique across all keymasters in a "
+"given pipeline. If they are not unique, the right-most keymaster will take "
+"precedence."
+msgstr ""
+"Note that ``secret_id`` values must remain unique across all keymasters in a "
+"given pipeline. If they are not unique, the right-most keymaster will take "
+"precedence."
+
+msgid ""
"Note that after writing EC data with Swift 2.11.0 or later, that data will "
"not be accessible to earlier versions of Swift."
msgstr ""
@@ -991,10 +1491,28 @@ msgstr ""
msgid "Now ``swift-recon-cron`` works with conf.d configs."
msgstr "Now ``swift-recon-cron`` works with conf.d configs."
+msgid ""
+"O_TMPFILE support is now detected by attempting to use it instead of looking "
+"at the kernel version. This allows older kernels with backported patches to "
+"take advantage of the O_TMPFILE functionality."
+msgstr ""
+"O_TMPFILE support is now detected by attempting to use it instead of looking "
+"at the kernel version. This allows older kernels with backported patches to "
+"take advantage of the O_TMPFILE functionality."
+
msgid "Object expiry improvements"
msgstr "Object expiry improvements"
msgid ""
+"Object reconstructor logs are now prefixed with information about the "
+"specific worker process logging the message. This makes reading the logs and "
+"understanding the messages much simpler."
+msgstr ""
+"Object reconstructor logs are now prefixed with information about the "
+"specific worker process logging the message. This makes reading the logs and "
+"understanding the messages much simpler."
+
+msgid ""
"Object versioning now supports a \"history\" mode in addition to the older "
"\"stack\" mode. The difference is in how DELETE requests are handled. For "
"full details, please read http://docs.openstack.org/developer/swift/"
@@ -1005,10 +1523,56 @@ msgstr ""
"full details, please read http://docs.openstack.org/developer/swift/"
"overview_object_versioning.html."
+msgid ""
+"Object writes to a container whose existence cannot be verified now 503 "
+"instead of 404."
+msgstr ""
+"Object writes to a container whose existence cannot be verified now 503 "
+"instead of 404."
+
+msgid ""
+"Objects with an ``X-Delete-At`` value in the far future no longer cause "
+"backend server errors."
+msgstr ""
+"Objects with an ``X-Delete-At`` value in the far future no longer cause "
+"backend server errors."
+
msgid "Ocata Series Release Notes"
msgstr "Ocata Series Release Notes"
msgid ""
+"On Python 3, certain S3 API headers are now lower case as they would be "
+"coming from AWS."
+msgstr ""
+"On Python 3, certain S3 API headers are now lower case as they would be "
+"coming from AWS."
+
+msgid ""
+"On Python 3, fixed a RecursionError in swift-dispersion-report when using "
+"TLS."
+msgstr ""
+"On Python 3, fixed a RecursionError in swift-dispersion-report when using "
+"TLS."
+
+msgid ""
+"On Python 3, fixed an issue when reading or writing objects with a content "
+"type like ``message/*``. Previously, Swift would fail to respond."
+msgstr ""
+"On Python 3, fixed an issue when reading or writing objects with a content "
+"type like ``message/*``. Previously, Swift would fail to respond."
+
+msgid ""
+"On Python 3, the KMS keymaster now works with secrets stored in Barbican "
+"with a ``text/plain`` payload-content-type."
+msgstr ""
+"On Python 3, the KMS keymaster now works with secrets stored in Barbican "
+"with a ``text/plain`` payload-content-type."
+
+msgid "On Python 3, the formpost middleware now works with unicode file names."
+msgstr ""
+"On Python 3, the formpost middleware now works with Unicode file names."
+
+msgid ""
"On newer kernels (3.15+ when using xfs), Swift will use the O_TMPFILE flag "
"when opening a file instead of creating a temporary file and renaming it on "
"commit. This makes the data path simpler and allows the filesystem to more "
@@ -1020,6 +1584,13 @@ msgstr ""
"efficiently optimise the files on disk, resulting in better performance."
msgid ""
+"On upgrade, a node configured with concurrency=N will still handle async "
+"updates N-at-a-time, but will do so using only one process instead of N."
+msgstr ""
+"On upgrade, a node configured with concurrency=N will still handle async "
+"updates N-at-a-time, but will do so using only one process instead of N."
+
+msgid ""
"Optimize the Erasure Code reconstructor protocol to reduce IO load on "
"servers."
msgstr ""
@@ -1033,6 +1604,19 @@ msgstr ""
"Optimised the common case for hashing filesystem trees, thus eliminating a "
"lot of extraneous disk I/O."
+msgid ""
+"Ordinary objects in S3 use the MD5 of the object as the ETag, just like "
+"Swift. Multipart Uploads follow a different format, notably including a dash "
+"followed by the number of segments. To that end (and for S3 API requests "
+"*only*), SLO responses via the S3 API have a literal '-N' added on the end "
+"of the ETag."
+msgstr ""
+"Ordinary objects in S3 use the MD5 of the object as the ETag, just like "
+"Swift. Multipart Uploads follow a different format, notably including a dash "
+"followed by the number of segments. To that end (and for S3 API requests "
+"*only*), SLO responses via the S3 API have a literal '-N' added on the end "
+"of the ETag."
+
msgid "Other Notes"
msgstr "Other Notes"
@@ -1045,10 +1629,41 @@ msgstr ""
"SSC (server-side copy) Swift source field. See https://docs.openstack.org/"
"developer/swift/logs.html#swift-source for more information."
+msgid ""
+"Per-service ``auto_create_account_prefix`` settings are now deprecated and "
+"may be ignored in a future release; if you need to use this, please set it "
+"in the ``[swift-constraints]`` section of ``/etc/swift/swift.conf``."
+msgstr ""
+"Per-service ``auto_create_account_prefix`` settings are now deprecated and "
+"may be ignored in a future release; if you need to use this, please set it "
+"in the ``[swift-constraints]`` section of ``/etc/swift/swift.conf``."
+
msgid "Pike Series Release Notes"
msgstr "Pike Series Release Notes"
msgid ""
+"Prevent PyKMIP's kmip_protocol logger from logging at DEBUG. Previously, "
+"some versions of PyKMIP would include all wire data when the root logger was "
+"configured to log at DEBUG; this could expose key material in logs. Only the "
+"``kmip_keymaster`` was affected."
+msgstr ""
+"Prevent PyKMIP's kmip_protocol logger from logging at DEBUG. Previously, "
+"some versions of PyKMIP would include all wire data when the root logger was "
+"configured to log at DEBUG; this could expose key material in logs. Only the "
+"``kmip_keymaster`` was affected."
+
+msgid ""
+"Prevent PyKMIP's kmip_protocol logger from logging at DEBUG. Previously, "
+"some versions of PyKMIP would include all wire data when the root logger was "
+"configured to log at DEBUG; this could expose key material in logs. Only the "
+"kmip_keymaster was affected."
+msgstr ""
+"Prevent PyKMIP's kmip_protocol logger from logging at DEBUG. Previously, "
+"some versions of PyKMIP would include all wire data when the root logger was "
+"configured to log at DEBUG; this could expose key material in logs. Only the "
+"kmip_keymaster was affected."
+
+msgid ""
"Prevent logged traceback in object-server on client disconnect for chunked "
"transfers to replicated policies."
msgstr ""
@@ -1071,10 +1686,43 @@ msgstr ""
"Provide an S3 API compatibility layer. The external \"swift3\" project has "
"been imported into Swift's codebase as the \"s3api\" middleware."
+msgid ""
+"Provide useful status codes in logs for some versioning and symlink "
+"subrequests that were previously logged as 499."
+msgstr ""
+"Provide useful status codes in logs for some versioning and symlink "
+"subrequests that were previously logged as 499."
+
+msgid ""
+"Proxy, account, container, and object servers now support \"seamless reloads"
+"\" via ``SIGUSR1``. This is similar to the existing graceful restarts but "
+"keeps the server socket open the whole time, reducing service downtime."
+msgstr ""
+"Proxy, account, container, and object servers now support \"seamless reloads"
+"\" via ``SIGUSR1``. This is similar to the existing graceful restarts but "
+"keeps the server socket open the whole time, reducing service downtime."
+
+msgid "Python 3 fixes:"
+msgstr "Python 3 fixes:"
+
+msgid ""
+"Python 3.6 and 3.7 are now fully supported. If you've been testing Swift on "
+"Python 3, upgrade at your earliest convenience."
+msgstr ""
+"Python 3.6 and 3.7 are now fully supported. If you've been testing Swift on "
+"Python 3, upgrade at your earliest convenience."
+
msgid "Queens Series Release Notes"
msgstr "Queens Series Release Notes"
msgid ""
+"Reduced object-replicator and object-reconstructor CPU usage by only "
+"checking that the device list is current when rings change."
+msgstr ""
+"Reduced object-replicator and object-reconstructor CPU usage by only "
+"checking that the device list is current when rings change."
+
+msgid ""
"Remove ``swift-temp-url`` script. The functionality has been in swiftclient "
"for a long time and this script has been deprecated since 2.10.0."
msgstr ""
@@ -1100,6 +1748,11 @@ msgstr ""
"Large-Object metadata."
msgid ""
+"Removed a request-smuggling vector when running a mixed py2/py3 cluster."
+msgstr ""
+"Removed a request-smuggling vector when running a mixed py2/py3 cluster."
+
+msgid ""
"Removed all ``post_as_copy`` related code and configs. The option has been "
"deprecated since 2.13.0."
msgstr ""
@@ -1124,6 +1777,13 @@ msgstr ""
"deprecated, but continues to function for now. If both values are defined, "
"the old ``replication_one_per_device`` is ignored."
+msgid ""
+"Requesting multiple ranges from a Dynamic Large Object now returns the "
+"entire object instead of incorrect data. This was previously fixed in 2.23.0."
+msgstr ""
+"Requesting multiple ranges from a Dynamic Large Object now returns the "
+"entire object instead of incorrect data. This was previously fixed in 2.23.0."
+
msgid "Require that known-bad EC schemes be deprecated"
msgstr "Require that known-bad EC schemes be deprecated"
@@ -1170,6 +1830,22 @@ msgstr ""
msgid "Rocky Series Release Notes"
msgstr "Rocky Series Release Notes"
+msgid "S3 API compatibility updates"
+msgstr "S3 API compatibility updates"
+
+msgid "S3 API improvements"
+msgstr "S3 API improvements"
+
+msgid ""
+"S3 API now translates ``503 Service Unavailable`` responses to a more S3-"
+"like response instead of raising an error."
+msgstr ""
+"S3 API now translates ``503 Service Unavailable`` responses to a more S3-"
+"like response instead of raising an error."
+
+msgid "S3 ETag for SLOs now include a '-'."
+msgstr "S3 ETag for SLOs now include a '-'."
+
msgid ""
"SLO manifest PUT requests can now be properly validated by sending an ETag "
"header of the md5 sum of the concatenated md5 sums of the referenced "
@@ -1191,6 +1867,15 @@ msgstr ""
"setting in the \"[filter:slo]\" section of the proxy server config."
msgid ""
+"SSYNC replication mode now removes as much of the directory structure as "
+"possible as soon at it observes that the directory is empty. This reduces "
+"the work needed for subsequent replication passes."
+msgstr ""
+"SSYNC replication mode now removes as much of the directory structure as "
+"possible as soon as it observes that the directory is empty. This reduces "
+"the work needed for subsequent replication passes."
+
+msgid ""
"Save the ring when dispersion improves, even if balance doesn't improve."
msgstr ""
"Save the ring when dispersion improves, even if balance doesn't improve."
@@ -1198,6 +1883,12 @@ msgstr ""
msgid "Send ETag header in 206 Partial Content responses to SLO reads."
msgstr "Send ETag header in 206 Partial Content responses to SLO reads."
+msgid "Several utility scripts now work better on Python 3:"
+msgstr "Several utility scripts now work better on Python 3:"
+
+msgid "Sharding improvements"
+msgstr "Sharding improvements"
+
msgid ""
"Significant improvements to the api-ref doc available at http://developer."
"openstack.org/api-ref/object-storage/."
@@ -1212,6 +1903,13 @@ msgstr ""
"Static Large Object (SLO) manifest may now (again) have zero-byte last "
"segments."
+msgid ""
+"Static Large Object sizes in listings for versioned containers are now more "
+"accurate."
+msgstr ""
+"Static Large Object sizes in listings for versioned containers are now more "
+"accurate."
+
msgid "Stein Series Release Notes"
msgstr "Stein Series Release Notes"
@@ -1225,6 +1923,15 @@ msgstr ""
msgid "Stopped logging tracebacks when receiving an unexpected response."
msgstr "Stopped logging tracebacks when receiving an unexpected response."
+msgid ""
+"Storage policy definitions in swift.conf can now define the diskfile to use "
+"to access objects. See the included swift.conf-sample file for a description "
+"of usage."
+msgstr ""
+"Storage policy definitions in swift.conf can now define the diskfile to use "
+"to access objects. See the included swift.conf-sample file for a description "
+"of usage."
+
msgid "Support multi-range GETs for static large objects."
msgstr "Support multi-range GETs for static large objects."
@@ -1238,6 +1945,17 @@ msgid "Swift Release Notes"
msgstr "Swift Release Notes"
msgid ""
+"Swift can now cache the S3 secret from Keystone to use for subsequent "
+"requests. This functionality is disabled by default but can be enabled by "
+"setting the ``secret_cache_duration`` in the ``[filter:s3token]`` section of "
+"the proxy server config to a number greater than 0."
+msgstr ""
+"Swift can now cache the S3 secret from Keystone to use for subsequent "
+"requests. This functionality is disabled by default but can be enabled by "
+"setting the ``secret_cache_duration`` in the ``[filter:s3token]`` section of "
+"the proxy server config to a number greater than 0."
+
+msgid ""
"Swift now returns a 503 (instead of a 500) when an account auto-create fails."
msgstr ""
"Swift now returns a 503 (instead of a 500) when an account auto-create fails."
@@ -1267,6 +1985,15 @@ msgstr ""
"including the X-Symlink-Target-Account header."
msgid ""
+"TempURLs now support IP range restrictions. Please see https://docs."
+"openstack.org/swift/latest/middleware.html#client-usage for more information "
+"on how to use this additional restriction."
+msgstr ""
+"TempURLs now support IP range restrictions. Please see https://docs."
+"openstack.org/swift/latest/middleware.html#client-usage for more information "
+"on how to use this additional restriction."
+
+msgid ""
"TempURLs now support a validation against a common prefix. A prefix-based "
"signature grants access to all objects which share the same prefix. This "
"avoids the creation of a large amount of signatures, when a whole container "
@@ -1307,6 +2034,31 @@ msgstr ""
"on servers with many drives."
msgid ""
+"The EC reconstructor will now attempt to remove empty directories "
+"immediately, while the inodes are still cached, rather than waiting until "
+"the next run."
+msgstr ""
+"The EC reconstructor will now attempt to remove empty directories "
+"immediately, while the inodes are still cached, rather than waiting until "
+"the next run."
+
+msgid ""
+"The ``container-replicator`` now correctly enqueues ``container-reconciler`` "
+"work for sharded containers."
+msgstr ""
+"The ``container-replicator`` now correctly enqueues ``container-reconciler`` "
+"work for sharded containers."
+
+msgid ""
+"The ``container-replicator`` now only attempts to fetch shard ranges if the "
+"remote indicates that it has shard ranges. Further, it does so with a "
+"timeout to prevent the process from hanging in certain cases."
+msgstr ""
+"The ``container-replicator`` now only attempts to fetch shard ranges if the "
+"remote indicates that it has shard ranges. Further, it does so with a "
+"timeout to prevent the process from hanging in certain cases."
+
+msgid ""
"The ``domain_remap`` middleware now supports the ``mangle_client_paths`` "
"option. Its default \"false\" value changes ``domain_remap`` parsing to stop "
"stripping the ``path_root`` value from URL paths. If users depend on this "
@@ -1320,6 +2072,67 @@ msgstr ""
"before upgrading."
msgid ""
+"The ``kmip_keymaster`` middleware can now be configured directly in the "
+"proxy-server config file. The existing behavior of using an external config "
+"file is still supported."
+msgstr ""
+"The ``kmip_keymaster`` middleware can now be configured directly in the "
+"proxy-server config file. The existing behaviour of using an external config "
+"file is still supported."
+
+msgid ""
+"The ``object-expirer`` may now be configured in ``object-server.conf``. This "
+"is in anticipation of a future change to allow the ``object-expirer`` to be "
+"deployed on all nodes that run the ``object-server``."
+msgstr ""
+"The ``object-expirer`` may now be configured in ``object-server.conf``. This "
+"is in anticipation of a future change to allow the ``object-expirer`` to be "
+"deployed on all nodes that run the ``object-server``."
+
+msgid ""
+"The ``proxy-server`` now caches 'updating' shards, improving write "
+"performance for sharded containers. A new config option, "
+"``recheck_updating_shard_ranges``, controls the cache time; set it to 0 to "
+"disable caching."
+msgstr ""
+"The ``proxy-server`` now caches 'updating' shards, improving write "
+"performance for sharded containers. A new config option, "
+"``recheck_updating_shard_ranges``, controls the cache time; set it to 0 to "
+"disable caching."
+
+msgid ""
+"The ``proxy-server`` now ignores 404 responses from handoffs that have no "
+"data when deciding on the correct response for object requests, similar to "
+"what it already does for account and container requests."
+msgstr ""
+"The ``proxy-server`` now ignores 404 responses from handoffs that have no "
+"data when deciding on the correct response for object requests, similar to "
+"what it already does for account and container requests."
+
+msgid ""
+"The ``proxy-server`` now ignores 404 responses from handoffs without "
+"databases when deciding on the correct response for account and container "
+"requests."
+msgstr ""
+"The ``proxy-server`` now ignores 404 responses from handoffs without "
+"databases when deciding on the correct response for account and container "
+"requests."
+
+msgid ""
+"The bulk extract middleware once again allows clients to specify metadata "
+"(including expiration timestamps) for all objects in the archive."
+msgstr ""
+"The bulk extract middleware once again allows clients to specify metadata "
+"(including expiration timestamps) for all objects in the archive."
+
+msgid ""
+"The container sharder can now handle containers with special characters in "
+"their names."
+msgstr ""
+"The container sharder can now handle containers with special characters in "
+"their names."
+
+msgid ""
"The container-updater now reports zero objects and bytes used for child DBs "
"in sharded containers. This prevents double-counting in utilization reports."
msgstr ""
@@ -1532,6 +2345,9 @@ msgstr ""
"write a manifest that an out-of-date proxy server will not be able to read. "
"This will resolve itself once the upgrade completes on all nodes."
+msgid "Ussuri Series Release Notes"
+msgstr "Ussuri Series Release Notes"
+
msgid "Various other minor bug fixes and improvements."
msgstr "Various other minor bug fixes and improvements."
@@ -1546,6 +2362,9 @@ msgstr ""
"using isa_l_rs_vand with more than 4 parity, please read https://bugs."
"launchpad.net/swift/+bug/1639691 and take necessary action."
+msgid "WSGI server processes can now notify systemd when they are ready."
+msgstr "WSGI server processes can now notify systemd when they are ready."
+
msgid ""
"We do not yet have CLI tools for creating composite rings, but the "
"functionality has been enabled in the ring modules to support this advanced "
@@ -1569,6 +2388,38 @@ msgstr ""
"unaffected by this change."
msgid ""
+"When looking for the active root secret, only the right-most keymaster is "
+"used."
+msgstr ""
+"When looking for the active root secret, only the right-most keymaster is "
+"used."
+
+msgid ""
+"When making backend requests, the ``proxy-server`` now ensures query "
+"parameters are always properly quoted. Previously, the proxy would encounter "
+"an error on Python 2.7.17 if the client included non-ASCII query parameters "
+"in object requests. This was previously fixed in 2.23.0."
+msgstr ""
+"When making backend requests, the ``proxy-server`` now ensures query "
+"parameters are always properly quoted. Previously, the proxy would encounter "
+"an error on Python 2.7.17 if the client included non-ASCII query parameters "
+"in object requests. This was previously fixed in 2.23.0."
+
+msgid ""
+"When object path is not a directory, just quarantine it, rather than the "
+"whole suffix."
+msgstr ""
+"When object path is not a directory, just quarantine it, rather than the "
+"whole suffix."
+
+msgid ""
+"When refetching Static Large Object manifests, non-manifest responses are "
+"now handled better."
+msgstr ""
+"When refetching Static Large Object manifests, non-manifest responses are "
+"now handled better."
+
+msgid ""
"When requesting objects, return 404 if a tombstone is found and is newer "
"than any data found. Previous behavior was to return stale data."
msgstr ""
@@ -1618,10 +2469,37 @@ msgstr ""
"X-Delete-At computation now uses X-Timestamp instead of system time. This "
"prevents clock skew causing inconsistent expiry data."
+msgid "``Content-Type`` can now be updated when copying an object."
+msgstr "``Content-Type`` can now be updated when copying an object."
+
+msgid "``swift-account-audit``"
+msgstr "``swift-account-audit``"
+
+msgid "``swift-dispersion-populate``"
+msgstr "``swift-dispersion-populate``"
+
+msgid "``swift-drive-recon``"
+msgstr "``swift-drive-recon``"
+
+msgid "``swift-recon``"
+msgstr "``swift-recon``"
+
msgid "``swift-ring-builder`` improvements"
msgstr "``swift-ring-builder`` improvements"
msgid ""
+"``swift_source`` is set for more sub-requests in the proxy-server. See `the "
+"documentation <https://docs.openstack.org/swift/latest/logs.html#swift-"
+"source>`__."
+msgstr ""
+"``swift_source`` is set for more sub-requests in the proxy-server. See `the "
+"documentation <https://docs.openstack.org/swift/latest/logs.html#swift-"
+"source>`__."
+
+msgid "and you want to take advantage of faster updates, then do this::"
+msgstr "and you want to take advantage of faster updates, then do this::"
+
+msgid ""
"cname_lookup middleware now accepts a ``nameservers`` config variable that, "
"if defined, will be used for DNS lookups instead of the system default."
msgstr ""
diff --git a/requirements.txt b/requirements.txt
index 0d8e80445..ecfdac0ba 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -2,7 +2,6 @@
# of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later.
-dnspython>=1.15.0;python_version=='2.7' # http://www.dnspython.org/LICENSE
eventlet>=0.25.0 # MIT
greenlet>=0.3.2
netifaces>=0.8,!=0.10.0,!=0.10.1
@@ -13,6 +12,14 @@ six>=1.10.0
xattr>=0.4;sys_platform!='win32' # MIT
PyECLib>=1.3.1 # BSD
cryptography>=2.0.2 # BSD/Apache-2.0
-ipaddress>=1.0.16;python_version<'3.3' # PSF
fusepy>=2.0.4
protobuf>=3.9.1
+
+# For python 2.7, the following requirements are needed; they are not
+# included since the requirements-check check will fail otherwise since
+# global requirements do not support these anymore.
+# Fortunately, these packages come in as dependencies from others and
+# thus the py27 jobs still work.
+#
+# dnspython>=1.15.0;python_version=='2.7' # http://www.dnspython.org/LICENSE
+# ipaddress>=1.0.16;python_version<'3.3' # PSF
diff --git a/roles/additional-keystone-users/tasks/main.yaml b/roles/additional-keystone-users/tasks/main.yaml
new file mode 100644
index 000000000..546729bfe
--- /dev/null
+++ b/roles/additional-keystone-users/tasks/main.yaml
@@ -0,0 +1,134 @@
+- name: Set S3 endpoint
+ ini_file:
+ path: /etc/swift/test.conf
+ section: func_test
+ option: s3_storage_url
+ value: http://localhost:8080
+ become: true
+
+- name: Create primary S3 user
+ shell: >
+ openstack --os-auth-url http://localhost/identity
+ --os-project-domain-id default --os-project-name admin
+ --os-user-domain-id default --os-username admin
+ --os-password secretadmin
+ credential create --type ec2 --project swiftprojecttest1 swiftusertest1
+ '{"access": "s3-user1", "secret": "s3-secret1"}'
+- name: Add primary S3 user to test.conf
+ ini_file:
+ path: /etc/swift/test.conf
+ section: func_test
+ option: s3_access_key
+ value: s3-user1
+ become: true
+- name: Add primary S3 user secret to test.conf
+ ini_file:
+ path: /etc/swift/test.conf
+ section: func_test
+ option: s3_secret_key
+ value: s3-secret1
+ become: true
+
+- name: Clear secondary S3 user from test.conf
+ ini_file:
+ path: /etc/swift/test.conf
+ section: func_test
+ option: s3_access_key2
+ value: ""
+ become: true
+
+- name: Create restricted S3 user
+ shell: >
+ openstack --os-auth-url http://localhost/identity
+ --os-project-domain-id default --os-project-name admin
+ --os-user-domain-id default --os-username admin
+ --os-password secretadmin
+ credential create --type ec2 --project swiftprojecttest1 swiftusertest3
+ '{"access": "s3-user3", "secret": "s3-secret3"}'
+- name: Add restricted S3 user to test.conf
+ ini_file:
+ path: /etc/swift/test.conf
+ section: func_test
+ option: s3_access_key3
+ value: s3-user3
+ become: true
+- name: Add restricted S3 user secret to test.conf
+ ini_file:
+ path: /etc/swift/test.conf
+ section: func_test
+ option: s3_secret_key3
+ value: s3-secret3
+ become: true
+
+- name: Create service role
+ shell: >
+ openstack --os-auth-url http://localhost/identity
+ --os-project-domain-id default --os-project-name admin
+ --os-user-domain-id default --os-username admin
+ --os-password secretadmin
+ role create swift_service
+- name: Create service project
+ shell: >
+ openstack --os-auth-url http://localhost/identity
+ --os-project-domain-id default --os-project-name admin
+ --os-user-domain-id default --os-username admin
+ --os-password secretadmin
+ project create swiftprojecttest5
+- name: Create service user
+ shell: >
+ openstack --os-auth-url http://localhost/identity
+ --os-project-domain-id default --os-project-name admin
+ --os-user-domain-id default --os-username admin
+ --os-password secretadmin
+ user create --project swiftprojecttest5 swiftusertest5 --password testing5
+- name: Assign service role
+ shell: >
+ openstack --os-auth-url http://localhost/identity
+ --os-project-domain-id default --os-project-name admin
+ --os-user-domain-id default --os-username admin
+ --os-password secretadmin
+ role add --project swiftprojecttest5 --user swiftusertest5 swift_service
+
+- name: Add service_roles to proxy-server.conf
+ ini_file:
+ path: /etc/swift/proxy-server.conf
+ section: filter:keystoneauth
+ option: SERVICE_KEY_service_roles
+ value: swift_service
+ become: true
+- name: Update reseller prefixes in proxy-server.conf
+ ini_file:
+ path: /etc/swift/proxy-server.conf
+ section: filter:keystoneauth
+ option: reseller_prefix
+ value: AUTH, SERVICE_KEY
+ become: true
+
+- name: Add service account to test.conf
+ ini_file:
+ path: /etc/swift/test.conf
+ section: func_test
+ option: account5
+ value: swiftprojecttest5
+ become: true
+- name: Add service user to test.conf
+ ini_file:
+ path: /etc/swift/test.conf
+ section: func_test
+ option: username5
+ value: swiftusertest5
+ become: true
+- name: Add service password to test.conf
+ ini_file:
+ path: /etc/swift/test.conf
+ section: func_test
+ option: password5
+ value: testing5
+ become: true
+- name: Add service prefix to test.conf
+ ini_file:
+ path: /etc/swift/test.conf
+ section: func_test
+ option: service_prefix
+ value: SERVICE_KEY
+ become: true
diff --git a/roles/additional-tempauth-users/tasks/main.yaml b/roles/additional-tempauth-users/tasks/main.yaml
new file mode 100644
index 000000000..e906af952
--- /dev/null
+++ b/roles/additional-tempauth-users/tasks/main.yaml
@@ -0,0 +1,47 @@
+- name: Configure service auth prefix for tempauth tests
+ ini_file:
+ path: /etc/swift/proxy-server.conf
+ section: filter:tempauth
+ option: reseller_prefix
+ value: TEMPAUTH, SERVICE_TA
+ become: true
+
+- name: Configure service group for tempauth tests
+ ini_file:
+ path: /etc/swift/proxy-server.conf
+ section: filter:tempauth
+ option: SERVICE_TA_require_group
+ value: service
+ become: true
+
+- name: Configure service account for tempauth tests
+ ini_file:
+ path: "{{ ansible_env.HOME }}/{{ zuul.project.src_dir }}/test/sample.conf"
+ section: func_test
+ option: account5
+ value: test5
+ become: true
+
+- name: Configure service username for tempauth tests
+ ini_file:
+ path: "{{ ansible_env.HOME }}/{{ zuul.project.src_dir }}/test/sample.conf"
+ section: func_test
+ option: username5
+ value: tester5
+ become: true
+
+- name: Configure service user password for tempauth tests
+ ini_file:
+ path: "{{ ansible_env.HOME }}/{{ zuul.project.src_dir }}/test/sample.conf"
+ section: func_test
+ option: password5
+ value: testing5
+ become: true
+
+- name: Configure service prefix for tempauth tests
+ ini_file:
+ path: "{{ ansible_env.HOME }}/{{ zuul.project.src_dir }}/test/sample.conf"
+ section: func_test
+ option: service_prefix
+ value: SERVICE_TA
+ become: true
diff --git a/roles/dsvm-additional-middlewares/tasks/main.yaml b/roles/dsvm-additional-middlewares/tasks/main.yaml
index f149e519f..42db2701c 100644
--- a/roles/dsvm-additional-middlewares/tasks/main.yaml
+++ b/roles/dsvm-additional-middlewares/tasks/main.yaml
@@ -1,8 +1,8 @@
-- name: Add more middlewares to pipeline
+- name: Add domain_remap and etag-quoter to pipeline
replace:
- path: "/etc/swift/proxy-server.conf"
- regexp: "cache listing_formats"
- replace: "cache domain_remap etag-quoter listing_formats"
+ path: "/etc/swift/proxy-server.conf"
+ regexp: "cache listing_formats"
+ replace: "cache domain_remap etag-quoter listing_formats"
become: true
- name: Set domain_remap domain
@@ -13,7 +13,7 @@
value: example.com
become: true
-- name: Set storage_domain in test.conf
+- name: Set storage_domain in test.conf (for Keystone tests)
ini_file:
path: /etc/swift/test.conf
section: func_test
@@ -21,6 +21,14 @@
value: example.com
become: true
+- name: Set storage_domain in test/sample.conf (for tempauth tests)
+ ini_file:
+ path: "{{ ansible_env.HOME }}/{{ zuul.project.src_dir }}/test/sample.conf"
+ section: func_test
+ option: storage_domain
+ value: example.com
+ become: true
+
- name: Enable object versioning
ini_file:
path: /etc/swift/proxy-server.conf
@@ -29,6 +37,14 @@
value: true
become: true
+- name: Configure s3api force_swift_request_proxy_log
+ ini_file:
+ path: /etc/swift/proxy-server.conf
+ section: filter:s3api
+ option: force_swift_request_proxy_log
+ value: true
+ become: true
+
- name: Copy ring for Policy-1
copy:
remote_src: true
diff --git a/setup.cfg b/setup.cfg
index 75da6a4fc..c0526f237 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -19,6 +19,7 @@ classifier =
Programming Language :: Python :: 3
Programming Language :: Python :: 3.6
Programming Language :: Python :: 3.7
+ Programming Language :: Python :: 3.8
[pbr]
skip_authors = True
diff --git a/swift/cli/info.py b/swift/cli/info.py
index 49c8da88e..dc29faded 100644
--- a/swift/cli/info.py
+++ b/swift/cli/info.py
@@ -57,6 +57,8 @@ def parse_get_node_args(options, args):
else:
raise InfoSystemExit('Ring file does not exist')
+ if options.quoted:
+ args = [urllib.parse.unquote(arg) for arg in args]
if len(args) == 1:
args = args[0].strip('/').split('/', 2)
@@ -614,15 +616,15 @@ def print_item_locations(ring, ring_name=None, account=None, container=None,
ring = POLICIES.get_object_ring(policy_index, swift_dir)
ring_name = (POLICIES.get_by_name(policy_name)).ring_name
- if account is None and (container is not None or obj is not None):
+ if (container or obj) and not account:
print('No account specified')
raise InfoSystemExit()
- if container is None and obj is not None:
+ if obj and not container:
print('No container specified')
raise InfoSystemExit()
- if account is None and part is None:
+ if not account and not part:
print('No target specified')
raise InfoSystemExit()
@@ -654,8 +656,11 @@ def print_item_locations(ring, ring_name=None, account=None, container=None,
print('Warning: account specified ' +
'but ring not named "account"')
- print('\nAccount \t%s' % account)
- print('Container\t%s' % container)
- print('Object \t%s\n\n' % obj)
+ if account:
+ print('\nAccount \t%s' % urllib.parse.quote(account))
+ if container:
+ print('Container\t%s' % urllib.parse.quote(container))
+ if obj:
+ print('Object \t%s\n\n' % urllib.parse.quote(obj))
print_ring_locations(ring, loc, account, container, obj, part, all_nodes,
policy_index=policy_index)
diff --git a/swift/cli/relinker.py b/swift/cli/relinker.py
index b7b4aaf73..630c0e98e 100644
--- a/swift/cli/relinker.py
+++ b/swift/cli/relinker.py
@@ -14,8 +14,12 @@
# limitations under the License.
+import errno
+import fcntl
+import json
import logging
import os
+from functools import partial
from swift.common.storage_policy import POLICIES
from swift.common.exceptions import DiskFileDeleted, DiskFileNotExist, \
DiskFileQuarantined
@@ -24,10 +28,126 @@ from swift.common.utils import replace_partition_in_path, \
from swift.obj import diskfile
+LOCK_FILE = '.relink.{datadir}.lock'
+STATE_FILE = 'relink.{datadir}.json'
+STATE_TMP_FILE = '.relink.{datadir}.json.tmp'
+STEP_RELINK = 'relink'
+STEP_CLEANUP = 'cleanup'
+
+
+def devices_filter(device, _, devices):
+ if device:
+ devices = [d for d in devices if d == device]
+
+ return set(devices)
+
+
+def hook_pre_device(locks, states, datadir, device_path):
+ lock_file = os.path.join(device_path, LOCK_FILE.format(datadir=datadir))
+
+ fd = os.open(lock_file, os.O_CREAT | os.O_WRONLY)
+ fcntl.flock(fd, fcntl.LOCK_EX)
+ locks[0] = fd
+
+ state_file = os.path.join(device_path, STATE_FILE.format(datadir=datadir))
+ states.clear()
+ try:
+ with open(state_file, 'rt') as f:
+ tmp = json.load(f)
+ states.update(tmp)
+ except ValueError:
+ # Invalid JSON: remove the file to restart from scratch
+ os.unlink(state_file)
+ except IOError as err:
+ # Ignore file not found error
+ if err.errno != errno.ENOENT:
+ raise
+
+
+def hook_post_device(locks, _):
+ os.close(locks[0])
+ locks[0] = None
+
+
+def partitions_filter(states, step, part_power, next_part_power,
+ datadir_path, partitions):
+ # Remove all non-partition entries first (eg: auditor_status_ALL.json)
+ partitions = [p for p in partitions if p.isdigit()]
+
+ if not (step == STEP_CLEANUP and part_power == next_part_power):
+ # This is not a cleanup after cancel, partitions in the upper half are
+ # new partitions, there is nothing to relink/cleanup from there
+ partitions = [p for p in partitions
+ if int(p) < 2 ** next_part_power / 2]
+
+ # Format: { 'part': [relinked, cleaned] }
+ if states:
+ missing = list(set(partitions) - set(states.keys()))
+ if missing:
+ # All missing partitions were created after the first run of
+ # relink, i.e. after the new ring was distributed, so they are
+ # already hardlinked in both partitions, but they will need to
+ # be cleaned up. Just update the state file.
+ for part in missing:
+ states[part] = [True, False]
+ if step == STEP_RELINK:
+ partitions = [str(p) for p, (r, c) in states.items() if not r]
+ elif step == STEP_CLEANUP:
+ partitions = [str(p) for p, (r, c) in states.items() if not c]
+ else:
+ states.update({str(p): [False, False] for p in partitions})
+
+ # Always scan the partitions in reverse order to minimize the amount of IO
+ # (it actually only matters for relink, not for cleanup).
+ #
+ # Initial situation:
+ # objects/0/000/00000000000000000000000000000000/12345.data
+ # -> relinked to objects/1/000/10000000000000000000000000000000/12345.data
+ #
+ # If the relinker then scans partition 1, it will listdir that object
+ # even though it's unnecessary. By working in reverse order of
+ # partitions, this is avoided.
+ partitions = sorted(partitions, key=lambda x: int(x), reverse=True)
+
+ return partitions
+
+
+# Save states when a partition is done
+def hook_post_partition(states, step,
+ partition_path):
+ part = os.path.basename(os.path.abspath(partition_path))
+ datadir_path = os.path.dirname(os.path.abspath(partition_path))
+ device_path = os.path.dirname(os.path.abspath(datadir_path))
+ datadir_name = os.path.basename(os.path.abspath(datadir_path))
+ state_tmp_file = os.path.join(device_path,
+ STATE_TMP_FILE.format(datadir=datadir_name))
+ state_file = os.path.join(device_path,
+ STATE_FILE.format(datadir=datadir_name))
+
+ if step == STEP_RELINK:
+ states[part][0] = True
+ elif step == STEP_CLEANUP:
+ states[part][1] = True
+ with open(state_tmp_file, 'wt') as f:
+ json.dump(states, f)
+ os.fsync(f.fileno())
+ os.rename(state_tmp_file, state_file)
+
+
+def hashes_filter(next_part_power, suff_path, hashes):
+ hashes = list(hashes)
+ for hsh in hashes:
+ fname = os.path.join(suff_path, hsh, 'fake-file-name')
+ if replace_partition_in_path(fname, next_part_power) == fname:
+ hashes.remove(hsh)
+ return hashes
+
+
def relink(swift_dir='/etc/swift',
devices='/srv/node',
skip_mount_check=False,
- logger=logging.getLogger()):
+ logger=logging.getLogger(),
+ device=None):
mount_check = not skip_mount_check
run = False
relinked = errors = 0
@@ -41,10 +161,31 @@ def relink(swift_dir='/etc/swift',
logging.info('Relinking files for policy %s under %s',
policy.name, devices)
run = True
+ datadir = diskfile.get_data_dir(policy)
+
+ locks = [None]
+ states = {}
+ relink_devices_filter = partial(devices_filter, device)
+ relink_hook_pre_device = partial(hook_pre_device, locks, states,
+ datadir)
+ relink_hook_post_device = partial(hook_post_device, locks)
+ relink_partition_filter = partial(partitions_filter,
+ states, STEP_RELINK,
+ part_power, next_part_power)
+ relink_hook_post_partition = partial(hook_post_partition,
+ states, STEP_RELINK)
+ relink_hashes_filter = partial(hashes_filter, next_part_power)
+
locations = audit_location_generator(
devices,
- diskfile.get_data_dir(policy),
- mount_check=mount_check)
+ datadir,
+ mount_check=mount_check,
+ devices_filter=relink_devices_filter,
+ hook_pre_device=relink_hook_pre_device,
+ hook_post_device=relink_hook_post_device,
+ partitions_filter=relink_partition_filter,
+ hook_post_partition=relink_hook_post_partition,
+ hashes_filter=relink_hashes_filter)
for fname, _, _ in locations:
newfname = replace_partition_in_path(fname, next_part_power)
try:
@@ -67,7 +208,8 @@ def relink(swift_dir='/etc/swift',
def cleanup(swift_dir='/etc/swift',
devices='/srv/node',
skip_mount_check=False,
- logger=logging.getLogger()):
+ logger=logging.getLogger(),
+ device=None):
mount_check = not skip_mount_check
conf = {'devices': devices, 'mount_check': mount_check}
diskfile_router = diskfile.DiskFileRouter(conf, get_logger(conf))
@@ -83,10 +225,31 @@ def cleanup(swift_dir='/etc/swift',
logging.info('Cleaning up files for policy %s under %s',
policy.name, devices)
run = True
+ datadir = diskfile.get_data_dir(policy)
+
+ locks = [None]
+ states = {}
+ cleanup_devices_filter = partial(devices_filter, device)
+ cleanup_hook_pre_device = partial(hook_pre_device, locks, states,
+ datadir)
+ cleanup_hook_post_device = partial(hook_post_device, locks)
+ cleanup_partition_filter = partial(partitions_filter,
+ states, STEP_CLEANUP,
+ part_power, next_part_power)
+ cleanup_hook_post_partition = partial(hook_post_partition,
+ states, STEP_CLEANUP)
+ cleanup_hashes_filter = partial(hashes_filter, next_part_power)
+
locations = audit_location_generator(
devices,
- diskfile.get_data_dir(policy),
- mount_check=mount_check)
+ datadir,
+ mount_check=mount_check,
+ devices_filter=cleanup_devices_filter,
+ hook_pre_device=cleanup_hook_pre_device,
+ hook_post_device=cleanup_hook_post_device,
+ partitions_filter=cleanup_partition_filter,
+ hook_post_partition=cleanup_hook_post_partition,
+ hashes_filter=cleanup_hashes_filter)
for fname, device, partition in locations:
expected_fname = replace_partition_in_path(fname, part_power)
if fname == expected_fname:
@@ -152,8 +315,10 @@ def main(args):
if args.action == 'relink':
return relink(
- args.swift_dir, args.devices, args.skip_mount_check, logger)
+ args.swift_dir, args.devices, args.skip_mount_check, logger,
+ device=args.device)
if args.action == 'cleanup':
return cleanup(
- args.swift_dir, args.devices, args.skip_mount_check, logger)
+ args.swift_dir, args.devices, args.skip_mount_check, logger,
+ device=args.device)
diff --git a/swift/common/db.py b/swift/common/db.py
index c6df12aa3..e06baf5c6 100644
--- a/swift/common/db.py
+++ b/swift/common/db.py
@@ -53,6 +53,9 @@ PICKLE_PROTOCOL = 2
# records will be merged.
PENDING_CAP = 131072
+SQLITE_ARG_LIMIT = 999
+RECLAIM_PAGE_SIZE = 10000
+
def utf8encode(*args):
return [(s.encode('utf8') if isinstance(s, six.text_type) else s)
@@ -981,16 +984,48 @@ class DatabaseBroker(object):
with lock_parent_directory(self.pending_file,
self.pending_timeout):
self._commit_puts()
- with self.get() as conn:
- self._reclaim(conn, age_timestamp, sync_timestamp)
- self._reclaim_metadata(conn, age_timestamp)
- conn.commit()
+ marker = ''
+ finished = False
+ while not finished:
+ with self.get() as conn:
+ marker = self._reclaim(conn, age_timestamp, marker)
+ if not marker:
+ finished = True
+ self._reclaim_other_stuff(
+ conn, age_timestamp, sync_timestamp)
+ conn.commit()
+
+ def _reclaim_other_stuff(self, conn, age_timestamp, sync_timestamp):
+ """
+ This is only called once at the end of reclaim after _reclaim has been
+ called for each page.
+ """
+ self._reclaim_sync(conn, sync_timestamp)
+ self._reclaim_metadata(conn, age_timestamp)
+
+ def _reclaim(self, conn, age_timestamp, marker):
+ clean_batch_qry = '''
+ DELETE FROM %s WHERE deleted = 1
+ AND name > ? AND %s < ?
+ ''' % (self.db_contains_type, self.db_reclaim_timestamp)
+ curs = conn.execute('''
+ SELECT name FROM %s WHERE deleted = 1
+ AND name > ?
+ ORDER BY NAME LIMIT 1 OFFSET ?
+ ''' % (self.db_contains_type,), (marker, RECLAIM_PAGE_SIZE))
+ row = curs.fetchone()
+ if row:
+ # do a single book-ended DELETE and bounce out
+ end_marker = row[0]
+ conn.execute(clean_batch_qry + ' AND name <= ?', (
+ marker, age_timestamp, end_marker))
+ else:
+ # delete off the end and reset marker to indicate we're done
+ end_marker = ''
+ conn.execute(clean_batch_qry, (marker, age_timestamp))
+ return end_marker
- def _reclaim(self, conn, age_timestamp, sync_timestamp):
- conn.execute('''
- DELETE FROM %s WHERE deleted = 1 AND %s < ?
- ''' % (self.db_contains_type, self.db_reclaim_timestamp),
- (age_timestamp,))
+ def _reclaim_sync(self, conn, sync_timestamp):
try:
conn.execute('''
DELETE FROM outgoing_sync WHERE updated_at < ?
diff --git a/swift/common/memcached.py b/swift/common/memcached.py
index a80fa0fb6..08da5c7ba 100644
--- a/swift/common/memcached.py
+++ b/swift/common/memcached.py
@@ -160,7 +160,7 @@ class MemcacheRing(object):
def __init__(self, servers, connect_timeout=CONN_TIMEOUT,
io_timeout=IO_TIMEOUT, pool_timeout=POOL_TIMEOUT,
tries=TRY_COUNT, allow_pickle=False, allow_unpickle=False,
- max_conns=2):
+ max_conns=2, logger=None):
self._ring = {}
self._errors = dict(((serv, []) for serv in servers))
self._error_limited = dict(((serv, 0) for serv in servers))
@@ -178,18 +178,23 @@ class MemcacheRing(object):
self._pool_timeout = pool_timeout
self._allow_pickle = allow_pickle
self._allow_unpickle = allow_unpickle or allow_pickle
+ if logger is None:
+ self.logger = logging.getLogger()
+ else:
+ self.logger = logger
def _exception_occurred(self, server, e, action='talking',
sock=None, fp=None, got_connection=True):
if isinstance(e, Timeout):
- logging.error("Timeout %(action)s to memcached: %(server)s",
- {'action': action, 'server': server})
+ self.logger.error("Timeout %(action)s to memcached: %(server)s",
+ {'action': action, 'server': server})
elif isinstance(e, (socket.error, MemcacheConnectionError)):
- logging.error("Error %(action)s to memcached: %(server)s: %(err)s",
- {'action': action, 'server': server, 'err': e})
+ self.logger.error(
+ "Error %(action)s to memcached: %(server)s: %(err)s",
+ {'action': action, 'server': server, 'err': e})
else:
- logging.exception("Error %(action)s to memcached: %(server)s",
- {'action': action, 'server': server})
+ self.logger.exception("Error %(action)s to memcached: %(server)s",
+ {'action': action, 'server': server})
try:
if fp:
fp.close()
@@ -213,7 +218,7 @@ class MemcacheRing(object):
if err > now - ERROR_LIMIT_TIME]
if len(self._errors[server]) > ERROR_LIMIT_COUNT:
self._error_limited[server] = now + ERROR_LIMIT_DURATION
- logging.error('Error limiting server %s', server)
+ self.logger.error('Error limiting server %s', server)
def _get_conns(self, key):
"""
diff --git a/swift/common/middleware/memcache.py b/swift/common/middleware/memcache.py
index e846749cb..b5b9569a5 100644
--- a/swift/common/middleware/memcache.py
+++ b/swift/common/middleware/memcache.py
@@ -19,6 +19,7 @@ from six.moves.configparser import ConfigParser, NoSectionError, NoOptionError
from swift.common.memcached import (MemcacheRing, CONN_TIMEOUT, POOL_TIMEOUT,
IO_TIMEOUT, TRY_COUNT)
+from swift.common.utils import get_logger
class MemcacheMiddleware(object):
@@ -28,6 +29,7 @@ class MemcacheMiddleware(object):
def __init__(self, app, conf):
self.app = app
+ self.logger = get_logger(conf, log_route='memcache')
self.memcache_servers = conf.get('memcache_servers')
serialization_format = conf.get('memcache_serialization_support')
try:
@@ -102,7 +104,8 @@ class MemcacheMiddleware(object):
io_timeout=io_timeout,
allow_pickle=(serialization_format == 0),
allow_unpickle=(serialization_format <= 1),
- max_conns=max_conns)
+ max_conns=max_conns,
+ logger=self.logger)
def __call__(self, env, start_response):
env['swift.cache'] = self.memcache
diff --git a/swift/common/middleware/ratelimit.py b/swift/common/middleware/ratelimit.py
index 72e1d6a40..9d3ff2fdd 100644
--- a/swift/common/middleware/ratelimit.py
+++ b/swift/common/middleware/ratelimit.py
@@ -242,6 +242,10 @@ class RateLimitMiddleware(object):
if not self.memcache_client:
return None
+ if req.environ.get('swift.ratelimit.handled'):
+ return None
+ req.environ['swift.ratelimit.handled'] = True
+
try:
account_info = get_account_info(req.environ, self.app,
swift_source='RL')
diff --git a/swift/common/middleware/s3api/controllers/obj.py b/swift/common/middleware/s3api/controllers/obj.py
index 293b14702..716c837b6 100644
--- a/swift/common/middleware/s3api/controllers/obj.py
+++ b/swift/common/middleware/s3api/controllers/obj.py
@@ -26,7 +26,7 @@ from swift.common.middleware.versioned_writes.object_versioning import \
from swift.common.middleware.s3api.utils import S3Timestamp, sysmeta_header
from swift.common.middleware.s3api.controllers.base import Controller
from swift.common.middleware.s3api.s3response import S3NotImplemented, \
- InvalidRange, NoSuchKey, InvalidArgument, HTTPNoContent, \
+ InvalidRange, NoSuchKey, NoSuchVersion, InvalidArgument, HTTPNoContent, \
PreconditionFailed
@@ -88,7 +88,15 @@ class ObjectController(Controller):
if version_id not in ('null', None) and \
'object_versioning' not in get_swift_info():
raise S3NotImplemented()
+
query = {} if version_id is None else {'version-id': version_id}
+ if version_id not in ('null', None):
+ container_info = req.get_container_info(self.app)
+ if not container_info.get(
+ 'sysmeta', {}).get('versions-container', ''):
+ # Versioning has never been enabled
+ raise NoSuchVersion(object_name, version_id)
+
resp = req.get_response(self.app, query=query)
if req.method == 'HEAD':
@@ -193,17 +201,25 @@ class ObjectController(Controller):
'object_versioning' not in get_swift_info():
raise S3NotImplemented()
+ version_id = req.params.get('versionId')
+ if version_id not in ('null', None):
+ container_info = req.get_container_info(self.app)
+ if not container_info.get(
+ 'sysmeta', {}).get('versions-container', ''):
+ # Versioning has never been enabled
+ return HTTPNoContent(headers={'x-amz-version-id': version_id})
+
try:
try:
query = req.gen_multipart_manifest_delete_query(
- self.app, version=req.params.get('versionId'))
+ self.app, version=version_id)
except NoSuchKey:
query = {}
req.headers['Content-Type'] = None # Ignore client content-type
- if 'versionId' in req.params:
- query['version-id'] = req.params['versionId']
+ if version_id is not None:
+ query['version-id'] = version_id
query['symlink'] = 'get'
resp = req.get_response(self.app, query=query)
diff --git a/swift/common/middleware/symlink.py b/swift/common/middleware/symlink.py
index d2c644438..bde163aa0 100644
--- a/swift/common/middleware/symlink.py
+++ b/swift/common/middleware/symlink.py
@@ -205,7 +205,8 @@ from swift.common.utils import get_logger, register_swift_info, split_path, \
MD5_OF_EMPTY_STRING, close_if_possible, closing_if_possible, \
config_true_value, drain_and_close
from swift.common.constraints import check_account_format
-from swift.common.wsgi import WSGIContext, make_subrequest
+from swift.common.wsgi import WSGIContext, make_subrequest, \
+ make_pre_authed_request
from swift.common.request_helpers import get_sys_meta_prefix, \
check_path_header, get_container_update_override_key, \
update_ignore_range_header
@@ -442,7 +443,9 @@ class SymlinkObjectContext(WSGIContext):
content_type='text/plain')
def _recursive_get_head(self, req, target_etag=None,
- follow_softlinks=True):
+ follow_softlinks=True, orig_req=None):
+ if not orig_req:
+ orig_req = req
resp = self._app_call(req.environ)
def build_traversal_req(symlink_target):
@@ -457,9 +460,20 @@ class SymlinkObjectContext(WSGIContext):
'/', version, account,
symlink_target.lstrip('/'))
self._last_target_path = target_path
- new_req = make_subrequest(
- req.environ, path=target_path, method=req.method,
- headers=req.headers, swift_source='SYM')
+
+ subreq_headers = dict(req.headers)
+ if self._response_header_value(ALLOW_RESERVED_NAMES):
+ # this symlink's sysmeta says it can point to reserved names,
+ # we're inferring that some piece of middleware had previously
+ # authorized this request because users can't access reserved
+ # names directly
+ subreq_meth = make_pre_authed_request
+ subreq_headers['X-Backend-Allow-Reserved-Names'] = 'true'
+ else:
+ subreq_meth = make_subrequest
+ new_req = subreq_meth(orig_req.environ, path=target_path,
+ method=req.method, headers=subreq_headers,
+ swift_source='SYM')
new_req.headers.pop('X-Backend-Storage-Policy-Index', None)
return new_req
@@ -484,11 +498,8 @@ class SymlinkObjectContext(WSGIContext):
if not config_true_value(
self._response_header_value(SYMLOOP_EXTEND)):
self._loop_count += 1
- if config_true_value(
- self._response_header_value(ALLOW_RESERVED_NAMES)):
- new_req.headers['X-Backend-Allow-Reserved-Names'] = 'true'
-
- return self._recursive_get_head(new_req, target_etag=resp_etag)
+ return self._recursive_get_head(new_req, target_etag=resp_etag,
+ orig_req=req)
else:
final_etag = self._response_header_value('etag')
if final_etag and target_etag and target_etag != final_etag:
diff --git a/swift/common/middleware/versioned_writes/object_versioning.py b/swift/common/middleware/versioned_writes/object_versioning.py
index 5c9b72d5c..508972f72 100644
--- a/swift/common/middleware/versioned_writes/object_versioning.py
+++ b/swift/common/middleware/versioned_writes/object_versioning.py
@@ -152,7 +152,7 @@ from cgi import parse_header
from six.moves.urllib.parse import unquote
from swift.common.constraints import MAX_FILE_SIZE, valid_api_version, \
- ACCOUNT_LISTING_LIMIT
+ ACCOUNT_LISTING_LIMIT, CONTAINER_LISTING_LIMIT
from swift.common.http import is_success, is_client_error, HTTP_NOT_FOUND, \
HTTP_CONFLICT
from swift.common.request_helpers import get_sys_meta_prefix, \
@@ -1191,7 +1191,7 @@ class ContainerContext(ObjectVersioningContext):
'hash': item['hash'],
'last_modified': item['last_modified'],
})
- limit = constrain_req_limit(req, ACCOUNT_LISTING_LIMIT)
+ limit = constrain_req_limit(req, CONTAINER_LISTING_LIMIT)
body = build_listing(
null_listing, subdir_listing, broken_listing,
reverse=config_true_value(params.get('reverse', 'no')),
@@ -1256,7 +1256,7 @@ class ContainerContext(ObjectVersioningContext):
'last_modified': item['last_modified'],
})
- limit = constrain_req_limit(req, ACCOUNT_LISTING_LIMIT)
+ limit = constrain_req_limit(req, CONTAINER_LISTING_LIMIT)
body = build_listing(
null_listing, versions_listing,
subdir_listing, broken_listing,
diff --git a/swift/common/swob.py b/swift/common/swob.py
index 61b66793c..76fb2fbc9 100644
--- a/swift/common/swob.py
+++ b/swift/common/swob.py
@@ -43,7 +43,6 @@ from email.utils import parsedate
import re
import random
import functools
-import inspect
from io import BytesIO
import six
@@ -1563,23 +1562,15 @@ def wsgify(func):
return a Response object into WSGI callables. Also catches any raised
HTTPExceptions and treats them as a returned Response.
"""
- argspec = inspect.getargspec(func)
- if argspec.args and argspec.args[0] == 'self':
- @functools.wraps(func)
- def _wsgify_self(self, env, start_response):
- try:
- return func(self, Request(env))(env, start_response)
- except HTTPException as err_resp:
- return err_resp(env, start_response)
- return _wsgify_self
- else:
- @functools.wraps(func)
- def _wsgify_bare(env, start_response):
- try:
- return func(Request(env))(env, start_response)
- except HTTPException as err_resp:
- return err_resp(env, start_response)
- return _wsgify_bare
+ @functools.wraps(func)
+ def _wsgify(*args):
+ env, start_response = args[-2:]
+ new_args = args[:-2] + (Request(env), )
+ try:
+ return func(*new_args)(env, start_response)
+ except HTTPException as err_resp:
+ return err_resp(env, start_response)
+ return _wsgify
class StatusMap(object):
diff --git a/swift/common/utils.py b/swift/common/utils.py
index df8713d3a..23a137e6c 100644
--- a/swift/common/utils.py
+++ b/swift/common/utils.py
@@ -3152,11 +3152,26 @@ def remove_directory(path):
def audit_location_generator(devices, datadir, suffix='',
- mount_check=True, logger=None):
+ mount_check=True, logger=None,
+ devices_filter=None, partitions_filter=None,
+ suffixes_filter=None, hashes_filter=None,
+ hook_pre_device=None, hook_post_device=None,
+ hook_pre_partition=None, hook_post_partition=None,
+ hook_pre_suffix=None, hook_post_suffix=None,
+ hook_pre_hash=None, hook_post_hash=None):
"""
Given a devices path and a data directory, yield (path, device,
partition) for all files in that directory
+ (devices|partitions|suffixes|hashes)_filter are meant to modify the list of
+ elements that will be iterated. eg: they can be used to exclude some
+ elements based on a custom condition defined by the caller.
+
+ hook_pre_(device|partition|suffix|hash) are called before yielding the
+ element, hook_post_(device|partition|suffix|hash) are called after the
+ element was yielded. They are meant to do some pre/post processing.
+ eg: saving a progress status.
+
:param devices: parent directory of the devices to be audited
:param datadir: a directory located under self.devices. This should be
one of the DATADIR constants defined in the account,
@@ -3165,11 +3180,31 @@ def audit_location_generator(devices, datadir, suffix='',
:param mount_check: Flag to check if a mount check should be performed
on devices
:param logger: a logger object
+ :devices_filter: a callable taking (devices, [list of devices]) as
+ parameters and returning a [list of devices]
+ :partitions_filter: a callable taking (datadir_path, [list of parts]) as
+ parameters and returning a [list of parts]
+ :suffixes_filter: a callable taking (part_path, [list of suffixes]) as
+ parameters and returning a [list of suffixes]
+ :hashes_filter: a callable taking (suff_path, [list of hashes]) as
+ parameters and returning a [list of hashes]
+ :hook_pre_device: a callable taking device_path as parameter
+ :hook_post_device: a callable taking device_path as parameter
+ :hook_pre_partition: a callable taking part_path as parameter
+ :hook_post_partition: a callable taking part_path as parameter
+ :hook_pre_suffix: a callable taking suff_path as parameter
+ :hook_post_suffix: a callable taking suff_path as parameter
+ :hook_pre_hash: a callable taking hash_path as parameter
+ :hook_post_hash: a callable taking hash_path as parameter
"""
device_dir = listdir(devices)
# randomize devices in case of process restart before sweep completed
shuffle(device_dir)
+ if devices_filter:
+ device_dir = devices_filter(devices, device_dir)
for device in device_dir:
+ if hook_pre_device:
+ hook_pre_device(os.path.join(devices, device))
if mount_check and not ismount(os.path.join(devices, device)):
if logger:
logger.warning(
@@ -3183,24 +3218,36 @@ def audit_location_generator(devices, datadir, suffix='',
logger.warning(_('Skipping %(datadir)s because %(err)s'),
{'datadir': datadir_path, 'err': e})
continue
+ if partitions_filter:
+ partitions = partitions_filter(datadir_path, partitions)
for partition in partitions:
part_path = os.path.join(datadir_path, partition)
+ if hook_pre_partition:
+ hook_pre_partition(part_path)
try:
suffixes = listdir(part_path)
except OSError as e:
if e.errno != errno.ENOTDIR:
raise
continue
+ if suffixes_filter:
+ suffixes = suffixes_filter(part_path, suffixes)
for asuffix in suffixes:
suff_path = os.path.join(part_path, asuffix)
+ if hook_pre_suffix:
+ hook_pre_suffix(suff_path)
try:
hashes = listdir(suff_path)
except OSError as e:
if e.errno != errno.ENOTDIR:
raise
continue
+ if hashes_filter:
+ hashes = hashes_filter(suff_path, hashes)
for hsh in hashes:
hash_path = os.path.join(suff_path, hsh)
+ if hook_pre_hash:
+ hook_pre_hash(hash_path)
try:
files = sorted(listdir(hash_path), reverse=True)
except OSError as e:
@@ -3212,6 +3259,14 @@ def audit_location_generator(devices, datadir, suffix='',
continue
path = os.path.join(hash_path, fname)
yield path, device, partition
+ if hook_post_hash:
+ hook_post_hash(hash_path)
+ if hook_post_suffix:
+ hook_post_suffix(suff_path)
+ if hook_post_partition:
+ hook_post_partition(part_path)
+ if hook_post_device:
+ hook_post_device(os.path.join(devices, device))
def ratelimit_sleep(running_time, max_rate, incr_by=1, rate_buffer=5):
@@ -4814,6 +4869,8 @@ class ShardRange(object):
value.
:param epoch: optional epoch timestamp which represents the time at which
sharding was enabled for a container.
+ :param reported: optional indicator that this shard and its stats have
+ been reported to the root container.
"""
FOUND = 10
CREATED = 20
@@ -4864,7 +4921,8 @@ class ShardRange(object):
def __init__(self, name, timestamp, lower=MIN, upper=MAX,
object_count=0, bytes_used=0, meta_timestamp=None,
- deleted=False, state=None, state_timestamp=None, epoch=None):
+ deleted=False, state=None, state_timestamp=None, epoch=None,
+ reported=False):
self.account = self.container = self._timestamp = \
self._meta_timestamp = self._state_timestamp = self._epoch = None
self._lower = ShardRange.MIN
@@ -4883,6 +4941,7 @@ class ShardRange(object):
self.state = self.FOUND if state is None else state
self.state_timestamp = state_timestamp
self.epoch = epoch
+ self.reported = reported
@classmethod
def _encode(cls, value):
@@ -5063,8 +5122,14 @@ class ShardRange(object):
cast to an int, or if meta_timestamp is neither None nor can be
cast to a :class:`~swift.common.utils.Timestamp`.
"""
- self.object_count = int(object_count)
- self.bytes_used = int(bytes_used)
+ if self.object_count != int(object_count):
+ self.object_count = int(object_count)
+ self.reported = False
+
+ if self.bytes_used != int(bytes_used):
+ self.bytes_used = int(bytes_used)
+ self.reported = False
+
if meta_timestamp is None:
self.meta_timestamp = Timestamp.now()
else:
@@ -5145,6 +5210,14 @@ class ShardRange(object):
def epoch(self, epoch):
self._epoch = self._to_timestamp(epoch)
+ @property
+ def reported(self):
+ return self._reported
+
+ @reported.setter
+ def reported(self, value):
+ self._reported = bool(value)
+
def update_state(self, state, state_timestamp=None):
"""
Set state to the given value and optionally update the state_timestamp
@@ -5161,6 +5234,7 @@ class ShardRange(object):
self.state = state
if state_timestamp is not None:
self.state_timestamp = state_timestamp
+ self.reported = False
return True
@property
@@ -5283,6 +5357,7 @@ class ShardRange(object):
yield 'state', self.state
yield 'state_timestamp', self.state_timestamp.internal
yield 'epoch', self.epoch.internal if self.epoch is not None else None
+ yield 'reported', 1 if self.reported else 0
def copy(self, timestamp=None, **kwargs):
"""
@@ -5314,7 +5389,8 @@ class ShardRange(object):
params['name'], params['timestamp'], params['lower'],
params['upper'], params['object_count'], params['bytes_used'],
params['meta_timestamp'], params['deleted'], params['state'],
- params['state_timestamp'], params['epoch'])
+ params['state_timestamp'], params['epoch'],
+ params.get('reported', 0))
def find_shard_range(item, ranges):
diff --git a/swift/container/backend.py b/swift/container/backend.py
index 0a18fe48f..bdf34f7d8 100644
--- a/swift/container/backend.py
+++ b/swift/container/backend.py
@@ -34,9 +34,7 @@ from swift.common.utils import Timestamp, encode_timestamps, \
get_db_files, parse_db_filename, make_db_file_path, split_path, \
RESERVED_BYTE
from swift.common.db import DatabaseBroker, utf8encode, BROKER_TIMEOUT, \
- zero_like, DatabaseAlreadyExists
-
-SQLITE_ARG_LIMIT = 999
+ zero_like, DatabaseAlreadyExists, SQLITE_ARG_LIMIT
DATADIR = 'containers'
@@ -62,7 +60,7 @@ SHARD_UPDATE_STATES = [ShardRange.CREATED, ShardRange.CLEAVED,
# tuples and vice-versa
SHARD_RANGE_KEYS = ('name', 'timestamp', 'lower', 'upper', 'object_count',
'bytes_used', 'meta_timestamp', 'deleted', 'state',
- 'state_timestamp', 'epoch')
+ 'state_timestamp', 'epoch', 'reported')
POLICY_STAT_TABLE_CREATE = '''
CREATE TABLE policy_stat (
@@ -269,6 +267,7 @@ def merge_shards(shard_data, existing):
if existing['timestamp'] < shard_data['timestamp']:
# note that currently we do not roll forward any meta or state from
# an item that was created at older time, newer created time trumps
+ shard_data['reported'] = 0 # reset the latch
return True
elif existing['timestamp'] > shard_data['timestamp']:
return False
@@ -285,6 +284,18 @@ def merge_shards(shard_data, existing):
else:
new_content = True
+ # We can latch the reported flag
+ if existing['reported'] and \
+ existing['object_count'] == shard_data['object_count'] and \
+ existing['bytes_used'] == shard_data['bytes_used'] and \
+ existing['state'] == shard_data['state'] and \
+ existing['epoch'] == shard_data['epoch']:
+ shard_data['reported'] = 1
+ else:
+ shard_data.setdefault('reported', 0)
+ if shard_data['reported'] and not existing['reported']:
+ new_content = True
+
if (existing['state_timestamp'] == shard_data['state_timestamp']
and shard_data['state'] > existing['state']):
new_content = True
@@ -597,7 +608,8 @@ class ContainerBroker(DatabaseBroker):
deleted INTEGER DEFAULT 0,
state INTEGER,
state_timestamp TEXT,
- epoch TEXT
+ epoch TEXT,
+ reported INTEGER DEFAULT 0
);
""" % SHARD_RANGE_TABLE)
@@ -1430,10 +1442,13 @@ class ContainerBroker(DatabaseBroker):
# sqlite3.OperationalError: cannot start a transaction
# within a transaction
conn.rollback()
- if ('no such table: %s' % SHARD_RANGE_TABLE) not in str(err):
- raise
- self.create_shard_range_table(conn)
- return _really_merge_items(conn)
+ if 'no such column: reported' in str(err):
+ self._migrate_add_shard_range_reported(conn)
+ return _really_merge_items(conn)
+ if ('no such table: %s' % SHARD_RANGE_TABLE) in str(err):
+ self.create_shard_range_table(conn)
+ return _really_merge_items(conn)
+ raise
def get_reconciler_sync(self):
with self.get() as conn:
@@ -1581,9 +1596,20 @@ class ContainerBroker(DatabaseBroker):
CONTAINER_STAT_VIEW_SCRIPT +
'COMMIT;')
- def _reclaim(self, conn, age_timestamp, sync_timestamp):
- super(ContainerBroker, self)._reclaim(conn, age_timestamp,
- sync_timestamp)
+ def _migrate_add_shard_range_reported(self, conn):
+ """
+ Add the reported column to the 'shard_range' table.
+ """
+ conn.executescript('''
+ BEGIN;
+ ALTER TABLE %s
+ ADD COLUMN reported INTEGER DEFAULT 0;
+ COMMIT;
+ ''' % SHARD_RANGE_TABLE)
+
+ def _reclaim_other_stuff(self, conn, age_timestamp, sync_timestamp):
+ super(ContainerBroker, self)._reclaim_other_stuff(
+ conn, age_timestamp, sync_timestamp)
# populate instance cache, but use existing conn to avoid deadlock
# when it has a pending update
self._populate_instance_cache(conn=conn)
@@ -1630,7 +1656,7 @@ class ContainerBroker(DatabaseBroker):
elif states is not None:
included_states.add(states)
- def do_query(conn):
+ def do_query(conn, use_reported_column=True):
condition = ''
conditions = []
params = []
@@ -1648,21 +1674,27 @@ class ContainerBroker(DatabaseBroker):
params.append(self.path)
if conditions:
condition = ' WHERE ' + ' AND '.join(conditions)
+ if use_reported_column:
+ columns = SHARD_RANGE_KEYS
+ else:
+ columns = SHARD_RANGE_KEYS[:-1] + ('0 as reported', )
sql = '''
SELECT %s
FROM %s%s;
- ''' % (', '.join(SHARD_RANGE_KEYS), SHARD_RANGE_TABLE, condition)
+ ''' % (', '.join(columns), SHARD_RANGE_TABLE, condition)
data = conn.execute(sql, params)
data.row_factory = None
return [row for row in data]
- try:
- with self.maybe_get(connection) as conn:
+ with self.maybe_get(connection) as conn:
+ try:
return do_query(conn)
- except sqlite3.OperationalError as err:
- if ('no such table: %s' % SHARD_RANGE_TABLE) not in str(err):
+ except sqlite3.OperationalError as err:
+ if ('no such table: %s' % SHARD_RANGE_TABLE) in str(err):
+ return []
+ if 'no such column: reported' in str(err):
+ return do_query(conn, use_reported_column=False)
raise
- return []
@classmethod
def resolve_shard_range_states(cls, states):
diff --git a/swift/container/server.py b/swift/container/server.py
index c8d7647aa..db9ac0291 100644
--- a/swift/container/server.py
+++ b/swift/container/server.py
@@ -155,6 +155,8 @@ class ContainerController(BaseStorageServer):
conf['auto_create_account_prefix']
else:
self.auto_create_account_prefix = AUTO_CREATE_ACCOUNT_PREFIX
+ self.shards_account_prefix = (
+ self.auto_create_account_prefix + 'shards_')
if config_true_value(conf.get('allow_versions', 'f')):
self.save_headers.append('x-versions-location')
if 'allow_versions' in conf:
@@ -375,14 +377,12 @@ class ContainerController(BaseStorageServer):
# auto create accounts)
obj_policy_index = self.get_and_validate_policy_index(req) or 0
broker = self._get_container_broker(drive, part, account, container)
- if account.startswith(self.auto_create_account_prefix) and obj and \
- not os.path.exists(broker.db_file):
- try:
- broker.initialize(req_timestamp.internal, obj_policy_index)
- except DatabaseAlreadyExists:
- pass
- if not os.path.exists(broker.db_file):
+ if obj:
+ self._maybe_autocreate(broker, req_timestamp, account,
+ obj_policy_index, req)
+ elif not os.path.exists(broker.db_file):
return HTTPNotFound()
+
if obj: # delete object
# redirect if a shard range exists for the object name
redirect = self._redirect_to_shard(req, broker, obj)
@@ -449,11 +449,25 @@ class ContainerController(BaseStorageServer):
broker.update_status_changed_at(timestamp)
return recreated
+ def _should_autocreate(self, account, req):
+ auto_create_header = req.headers.get('X-Backend-Auto-Create')
+ if auto_create_header:
+ # If the caller included an explicit X-Backend-Auto-Create header,
+ # assume they know the behavior they want
+ return config_true_value(auto_create_header)
+ if account.startswith(self.shards_account_prefix):
+ # we have to special case this subset of the
+ # auto_create_account_prefix because we don't want the updater
+ # accidentally auto-creating shards; only the sharder creates
+ # shards and it will explicitly tell the server to do so
+ return False
+ return account.startswith(self.auto_create_account_prefix)
+
def _maybe_autocreate(self, broker, req_timestamp, account,
- policy_index):
+ policy_index, req):
created = False
- if account.startswith(self.auto_create_account_prefix) and \
- not os.path.exists(broker.db_file):
+ should_autocreate = self._should_autocreate(account, req)
+ if should_autocreate and not os.path.exists(broker.db_file):
if policy_index is None:
raise HTTPBadRequest(
'X-Backend-Storage-Policy-Index header is required')
@@ -506,8 +520,8 @@ class ContainerController(BaseStorageServer):
# obj put expects the policy_index header, default is for
# legacy support during upgrade.
obj_policy_index = requested_policy_index or 0
- self._maybe_autocreate(broker, req_timestamp, account,
- obj_policy_index)
+ self._maybe_autocreate(
+ broker, req_timestamp, account, obj_policy_index, req)
# redirect if a shard exists for this object name
response = self._redirect_to_shard(req, broker, obj)
if response:
@@ -531,8 +545,8 @@ class ContainerController(BaseStorageServer):
for sr in json.loads(req.body)]
except (ValueError, KeyError, TypeError) as err:
return HTTPBadRequest('Invalid body: %r' % err)
- created = self._maybe_autocreate(broker, req_timestamp, account,
- requested_policy_index)
+ created = self._maybe_autocreate(
+ broker, req_timestamp, account, requested_policy_index, req)
self._update_metadata(req, broker, req_timestamp, 'PUT')
if shard_ranges:
# TODO: consider writing the shard ranges into the pending
@@ -805,7 +819,7 @@ class ContainerController(BaseStorageServer):
requested_policy_index = self.get_and_validate_policy_index(req)
broker = self._get_container_broker(drive, part, account, container)
self._maybe_autocreate(broker, req_timestamp, account,
- requested_policy_index)
+ requested_policy_index, req)
try:
objs = json.load(req.environ['wsgi.input'])
except ValueError as err:
diff --git a/swift/container/sharder.py b/swift/container/sharder.py
index d9aa7c66d..dd33043ae 100644
--- a/swift/container/sharder.py
+++ b/swift/container/sharder.py
@@ -618,7 +618,8 @@ class ContainerSharder(ContainerReplicator):
def _send_shard_ranges(self, account, container, shard_ranges,
headers=None):
- body = json.dumps([dict(sr) for sr in shard_ranges]).encode('ascii')
+ body = json.dumps([dict(sr, reported=0)
+ for sr in shard_ranges]).encode('ascii')
part, nodes = self.ring.get_nodes(account, container)
headers = headers or {}
headers.update({'X-Backend-Record-Type': RECORD_TYPE_SHARD,
@@ -1148,7 +1149,8 @@ class ContainerSharder(ContainerReplicator):
'X-Backend-Storage-Policy-Index': broker.storage_policy_index,
'X-Container-Sysmeta-Shard-Quoted-Root': quote(
broker.root_path),
- 'X-Container-Sysmeta-Sharding': True}
+ 'X-Container-Sysmeta-Sharding': 'True',
+ 'X-Backend-Auto-Create': 'True'}
# NB: we *used* to send along
# 'X-Container-Sysmeta-Shard-Root': broker.root_path
# but that isn't safe for container names with nulls or newlines
@@ -1468,7 +1470,7 @@ class ContainerSharder(ContainerReplicator):
def _update_root_container(self, broker):
own_shard_range = broker.get_own_shard_range(no_default=True)
- if not own_shard_range:
+ if not own_shard_range or own_shard_range.reported:
return
# persist the reported shard metadata
@@ -1478,9 +1480,12 @@ class ContainerSharder(ContainerReplicator):
include_own=True,
include_deleted=True)
# send everything
- self._send_shard_ranges(
- broker.root_account, broker.root_container,
- shard_ranges)
+ if self._send_shard_ranges(
+ broker.root_account, broker.root_container, shard_ranges):
+ # on success, mark ourselves as reported so we don't keep
+ # hammering the root
+ own_shard_range.reported = True
+ broker.merge_shard_ranges(own_shard_range)
def _process_broker(self, broker, node, part):
broker.get_info() # make sure account/container are populated
diff --git a/swift/obj/updater.py b/swift/obj/updater.py
index 466f294c0..f3a01a824 100644
--- a/swift/obj/updater.py
+++ b/swift/obj/updater.py
@@ -14,12 +14,13 @@
# limitations under the License.
import six.moves.cPickle as pickle
+import errno
import os
import signal
import sys
import time
from swift import gettext_ as _
-from random import random
+from random import random, shuffle
from eventlet import spawn, Timeout
@@ -230,7 +231,9 @@ class ObjectUpdater(Daemon):
'to a valid policy (%(error)s)') % {
'directory': asyncdir, 'error': e})
continue
- for prefix in self._listdir(async_pending):
+ prefix_dirs = self._listdir(async_pending)
+ shuffle(prefix_dirs)
+ for prefix in prefix_dirs:
prefix_path = os.path.join(async_pending, prefix)
if not os.path.isdir(prefix_path):
continue
@@ -271,7 +274,11 @@ class ObjectUpdater(Daemon):
if obj_hash == last_obj_hash:
self.stats.unlinks += 1
self.logger.increment('unlinks')
- os.unlink(update_path)
+ try:
+ os.unlink(update_path)
+ except OSError as e:
+ if e.errno != errno.ENOENT:
+ raise
else:
last_obj_hash = obj_hash
yield {'device': device, 'policy': policy,
diff --git a/test/functional/__init__.py b/test/functional/__init__.py
index 707f2239a..a4f4c5a6e 100644
--- a/test/functional/__init__.py
+++ b/test/functional/__init__.py
@@ -322,12 +322,16 @@ def _load_encryption(proxy_conf_file, swift_conf_file, **kwargs):
pipeline = pipeline.replace(
"proxy-logging proxy-server",
"keymaster encryption proxy-logging proxy-server")
+ pipeline = pipeline.replace(
+ "cache listing_formats",
+ "cache etag-quoter listing_formats")
conf.set(section, 'pipeline', pipeline)
root_secret = base64.b64encode(os.urandom(32))
if not six.PY2:
root_secret = root_secret.decode('ascii')
conf.set('filter:keymaster', 'encryption_root_secret', root_secret)
conf.set('filter:versioned_writes', 'allow_object_versioning', 'true')
+ conf.set('filter:etag-quoter', 'enable_by_default', 'true')
except NoSectionError as err:
msg = 'Error problem with proxy conf file %s: %s' % \
(proxy_conf_file, err)
@@ -512,8 +516,6 @@ def _load_losf_as_default_policy(proxy_conf_file, swift_conf_file, **kwargs):
conf_loaders = {
'encryption': _load_encryption,
'ec': _load_ec_as_default_policy,
- 'domain_remap_staticweb': _load_domain_remap_staticweb,
- 's3api': _load_s3api,
'losf': _load_losf_as_default_policy,
}
@@ -552,6 +554,11 @@ def in_process_setup(the_object_server=object_server):
swift_conf = _in_process_setup_swift_conf(swift_conf_src, _testdir)
_info('prepared swift.conf: %s' % swift_conf)
+ # load s3api and staticweb configs
+ proxy_conf, swift_conf = _load_s3api(proxy_conf, swift_conf)
+ proxy_conf, swift_conf = _load_domain_remap_staticweb(proxy_conf,
+ swift_conf)
+
# Call the associated method for the value of
# 'SWIFT_TEST_IN_PROCESS_CONF_LOADER', if one exists
conf_loader_label = os.environ.get(
@@ -621,6 +628,7 @@ def in_process_setup(the_object_server=object_server):
# Below are values used by the functional test framework, as well as
# by the various in-process swift servers
'auth_uri': 'http://127.0.0.1:%d/auth/v1.0/' % prolis.getsockname()[1],
+ 's3_storage_url': 'http://%s:%d/' % prolis.getsockname(),
# Primary functional test account (needs admin access to the
# account)
'account': 'test',
@@ -902,6 +910,8 @@ def setup_package():
443 if parsed.scheme == 'https' else 80),
'auth_prefix': parsed.path,
})
+ config.setdefault('s3_storage_url',
+ urlunsplit(parsed[:2] + ('', None, None)))
elif 'auth_host' in config:
scheme = 'http'
if config_true_value(config.get('auth_ssl', 'no')):
@@ -914,6 +924,8 @@ def setup_package():
auth_prefix += 'v1.0'
config['auth_uri'] = swift_test_auth = urlunsplit(
(scheme, netloc, auth_prefix, None, None))
+ config.setdefault('s3_storage_url', urlunsplit(
+ (scheme, netloc, '', None, None)))
# else, neither auth_uri nor auth_host; swift_test_auth will be unset
# and we'll skip everything later
diff --git a/test/functional/s3api/__init__.py b/test/functional/s3api/__init__.py
index db443db80..7ad2c077b 100644
--- a/test/functional/s3api/__init__.py
+++ b/test/functional/s3api/__init__.py
@@ -37,7 +37,11 @@ class S3ApiBase(unittest.TestCase):
if 's3api' not in tf.cluster_info:
raise tf.SkipTest('s3api middleware is not enabled')
try:
- self.conn = Connection()
+ self.conn = Connection(
+ tf.config['s3_access_key'], tf.config['s3_secret_key'],
+ user_id='%s:%s' % (tf.config['account'],
+ tf.config['username']))
+
self.conn.reset()
except Exception:
message = '%s got an error during initialize process.\n\n%s' % \
@@ -67,7 +71,8 @@ class S3ApiBaseBoto3(S3ApiBase):
if 's3api' not in tf.cluster_info:
raise tf.SkipTest('s3api middleware is not enabled')
try:
- self.conn = get_boto3_conn()
+ self.conn = get_boto3_conn(
+ tf.config['s3_access_key'], tf.config['s3_secret_key'])
self.endpoint_url = self.conn._endpoint.host
self.access_key = self.conn._request_signer._credentials.access_key
self.region = self.conn._client_config.region_name
diff --git a/test/functional/s3api/s3_test_client.py b/test/functional/s3api/s3_test_client.py
index 6eea9dc69..a5d22bce4 100644
--- a/test/functional/s3api/s3_test_client.py
+++ b/test/functional/s3api/s3_test_client.py
@@ -15,6 +15,7 @@
import logging
import os
+from six.moves.urllib.parse import urlparse
import test.functional as tf
import boto3
from botocore.exceptions import ClientError
@@ -46,9 +47,9 @@ class Connection(object):
"""
Connection class used for S3 functional testing.
"""
- def __init__(self, aws_access_key='test:tester',
- aws_secret_key='testing',
- user_id='test:tester'):
+ def __init__(self, aws_access_key,
+ aws_secret_key,
+ user_id=None):
"""
Initialize method.
@@ -64,15 +65,16 @@ class Connection(object):
"""
self.aws_access_key = aws_access_key
self.aws_secret_key = aws_secret_key
- self.user_id = user_id
- # NOTE: auth_host and auth_port can be different from storage location
- self.host = tf.config['auth_host']
- self.port = int(tf.config['auth_port'])
+ self.user_id = user_id or aws_access_key
+ parsed = urlparse(tf.config['s3_storage_url'])
+ self.host = parsed.hostname
+ self.port = parsed.port
self.conn = \
- S3Connection(aws_access_key, aws_secret_key, is_secure=False,
+ S3Connection(aws_access_key, aws_secret_key,
+ is_secure=(parsed.scheme == 'https'),
host=self.host, port=self.port,
calling_format=OrdinaryCallingFormat())
- self.conn.auth_region_name = 'us-east-1'
+ self.conn.auth_region_name = tf.config.get('s3_region', 'us-east-1')
def reset(self):
"""
@@ -140,22 +142,26 @@ class Connection(object):
url = self.conn.generate_url(expires_in, method, bucket, obj)
if os.environ.get('S3_USE_SIGV4') == "True":
# V4 signatures are known-broken in boto, but we can work around it
- if url.startswith('https://'):
+ if url.startswith('https://') and not tf.config[
+ 's3_storage_url'].startswith('https://'):
url = 'http://' + url[8:]
- return url, {'Host': '%(host)s:%(port)d:%(port)d' % {
- 'host': self.host, 'port': self.port}}
+ if self.port is None:
+ return url, {}
+ else:
+ return url, {'Host': '%(host)s:%(port)d:%(port)d' % {
+ 'host': self.host, 'port': self.port}}
return url, {}
-def get_boto3_conn(aws_access_key='test:tester', aws_secret_key='testing'):
- host = tf.config['auth_host']
- port = int(tf.config['auth_port'])
+def get_boto3_conn(aws_access_key, aws_secret_key):
+ endpoint_url = tf.config['s3_storage_url']
config = boto3.session.Config(s3={'addressing_style': 'path'})
return boto3.client(
's3', aws_access_key_id=aws_access_key,
aws_secret_access_key=aws_secret_key,
- config=config, region_name='us-east-1', use_ssl=False,
- endpoint_url='http://{}:{}'.format(host, port))
+ config=config, region_name=tf.config.get('s3_region', 'us-east-1'),
+ use_ssl=endpoint_url.startswith('https:'),
+ endpoint_url=endpoint_url)
def tear_down_s3(conn):
diff --git a/test/functional/s3api/test_acl.py b/test/functional/s3api/test_acl.py
index 610efe0a9..7a3d4f98d 100644
--- a/test/functional/s3api/test_acl.py
+++ b/test/functional/s3api/test_acl.py
@@ -93,7 +93,7 @@ class TestS3Acl(S3ApiBase):
def test_put_bucket_acl_error(self):
req_headers = {'x-amz-acl': 'public-read'}
- aws_error_conn = Connection(aws_secret_key='invalid')
+ aws_error_conn = Connection(tf.config['s3_access_key'], 'invalid')
status, headers, body = \
aws_error_conn.make_request('PUT', self.bucket,
headers=req_headers, query='acl')
@@ -110,7 +110,7 @@ class TestS3Acl(S3ApiBase):
self.assertEqual(get_error_code(body), 'AccessDenied')
def test_get_bucket_acl_error(self):
- aws_error_conn = Connection(aws_secret_key='invalid')
+ aws_error_conn = Connection(tf.config['s3_access_key'], 'invalid')
status, headers, body = \
aws_error_conn.make_request('GET', self.bucket, query='acl')
self.assertEqual(get_error_code(body), 'SignatureDoesNotMatch')
@@ -126,7 +126,7 @@ class TestS3Acl(S3ApiBase):
def test_get_object_acl_error(self):
self.conn.make_request('PUT', self.bucket, self.obj)
- aws_error_conn = Connection(aws_secret_key='invalid')
+ aws_error_conn = Connection(tf.config['s3_access_key'], 'invalid')
status, headers, body = \
aws_error_conn.make_request('GET', self.bucket, self.obj,
query='acl')
diff --git a/test/functional/s3api/test_bucket.py b/test/functional/s3api/test_bucket.py
index 2197ce823..1e427434d 100644
--- a/test/functional/s3api/test_bucket.py
+++ b/test/functional/s3api/test_bucket.py
@@ -42,11 +42,15 @@ class TestS3ApiBucket(S3ApiBaseBoto3):
self.assertIn('ETag', obj)
self.assertIn('Size', obj)
self.assertEqual(obj['StorageClass'], 'STANDARD')
- if expect_owner:
+ if not expect_owner:
+ self.assertNotIn('Owner', obj)
+ elif tf.cluster_info['s3api'].get('s3_acl'):
self.assertEqual(obj['Owner']['ID'], self.access_key)
self.assertEqual(obj['Owner']['DisplayName'], self.access_key)
else:
- self.assertNotIn('Owner', obj)
+ self.assertIn('Owner', obj)
+ self.assertIn('ID', obj['Owner'])
+ self.assertIn('DisplayName', obj['Owner'])
def test_bucket(self):
bucket = 'bucket'
@@ -128,7 +132,7 @@ class TestS3ApiBucket(S3ApiBaseBoto3):
self.assertEqual(
ctx.exception.response['Error']['Code'], 'InvalidBucketName')
- auth_error_conn = get_boto3_conn(aws_secret_key='invalid')
+ auth_error_conn = get_boto3_conn(tf.config['s3_access_key'], 'invalid')
with self.assertRaises(botocore.exceptions.ClientError) as ctx:
auth_error_conn.create_bucket(Bucket='bucket')
self.assertEqual(
@@ -201,7 +205,7 @@ class TestS3ApiBucket(S3ApiBaseBoto3):
self.assertEqual(
ctx.exception.response['Error']['Code'], 'InvalidBucketName')
- auth_error_conn = get_boto3_conn(aws_secret_key='invalid')
+ auth_error_conn = get_boto3_conn(tf.config['s3_access_key'], 'invalid')
with self.assertRaises(botocore.exceptions.ClientError) as ctx:
auth_error_conn.list_objects(Bucket='bucket')
self.assertEqual(
@@ -388,7 +392,7 @@ class TestS3ApiBucket(S3ApiBaseBoto3):
ctx.exception.response[
'ResponseMetadata']['HTTPHeaders']['content-length'], '0')
- auth_error_conn = get_boto3_conn(aws_secret_key='invalid')
+ auth_error_conn = get_boto3_conn(tf.config['s3_access_key'], 'invalid')
with self.assertRaises(botocore.exceptions.ClientError) as ctx:
auth_error_conn.head_bucket(Bucket='bucket')
self.assertEqual(
@@ -419,7 +423,7 @@ class TestS3ApiBucket(S3ApiBaseBoto3):
self.assertEqual(
ctx.exception.response['Error']['Code'], 'InvalidBucketName')
- auth_error_conn = get_boto3_conn(aws_secret_key='invalid')
+ auth_error_conn = get_boto3_conn(tf.config['s3_access_key'], 'invalid')
with self.assertRaises(botocore.exceptions.ClientError) as ctx:
auth_error_conn.delete_bucket(Bucket='bucket')
self.assertEqual(
diff --git a/test/functional/s3api/test_multi_delete.py b/test/functional/s3api/test_multi_delete.py
index 31e18bb5f..1489d5477 100644
--- a/test/functional/s3api/test_multi_delete.py
+++ b/test/functional/s3api/test_multi_delete.py
@@ -134,7 +134,7 @@ class TestS3ApiMultiDelete(S3ApiBase):
content_md5 = calculate_md5(xml)
query = 'delete'
- auth_error_conn = Connection(aws_secret_key='invalid')
+ auth_error_conn = Connection(tf.config['s3_access_key'], 'invalid')
status, headers, body = \
auth_error_conn.make_request('POST', bucket, body=xml,
headers={
diff --git a/test/functional/s3api/test_multi_upload.py b/test/functional/s3api/test_multi_upload.py
index de61551e0..c2e1c0f93 100644
--- a/test/functional/s3api/test_multi_upload.py
+++ b/test/functional/s3api/test_multi_upload.py
@@ -304,9 +304,8 @@ class TestS3ApiMultiUpload(S3ApiBase):
self.assertTrue(lines[0].startswith(b'<?xml'), body)
self.assertTrue(lines[0].endswith(b'?>'), body)
elem = fromstring(body, 'CompleteMultipartUploadResult')
- # TODO: use tf.config value
self.assertEqual(
- 'http://%s:%s/bucket/obj1' % (self.conn.host, self.conn.port),
+ '%s/bucket/obj1' % tf.config['s3_storage_url'].rstrip('/'),
elem.find('Location').text)
self.assertEqual(elem.find('Bucket').text, bucket)
self.assertEqual(elem.find('Key').text, key)
@@ -428,7 +427,7 @@ class TestS3ApiMultiUpload(S3ApiBase):
self.conn.make_request('PUT', bucket)
query = 'uploads'
- auth_error_conn = Connection(aws_secret_key='invalid')
+ auth_error_conn = Connection(tf.config['s3_access_key'], 'invalid')
status, headers, body = \
auth_error_conn.make_request('POST', bucket, key, query=query)
self.assertEqual(get_error_code(body), 'SignatureDoesNotMatch')
@@ -442,7 +441,7 @@ class TestS3ApiMultiUpload(S3ApiBase):
self.conn.make_request('PUT', bucket)
query = 'uploads'
- auth_error_conn = Connection(aws_secret_key='invalid')
+ auth_error_conn = Connection(tf.config['s3_access_key'], 'invalid')
status, headers, body = \
auth_error_conn.make_request('GET', bucket, query=query)
self.assertEqual(get_error_code(body), 'SignatureDoesNotMatch')
@@ -462,7 +461,7 @@ class TestS3ApiMultiUpload(S3ApiBase):
upload_id = elem.find('UploadId').text
query = 'partNumber=%s&uploadId=%s' % (1, upload_id)
- auth_error_conn = Connection(aws_secret_key='invalid')
+ auth_error_conn = Connection(tf.config['s3_access_key'], 'invalid')
status, headers, body = \
auth_error_conn.make_request('PUT', bucket, key, query=query)
self.assertEqual(get_error_code(body), 'SignatureDoesNotMatch')
@@ -500,7 +499,7 @@ class TestS3ApiMultiUpload(S3ApiBase):
upload_id = elem.find('UploadId').text
query = 'partNumber=%s&uploadId=%s' % (1, upload_id)
- auth_error_conn = Connection(aws_secret_key='invalid')
+ auth_error_conn = Connection(tf.config['s3_access_key'], 'invalid')
status, headers, body = \
auth_error_conn.make_request('PUT', bucket, key,
headers={
@@ -541,7 +540,7 @@ class TestS3ApiMultiUpload(S3ApiBase):
upload_id = elem.find('UploadId').text
query = 'uploadId=%s' % upload_id
- auth_error_conn = Connection(aws_secret_key='invalid')
+ auth_error_conn = Connection(tf.config['s3_access_key'], 'invalid')
status, headers, body = \
auth_error_conn.make_request('GET', bucket, key, query=query)
@@ -568,7 +567,7 @@ class TestS3ApiMultiUpload(S3ApiBase):
self._upload_part(bucket, key, upload_id)
query = 'uploadId=%s' % upload_id
- auth_error_conn = Connection(aws_secret_key='invalid')
+ auth_error_conn = Connection(tf.config['s3_access_key'], 'invalid')
status, headers, body = \
auth_error_conn.make_request('DELETE', bucket, key, query=query)
self.assertEqual(get_error_code(body), 'SignatureDoesNotMatch')
@@ -612,7 +611,7 @@ class TestS3ApiMultiUpload(S3ApiBase):
self.assertEqual(get_error_code(body), 'EntityTooSmall')
# invalid credentials
- auth_error_conn = Connection(aws_secret_key='invalid')
+ auth_error_conn = Connection(tf.config['s3_access_key'], 'invalid')
status, headers, body = \
auth_error_conn.make_request('POST', bucket, keys[0], body=xml,
query=query)
@@ -881,6 +880,8 @@ class TestS3ApiMultiUpload(S3ApiBase):
self.assertEqual(headers['content-length'], '0')
def test_object_multi_upload_part_copy_version(self):
+ if 'object_versioning' not in tf.cluster_info:
+ self.skipTest('Object Versioning not enabled')
bucket = 'bucket'
keys = ['obj1']
uploads = []
diff --git a/test/functional/s3api/test_object.py b/test/functional/s3api/test_object.py
index 5b518eaa8..8079c157b 100644
--- a/test/functional/s3api/test_object.py
+++ b/test/functional/s3api/test_object.py
@@ -147,7 +147,7 @@ class TestS3ApiObject(S3ApiBase):
self.assertCommonResponseHeaders(headers)
def test_put_object_error(self):
- auth_error_conn = Connection(aws_secret_key='invalid')
+ auth_error_conn = Connection(tf.config['s3_access_key'], 'invalid')
status, headers, body = \
auth_error_conn.make_request('PUT', self.bucket, 'object')
self.assertEqual(get_error_code(body), 'SignatureDoesNotMatch')
@@ -166,7 +166,7 @@ class TestS3ApiObject(S3ApiBase):
dst_obj = 'dst_object'
headers = {'x-amz-copy-source': '/%s/%s' % (self.bucket, obj)}
- auth_error_conn = Connection(aws_secret_key='invalid')
+ auth_error_conn = Connection(tf.config['s3_access_key'], 'invalid')
status, headers, body = \
auth_error_conn.make_request('PUT', dst_bucket, dst_obj, headers)
self.assertEqual(get_error_code(body), 'SignatureDoesNotMatch')
@@ -197,7 +197,7 @@ class TestS3ApiObject(S3ApiBase):
obj = 'object'
self.conn.make_request('PUT', self.bucket, obj)
- auth_error_conn = Connection(aws_secret_key='invalid')
+ auth_error_conn = Connection(tf.config['s3_access_key'], 'invalid')
status, headers, body = \
auth_error_conn.make_request('GET', self.bucket, obj)
self.assertEqual(get_error_code(body), 'SignatureDoesNotMatch')
@@ -216,7 +216,7 @@ class TestS3ApiObject(S3ApiBase):
obj = 'object'
self.conn.make_request('PUT', self.bucket, obj)
- auth_error_conn = Connection(aws_secret_key='invalid')
+ auth_error_conn = Connection(tf.config['s3_access_key'], 'invalid')
status, headers, body = \
auth_error_conn.make_request('HEAD', self.bucket, obj)
self.assertEqual(status, 403)
@@ -239,7 +239,7 @@ class TestS3ApiObject(S3ApiBase):
obj = 'object'
self.conn.make_request('PUT', self.bucket, obj)
- auth_error_conn = Connection(aws_secret_key='invalid')
+ auth_error_conn = Connection(tf.config['s3_access_key'], 'invalid')
status, headers, body = \
auth_error_conn.make_request('DELETE', self.bucket, obj)
self.assertEqual(get_error_code(body), 'SignatureDoesNotMatch')
diff --git a/test/functional/s3api/test_service.py b/test/functional/s3api/test_service.py
index 3eb75dff5..77779cba0 100644
--- a/test/functional/s3api/test_service.py
+++ b/test/functional/s3api/test_service.py
@@ -69,7 +69,7 @@ class TestS3ApiService(S3ApiBase):
self.assertTrue(b.find('CreationDate') is not None)
def test_service_error_signature_not_match(self):
- auth_error_conn = Connection(aws_secret_key='invalid')
+ auth_error_conn = Connection(tf.config['s3_access_key'], 'invalid')
status, headers, body = auth_error_conn.make_request('GET')
self.assertEqual(get_error_code(body), 'SignatureDoesNotMatch')
self.assertEqual(headers['content-type'], 'application/xml')
diff --git a/test/functional/test_object.py b/test/functional/test_object.py
index dbc72acef..6145d4a98 100644
--- a/test/functional/test_object.py
+++ b/test/functional/test_object.py
@@ -1726,7 +1726,7 @@ class TestObject(unittest.TestCase):
if 'etag_quoter' not in tf.cluster_info:
raise SkipTest("etag-quoter middleware is not enabled")
- def do_head(expect_quoted=False):
+ def do_head(expect_quoted=None):
def head(url, token, parsed, conn):
conn.request('HEAD', '%s/%s/%s' % (
parsed.path, self.container, self.obj), '',
@@ -1736,6 +1736,11 @@ class TestObject(unittest.TestCase):
resp = retry(head)
resp.read()
self.assertEqual(resp.status, 200)
+
+ if expect_quoted is None:
+ expect_quoted = tf.cluster_info.get('etag_quoter', {}).get(
+ 'enable_by_default', False)
+
expected_etag = hashlib.md5(b'test').hexdigest()
if expect_quoted:
expected_etag = '"%s"' % expected_etag
@@ -1771,7 +1776,7 @@ class TestObject(unittest.TestCase):
post_container('')
do_head(expect_quoted=True)
post_container('f')
- do_head()
+ do_head(expect_quoted=False)
finally:
# Don't leave a dirty account
post_account('')
diff --git a/test/functional/test_object_versioning.py b/test/functional/test_object_versioning.py
index ebfca68f6..d7db187c0 100644
--- a/test/functional/test_object_versioning.py
+++ b/test/functional/test_object_versioning.py
@@ -26,6 +26,7 @@ from six.moves.urllib.parse import quote, unquote
import test.functional as tf
+from swift.common.swob import normalize_etag
from swift.common.utils import MD5_OF_EMPTY_STRING, config_true_value
from swift.common.middleware.versioned_writes.object_versioning import \
DELETE_MARKER_CONTENT_TYPE
@@ -331,6 +332,80 @@ class TestObjectVersioning(TestObjectVersioningBase):
# listing, though, we'll only ever have the two entries.
self.assertTotalVersions(container, 2)
+ def test_get_if_match(self):
+ body = b'data'
+ oname = Utils.create_name()
+ obj = self.env.unversioned_container.file(oname)
+ resp = obj.write(body, return_resp=True)
+ etag = resp.getheader('etag')
+ self.assertEqual(md5(body).hexdigest(), normalize_etag(etag))
+
+ # un-versioned object is cool with if-match
+ self.assertEqual(body, obj.read(hdrs={'if-match': etag}))
+ with self.assertRaises(ResponseError) as cm:
+ obj.read(hdrs={'if-match': 'not-the-etag'})
+ self.assertEqual(412, cm.exception.status)
+
+ v_obj = self.env.container.file(oname)
+ resp = v_obj.write(body, return_resp=True)
+ self.assertEqual(resp.getheader('etag'), etag)
+
+ # versioned object is cool with if-match too
+ self.assertEqual(body, v_obj.read(hdrs={
+ 'if-match': normalize_etag(etag)}))
+ # works quoted, too
+ self.assertEqual(body, v_obj.read(hdrs={
+ 'if-match': '"%s"' % normalize_etag(etag)}))
+ with self.assertRaises(ResponseError) as cm:
+ v_obj.read(hdrs={'if-match': 'not-the-etag'})
+ self.assertEqual(412, cm.exception.status)
+
+ def test_container_acls(self):
+ if tf.skip3:
+ raise SkipTest('Username3 not set')
+
+ obj = self.env.container.file(Utils.create_name())
+ resp = obj.write(b"data", return_resp=True)
+ version_id = resp.getheader('x-object-version-id')
+ self.assertIsNotNone(version_id)
+
+ with self.assertRaises(ResponseError) as cm:
+ obj.read(hdrs={'X-Auth-Token': self.env.conn3.storage_token})
+ self.assertEqual(403, cm.exception.status)
+
+ # Container ACLs work more or less like they always have
+ self.env.container.update_metadata(
+ hdrs={'X-Container-Read': self.env.conn3.user_acl})
+ self.assertEqual(b"data", obj.read(hdrs={
+ 'X-Auth-Token': self.env.conn3.storage_token}))
+
+ # But the version-specific GET still requires a swift owner
+ with self.assertRaises(ResponseError) as cm:
+ obj.read(hdrs={'X-Auth-Token': self.env.conn3.storage_token},
+ parms={'version-id': version_id})
+ self.assertEqual(403, cm.exception.status)
+
+ # If it's pointing to a symlink that points elsewhere, that still needs
+ # to be authed
+ tgt_name = Utils.create_name()
+ self.env.unversioned_container.file(tgt_name).write(b'link')
+ sym_tgt_header = quote(unquote('%s/%s' % (
+ self.env.unversioned_container.name, tgt_name)))
+ obj.write(hdrs={'X-Symlink-Target': sym_tgt_header})
+
+ # So, user1's good...
+ self.assertEqual(b'link', obj.read())
+ # ...but user3 can't
+ with self.assertRaises(ResponseError) as cm:
+ obj.read(hdrs={'X-Auth-Token': self.env.conn3.storage_token})
+ self.assertEqual(403, cm.exception.status)
+
+ # unless we add the acl to the unversioned_container
+ self.env.unversioned_container.update_metadata(
+ hdrs={'X-Container-Read': self.env.conn3.user_acl})
+ self.assertEqual(b'link', obj.read(
+ hdrs={'X-Auth-Token': self.env.conn3.storage_token}))
+
def _test_overwriting_setup(self, obj_name=None):
# sanity
container = self.env.container
@@ -919,13 +994,13 @@ class TestObjectVersioning(TestObjectVersioningBase):
'Content-Type': 'text/jibberish32'
}, return_resp=True)
v1_version_id = resp.getheader('x-object-version-id')
- v1_etag = resp.getheader('etag')
+ v1_etag = normalize_etag(resp.getheader('etag'))
resp = obj.write(b'version2', hdrs={
'Content-Type': 'text/jibberish33'
}, return_resp=True)
v2_version_id = resp.getheader('x-object-version-id')
- v2_etag = resp.getheader('etag')
+ v2_etag = normalize_etag(resp.getheader('etag'))
# sanity
self.assertEqual(b'version2', obj.read())
@@ -992,7 +1067,7 @@ class TestObjectVersioning(TestObjectVersioningBase):
self.assertEqual(b'version1', obj.read())
obj_info = obj.info()
self.assertEqual('text/jibberish32', obj_info['content_type'])
- self.assertEqual(v1_etag, obj_info['etag'])
+ self.assertEqual(v1_etag, normalize_etag(obj_info['etag']))
def test_delete_with_version_api_old_object(self):
versioned_obj_name = Utils.create_name()
@@ -2308,7 +2383,7 @@ class TestSloWithVersioning(TestObjectVersioningBase):
expected = {
'bytes': file_info['content_length'],
'content_type': 'application/octet-stream',
- 'hash': manifest_info['etag'],
+ 'hash': normalize_etag(manifest_info['etag']),
'name': 'my-slo-manifest',
'slo_etag': file_info['etag'],
'version_symlink': True,
@@ -2340,7 +2415,7 @@ class TestSloWithVersioning(TestObjectVersioningBase):
expected = {
'bytes': file_info['content_length'],
'content_type': 'application/octet-stream',
- 'hash': manifest_info['etag'],
+ 'hash': normalize_etag(manifest_info['etag']),
'name': 'my-slo-manifest',
'slo_etag': file_info['etag'],
'version_symlink': True,
@@ -2688,16 +2763,11 @@ class TestVersioningContainerTempurl(TestObjectVersioningBase):
obj.write(b"version2")
# get v2 object (reading from versions container)
- # cross container tempurl does not work for container tempurl key
- try:
- obj.read(parms=get_parms, cfg={'no_auth_token': True})
- except ResponseError as e:
- self.assertEqual(e.status, 401)
- else:
- self.fail('request did not error')
- try:
- obj.info(parms=get_parms, cfg={'no_auth_token': True})
- except ResponseError as e:
- self.assertEqual(e.status, 401)
- else:
- self.fail('request did not error')
+ # versioning symlink allows us to bypass the normal
+ # container-tempurl-key scoping
+ contents = obj.read(parms=get_parms, cfg={'no_auth_token': True})
+ self.assert_status([200])
+ self.assertEqual(contents, b"version2")
+ # HEAD works, too
+ obj.info(parms=get_parms, cfg={'no_auth_token': True})
+ self.assert_status([200])
diff --git a/test/functional/test_slo.py b/test/functional/test_slo.py
index c055f7bbd..8003a2d70 100644
--- a/test/functional/test_slo.py
+++ b/test/functional/test_slo.py
@@ -23,6 +23,8 @@ from copy import deepcopy
import six
+from swift.common.swob import normalize_etag
+
import test.functional as tf
from test.functional import cluster_info, SkipTest
from test.functional.tests import Utils, Base, Base2, BaseEnv
@@ -299,8 +301,14 @@ class TestSlo(Base):
# a POST.
file_item.initialize(parms={'multipart-manifest': 'get'})
manifest_etag = file_item.etag
- self.assertFalse(manifest_etag.startswith('"'))
- self.assertFalse(manifest_etag.endswith('"'))
+ if tf.cluster_info.get('etag_quoter', {}).get('enable_by_default'):
+ self.assertTrue(manifest_etag.startswith('"'))
+ self.assertTrue(manifest_etag.endswith('"'))
+ # ...but in the listing, it'll be stripped
+ manifest_etag = manifest_etag[1:-1]
+ else:
+ self.assertFalse(manifest_etag.startswith('"'))
+ self.assertFalse(manifest_etag.endswith('"'))
file_item.initialize()
slo_etag = file_item.etag
@@ -715,6 +723,8 @@ class TestSlo(Base):
source_contents = source.read(parms={'multipart-manifest': 'get'})
source_json = json.loads(source_contents)
manifest_etag = hashlib.md5(source_contents).hexdigest()
+ if tf.cluster_info.get('etag_quoter', {}).get('enable_by_default'):
+ manifest_etag = '"%s"' % manifest_etag
self.assertEqual(manifest_etag, source.etag)
source.initialize()
@@ -752,14 +762,14 @@ class TestSlo(Base):
actual = names['manifest-abcde']
self.assertEqual(4 * 1024 * 1024 + 1, actual['bytes'])
self.assertEqual('application/octet-stream', actual['content_type'])
- self.assertEqual(manifest_etag, actual['hash'])
+ self.assertEqual(normalize_etag(manifest_etag), actual['hash'])
self.assertEqual(slo_etag, actual['slo_etag'])
self.assertIn('copied-abcde-manifest-only', names)
actual = names['copied-abcde-manifest-only']
self.assertEqual(4 * 1024 * 1024 + 1, actual['bytes'])
self.assertEqual('application/octet-stream', actual['content_type'])
- self.assertEqual(manifest_etag, actual['hash'])
+ self.assertEqual(normalize_etag(manifest_etag), actual['hash'])
self.assertEqual(slo_etag, actual['slo_etag'])
# Test copy manifest including data segments
@@ -789,6 +799,8 @@ class TestSlo(Base):
source_contents = source.read(parms={'multipart-manifest': 'get'})
source_json = json.loads(source_contents)
manifest_etag = hashlib.md5(source_contents).hexdigest()
+ if tf.cluster_info.get('etag_quoter', {}).get('enable_by_default'):
+ manifest_etag = '"%s"' % manifest_etag
self.assertEqual(manifest_etag, source.etag)
source.initialize()
@@ -831,14 +843,14 @@ class TestSlo(Base):
self.assertEqual(4 * 1024 * 1024 + 1, actual['bytes'])
self.assertEqual('application/octet-stream', actual['content_type'])
# the container listing should have the etag of the manifest contents
- self.assertEqual(manifest_etag, actual['hash'])
+ self.assertEqual(normalize_etag(manifest_etag), actual['hash'])
self.assertEqual(slo_etag, actual['slo_etag'])
self.assertIn('copied-abcde-manifest-only', names)
actual = names['copied-abcde-manifest-only']
self.assertEqual(4 * 1024 * 1024 + 1, actual['bytes'])
self.assertEqual('image/jpeg', actual['content_type'])
- self.assertEqual(manifest_etag, actual['hash'])
+ self.assertEqual(normalize_etag(manifest_etag), actual['hash'])
self.assertEqual(slo_etag, actual['slo_etag'])
def test_slo_copy_the_manifest_account(self):
@@ -1098,12 +1110,7 @@ class TestSlo(Base):
manifest = self.env.container.file("manifest-db")
got_body = manifest.read(parms={'multipart-manifest': 'get',
'format': 'raw'})
- body_md5 = hashlib.md5(got_body).hexdigest()
- headers = dict(
- (h.lower(), v)
- for h, v in manifest.conn.response.getheaders())
- self.assertIn('etag', headers)
- self.assertEqual(headers['etag'], body_md5)
+ self.assert_etag(hashlib.md5(got_body).hexdigest())
# raw format should have the actual manifest object content-type
self.assertEqual('application/octet-stream', manifest.content_type)
diff --git a/test/functional/test_symlink.py b/test/functional/test_symlink.py
index 5cd66d510..1b6ec820f 100755
--- a/test/functional/test_symlink.py
+++ b/test/functional/test_symlink.py
@@ -25,6 +25,7 @@ from six.moves import urllib
from uuid import uuid4
from swift.common.http import is_success
+from swift.common.swob import normalize_etag
from swift.common.utils import json, MD5_OF_EMPTY_STRING
from swift.common.middleware.slo import SloGetContext
from test.functional import check_response, retry, requires_acls, \
@@ -1135,7 +1136,7 @@ class TestSymlink(Base):
etag=self.env.tgt_etag)
# overwrite tgt object
- old_tgt_etag = self.env.tgt_etag
+ old_tgt_etag = normalize_etag(self.env.tgt_etag)
self.env._create_tgt_object(body='updated target body')
# sanity
@@ -1380,7 +1381,7 @@ class TestSymlink(Base):
object_list[0]['symlink_path'])
obj_info = object_list[0]
self.assertIn('symlink_etag', obj_info)
- self.assertEqual(self.env.tgt_etag,
+ self.assertEqual(normalize_etag(self.env.tgt_etag),
obj_info['symlink_etag'])
self.assertEqual(int(self.env.tgt_length),
obj_info['symlink_bytes'])
@@ -1550,7 +1551,7 @@ class TestSymlinkSlo(Base):
'symlink_path': '/v1/%s/%s/manifest-abcde' % (
self.account_name, self.env.container2.name),
'symlink_bytes': 4 * 2 ** 20 + 1,
- 'symlink_etag': manifest_etag,
+ 'symlink_etag': normalize_etag(manifest_etag),
})
def test_static_link_target_slo_manifest_wrong_etag(self):
@@ -1740,7 +1741,11 @@ class TestSymlinkToSloSegments(Base):
self.assertEqual(1024 * 1024, f_dict['bytes'])
self.assertEqual('application/octet-stream',
f_dict['content_type'])
- self.assertEqual(manifest_etag, f_dict['hash'])
+ if tf.cluster_info.get('etag_quoter', {}).get(
+ 'enable_by_default'):
+ self.assertEqual(manifest_etag, '"%s"' % f_dict['hash'])
+ else:
+ self.assertEqual(manifest_etag, f_dict['hash'])
self.assertEqual(slo_etag, f_dict['slo_etag'])
break
else:
@@ -1759,7 +1764,11 @@ class TestSymlinkToSloSegments(Base):
self.assertEqual(1024 * 1024, f_dict['bytes'])
self.assertEqual(file_item.content_type,
f_dict['content_type'])
- self.assertEqual(manifest_etag, f_dict['hash'])
+ if tf.cluster_info.get('etag_quoter', {}).get(
+ 'enable_by_default'):
+ self.assertEqual(manifest_etag, '"%s"' % f_dict['hash'])
+ else:
+ self.assertEqual(manifest_etag, f_dict['hash'])
self.assertEqual(slo_etag, f_dict['slo_etag'])
break
else:
@@ -1778,7 +1787,11 @@ class TestSymlinkToSloSegments(Base):
self.assertEqual(1024 * 1024, f_dict['bytes'])
self.assertEqual(file_item.content_type,
f_dict['content_type'])
- self.assertEqual(manifest_etag, f_dict['hash'])
+ if tf.cluster_info.get('etag_quoter', {}).get(
+ 'enable_by_default'):
+ self.assertEqual(manifest_etag, '"%s"' % f_dict['hash'])
+ else:
+ self.assertEqual(manifest_etag, f_dict['hash'])
self.assertEqual(slo_etag, f_dict['slo_etag'])
break
else:
@@ -1811,6 +1824,8 @@ class TestSymlinkToSloSegments(Base):
source_contents = source.read(parms={'multipart-manifest': 'get'})
source_json = json.loads(source_contents)
manifest_etag = hashlib.md5(source_contents).hexdigest()
+ if tf.cluster_info.get('etag_quoter', {}).get('enable_by_default'):
+ manifest_etag = '"%s"' % manifest_etag
source.initialize()
slo_etag = source.etag
@@ -1857,14 +1872,20 @@ class TestSymlinkToSloSegments(Base):
actual = names['manifest-linkto-ab']
self.assertEqual(2 * 1024 * 1024, actual['bytes'])
self.assertEqual('application/octet-stream', actual['content_type'])
- self.assertEqual(manifest_etag, actual['hash'])
+ if tf.cluster_info.get('etag_quoter', {}).get('enable_by_default'):
+ self.assertEqual(manifest_etag, '"%s"' % actual['hash'])
+ else:
+ self.assertEqual(manifest_etag, actual['hash'])
self.assertEqual(slo_etag, actual['slo_etag'])
self.assertIn('copied-ab-manifest-only', names)
actual = names['copied-ab-manifest-only']
self.assertEqual(2 * 1024 * 1024, actual['bytes'])
self.assertEqual('application/octet-stream', actual['content_type'])
- self.assertEqual(manifest_etag, actual['hash'])
+ if tf.cluster_info.get('etag_quoter', {}).get('enable_by_default'):
+ self.assertEqual(manifest_etag, '"%s"' % actual['hash'])
+ else:
+ self.assertEqual(manifest_etag, actual['hash'])
self.assertEqual(slo_etag, actual['slo_etag'])
@@ -2000,13 +2021,13 @@ class TestSymlinkTargetObjectComparison(Base):
else:
self.assertEqual(b'', body)
self.assert_status(200)
- self.assert_header('etag', md5)
+ self.assert_etag(md5)
hdrs = {'If-Match': 'bogus'}
self.assertRaises(ResponseError, file_symlink.read, hdrs=hdrs,
parms=self.env.parms)
self.assert_status(412)
- self.assert_header('etag', md5)
+ self.assert_etag(md5)
def testIfMatchMultipleEtags(self):
for file_item in self.env.files:
@@ -2022,13 +2043,13 @@ class TestSymlinkTargetObjectComparison(Base):
else:
self.assertEqual(b'', body)
self.assert_status(200)
- self.assert_header('etag', md5)
+ self.assert_etag(md5)
hdrs = {'If-Match': '"bogus1", "bogus2", "bogus3"'}
self.assertRaises(ResponseError, file_symlink.read, hdrs=hdrs,
parms=self.env.parms)
self.assert_status(412)
- self.assert_header('etag', md5)
+ self.assert_etag(md5)
def testIfNoneMatch(self):
for file_item in self.env.files:
@@ -2044,13 +2065,13 @@ class TestSymlinkTargetObjectComparison(Base):
else:
self.assertEqual(b'', body)
self.assert_status(200)
- self.assert_header('etag', md5)
+ self.assert_etag(md5)
hdrs = {'If-None-Match': md5}
self.assertRaises(ResponseError, file_symlink.read, hdrs=hdrs,
parms=self.env.parms)
self.assert_status(304)
- self.assert_header('etag', md5)
+ self.assert_etag(md5)
self.assert_header('accept-ranges', 'bytes')
def testIfNoneMatchMultipleEtags(self):
@@ -2067,14 +2088,14 @@ class TestSymlinkTargetObjectComparison(Base):
else:
self.assertEqual(b'', body)
self.assert_status(200)
- self.assert_header('etag', md5)
+ self.assert_etag(md5)
hdrs = {'If-None-Match':
'"bogus1", "bogus2", "%s"' % md5}
self.assertRaises(ResponseError, file_symlink.read, hdrs=hdrs,
parms=self.env.parms)
self.assert_status(304)
- self.assert_header('etag', md5)
+ self.assert_etag(md5)
self.assert_header('accept-ranges', 'bytes')
def testIfModifiedSince(self):
@@ -2091,19 +2112,19 @@ class TestSymlinkTargetObjectComparison(Base):
else:
self.assertEqual(b'', body)
self.assert_status(200)
- self.assert_header('etag', md5)
+ self.assert_etag(md5)
self.assertTrue(file_symlink.info(hdrs=hdrs, parms=self.env.parms))
hdrs = {'If-Modified-Since': self.env.time_new}
self.assertRaises(ResponseError, file_symlink.read, hdrs=hdrs,
parms=self.env.parms)
self.assert_status(304)
- self.assert_header('etag', md5)
+ self.assert_etag(md5)
self.assert_header('accept-ranges', 'bytes')
self.assertRaises(ResponseError, file_symlink.info, hdrs=hdrs,
parms=self.env.parms)
self.assert_status(304)
- self.assert_header('etag', md5)
+ self.assert_etag(md5)
self.assert_header('accept-ranges', 'bytes')
def testIfUnmodifiedSince(self):
@@ -2120,18 +2141,18 @@ class TestSymlinkTargetObjectComparison(Base):
else:
self.assertEqual(b'', body)
self.assert_status(200)
- self.assert_header('etag', md5)
+ self.assert_etag(md5)
self.assertTrue(file_symlink.info(hdrs=hdrs, parms=self.env.parms))
hdrs = {'If-Unmodified-Since': self.env.time_old_f2}
self.assertRaises(ResponseError, file_symlink.read, hdrs=hdrs,
parms=self.env.parms)
self.assert_status(412)
- self.assert_header('etag', md5)
+ self.assert_etag(md5)
self.assertRaises(ResponseError, file_symlink.info, hdrs=hdrs,
parms=self.env.parms)
self.assert_status(412)
- self.assert_header('etag', md5)
+ self.assert_etag(md5)
def testIfMatchAndUnmodified(self):
for file_item in self.env.files:
@@ -2148,21 +2169,21 @@ class TestSymlinkTargetObjectComparison(Base):
else:
self.assertEqual(b'', body)
self.assert_status(200)
- self.assert_header('etag', md5)
+ self.assert_etag(md5)
hdrs = {'If-Match': 'bogus',
'If-Unmodified-Since': self.env.time_new}
self.assertRaises(ResponseError, file_symlink.read, hdrs=hdrs,
parms=self.env.parms)
self.assert_status(412)
- self.assert_header('etag', md5)
+ self.assert_etag(md5)
hdrs = {'If-Match': md5,
'If-Unmodified-Since': self.env.time_old_f3}
self.assertRaises(ResponseError, file_symlink.read, hdrs=hdrs,
parms=self.env.parms)
self.assert_status(412)
- self.assert_header('etag', md5)
+ self.assert_etag(md5)
def testLastModified(self):
file_item = self.env.container.file(Utils.create_name())
@@ -2186,7 +2207,7 @@ class TestSymlinkTargetObjectComparison(Base):
hdrs = {'If-Modified-Since': last_modified}
self.assertRaises(ResponseError, file_symlink.read, hdrs=hdrs)
self.assert_status(304)
- self.assert_header('etag', md5)
+ self.assert_etag(md5)
self.assert_header('accept-ranges', 'bytes')
hdrs = {'If-Unmodified-Since': last_modified}
@@ -2227,20 +2248,20 @@ class TestSymlinkComparison(TestSymlinkTargetObjectComparison):
body = file_symlink.read(hdrs=hdrs, parms=self.env.parms)
self.assertEqual(b'', body)
self.assert_status(200)
- self.assert_header('etag', md5)
+ self.assert_etag(md5)
hdrs = {'If-Modified-Since': last_modified}
self.assertRaises(ResponseError, file_symlink.read, hdrs=hdrs,
parms=self.env.parms)
self.assert_status(304)
- self.assert_header('etag', md5)
+ self.assert_etag(md5)
self.assert_header('accept-ranges', 'bytes')
hdrs = {'If-Unmodified-Since': last_modified}
body = file_symlink.read(hdrs=hdrs, parms=self.env.parms)
self.assertEqual(b'', body)
self.assert_status(200)
- self.assert_header('etag', md5)
+ self.assert_etag(md5)
class TestSymlinkAccountTempurl(Base):
diff --git a/test/functional/test_versioned_writes.py b/test/functional/test_versioned_writes.py
index d58da88e6..7521825f2 100644
--- a/test/functional/test_versioned_writes.py
+++ b/test/functional/test_versioned_writes.py
@@ -684,7 +684,11 @@ class TestObjectVersioning(Base):
prev_version = versions_container.file(versioned_obj_name)
prev_version_info = prev_version.info(parms={'symlink': 'get'})
self.assertEqual(b"aaaaa", prev_version.read())
- self.assertEqual(MD5_OF_EMPTY_STRING, prev_version_info['etag'])
+ symlink_etag = prev_version_info['etag']
+ if symlink_etag.startswith('"') and symlink_etag.endswith('"') and \
+ symlink_etag[1:-1]:
+ symlink_etag = symlink_etag[1:-1]
+ self.assertEqual(MD5_OF_EMPTY_STRING, symlink_etag)
self.assertEqual(sym_tgt_header,
prev_version_info['x_symlink_target'])
return symlink, tgt_a
@@ -698,7 +702,10 @@ class TestObjectVersioning(Base):
symlink.delete()
sym_info = symlink.info(parms={'symlink': 'get'})
self.assertEqual(b"aaaaa", symlink.read())
- self.assertEqual(MD5_OF_EMPTY_STRING, sym_info['etag'])
+ if tf.cluster_info.get('etag_quoter', {}).get('enable_by_default'):
+ self.assertEqual('"%s"' % MD5_OF_EMPTY_STRING, sym_info['etag'])
+ else:
+ self.assertEqual(MD5_OF_EMPTY_STRING, sym_info['etag'])
self.assertEqual(
quote(unquote('%s/%s' % (self.env.container.name, target.name))),
sym_info['x_symlink_target'])
diff --git a/test/functional/tests.py b/test/functional/tests.py
index dc149cffa..51b4c663f 100644
--- a/test/functional/tests.py
+++ b/test/functional/tests.py
@@ -27,6 +27,7 @@ import uuid
from copy import deepcopy
import eventlet
from swift.common.http import is_success, is_client_error
+from swift.common.swob import normalize_etag
from email.utils import parsedate
if six.PY2:
@@ -131,6 +132,13 @@ class Base(unittest.TestCase):
'Expected header name %r not found in response.' % header_name)
self.assertEqual(expected_value, actual_value)
+ def assert_etag(self, unquoted_value):
+ if tf.cluster_info.get('etag_quoter', {}).get('enable_by_default'):
+ expected = '"%s"' % unquoted_value
+ else:
+ expected = unquoted_value
+ self.assert_header('etag', expected)
+
class Base2(object):
@classmethod
@@ -874,7 +882,11 @@ class TestContainer(Base):
for actual in file_list:
name = actual['name']
self.assertIn(name, expected)
- self.assertEqual(expected[name]['etag'], actual['hash'])
+ if tf.cluster_info.get('etag_quoter', {}).get('enable_by_default'):
+ self.assertEqual(expected[name]['etag'],
+ '"%s"' % actual['hash'])
+ else:
+ self.assertEqual(expected[name]['etag'], actual['hash'])
self.assertEqual(
expected[name]['content_type'], actual['content_type'])
self.assertEqual(
@@ -1365,6 +1377,8 @@ class TestFile(Base):
'x-delete-at': mock.ANY,
'x-trans-id': mock.ANY,
'x-openstack-request-id': mock.ANY}
+ if tf.cluster_info.get('etag_quoter', {}).get('enable_by_default'):
+ expected_headers['etag'] = '"%s"' % expected_headers['etag']
unexpected_headers = ['connection', 'x-delete-after']
do_test(put_headers, {}, expected_headers, unexpected_headers)
@@ -1420,7 +1434,7 @@ class TestFile(Base):
self.fail('Failed to find %s in listing' % dest_filename)
self.assertEqual(file_item.size, obj['bytes'])
- self.assertEqual(file_item.etag, obj['hash'])
+ self.assertEqual(normalize_etag(file_item.etag), obj['hash'])
self.assertEqual(file_item.content_type, obj['content_type'])
file_copy = cont.file(dest_filename)
@@ -1470,7 +1484,7 @@ class TestFile(Base):
self.fail('Failed to find %s in listing' % dest_filename)
self.assertEqual(file_item.size, obj['bytes'])
- self.assertEqual(file_item.etag, obj['hash'])
+ self.assertEqual(normalize_etag(file_item.etag), obj['hash'])
self.assertEqual(
'application/test-changed', obj['content_type'])
@@ -1505,7 +1519,7 @@ class TestFile(Base):
self.fail('Failed to find %s in listing' % dest_filename)
self.assertEqual(file_item.size, obj['bytes'])
- self.assertEqual(file_item.etag, obj['hash'])
+ self.assertEqual(normalize_etag(file_item.etag), obj['hash'])
self.assertEqual(
'application/test-updated', obj['content_type'])
@@ -2088,7 +2102,7 @@ class TestFile(Base):
self.assertEqual(file_item.read(hdrs=hdrs), data[-i:])
self.assert_header('content-range', 'bytes %d-%d/%d' % (
file_length - i, file_length - 1, file_length))
- self.assert_header('etag', file_item.md5)
+ self.assert_etag(file_item.md5)
self.assert_header('accept-ranges', 'bytes')
range_string = 'bytes=%d-' % (i)
@@ -2102,7 +2116,7 @@ class TestFile(Base):
self.assertRaises(ResponseError, file_item.read, hdrs=hdrs)
self.assert_status(416)
self.assert_header('content-range', 'bytes */%d' % file_length)
- self.assert_header('etag', file_item.md5)
+ self.assert_etag(file_item.md5)
self.assert_header('accept-ranges', 'bytes')
range_string = 'bytes=%d-%d' % (file_length - 1000, file_length + 2000)
@@ -2416,14 +2430,16 @@ class TestFile(Base):
file_item.content_type = content_type
file_item.write_random(self.env.file_size)
- md5 = file_item.md5
+ expected_etag = file_item.md5
+ if tf.cluster_info.get('etag_quoter', {}).get('enable_by_default'):
+ expected_etag = '"%s"' % expected_etag
file_item = self.env.container.file(file_name)
info = file_item.info()
self.assert_status(200)
self.assertEqual(info['content_length'], self.env.file_size)
- self.assertEqual(info['etag'], md5)
+ self.assertEqual(info['etag'], expected_etag)
self.assertEqual(info['content_type'], content_type)
self.assertIn('last_modified', info)
@@ -2612,14 +2628,7 @@ class TestFile(Base):
file_item = self.env.container.file(Utils.create_name())
data = io.BytesIO(file_item.write_random(512))
- etag = File.compute_md5sum(data)
-
- headers = dict((h.lower(), v)
- for h, v in self.env.conn.response.getheaders())
- self.assertIn('etag', headers.keys())
-
- header_etag = headers['etag'].strip('"')
- self.assertEqual(etag, header_etag)
+ self.assert_etag(File.compute_md5sum(data))
def testChunkedPut(self):
if (tf.web_front_end == 'apache2'):
@@ -2645,7 +2654,7 @@ class TestFile(Base):
self.assertEqual(data, file_item.read())
info = file_item.info()
- self.assertEqual(etag, info['etag'])
+ self.assertEqual(normalize_etag(info['etag']), etag)
def test_POST(self):
# verify consistency between object and container listing metadata
@@ -2670,7 +2679,10 @@ class TestFile(Base):
self.fail('Failed to find file %r in listing' % file_name)
self.assertEqual(1024, f_dict['bytes'])
self.assertEqual('text/foobar', f_dict['content_type'])
- self.assertEqual(etag, f_dict['hash'])
+ if tf.cluster_info.get('etag_quoter', {}).get('enable_by_default'):
+ self.assertEqual(etag, '"%s"' % f_dict['hash'])
+ else:
+ self.assertEqual(etag, f_dict['hash'])
put_last_modified = f_dict['last_modified']
# now POST updated content-type to each file
@@ -2697,7 +2709,10 @@ class TestFile(Base):
self.assertEqual(1024, f_dict['bytes'])
self.assertEqual('image/foobarbaz', f_dict['content_type'])
self.assertLess(put_last_modified, f_dict['last_modified'])
- self.assertEqual(etag, f_dict['hash'])
+ if tf.cluster_info.get('etag_quoter', {}).get('enable_by_default'):
+ self.assertEqual(etag, '"%s"' % f_dict['hash'])
+ else:
+ self.assertEqual(etag, f_dict['hash'])
class TestFileUTF8(Base2, TestFile):
@@ -2742,7 +2757,7 @@ class TestFileComparison(Base):
hdrs = {'If-Match': 'bogus'}
self.assertRaises(ResponseError, file_item.read, hdrs=hdrs)
self.assert_status(412)
- self.assert_header('etag', file_item.md5)
+ self.assert_etag(file_item.md5)
def testIfMatchMultipleEtags(self):
for file_item in self.env.files:
@@ -2752,7 +2767,7 @@ class TestFileComparison(Base):
hdrs = {'If-Match': '"bogus1", "bogus2", "bogus3"'}
self.assertRaises(ResponseError, file_item.read, hdrs=hdrs)
self.assert_status(412)
- self.assert_header('etag', file_item.md5)
+ self.assert_etag(file_item.md5)
def testIfNoneMatch(self):
for file_item in self.env.files:
@@ -2762,7 +2777,7 @@ class TestFileComparison(Base):
hdrs = {'If-None-Match': file_item.md5}
self.assertRaises(ResponseError, file_item.read, hdrs=hdrs)
self.assert_status(304)
- self.assert_header('etag', file_item.md5)
+ self.assert_etag(file_item.md5)
self.assert_header('accept-ranges', 'bytes')
def testIfNoneMatchMultipleEtags(self):
@@ -2774,7 +2789,7 @@ class TestFileComparison(Base):
'"bogus1", "bogus2", "%s"' % file_item.md5}
self.assertRaises(ResponseError, file_item.read, hdrs=hdrs)
self.assert_status(304)
- self.assert_header('etag', file_item.md5)
+ self.assert_etag(file_item.md5)
self.assert_header('accept-ranges', 'bytes')
def testIfModifiedSince(self):
@@ -2786,11 +2801,11 @@ class TestFileComparison(Base):
hdrs = {'If-Modified-Since': self.env.time_new}
self.assertRaises(ResponseError, file_item.read, hdrs=hdrs)
self.assert_status(304)
- self.assert_header('etag', file_item.md5)
+ self.assert_etag(file_item.md5)
self.assert_header('accept-ranges', 'bytes')
self.assertRaises(ResponseError, file_item.info, hdrs=hdrs)
self.assert_status(304)
- self.assert_header('etag', file_item.md5)
+ self.assert_etag(file_item.md5)
self.assert_header('accept-ranges', 'bytes')
def testIfUnmodifiedSince(self):
@@ -2802,10 +2817,10 @@ class TestFileComparison(Base):
hdrs = {'If-Unmodified-Since': self.env.time_old_f2}
self.assertRaises(ResponseError, file_item.read, hdrs=hdrs)
self.assert_status(412)
- self.assert_header('etag', file_item.md5)
+ self.assert_etag(file_item.md5)
self.assertRaises(ResponseError, file_item.info, hdrs=hdrs)
self.assert_status(412)
- self.assert_header('etag', file_item.md5)
+ self.assert_etag(file_item.md5)
def testIfMatchAndUnmodified(self):
for file_item in self.env.files:
@@ -2817,13 +2832,13 @@ class TestFileComparison(Base):
'If-Unmodified-Since': self.env.time_new}
self.assertRaises(ResponseError, file_item.read, hdrs=hdrs)
self.assert_status(412)
- self.assert_header('etag', file_item.md5)
+ self.assert_etag(file_item.md5)
hdrs = {'If-Match': file_item.md5,
'If-Unmodified-Since': self.env.time_old_f3}
self.assertRaises(ResponseError, file_item.read, hdrs=hdrs)
self.assert_status(412)
- self.assert_header('etag', file_item.md5)
+ self.assert_etag(file_item.md5)
def testLastModified(self):
file_name = Utils.create_name()
@@ -2844,7 +2859,7 @@ class TestFileComparison(Base):
hdrs = {'If-Modified-Since': last_modified}
self.assertRaises(ResponseError, file_item.read, hdrs=hdrs)
self.assert_status(304)
- self.assert_header('etag', etag)
+ self.assert_etag(etag)
self.assert_header('accept-ranges', 'bytes')
hdrs = {'If-Unmodified-Since': last_modified}
diff --git a/test/probe/test_object_async_update.py b/test/probe/test_object_async_update.py
index 44a25eb14..00aefcccb 100644
--- a/test/probe/test_object_async_update.py
+++ b/test/probe/test_object_async_update.py
@@ -23,6 +23,7 @@ from swiftclient.exceptions import ClientException
from swift.common import direct_client
from swift.common.manager import Manager
+from swift.common.swob import normalize_etag
from test.probe.common import kill_nonprimary_server, \
kill_server, ReplProbeTest, start_server, ECProbeTest
@@ -210,7 +211,7 @@ class TestUpdateOverridesEC(ECProbeTest):
self.assertEqual(1, len(listing))
self.assertEqual('o1', listing[0]['name'])
self.assertEqual(len(content), listing[0]['bytes'])
- self.assertEqual(meta['etag'], listing[0]['hash'])
+ self.assertEqual(normalize_etag(meta['etag']), listing[0]['hash'])
self.assertEqual('test/ctype', listing[0]['content_type'])
def test_update_during_POST_only(self):
@@ -261,7 +262,7 @@ class TestUpdateOverridesEC(ECProbeTest):
self.assertEqual(1, len(listing))
self.assertEqual('o1', listing[0]['name'])
self.assertEqual(len(content), listing[0]['bytes'])
- self.assertEqual(meta['etag'], listing[0]['hash'])
+ self.assertEqual(normalize_etag(meta['etag']), listing[0]['hash'])
self.assertEqual('test/ctype', listing[0]['content_type'])
# Run the object-updaters to send the async pending from the PUT
@@ -328,7 +329,7 @@ class TestUpdateOverridesEC(ECProbeTest):
self.assertEqual(1, len(listing))
self.assertEqual('o1', listing[0]['name'])
self.assertEqual(len(content), listing[0]['bytes'])
- self.assertEqual(meta['etag'], listing[0]['hash'])
+ self.assertEqual(normalize_etag(meta['etag']), listing[0]['hash'])
self.assertEqual('test/ctype', listing[0]['content_type'])
diff --git a/test/probe/test_object_metadata_replication.py b/test/probe/test_object_metadata_replication.py
index c5cb93f98..4b5b0d448 100644
--- a/test/probe/test_object_metadata_replication.py
+++ b/test/probe/test_object_metadata_replication.py
@@ -22,6 +22,7 @@ import uuid
from swift.common.direct_client import direct_get_suffix_hashes
from swift.common.exceptions import DiskFileDeleted
from swift.common.internal_client import UnexpectedResponse
+from swift.common.swob import normalize_etag
from swift.container.backend import ContainerBroker
from swift.common import utils
from swiftclient import client
@@ -129,7 +130,7 @@ class Test(ReplProbeTest):
def _assert_object_metadata_matches_listing(self, listing, metadata):
self.assertEqual(listing['bytes'], int(metadata['content-length']))
- self.assertEqual(listing['hash'], metadata['etag'])
+ self.assertEqual(listing['hash'], normalize_etag(metadata['etag']))
self.assertEqual(listing['content_type'], metadata['content-type'])
modified = Timestamp(metadata['x-timestamp']).isoformat
self.assertEqual(listing['last_modified'], modified)
diff --git a/test/sample.conf b/test/sample.conf
index 6b6a7ac4d..b54398835 100644
--- a/test/sample.conf
+++ b/test/sample.conf
@@ -24,6 +24,10 @@ auth_uri = http://127.0.0.1:8080/auth/v1.0
#auth_version = 3
#auth_uri = http://localhost:5000/v3/
+# Used by s3api functional tests, which don't contact auth directly
+#s3_storage_url = http://127.0.0.1:8080/
+#s3_region = us-east-1
+
# Primary functional test account (needs admin access to the account)
account = test
username = tester
diff --git a/test/unit/account/test_backend.py b/test/unit/account/test_backend.py
index 15422bd13..3556a1ad0 100644
--- a/test/unit/account/test_backend.py
+++ b/test/unit/account/test_backend.py
@@ -180,6 +180,72 @@ class TestAccountBroker(unittest.TestCase):
broker.delete_db(Timestamp.now().internal)
broker.reclaim(Timestamp.now().internal, time())
+ def test_batched_reclaim(self):
+ num_of_containers = 60
+ container_specs = []
+ now = time()
+ top_of_the_minute = now - (now % 60)
+ c = itertools.cycle([True, False])
+ for m, is_deleted in six.moves.zip(range(num_of_containers), c):
+ offset = top_of_the_minute - (m * 60)
+ container_specs.append((Timestamp(offset), is_deleted))
+ random.seed(now)
+ random.shuffle(container_specs)
+ policy_indexes = list(p.idx for p in POLICIES)
+ broker = AccountBroker(':memory:', account='test_account')
+ broker.initialize(Timestamp('1').internal)
+ for i, container_spec in enumerate(container_specs):
+ # with container12 before container2 and shuffled ts.internal we
+ # shouldn't be able to accidentally rely on any implicit ordering
+ name = 'container%s' % i
+ pidx = random.choice(policy_indexes)
+ ts, is_deleted = container_spec
+ if is_deleted:
+ broker.put_container(name, 0, ts.internal, 0, 0, pidx)
+ else:
+ broker.put_container(name, ts.internal, 0, 0, 0, pidx)
+
+ def count_reclaimable(conn, reclaim_age):
+ return conn.execute(
+ "SELECT count(*) FROM container "
+ "WHERE deleted = 1 AND delete_timestamp < ?", (reclaim_age,)
+ ).fetchone()[0]
+
+ # This is intended to divide the set of timestamps exactly in half
+ # regardless of the value of now
+ reclaim_age = top_of_the_minute + 1 - (num_of_containers / 2 * 60)
+ with broker.get() as conn:
+ self.assertEqual(count_reclaimable(conn, reclaim_age),
+ num_of_containers / 4)
+
+ orig__reclaim = broker._reclaim
+ trace = []
+
+ def tracing_reclaim(conn, age_timestamp, marker):
+ trace.append((age_timestamp, marker,
+ count_reclaimable(conn, age_timestamp)))
+ return orig__reclaim(conn, age_timestamp, marker)
+
+ with mock.patch.object(broker, '_reclaim', new=tracing_reclaim), \
+ mock.patch('swift.common.db.RECLAIM_PAGE_SIZE', 10):
+ broker.reclaim(reclaim_age, reclaim_age)
+ with broker.get() as conn:
+ self.assertEqual(count_reclaimable(conn, reclaim_age), 0)
+ self.assertEqual(3, len(trace), trace)
+ self.assertEqual([age for age, marker, reclaimable in trace],
+ [reclaim_age] * 3)
+ # markers are in-order
+ self.assertLess(trace[0][1], trace[1][1])
+ self.assertLess(trace[1][1], trace[2][1])
+ # reclaimable count gradually decreases
+ # generally, count1 > count2 > count3, but because of the randomness
+ # we may occasionally have count1 == count2 or count2 == count3
+ self.assertGreaterEqual(trace[0][2], trace[1][2])
+ self.assertGreaterEqual(trace[1][2], trace[2][2])
+ # technically, this might happen occasionally, but *really* rarely
+ self.assertTrue(trace[0][2] > trace[1][2] or
+ trace[1][2] > trace[2][2])
+
def test_delete_db_status(self):
start = next(self.ts)
broker = AccountBroker(':memory:', account='a')
diff --git a/test/unit/cli/test_info.py b/test/unit/cli/test_info.py
index ffe10dc69..f1a77d0b1 100644
--- a/test/unit/cli/test_info.py
+++ b/test/unit/cli/test_info.py
@@ -497,13 +497,10 @@ Shard Ranges (3):
print_item_locations(None, partition=part, policy_name='zero',
swift_dir=self.testdir)
exp_part_msg = 'Partition\t%s' % part
- exp_acct_msg = 'Account \tNone'
- exp_cont_msg = 'Container\tNone'
- exp_obj_msg = 'Object \tNone'
self.assertIn(exp_part_msg, out.getvalue())
- self.assertIn(exp_acct_msg, out.getvalue())
- self.assertIn(exp_cont_msg, out.getvalue())
- self.assertIn(exp_obj_msg, out.getvalue())
+ self.assertNotIn('Account', out.getvalue())
+ self.assertNotIn('Container', out.getvalue())
+ self.assertNotIn('Object', out.getvalue())
def test_print_item_locations_dashed_ring_name_partition(self):
out = StringIO()
@@ -513,13 +510,10 @@ Shard Ranges (3):
ring_name='foo-bar', partition=part,
swift_dir=self.testdir)
exp_part_msg = 'Partition\t%s' % part
- exp_acct_msg = 'Account \tNone'
- exp_cont_msg = 'Container\tNone'
- exp_obj_msg = 'Object \tNone'
self.assertIn(exp_part_msg, out.getvalue())
- self.assertIn(exp_acct_msg, out.getvalue())
- self.assertIn(exp_cont_msg, out.getvalue())
- self.assertIn(exp_obj_msg, out.getvalue())
+ self.assertNotIn('Account', out.getvalue())
+ self.assertNotIn('Container', out.getvalue())
+ self.assertNotIn('Object', out.getvalue())
def test_print_item_locations_account_with_ring(self):
out = StringIO()
@@ -533,11 +527,9 @@ Shard Ranges (3):
'but ring not named "account"'
self.assertIn(exp_warning, out.getvalue())
exp_acct_msg = 'Account \t%s' % account
- exp_cont_msg = 'Container\tNone'
- exp_obj_msg = 'Object \tNone'
self.assertIn(exp_acct_msg, out.getvalue())
- self.assertIn(exp_cont_msg, out.getvalue())
- self.assertIn(exp_obj_msg, out.getvalue())
+ self.assertNotIn('Container', out.getvalue())
+ self.assertNotIn('Object', out.getvalue())
def test_print_item_locations_account_no_ring(self):
out = StringIO()
@@ -546,11 +538,9 @@ Shard Ranges (3):
print_item_locations(None, account=account,
swift_dir=self.testdir)
exp_acct_msg = 'Account \t%s' % account
- exp_cont_msg = 'Container\tNone'
- exp_obj_msg = 'Object \tNone'
self.assertIn(exp_acct_msg, out.getvalue())
- self.assertIn(exp_cont_msg, out.getvalue())
- self.assertIn(exp_obj_msg, out.getvalue())
+ self.assertNotIn('Container', out.getvalue())
+ self.assertNotIn('Object', out.getvalue())
def test_print_item_locations_account_container_ring(self):
out = StringIO()
@@ -562,10 +552,9 @@ Shard Ranges (3):
container=container)
exp_acct_msg = 'Account \t%s' % account
exp_cont_msg = 'Container\t%s' % container
- exp_obj_msg = 'Object \tNone'
self.assertIn(exp_acct_msg, out.getvalue())
self.assertIn(exp_cont_msg, out.getvalue())
- self.assertIn(exp_obj_msg, out.getvalue())
+ self.assertNotIn('Object', out.getvalue())
def test_print_item_locations_account_container_no_ring(self):
out = StringIO()
@@ -576,10 +565,9 @@ Shard Ranges (3):
container=container, swift_dir=self.testdir)
exp_acct_msg = 'Account \t%s' % account
exp_cont_msg = 'Container\t%s' % container
- exp_obj_msg = 'Object \tNone'
self.assertIn(exp_acct_msg, out.getvalue())
self.assertIn(exp_cont_msg, out.getvalue())
- self.assertIn(exp_obj_msg, out.getvalue())
+ self.assertNotIn('Object', out.getvalue())
def test_print_item_locations_account_container_object_ring(self):
out = StringIO()
@@ -691,59 +679,59 @@ Shard Ranges (3):
def test_parse_get_node_args(self):
# Capture error messages
# (without any parameters)
- options = Namespace(policy_name=None, partition=None)
+ options = Namespace(policy_name=None, partition=None, quoted=None)
args = ''
self.assertRaisesMessage(InfoSystemExit,
'Need to specify policy_name or <ring.gz>',
parse_get_node_args, options, args.split())
# a
- options = Namespace(policy_name=None, partition=None)
+ options = Namespace(policy_name=None, partition=None, quoted=None)
args = 'a'
self.assertRaisesMessage(InfoSystemExit,
'Need to specify policy_name or <ring.gz>',
parse_get_node_args, options, args.split())
# a c
- options = Namespace(policy_name=None, partition=None)
+ options = Namespace(policy_name=None, partition=None, quoted=None)
args = 'a c'
self.assertRaisesMessage(InfoSystemExit,
'Need to specify policy_name or <ring.gz>',
parse_get_node_args, options, args.split())
# a c o
- options = Namespace(policy_name=None, partition=None)
+ options = Namespace(policy_name=None, partition=None, quoted=None)
args = 'a c o'
self.assertRaisesMessage(InfoSystemExit,
'Need to specify policy_name or <ring.gz>',
parse_get_node_args, options, args.split())
# a/c
- options = Namespace(policy_name=None, partition=None)
+ options = Namespace(policy_name=None, partition=None, quoted=None)
args = 'a/c'
self.assertRaisesMessage(InfoSystemExit,
'Need to specify policy_name or <ring.gz>',
parse_get_node_args, options, args.split())
# a/c/o
- options = Namespace(policy_name=None, partition=None)
+ options = Namespace(policy_name=None, partition=None, quoted=None)
args = 'a/c/o'
self.assertRaisesMessage(InfoSystemExit,
'Need to specify policy_name or <ring.gz>',
parse_get_node_args, options, args.split())
# account container junk/test.ring.gz
- options = Namespace(policy_name=None, partition=None)
+ options = Namespace(policy_name=None, partition=None, quoted=None)
args = 'account container junk/test.ring.gz'
self.assertRaisesMessage(InfoSystemExit,
'Need to specify policy_name or <ring.gz>',
parse_get_node_args, options, args.split())
# account container object junk/test.ring.gz
- options = Namespace(policy_name=None, partition=None)
+ options = Namespace(policy_name=None, partition=None, quoted=None)
args = 'account container object junk/test.ring.gz'
self.assertRaisesMessage(InfoSystemExit,
'Need to specify policy_name or <ring.gz>',
parse_get_node_args, options, args.split())
# object.ring.gz(without any arguments i.e. a c o)
- options = Namespace(policy_name=None, partition=None)
+ options = Namespace(policy_name=None, partition=None, quoted=None)
args = 'object.ring.gz'
self.assertRaisesMessage(InfoSystemExit,
'Ring file does not exist',
@@ -751,55 +739,55 @@ Shard Ranges (3):
# Valid policy
# -P zero
- options = Namespace(policy_name='zero', partition=None)
+ options = Namespace(policy_name='zero', partition=None, quoted=None)
args = ''
self.assertRaisesMessage(InfoSystemExit,
'No target specified',
parse_get_node_args, options, args.split())
# -P one a/c/o
- options = Namespace(policy_name='one', partition=None)
+ options = Namespace(policy_name='one', partition=None, quoted=None)
args = 'a/c/o'
ring_path, args = parse_get_node_args(options, args.split())
self.assertIsNone(ring_path)
self.assertEqual(args, ['a', 'c', 'o'])
# -P one account container photos/cat.jpg
- options = Namespace(policy_name='one', partition=None)
+ options = Namespace(policy_name='one', partition=None, quoted=None)
args = 'account container photos/cat.jpg'
ring_path, args = parse_get_node_args(options, args.split())
self.assertIsNone(ring_path)
self.assertEqual(args, ['account', 'container', 'photos/cat.jpg'])
# -P one account/container/photos/cat.jpg
- options = Namespace(policy_name='one', partition=None)
+ options = Namespace(policy_name='one', partition=None, quoted=None)
args = 'account/container/photos/cat.jpg'
ring_path, args = parse_get_node_args(options, args.split())
self.assertIsNone(ring_path)
self.assertEqual(args, ['account', 'container', 'photos/cat.jpg'])
# -P one account/container/junk/test.ring.gz(object endswith 'ring.gz')
- options = Namespace(policy_name='one', partition=None)
+ options = Namespace(policy_name='one', partition=None, quoted=None)
args = 'account/container/junk/test.ring.gz'
ring_path, args = parse_get_node_args(options, args.split())
self.assertIsNone(ring_path)
self.assertEqual(args, ['account', 'container', 'junk/test.ring.gz'])
# -P two a c o hooya
- options = Namespace(policy_name='two', partition=None)
+ options = Namespace(policy_name='two', partition=None, quoted=None)
args = 'a c o hooya'
self.assertRaisesMessage(InfoSystemExit,
'Invalid arguments',
parse_get_node_args, options, args.split())
# -P zero -p 1
- options = Namespace(policy_name='zero', partition='1')
+ options = Namespace(policy_name='zero', partition='1', quoted=None)
args = ''
ring_path, args = parse_get_node_args(options, args.split())
self.assertIsNone(ring_path)
self.assertFalse(args)
# -P one -p 1 a/c/o
- options = Namespace(policy_name='one', partition='1')
+ options = Namespace(policy_name='one', partition='1', quoted=None)
args = 'a/c/o'
ring_path, args = parse_get_node_args(options, args.split())
self.assertIsNone(ring_path)
self.assertEqual(args, ['a', 'c', 'o'])
# -P two -p 1 a c o hooya
- options = Namespace(policy_name='two', partition='1')
+ options = Namespace(policy_name='two', partition='1', quoted=None)
args = 'a c o hooya'
self.assertRaisesMessage(InfoSystemExit,
'Invalid arguments',
@@ -853,7 +841,7 @@ Shard Ranges (3):
# Mock tests
# /etc/swift/object.ring.gz(without any arguments i.e. a c o)
- options = Namespace(policy_name=None, partition=None)
+ options = Namespace(policy_name=None, partition=None, quoted=None)
args = '/etc/swift/object.ring.gz'
with mock.patch('swift.cli.info.os.path.exists') as exists:
exists.return_value = True
@@ -863,7 +851,7 @@ Shard Ranges (3):
parse_get_node_args, options, args.split())
# Similar ring_path and arguments
# /etc/swift/object.ring.gz /etc/swift/object.ring.gz
- options = Namespace(policy_name=None, partition=None)
+ options = Namespace(policy_name=None, partition=None, quoted=None)
args = '/etc/swift/object.ring.gz /etc/swift/object.ring.gz'
with mock.patch('swift.cli.info.os.path.exists') as exists:
exists.return_value = True
@@ -871,7 +859,7 @@ Shard Ranges (3):
self.assertEqual(ring_path, '/etc/swift/object.ring.gz')
self.assertEqual(args, ['etc', 'swift', 'object.ring.gz'])
# /etc/swift/object.ring.gz a/c/etc/swift/object.ring.gz
- options = Namespace(policy_name=None, partition=None)
+ options = Namespace(policy_name=None, partition=None, quoted=None)
args = '/etc/swift/object.ring.gz a/c/etc/swift/object.ring.gz'
with mock.patch('swift.cli.info.os.path.exists') as exists:
exists.return_value = True
@@ -880,7 +868,7 @@ Shard Ranges (3):
self.assertEqual(args, ['a', 'c', 'etc/swift/object.ring.gz'])
# Invalid path as mentioned in BUG#1539275
# /etc/swift/object.tar.gz account container object
- options = Namespace(policy_name=None, partition=None)
+ options = Namespace(policy_name=None, partition=None, quoted=None)
args = '/etc/swift/object.tar.gz account container object'
self.assertRaisesMessage(
InfoSystemExit,
@@ -888,7 +876,7 @@ Shard Ranges (3):
parse_get_node_args, options, args.split())
# object.ring.gz a/
- options = Namespace(policy_name=None)
+ options = Namespace(policy_name=None, quoted=None)
args = 'object.ring.gz a/'
with mock.patch('swift.cli.info.os.path.exists') as exists:
exists.return_value = True
@@ -896,7 +884,7 @@ Shard Ranges (3):
self.assertEqual(ring_path, 'object.ring.gz')
self.assertEqual(args, ['a'])
# object.ring.gz a/c
- options = Namespace(policy_name=None)
+ options = Namespace(policy_name=None, quoted=None)
args = 'object.ring.gz a/c'
with mock.patch('swift.cli.info.os.path.exists') as exists:
exists.return_value = True
@@ -904,7 +892,7 @@ Shard Ranges (3):
self.assertEqual(ring_path, 'object.ring.gz')
self.assertEqual(args, ['a', 'c'])
# object.ring.gz a/c/o
- options = Namespace(policy_name=None)
+ options = Namespace(policy_name=None, quoted=None)
args = 'object.ring.gz a/c/o'
with mock.patch('swift.cli.info.os.path.exists') as exists:
exists.return_value = True
@@ -912,7 +900,7 @@ Shard Ranges (3):
self.assertEqual(ring_path, 'object.ring.gz')
self.assertEqual(args, ['a', 'c', 'o'])
# object.ring.gz a/c/o/junk/test.ring.gz
- options = Namespace(policy_name=None)
+ options = Namespace(policy_name=None, quoted=None)
args = 'object.ring.gz a/c/o/junk/test.ring.gz'
with mock.patch('swift.cli.info.os.path.exists') as exists:
exists.return_value = True
@@ -920,7 +908,7 @@ Shard Ranges (3):
self.assertEqual(ring_path, 'object.ring.gz')
self.assertEqual(args, ['a', 'c', 'o/junk/test.ring.gz'])
# object.ring.gz a
- options = Namespace(policy_name=None)
+ options = Namespace(policy_name=None, quoted=None)
args = 'object.ring.gz a'
with mock.patch('swift.cli.info.os.path.exists') as exists:
exists.return_value = True
@@ -928,7 +916,7 @@ Shard Ranges (3):
self.assertEqual(ring_path, 'object.ring.gz')
self.assertEqual(args, ['a'])
# object.ring.gz a c
- options = Namespace(policy_name=None)
+ options = Namespace(policy_name=None, quoted=None)
args = 'object.ring.gz a c'
with mock.patch('swift.cli.info.os.path.exists') as exists:
exists.return_value = True
@@ -936,7 +924,7 @@ Shard Ranges (3):
self.assertEqual(ring_path, 'object.ring.gz')
self.assertEqual(args, ['a', 'c'])
# object.ring.gz a c o
- options = Namespace(policy_name=None)
+ options = Namespace(policy_name=None, quoted=None)
args = 'object.ring.gz a c o'
with mock.patch('swift.cli.info.os.path.exists') as exists:
exists.return_value = True
@@ -944,7 +932,7 @@ Shard Ranges (3):
self.assertEqual(ring_path, 'object.ring.gz')
self.assertEqual(args, ['a', 'c', 'o'])
# object.ring.gz a c o blah blah
- options = Namespace(policy_name=None)
+ options = Namespace(policy_name=None, quoted=None)
args = 'object.ring.gz a c o blah blah'
with mock.patch('swift.cli.info.os.path.exists') as exists:
exists.return_value = True
@@ -953,7 +941,7 @@ Shard Ranges (3):
'Invalid arguments',
parse_get_node_args, options, args.split())
# object.ring.gz a/c/o/blah/blah
- options = Namespace(policy_name=None)
+ options = Namespace(policy_name=None, quoted=None)
args = 'object.ring.gz a/c/o/blah/blah'
with mock.patch('swift.cli.info.os.path.exists') as exists:
exists.return_value = True
@@ -962,7 +950,7 @@ Shard Ranges (3):
self.assertEqual(args, ['a', 'c', 'o/blah/blah'])
# object.ring.gz -p 1
- options = Namespace(policy_name=None, partition='1')
+ options = Namespace(policy_name=None, partition='1', quoted=None)
args = 'object.ring.gz'
with mock.patch('swift.cli.info.os.path.exists') as exists:
exists.return_value = True
@@ -970,7 +958,7 @@ Shard Ranges (3):
self.assertEqual(ring_path, 'object.ring.gz')
self.assertFalse(args)
# object.ring.gz -p 1 a c o
- options = Namespace(policy_name=None, partition='1')
+ options = Namespace(policy_name=None, partition='1', quoted=None)
args = 'object.ring.gz a c o'
with mock.patch('swift.cli.info.os.path.exists') as exists:
exists.return_value = True
@@ -978,7 +966,7 @@ Shard Ranges (3):
self.assertEqual(ring_path, 'object.ring.gz')
self.assertEqual(args, ['a', 'c', 'o'])
# object.ring.gz -p 1 a c o forth_arg
- options = Namespace(policy_name=None, partition='1')
+ options = Namespace(policy_name=None, partition='1', quoted=None)
args = 'object.ring.gz a c o forth_arg'
with mock.patch('swift.cli.info.os.path.exists') as exists:
exists.return_value = True
@@ -987,7 +975,7 @@ Shard Ranges (3):
'Invalid arguments',
parse_get_node_args, options, args.split())
# object.ring.gz -p 1 a/c/o
- options = Namespace(policy_name=None, partition='1')
+ options = Namespace(policy_name=None, partition='1', quoted=None)
args = 'object.ring.gz a/c/o'
with mock.patch('swift.cli.info.os.path.exists') as exists:
exists.return_value = True
@@ -995,7 +983,7 @@ Shard Ranges (3):
self.assertEqual(ring_path, 'object.ring.gz')
self.assertEqual(args, ['a', 'c', 'o'])
# object.ring.gz -p 1 a/c/junk/test.ring.gz
- options = Namespace(policy_name=None, partition='1')
+ options = Namespace(policy_name=None, partition='1', quoted=None)
args = 'object.ring.gz a/c/junk/test.ring.gz'
with mock.patch('swift.cli.info.os.path.exists') as exists:
exists.return_value = True
@@ -1003,7 +991,7 @@ Shard Ranges (3):
self.assertEqual(ring_path, 'object.ring.gz')
self.assertEqual(args, ['a', 'c', 'junk/test.ring.gz'])
# object.ring.gz -p 1 a/c/photos/cat.jpg
- options = Namespace(policy_name=None, partition='1')
+ options = Namespace(policy_name=None, partition='1', quoted=None)
args = 'object.ring.gz a/c/photos/cat.jpg'
with mock.patch('swift.cli.info.os.path.exists') as exists:
exists.return_value = True
@@ -1012,7 +1000,7 @@ Shard Ranges (3):
self.assertEqual(args, ['a', 'c', 'photos/cat.jpg'])
# --all object.ring.gz a
- options = Namespace(all=True, policy_name=None)
+ options = Namespace(all=True, policy_name=None, quoted=None)
args = 'object.ring.gz a'
with mock.patch('swift.cli.info.os.path.exists') as exists:
exists.return_value = True
@@ -1020,7 +1008,7 @@ Shard Ranges (3):
self.assertEqual(ring_path, 'object.ring.gz')
self.assertEqual(args, ['a'])
# --all object.ring.gz a c
- options = Namespace(all=True, policy_name=None)
+ options = Namespace(all=True, policy_name=None, quoted=None)
args = 'object.ring.gz a c'
with mock.patch('swift.cli.info.os.path.exists') as exists:
exists.return_value = True
@@ -1028,7 +1016,7 @@ Shard Ranges (3):
self.assertEqual(ring_path, 'object.ring.gz')
self.assertEqual(args, ['a', 'c'])
# --all object.ring.gz a c o
- options = Namespace(all=True, policy_name=None)
+ options = Namespace(all=True, policy_name=None, quoted=None)
args = 'object.ring.gz a c o'
with mock.patch('swift.cli.info.os.path.exists') as exists:
exists.return_value = True
@@ -1036,7 +1024,7 @@ Shard Ranges (3):
self.assertEqual(ring_path, 'object.ring.gz')
self.assertEqual(args, ['a', 'c', 'o'])
# object.ring.gz account container photos/cat.jpg
- options = Namespace(policy_name=None, partition=None)
+ options = Namespace(policy_name=None, partition=None, quoted=None)
args = 'object.ring.gz account container photos/cat.jpg'
with mock.patch('swift.cli.info.os.path.exists') as exists:
exists.return_value = True
@@ -1044,7 +1032,7 @@ Shard Ranges (3):
self.assertEqual(ring_path, 'object.ring.gz')
self.assertEqual(args, ['account', 'container', 'photos/cat.jpg'])
# object.ring.gz /account/container/photos/cat.jpg
- options = Namespace(policy_name=None, partition=None)
+ options = Namespace(policy_name=None, partition=None, quoted=None)
args = 'object.ring.gz account/container/photos/cat.jpg'
with mock.patch('swift.cli.info.os.path.exists') as exists:
exists.return_value = True
@@ -1053,7 +1041,7 @@ Shard Ranges (3):
self.assertEqual(args, ['account', 'container', 'photos/cat.jpg'])
# Object name ends with 'ring.gz'
# object.ring.gz /account/container/junk/test.ring.gz
- options = Namespace(policy_name=None, partition=None)
+ options = Namespace(policy_name=None, partition=None, quoted=None)
args = 'object.ring.gz account/container/junk/test.ring.gz'
with mock.patch('swift.cli.info.os.path.exists') as exists:
exists.return_value = True
@@ -1061,6 +1049,32 @@ Shard Ranges (3):
self.assertEqual(ring_path, 'object.ring.gz')
self.assertEqual(args, ['account', 'container', 'junk/test.ring.gz'])
+ # Object name has special characters
+ # object.ring.gz /account/container/obj\nwith%0anewline
+ options = Namespace(policy_name=None, partition=None, quoted=None)
+ args = ['object.ring.gz', 'account/container/obj\nwith%0anewline']
+ with mock.patch('swift.cli.info.os.path.exists') as exists:
+ exists.return_value = True
+ ring_path, args = parse_get_node_args(options, args)
+ self.assertEqual(ring_path, 'object.ring.gz')
+ self.assertEqual(args, ['account', 'container', 'obj\nwith%0anewline'])
+
+ options = Namespace(policy_name=None, partition=None, quoted=True)
+ args = ['object.ring.gz', 'account/container/obj\nwith%0anewline']
+ with mock.patch('swift.cli.info.os.path.exists') as exists:
+ exists.return_value = True
+ ring_path, args = parse_get_node_args(options, args)
+ self.assertEqual(ring_path, 'object.ring.gz')
+ self.assertEqual(args, ['account', 'container', 'obj\nwith\nnewline'])
+
+ options = Namespace(policy_name=None, partition=None, quoted=False)
+ args = ['object.ring.gz', 'account/container/obj\nwith%0anewline']
+ with mock.patch('swift.cli.info.os.path.exists') as exists:
+ exists.return_value = True
+ ring_path, args = parse_get_node_args(options, args)
+ self.assertEqual(ring_path, 'object.ring.gz')
+ self.assertEqual(args, ['account', 'container', 'obj\nwith%0anewline'])
+
class TestPrintObj(TestCliInfoBase):
diff --git a/test/unit/cli/test_manage_shard_ranges.py b/test/unit/cli/test_manage_shard_ranges.py
index 65bcd0dd6..7f0aa8857 100644
--- a/test/unit/cli/test_manage_shard_ranges.py
+++ b/test/unit/cli/test_manage_shard_ranges.py
@@ -189,6 +189,7 @@ class TestManageShardRanges(unittest.TestCase):
' "meta_timestamp": "%s",' % now.internal,
' "name": "a/c",',
' "object_count": 0,',
+ ' "reported": 0,',
' "state": "sharding",',
' "state_timestamp": "%s",' % now.internal,
' "timestamp": "%s",' % now.internal,
@@ -230,6 +231,7 @@ class TestManageShardRanges(unittest.TestCase):
' "meta_timestamp": "%s",' % now.internal,
' "name": "a/c",',
' "object_count": 0,',
+ ' "reported": 0,',
' "state": "sharding",',
' "state_timestamp": "%s",' % now.internal,
' "timestamp": "%s",' % now.internal,
diff --git a/test/unit/cli/test_relinker.py b/test/unit/cli/test_relinker.py
index 8daddb13f..571f1c2d7 100644
--- a/test/unit/cli/test_relinker.py
+++ b/test/unit/cli/test_relinker.py
@@ -12,6 +12,9 @@
# limitations under the License.
import binascii
+import errno
+import fcntl
+import json
import os
import shutil
import struct
@@ -30,6 +33,9 @@ from test.unit import FakeLogger, skip_if_no_xattrs, DEFAULT_TEST_EC_TYPE, \
patch_policies
+PART_POWER = 8
+
+
class TestRelinker(unittest.TestCase):
def setUp(self):
skip_if_no_xattrs()
@@ -40,7 +46,7 @@ class TestRelinker(unittest.TestCase):
os.mkdir(self.testdir)
os.mkdir(self.devices)
- self.rb = ring.RingBuilder(8, 6.0, 1)
+ self.rb = ring.RingBuilder(PART_POWER, 6.0, 1)
for i in range(6):
ip = "127.0.0.%s" % i
@@ -55,10 +61,10 @@ class TestRelinker(unittest.TestCase):
os.mkdir(self.objects)
self._hash = utils.hash_path('a/c/o')
digest = binascii.unhexlify(self._hash)
- part = struct.unpack_from('>I', digest)[0] >> 24
+ self.part = struct.unpack_from('>I', digest)[0] >> 24
self.next_part = struct.unpack_from('>I', digest)[0] >> 23
self.objdir = os.path.join(
- self.objects, str(part), self._hash[-3:], self._hash)
+ self.objects, str(self.part), self._hash[-3:], self._hash)
os.makedirs(self.objdir)
self.object_fname = "1278553064.00000.data"
self.objname = os.path.join(self.objdir, self.object_fname)
@@ -97,6 +103,27 @@ class TestRelinker(unittest.TestCase):
stat_new = os.stat(self.expected_file)
self.assertEqual(stat_old.st_ino, stat_new.st_ino)
+ def test_relink_device_filter(self):
+ self.rb.prepare_increase_partition_power()
+ self._save_ring()
+ relinker.relink(self.testdir, self.devices, True,
+ device=self.existing_device)
+
+ self.assertTrue(os.path.isdir(self.expected_dir))
+ self.assertTrue(os.path.isfile(self.expected_file))
+
+ stat_old = os.stat(os.path.join(self.objdir, self.object_fname))
+ stat_new = os.stat(self.expected_file)
+ self.assertEqual(stat_old.st_ino, stat_new.st_ino)
+
+ def test_relink_device_filter_invalid(self):
+ self.rb.prepare_increase_partition_power()
+ self._save_ring()
+ relinker.relink(self.testdir, self.devices, True, device='none')
+
+ self.assertFalse(os.path.isdir(self.expected_dir))
+ self.assertFalse(os.path.isfile(self.expected_file))
+
def _common_test_cleanup(self, relink=True):
# Create a ring that has prev_part_power set
self.rb.prepare_increase_partition_power()
@@ -121,6 +148,187 @@ class TestRelinker(unittest.TestCase):
self.assertFalse(os.path.isfile(
os.path.join(self.objdir, self.object_fname)))
+ def test_cleanup_device_filter(self):
+ self._common_test_cleanup()
+ self.assertEqual(0, relinker.cleanup(self.testdir, self.devices, True,
+ device=self.existing_device))
+
+ # Old objectname should be removed, new should still exist
+ self.assertTrue(os.path.isdir(self.expected_dir))
+ self.assertTrue(os.path.isfile(self.expected_file))
+ self.assertFalse(os.path.isfile(
+ os.path.join(self.objdir, self.object_fname)))
+
+ def test_cleanup_device_filter_invalid(self):
+ self._common_test_cleanup()
+ self.assertEqual(0, relinker.cleanup(self.testdir, self.devices, True,
+ device='none'))
+
+ # Old objectname should still exist, new should still exist
+ self.assertTrue(os.path.isdir(self.expected_dir))
+ self.assertTrue(os.path.isfile(self.expected_file))
+ self.assertTrue(os.path.isfile(
+ os.path.join(self.objdir, self.object_fname)))
+
+ def test_relink_cleanup(self):
+ state_file = os.path.join(self.devices, self.existing_device,
+ 'relink.objects.json')
+
+ self.rb.prepare_increase_partition_power()
+ self._save_ring()
+ relinker.relink(self.testdir, self.devices, True)
+ with open(state_file, 'rt') as f:
+ self.assertEqual(json.load(f), {str(self.part): [True, False]})
+
+ self.rb.increase_partition_power()
+ self.rb._ring = None # Force builder to reload ring
+ self._save_ring()
+ relinker.cleanup(self.testdir, self.devices, True)
+ with open(state_file, 'rt') as f:
+ self.assertEqual(json.load(f),
+ {str(self.part): [True, True],
+ str(self.next_part): [True, True]})
+
+ def test_devices_filter_filtering(self):
+ # With no filtering, returns all devices
+ devices = relinker.devices_filter(None, "", [self.existing_device])
+ self.assertEqual(set([self.existing_device]), devices)
+
+ # With a matching filter, returns what is matching
+ devices = relinker.devices_filter(self.existing_device, "",
+ [self.existing_device, 'sda2'])
+ self.assertEqual(set([self.existing_device]), devices)
+
+ # With a non matching filter, returns nothing
+ devices = relinker.devices_filter('none', "", [self.existing_device])
+ self.assertEqual(set(), devices)
+
+ def test_hook_pre_post_device_locking(self):
+ locks = [None]
+ device_path = os.path.join(self.devices, self.existing_device)
+ datadir = 'object'
+ lock_file = os.path.join(device_path, '.relink.%s.lock' % datadir)
+
+ # The first run gets the lock
+ relinker.hook_pre_device(locks, {}, datadir, device_path)
+ self.assertNotEqual([None], locks)
+
+ # A following run would block
+ with self.assertRaises(IOError) as raised:
+ with open(lock_file, 'a') as f:
+ fcntl.flock(f.fileno(), fcntl.LOCK_EX | fcntl.LOCK_NB)
+ self.assertEqual(errno.EAGAIN, raised.exception.errno)
+
+ # Another must not get the lock, so it must return an empty list
+ relinker.hook_post_device(locks, "")
+ self.assertEqual([None], locks)
+
+ with open(lock_file, 'a') as f:
+ fcntl.flock(f.fileno(), fcntl.LOCK_EX | fcntl.LOCK_NB)
+
+ def test_state_file(self):
+ device_path = os.path.join(self.devices, self.existing_device)
+ datadir = 'objects'
+ datadir_path = os.path.join(device_path, datadir)
+ state_file = os.path.join(device_path, 'relink.%s.json' % datadir)
+
+ def call_partition_filter(step, parts):
+ # Partition 312 will be ignored because it must have been created
+ # by the relinker
+ return relinker.partitions_filter(states, step,
+ PART_POWER, PART_POWER + 1,
+ datadir_path, parts)
+
+ # Start relinking
+ states = {}
+
+ # Load the states: As it starts, it must be empty
+ locks = [None]
+ relinker.hook_pre_device(locks, states, datadir, device_path)
+ self.assertEqual({}, states)
+ os.close(locks[0]) # Release the lock
+
+ # Partition 312 is ignored because it must have been created with the
+ # next_part_power, so it does not need to be relinked
+ # 96 and 227 are reverse ordered
+ # auditor_status_ALL.json is ignored because it's not a partition
+ self.assertEqual(['227', '96'],
+ call_partition_filter(relinker.STEP_RELINK,
+ ['96', '227', '312',
+ 'auditor_status.json']))
+ self.assertEqual(states, {'96': [False, False], '227': [False, False]})
+
+ # Ack partition 96
+ relinker.hook_post_partition(states, relinker.STEP_RELINK,
+ os.path.join(datadir_path, '96'))
+ self.assertEqual(states, {'96': [True, False], '227': [False, False]})
+ with open(state_file, 'rt') as f:
+ self.assertEqual(json.load(f), {'96': [True, False],
+ '227': [False, False]})
+
+ # Restart relinking after only part 96 was done
+ self.assertEqual(['227'],
+ call_partition_filter(relinker.STEP_RELINK,
+ ['96', '227', '312']))
+ self.assertEqual(states, {'96': [True, False], '227': [False, False]})
+
+ # Ack partition 227
+ relinker.hook_post_partition(states, relinker.STEP_RELINK,
+ os.path.join(datadir_path, '227'))
+ self.assertEqual(states, {'96': [True, False], '227': [True, False]})
+ with open(state_file, 'rt') as f:
+ self.assertEqual(json.load(f), {'96': [True, False],
+ '227': [True, False]})
+
+ # If the process restarts, it reload the state
+ locks = [None]
+ states = {}
+ relinker.hook_pre_device(locks, states, datadir, device_path)
+ self.assertEqual(states, {'96': [True, False], '227': [True, False]})
+ os.close(locks[0]) # Release the lock
+
+ # Start cleanup
+ self.assertEqual(['227', '96'],
+ call_partition_filter(relinker.STEP_CLEANUP,
+ ['96', '227', '312']))
+ # Ack partition 227
+ relinker.hook_post_partition(states, relinker.STEP_CLEANUP,
+ os.path.join(datadir_path, '227'))
+ self.assertEqual(states, {'96': [True, False], '227': [True, True]})
+ with open(state_file, 'rt') as f:
+ self.assertEqual(json.load(f), {'96': [True, False],
+ '227': [True, True]})
+
+ # Restart cleanup after only part 227 was done
+ self.assertEqual(['96'],
+ call_partition_filter(relinker.STEP_CLEANUP,
+ ['96', '227', '312']))
+ self.assertEqual(states, {'96': [True, False], '227': [True, True]})
+
+ # Ack partition 96
+ relinker.hook_post_partition(states, relinker.STEP_CLEANUP,
+ os.path.join(datadir_path, '96'))
+ self.assertEqual(states, {'96': [True, True], '227': [True, True]})
+ with open(state_file, 'rt') as f:
+ self.assertEqual(json.load(f), {'96': [True, True],
+ '227': [True, True]})
+
+ # At the end, the state is still accurate
+ locks = [None]
+ states = {}
+ relinker.hook_pre_device(locks, states, datadir, device_path)
+ self.assertEqual(states, {'96': [True, True], '227': [True, True]})
+ os.close(locks[0]) # Release the lock
+
+ # If the file gets corrupted, restart from scratch
+ with open(state_file, 'wt') as f:
+ f.write('NOT JSON')
+ locks = [None]
+ states = {}
+ relinker.hook_pre_device(locks, states, datadir, device_path)
+ self.assertEqual(states, {})
+ os.close(locks[0]) # Release the lock
+
def test_cleanup_not_yet_relinked(self):
self._common_test_cleanup(relink=False)
self.assertEqual(1, relinker.cleanup(self.testdir, self.devices, True))
@@ -176,3 +384,7 @@ class TestRelinker(unittest.TestCase):
self.assertIn('failed audit and was quarantined',
self.logger.get_lines_for_level('warning')[0])
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/test/unit/common/middleware/s3api/test_obj.py b/test/unit/common/middleware/s3api/test_obj.py
index 363a1b2cb..3b80cc355 100644
--- a/test/unit/common/middleware/s3api/test_obj.py
+++ b/test/unit/common/middleware/s3api/test_obj.py
@@ -34,7 +34,7 @@ from swift.common.middleware.s3api.subresource import ACL, User, encode_acl, \
from swift.common.middleware.s3api.etree import fromstring
from swift.common.middleware.s3api.utils import mktime, S3Timestamp
from swift.common.middleware.versioned_writes.object_versioning import \
- DELETE_MARKER_CONTENT_TYPE
+ DELETE_MARKER_CONTENT_TYPE, SYSMETA_VERSIONS_CONT, SYSMETA_VERSIONS_ENABLED
class TestS3ApiObj(S3ApiTestCase):
@@ -402,6 +402,10 @@ class TestS3ApiObj(S3ApiTestCase):
@s3acl
def test_object_GET_version_id(self):
+ self.swift.register(
+ 'HEAD', '/v1/AUTH_test/bucket', swob.HTTPNoContent,
+ {SYSMETA_VERSIONS_CONT: '\x00versions\x00bucket'}, None)
+
# GET current version
req = Request.blank('/bucket/object?versionId=null',
environ={'REQUEST_METHOD': 'GET'},
@@ -452,6 +456,28 @@ class TestS3ApiObj(S3ApiTestCase):
status, headers, body = self.call_s3api(req)
self.assertEqual(status.split()[0], '404')
+ @s3acl(versioning_enabled=False)
+ def test_object_GET_with_version_id_but_not_enabled(self):
+ # Version not found
+ self.swift.register(
+ 'HEAD', '/v1/AUTH_test/bucket',
+ swob.HTTPNoContent, {}, None)
+ req = Request.blank('/bucket/object?versionId=A',
+ environ={'REQUEST_METHOD': 'GET'},
+ headers={'Authorization': 'AWS test:tester:hmac',
+ 'Date': self.get_date_header()})
+ status, headers, body = self.call_s3api(req)
+ self.assertEqual(status.split()[0], '404')
+ elem = fromstring(body, 'Error')
+ self.assertEqual(elem.find('Code').text, 'NoSuchVersion')
+ self.assertEqual(elem.find('Key').text, 'object')
+ self.assertEqual(elem.find('VersionId').text, 'A')
+ expected_calls = []
+ if not self.swift.s3_acl:
+ expected_calls.append(('HEAD', '/v1/AUTH_test/bucket'))
+ # NB: No actual backend GET!
+ self.assertEqual(expected_calls, self.swift.calls)
+
@s3acl
def test_object_PUT_error(self):
code = self._test_method_error('PUT', '/bucket/object',
@@ -1100,6 +1126,9 @@ class TestS3ApiObj(S3ApiTestCase):
def test_object_DELETE_old_version_id(self):
self.swift.register('HEAD', '/v1/AUTH_test/bucket/object',
swob.HTTPOk, self.response_headers, None)
+ self.swift.register(
+ 'HEAD', '/v1/AUTH_test/bucket', swob.HTTPNoContent,
+ {SYSMETA_VERSIONS_CONT: '\x00versions\x00bucket'}, None)
resp_headers = {'X-Object-Current-Version-Id': '1574360804.34906'}
self.swift.register('DELETE', '/v1/AUTH_test/bucket/object'
'?symlink=get&version-id=1574358170.12293',
@@ -1111,6 +1140,7 @@ class TestS3ApiObj(S3ApiTestCase):
status, headers, body = self.call_s3api(req)
self.assertEqual(status.split()[0], '204')
self.assertEqual([
+ ('HEAD', '/v1/AUTH_test/bucket'),
('HEAD', '/v1/AUTH_test/bucket/object'
'?symlink=get&version-id=1574358170.12293'),
('DELETE', '/v1/AUTH_test/bucket/object'
@@ -1118,6 +1148,11 @@ class TestS3ApiObj(S3ApiTestCase):
], self.swift.calls)
def test_object_DELETE_current_version_id(self):
+ self.swift.register(
+ 'HEAD', '/v1/AUTH_test/bucket', swob.HTTPNoContent, {
+ SYSMETA_VERSIONS_CONT: '\x00versions\x00bucket',
+ SYSMETA_VERSIONS_ENABLED: True},
+ None)
self.swift.register('HEAD', '/v1/AUTH_test/bucket/object',
swob.HTTPOk, self.response_headers, None)
resp_headers = {'X-Object-Current-Version-Id': 'null'}
@@ -1142,6 +1177,7 @@ class TestS3ApiObj(S3ApiTestCase):
status, headers, body = self.call_s3api(req)
self.assertEqual(status.split()[0], '204')
self.assertEqual([
+ ('HEAD', '/v1/AUTH_test/bucket'),
('HEAD', '/v1/AUTH_test/bucket/object'
'?symlink=get&version-id=1574358170.12293'),
('DELETE', '/v1/AUTH_test/bucket/object'
@@ -1152,6 +1188,22 @@ class TestS3ApiObj(S3ApiTestCase):
'?version-id=1574341899.21751'),
], self.swift.calls)
+ @s3acl(versioning_enabled=False)
+ def test_object_DELETE_with_version_id_but_not_enabled(self):
+ self.swift.register('HEAD', '/v1/AUTH_test/bucket',
+ swob.HTTPNoContent, {}, None)
+ req = Request.blank('/bucket/object?versionId=1574358170.12293',
+ method='DELETE', headers={
+ 'Authorization': 'AWS test:tester:hmac',
+ 'Date': self.get_date_header()})
+ status, headers, body = self.call_s3api(req)
+ self.assertEqual(status.split()[0], '204')
+ expected_calls = []
+ if not self.swift.s3_acl:
+ expected_calls.append(('HEAD', '/v1/AUTH_test/bucket'))
+ # NB: No actual backend DELETE!
+ self.assertEqual(expected_calls, self.swift.calls)
+
def test_object_DELETE_version_id_not_implemented(self):
req = Request.blank('/bucket/object?versionId=1574358170.12293',
method='DELETE', headers={
@@ -1164,6 +1216,11 @@ class TestS3ApiObj(S3ApiTestCase):
self.assertEqual(status.split()[0], '501', body)
def test_object_DELETE_current_version_id_is_delete_marker(self):
+ self.swift.register(
+ 'HEAD', '/v1/AUTH_test/bucket', swob.HTTPNoContent, {
+ SYSMETA_VERSIONS_CONT: '\x00versions\x00bucket',
+ SYSMETA_VERSIONS_ENABLED: True},
+ None)
self.swift.register('HEAD', '/v1/AUTH_test/bucket/object',
swob.HTTPOk, self.response_headers, None)
resp_headers = {'X-Object-Current-Version-Id': 'null'}
@@ -1184,6 +1241,7 @@ class TestS3ApiObj(S3ApiTestCase):
status, headers, body = self.call_s3api(req)
self.assertEqual(status.split()[0], '204')
self.assertEqual([
+ ('HEAD', '/v1/AUTH_test/bucket'),
('HEAD', '/v1/AUTH_test/bucket/object'
'?symlink=get&version-id=1574358170.12293'),
('DELETE', '/v1/AUTH_test/bucket/object'
@@ -1193,6 +1251,11 @@ class TestS3ApiObj(S3ApiTestCase):
], self.swift.calls)
def test_object_DELETE_current_version_id_is_missing(self):
+ self.swift.register(
+ 'HEAD', '/v1/AUTH_test/bucket', swob.HTTPNoContent, {
+ SYSMETA_VERSIONS_CONT: '\x00versions\x00bucket',
+ SYSMETA_VERSIONS_ENABLED: True},
+ None)
self.swift.register('HEAD', '/v1/AUTH_test/bucket/object',
swob.HTTPOk, self.response_headers, None)
resp_headers = {'X-Object-Current-Version-Id': 'null'}
@@ -1223,6 +1286,7 @@ class TestS3ApiObj(S3ApiTestCase):
status, headers, body = self.call_s3api(req)
self.assertEqual(status.split()[0], '204')
self.assertEqual([
+ ('HEAD', '/v1/AUTH_test/bucket'),
('HEAD', '/v1/AUTH_test/bucket/object'
'?symlink=get&version-id=1574358170.12293'),
('DELETE', '/v1/AUTH_test/bucket/object'
@@ -1236,6 +1300,11 @@ class TestS3ApiObj(S3ApiTestCase):
], self.swift.calls)
def test_object_DELETE_current_version_id_GET_error(self):
+ self.swift.register(
+ 'HEAD', '/v1/AUTH_test/bucket', swob.HTTPNoContent, {
+ SYSMETA_VERSIONS_CONT: '\x00versions\x00bucket',
+ SYSMETA_VERSIONS_ENABLED: True},
+ None)
self.swift.register('HEAD', '/v1/AUTH_test/bucket/object',
swob.HTTPOk, self.response_headers, None)
resp_headers = {'X-Object-Current-Version-Id': 'null'}
@@ -1251,6 +1320,7 @@ class TestS3ApiObj(S3ApiTestCase):
status, headers, body = self.call_s3api(req)
self.assertEqual(status.split()[0], '500')
self.assertEqual([
+ ('HEAD', '/v1/AUTH_test/bucket'),
('HEAD', '/v1/AUTH_test/bucket/object'
'?symlink=get&version-id=1574358170.12293'),
('DELETE', '/v1/AUTH_test/bucket/object'
@@ -1260,6 +1330,11 @@ class TestS3ApiObj(S3ApiTestCase):
], self.swift.calls)
def test_object_DELETE_current_version_id_PUT_error(self):
+ self.swift.register(
+ 'HEAD', '/v1/AUTH_test/bucket', swob.HTTPNoContent, {
+ SYSMETA_VERSIONS_CONT: '\x00versions\x00bucket',
+ SYSMETA_VERSIONS_ENABLED: True},
+ None)
self.swift.register('HEAD', '/v1/AUTH_test/bucket/object',
swob.HTTPOk, self.response_headers, None)
resp_headers = {'X-Object-Current-Version-Id': 'null'}
@@ -1283,6 +1358,7 @@ class TestS3ApiObj(S3ApiTestCase):
status, headers, body = self.call_s3api(req)
self.assertEqual(status.split()[0], '500')
self.assertEqual([
+ ('HEAD', '/v1/AUTH_test/bucket'),
('HEAD', '/v1/AUTH_test/bucket/object'
'?symlink=get&version-id=1574358170.12293'),
('DELETE', '/v1/AUTH_test/bucket/object'
@@ -1325,10 +1401,13 @@ class TestS3ApiObj(S3ApiTestCase):
'X-Object-Version-Id': '1574701081.61553'}
self.swift.register('DELETE', '/v1/AUTH_test/bucket/object',
swob.HTTPNoContent, resp_headers, None)
- self.swift.register('HEAD', '/v1/AUTH_test/bucket',
- swob.HTTPNoContent, {
- 'X-Container-Sysmeta-Versions-Enabled': True},
- None)
+ self.swift.register(
+ 'HEAD', '/v1/AUTH_test/bucket', swob.HTTPNoContent, {
+ SYSMETA_VERSIONS_CONT: '\x00versions\x00bucket',
+ SYSMETA_VERSIONS_ENABLED: True},
+ None)
+ self.swift.register('HEAD', '/v1/AUTH_test/\x00versions\x00bucket',
+ swob.HTTPNoContent, {}, None)
self.swift.register('HEAD', '/v1/AUTH_test/bucket/object',
swob.HTTPNotFound, self.response_headers, None)
req = Request.blank('/bucket/object?versionId=1574701081.61553',
@@ -1338,10 +1417,12 @@ class TestS3ApiObj(S3ApiTestCase):
status, headers, body = self.call_s3api(req)
self.assertEqual(status.split()[0], '204')
self.assertEqual([
+ ('HEAD', '/v1/AUTH_test/bucket'),
('HEAD', '/v1/AUTH_test/bucket/object'
'?symlink=get&version-id=1574701081.61553'),
('HEAD', '/v1/AUTH_test'),
('HEAD', '/v1/AUTH_test/bucket'),
+ ('HEAD', '/v1/AUTH_test/\x00versions\x00bucket'),
('DELETE', '/v1/AUTH_test/bucket/object'
'?symlink=get&version-id=1574701081.61553'),
], self.swift.calls)
diff --git a/test/unit/common/middleware/s3api/test_s3_acl.py b/test/unit/common/middleware/s3api/test_s3_acl.py
index 48f543916..da0472a66 100644
--- a/test/unit/common/middleware/s3api/test_s3_acl.py
+++ b/test/unit/common/middleware/s3api/test_s3_acl.py
@@ -34,13 +34,16 @@ from test.unit.common.middleware.s3api import FakeSwift
XMLNS_XSI = 'http://www.w3.org/2001/XMLSchema-instance'
-def s3acl(func=None, s3acl_only=False):
+def s3acl(func=None, s3acl_only=False, versioning_enabled=True):
"""
NOTE: s3acl decorator needs an instance of s3api testing framework.
(i.e. An instance for first argument is necessary)
"""
if func is None:
- return functools.partial(s3acl, s3acl_only=s3acl_only)
+ return functools.partial(
+ s3acl,
+ s3acl_only=s3acl_only,
+ versioning_enabled=versioning_enabled)
@functools.wraps(func)
def s3acl_decorator(*args, **kwargs):
@@ -57,9 +60,14 @@ def s3acl(func=None, s3acl_only=False):
# @patch(xxx)
# def test_xxxx(self)
+ fake_info = {'status': 204}
+ if versioning_enabled:
+ fake_info['sysmeta'] = {
+ 'versions-container': '\x00versions\x00bucket',
+ }
+
with patch('swift.common.middleware.s3api.s3request.'
- 'get_container_info',
- return_value={'status': 204}):
+ 'get_container_info', return_value=fake_info):
func(*args, **kwargs)
except AssertionError:
# Make traceback message to clarify the assertion
diff --git a/test/unit/common/middleware/test_ratelimit.py b/test/unit/common/middleware/test_ratelimit.py
index 2070af2d6..10042d351 100644
--- a/test/unit/common/middleware/test_ratelimit.py
+++ b/test/unit/common/middleware/test_ratelimit.py
@@ -72,12 +72,20 @@ class FakeMemcache(object):
class FakeApp(object):
+ skip_handled_check = False
def __call__(self, env, start_response):
+ assert self.skip_handled_check or env.get('swift.ratelimit.handled')
start_response('200 OK', [])
return [b'Some Content']
+class FakeReq(object):
+ def __init__(self, method, env=None):
+ self.method = method
+ self.environ = env or {}
+
+
def start_response(*args):
pass
@@ -160,36 +168,29 @@ class TestRateLimit(unittest.TestCase):
{'object_count': '5'}
the_app = ratelimit.filter_factory(conf_dict)(FakeApp())
the_app.memcache_client = fake_memcache
- req = lambda: None
- req.environ = {'swift.cache': fake_memcache, 'PATH_INFO': '/v1/a/c/o'}
+ environ = {'swift.cache': fake_memcache, 'PATH_INFO': '/v1/a/c/o'}
with mock.patch('swift.common.middleware.ratelimit.get_account_info',
lambda *args, **kwargs: {}):
- req.method = 'DELETE'
self.assertEqual(len(the_app.get_ratelimitable_key_tuples(
- req, 'a', None, None)), 0)
- req.method = 'PUT'
+ FakeReq('DELETE', environ), 'a', None, None)), 0)
self.assertEqual(len(the_app.get_ratelimitable_key_tuples(
- req, 'a', 'c', None)), 1)
- req.method = 'DELETE'
+ FakeReq('PUT', environ), 'a', 'c', None)), 1)
self.assertEqual(len(the_app.get_ratelimitable_key_tuples(
- req, 'a', 'c', None)), 1)
- req.method = 'GET'
+ FakeReq('DELETE', environ), 'a', 'c', None)), 1)
self.assertEqual(len(the_app.get_ratelimitable_key_tuples(
- req, 'a', 'c', 'o')), 0)
- req.method = 'PUT'
+ FakeReq('GET', environ), 'a', 'c', 'o')), 0)
self.assertEqual(len(the_app.get_ratelimitable_key_tuples(
- req, 'a', 'c', 'o')), 1)
+ FakeReq('PUT', environ), 'a', 'c', 'o')), 1)
- req.method = 'PUT'
self.assertEqual(len(the_app.get_ratelimitable_key_tuples(
- req, 'a', 'c', None, global_ratelimit=10)), 2)
+ FakeReq('PUT', environ), 'a', 'c', None, global_ratelimit=10)), 2)
self.assertEqual(the_app.get_ratelimitable_key_tuples(
- req, 'a', 'c', None, global_ratelimit=10)[1],
+ FakeReq('PUT', environ), 'a', 'c', None, global_ratelimit=10)[1],
('ratelimit/global-write/a', 10))
- req.method = 'PUT'
self.assertEqual(len(the_app.get_ratelimitable_key_tuples(
- req, 'a', 'c', None, global_ratelimit='notafloat')), 1)
+ FakeReq('PUT', environ), 'a', 'c', None,
+ global_ratelimit='notafloat')), 1)
def test_memcached_container_info_dict(self):
mdict = headers_to_container_info({'x-container-object-count': '45'})
@@ -204,9 +205,8 @@ class TestRateLimit(unittest.TestCase):
{'container_size': 5}
the_app = ratelimit.filter_factory(conf_dict)(FakeApp())
the_app.memcache_client = fake_memcache
- req = lambda: None
- req.method = 'PUT'
- req.environ = {'PATH_INFO': '/v1/a/c/o', 'swift.cache': fake_memcache}
+ req = FakeReq('PUT', {
+ 'PATH_INFO': '/v1/a/c/o', 'swift.cache': fake_memcache})
with mock.patch('swift.common.middleware.ratelimit.get_account_info',
lambda *args, **kwargs: {}):
tuples = the_app.get_ratelimitable_key_tuples(req, 'a', 'c', 'o')
@@ -227,8 +227,8 @@ class TestRateLimit(unittest.TestCase):
req = Request.blank('/v1/a%s/c' % meth)
req.method = meth
req.environ['swift.cache'] = FakeMemcache()
- make_app_call = lambda: self.test_ratelimit(req.environ,
- start_response)
+ make_app_call = lambda: self.test_ratelimit(
+ req.environ.copy(), start_response)
begin = time.time()
self._run(make_app_call, num_calls, current_rate,
check_time=bool(exp_time))
@@ -244,7 +244,7 @@ class TestRateLimit(unittest.TestCase):
req.method = 'PUT'
req.environ['swift.cache'] = FakeMemcache()
req.environ['swift.cache'].init_incr_return_neg = True
- make_app_call = lambda: self.test_ratelimit(req.environ,
+ make_app_call = lambda: self.test_ratelimit(req.environ.copy(),
start_response)
begin = time.time()
with mock.patch('swift.common.middleware.ratelimit.get_account_info',
@@ -260,15 +260,15 @@ class TestRateLimit(unittest.TestCase):
'account_whitelist': 'a',
'account_blacklist': 'b'}
self.test_ratelimit = ratelimit.filter_factory(conf_dict)(FakeApp())
- req = Request.blank('/')
with mock.patch.object(self.test_ratelimit,
'memcache_client', FakeMemcache()):
self.assertEqual(
- self.test_ratelimit.handle_ratelimit(req, 'a', 'c', 'o'),
+ self.test_ratelimit.handle_ratelimit(
+ Request.blank('/'), 'a', 'c', 'o'),
None)
self.assertEqual(
self.test_ratelimit.handle_ratelimit(
- req, 'b', 'c', 'o').status_int,
+ Request.blank('/'), 'b', 'c', 'o').status_int,
497)
def test_ratelimit_whitelist_sysmeta(self):
@@ -331,7 +331,7 @@ class TestRateLimit(unittest.TestCase):
self.parent = parent
def run(self):
- self.result = self.parent.test_ratelimit(req.environ,
+ self.result = self.parent.test_ratelimit(req.environ.copy(),
start_response)
def get_fake_ratelimit(*args, **kwargs):
@@ -370,18 +370,17 @@ class TestRateLimit(unittest.TestCase):
# simulates 4 requests coming in at same time, then sleeping
with mock.patch('swift.common.middleware.ratelimit.get_account_info',
lambda *args, **kwargs: {}):
- r = self.test_ratelimit(req.environ, start_response)
+ r = self.test_ratelimit(req.environ.copy(), start_response)
mock_sleep(.1)
- r = self.test_ratelimit(req.environ, start_response)
+ r = self.test_ratelimit(req.environ.copy(), start_response)
mock_sleep(.1)
- r = self.test_ratelimit(req.environ, start_response)
+ r = self.test_ratelimit(req.environ.copy(), start_response)
self.assertEqual(r[0], b'Slow down')
mock_sleep(.1)
- r = self.test_ratelimit(req.environ, start_response)
+ r = self.test_ratelimit(req.environ.copy(), start_response)
self.assertEqual(r[0], b'Slow down')
mock_sleep(.1)
- r = self.test_ratelimit(req.environ, start_response)
- print(repr(r))
+ r = self.test_ratelimit(req.environ.copy(), start_response)
self.assertEqual(r[0], b'Some Content')
def test_ratelimit_max_rate_double_container(self):
@@ -404,17 +403,17 @@ class TestRateLimit(unittest.TestCase):
# simulates 4 requests coming in at same time, then sleeping
with mock.patch('swift.common.middleware.ratelimit.get_account_info',
lambda *args, **kwargs: {}):
- r = self.test_ratelimit(req.environ, start_response)
+ r = self.test_ratelimit(req.environ.copy(), start_response)
mock_sleep(.1)
- r = self.test_ratelimit(req.environ, start_response)
+ r = self.test_ratelimit(req.environ.copy(), start_response)
mock_sleep(.1)
- r = self.test_ratelimit(req.environ, start_response)
+ r = self.test_ratelimit(req.environ.copy(), start_response)
self.assertEqual(r[0], b'Slow down')
mock_sleep(.1)
- r = self.test_ratelimit(req.environ, start_response)
+ r = self.test_ratelimit(req.environ.copy(), start_response)
self.assertEqual(r[0], b'Slow down')
mock_sleep(.1)
- r = self.test_ratelimit(req.environ, start_response)
+ r = self.test_ratelimit(req.environ.copy(), start_response)
self.assertEqual(r[0], b'Some Content')
def test_ratelimit_max_rate_double_container_listing(self):
@@ -437,17 +436,17 @@ class TestRateLimit(unittest.TestCase):
lambda *args, **kwargs: {}):
time_override = [0, 0, 0, 0, None]
# simulates 4 requests coming in at same time, then sleeping
- r = self.test_ratelimit(req.environ, start_response)
+ r = self.test_ratelimit(req.environ.copy(), start_response)
mock_sleep(.1)
- r = self.test_ratelimit(req.environ, start_response)
+ r = self.test_ratelimit(req.environ.copy(), start_response)
mock_sleep(.1)
- r = self.test_ratelimit(req.environ, start_response)
+ r = self.test_ratelimit(req.environ.copy(), start_response)
self.assertEqual(r[0], b'Slow down')
mock_sleep(.1)
- r = self.test_ratelimit(req.environ, start_response)
+ r = self.test_ratelimit(req.environ.copy(), start_response)
self.assertEqual(r[0], b'Slow down')
mock_sleep(.1)
- r = self.test_ratelimit(req.environ, start_response)
+ r = self.test_ratelimit(req.environ.copy(), start_response)
self.assertEqual(r[0], b'Some Content')
mc = self.test_ratelimit.memcache_client
try:
@@ -466,9 +465,6 @@ class TestRateLimit(unittest.TestCase):
the_app = ratelimit.filter_factory(conf_dict)(FakeApp())
the_app.memcache_client = fake_memcache
- req = lambda: None
- req.method = 'PUT'
- req.environ = {}
class rate_caller(threading.Thread):
@@ -478,8 +474,8 @@ class TestRateLimit(unittest.TestCase):
def run(self):
for j in range(num_calls):
- self.result = the_app.handle_ratelimit(req, self.myname,
- 'c', None)
+ self.result = the_app.handle_ratelimit(
+ FakeReq('PUT'), self.myname, 'c', None)
with mock.patch('swift.common.middleware.ratelimit.get_account_info',
lambda *args, **kwargs: {}):
@@ -541,7 +537,9 @@ class TestRateLimit(unittest.TestCase):
current_rate = 13
num_calls = 5
conf_dict = {'account_ratelimit': current_rate}
- self.test_ratelimit = ratelimit.filter_factory(conf_dict)(FakeApp())
+ fake_app = FakeApp()
+ fake_app.skip_handled_check = True
+ self.test_ratelimit = ratelimit.filter_factory(conf_dict)(fake_app)
req = Request.blank('/v1/a')
req.environ['swift.cache'] = None
make_app_call = lambda: self.test_ratelimit(req.environ,
@@ -551,6 +549,24 @@ class TestRateLimit(unittest.TestCase):
time_took = time.time() - begin
self.assertEqual(round(time_took, 1), 0) # no memcache, no limiting
+ def test_already_handled(self):
+ current_rate = 13
+ num_calls = 5
+ conf_dict = {'container_listing_ratelimit_0': current_rate}
+ self.test_ratelimit = ratelimit.filter_factory(conf_dict)(FakeApp())
+ fake_cache = FakeMemcache()
+ fake_cache.set(
+ get_cache_key('a', 'c'),
+ {'object_count': 1})
+ req = Request.blank('/v1/a/c', environ={'swift.cache': fake_cache})
+ req.environ['swift.ratelimit.handled'] = True
+ make_app_call = lambda: self.test_ratelimit(req.environ,
+ start_response)
+ begin = time.time()
+ self._run(make_app_call, num_calls, current_rate, check_time=False)
+ time_took = time.time() - begin
+ self.assertEqual(round(time_took, 1), 0) # no memcache, no limiting
+
def test_restarting_memcache(self):
current_rate = 2
num_calls = 5
diff --git a/test/unit/common/middleware/test_symlink.py b/test/unit/common/middleware/test_symlink.py
index 01875ba48..a5e6cbab4 100644
--- a/test/unit/common/middleware/test_symlink.py
+++ b/test/unit/common/middleware/test_symlink.py
@@ -24,6 +24,7 @@ from swift.common import swob
from swift.common.middleware import symlink, copy, versioned_writes, \
listing_formats
from swift.common.swob import Request
+from swift.common.request_helpers import get_reserved_name
from swift.common.utils import MD5_OF_EMPTY_STRING, get_swift_info
from test.unit.common.middleware.helpers import FakeSwift
from test.unit.common.middleware.test_versioned_writes import FakeCache
@@ -618,6 +619,55 @@ class TestSymlinkMiddleware(TestSymlinkMiddlewareBase):
self.assertEqual(req_headers, calls[1].headers)
self.assertFalse(calls[2:])
+ def test_get_symlink_to_reserved_object(self):
+ cont = get_reserved_name('versioned')
+ obj = get_reserved_name('symlink', '9999998765.99999')
+ symlink_target = "%s/%s" % (cont, obj)
+ version_path = '/v1/a/%s' % symlink_target
+ self.app.register('GET', '/v1/a/versioned/symlink', swob.HTTPOk, {
+ symlink.TGT_OBJ_SYSMETA_SYMLINK_HDR: symlink_target,
+ symlink.ALLOW_RESERVED_NAMES: 'true',
+ 'x-object-sysmeta-symlink-target-etag': MD5_OF_EMPTY_STRING,
+ 'x-object-sysmeta-symlink-target-bytes': '0',
+ })
+ self.app.register('GET', version_path, swob.HTTPOk, {})
+ req = Request.blank('/v1/a/versioned/symlink', headers={
+ 'Range': 'foo', 'If-Match': 'bar'})
+ status, headers, body = self.call_sym(req)
+ self.assertEqual(status, '200 OK')
+ self.assertIn(('Content-Location', version_path), headers)
+ self.assertEqual(len(self.authorized), 1)
+ self.assertNotIn('X-Backend-Allow-Reserved-Names',
+ self.app.calls_with_headers[0])
+ call_headers = self.app.calls_with_headers[1].headers
+ self.assertEqual('true', call_headers[
+ 'X-Backend-Allow-Reserved-Names'])
+ self.assertEqual('foo', call_headers['Range'])
+ self.assertEqual('bar', call_headers['If-Match'])
+
+ def test_get_symlink_to_reserved_symlink(self):
+ cont = get_reserved_name('versioned')
+ obj = get_reserved_name('symlink', '9999998765.99999')
+ symlink_target = "%s/%s" % (cont, obj)
+ version_path = '/v1/a/%s' % symlink_target
+ self.app.register('GET', '/v1/a/versioned/symlink', swob.HTTPOk, {
+ symlink.TGT_OBJ_SYSMETA_SYMLINK_HDR: symlink_target,
+ symlink.ALLOW_RESERVED_NAMES: 'true',
+ 'x-object-sysmeta-symlink-target-etag': MD5_OF_EMPTY_STRING,
+ 'x-object-sysmeta-symlink-target-bytes': '0',
+ })
+ self.app.register('GET', version_path, swob.HTTPOk, {
+ symlink.TGT_OBJ_SYSMETA_SYMLINK_HDR: 'unversioned/obj',
+ 'ETag': MD5_OF_EMPTY_STRING,
+ })
+ self.app.register('GET', '/v1/a/unversioned/obj', swob.HTTPOk, {
+ })
+ req = Request.blank('/v1/a/versioned/symlink')
+ status, headers, body = self.call_sym(req)
+ self.assertEqual(status, '200 OK')
+ self.assertIn(('Content-Location', '/v1/a/unversioned/obj'), headers)
+ self.assertEqual(len(self.authorized), 2)
+
def test_symlink_too_deep(self):
self.app.register('GET', '/v1/a/c/symlink', swob.HTTPOk,
{'X-Object-Sysmeta-Symlink-Target': 'c/sym1'})
diff --git a/test/unit/common/test_db.py b/test/unit/common/test_db.py
index eac9f2394..0f3a308dc 100644
--- a/test/unit/common/test_db.py
+++ b/test/unit/common/test_db.py
@@ -1154,7 +1154,7 @@ class TestDatabaseBroker(unittest.TestCase):
return broker
# only testing _reclaim_metadata here
- @patch.object(DatabaseBroker, '_reclaim')
+ @patch.object(DatabaseBroker, '_reclaim', return_value='')
def test_metadata(self, mock_reclaim):
# Initializes a good broker for us
broker = self.get_replication_info_tester(metadata=True)
diff --git a/test/unit/common/test_memcached.py b/test/unit/common/test_memcached.py
index 26cfab555..61ad6082d 100644
--- a/test/unit/common/test_memcached.py
+++ b/test/unit/common/test_memcached.py
@@ -20,6 +20,7 @@ from collections import defaultdict
import errno
from hashlib import md5
import io
+import logging
import six
import socket
import time
@@ -184,9 +185,14 @@ class TestMemcached(unittest.TestCase):
def setUp(self):
self.logger = debug_logger()
- patcher = mock.patch('swift.common.memcached.logging', self.logger)
- self.addCleanup(patcher.stop)
- patcher.start()
+
+ def test_logger_kwarg(self):
+ server_socket = '%s:%s' % ('[::1]', 11211)
+ client = memcached.MemcacheRing([server_socket])
+ self.assertIs(client.logger, logging.getLogger())
+
+ client = memcached.MemcacheRing([server_socket], logger=self.logger)
+ self.assertIs(client.logger, self.logger)
def test_get_conns(self):
sock1 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
@@ -202,7 +208,8 @@ class TestMemcached(unittest.TestCase):
sock2ipport = '%s:%s' % (sock2ip, memcached.DEFAULT_MEMCACHED_PORT)
# We're deliberately using sock2ip (no port) here to test that the
# default port is used.
- memcache_client = memcached.MemcacheRing([sock1ipport, sock2ip])
+ memcache_client = memcached.MemcacheRing([sock1ipport, sock2ip],
+ logger=self.logger)
one = two = True
while one or two: # Run until we match hosts one and two
key = uuid4().hex.encode('ascii')
@@ -230,7 +237,8 @@ class TestMemcached(unittest.TestCase):
sock.listen(1)
sock_addr = sock.getsockname()
server_socket = '[%s]:%s' % (sock_addr[0], sock_addr[1])
- memcache_client = memcached.MemcacheRing([server_socket])
+ memcache_client = memcached.MemcacheRing([server_socket],
+ logger=self.logger)
key = uuid4().hex.encode('ascii')
for conn in memcache_client._get_conns(key):
peer_sockaddr = conn[2].getpeername()
@@ -251,7 +259,8 @@ class TestMemcached(unittest.TestCase):
server_socket = '[%s]:%s' % (sock_addr[0], sock_addr[1])
server_host = '[%s]' % sock_addr[0]
memcached.DEFAULT_MEMCACHED_PORT = sock_addr[1]
- memcache_client = memcached.MemcacheRing([server_host])
+ memcache_client = memcached.MemcacheRing([server_host],
+ logger=self.logger)
key = uuid4().hex.encode('ascii')
for conn in memcache_client._get_conns(key):
peer_sockaddr = conn[2].getpeername()
@@ -265,7 +274,7 @@ class TestMemcached(unittest.TestCase):
with self.assertRaises(ValueError):
# IPv6 address with missing [] is invalid
server_socket = '%s:%s' % ('::1', 11211)
- memcached.MemcacheRing([server_socket])
+ memcached.MemcacheRing([server_socket], logger=self.logger)
def test_get_conns_hostname(self):
with patch('swift.common.memcached.socket.getaddrinfo') as addrinfo:
@@ -279,7 +288,8 @@ class TestMemcached(unittest.TestCase):
addrinfo.return_value = [(socket.AF_INET,
socket.SOCK_STREAM, 0, '',
('127.0.0.1', sock_addr[1]))]
- memcache_client = memcached.MemcacheRing([server_socket])
+ memcache_client = memcached.MemcacheRing([server_socket],
+ logger=self.logger)
key = uuid4().hex.encode('ascii')
for conn in memcache_client._get_conns(key):
peer_sockaddr = conn[2].getpeername()
@@ -304,7 +314,8 @@ class TestMemcached(unittest.TestCase):
addrinfo.return_value = [(socket.AF_INET6,
socket.SOCK_STREAM, 0, '',
('::1', sock_addr[1]))]
- memcache_client = memcached.MemcacheRing([server_socket])
+ memcache_client = memcached.MemcacheRing([server_socket],
+ logger=self.logger)
key = uuid4().hex.encode('ascii')
for conn in memcache_client._get_conns(key):
peer_sockaddr = conn[2].getpeername()
@@ -317,7 +328,8 @@ class TestMemcached(unittest.TestCase):
sock.close()
def test_set_get_json(self):
- memcache_client = memcached.MemcacheRing(['1.2.3.4:11211'])
+ memcache_client = memcached.MemcacheRing(['1.2.3.4:11211'],
+ logger=self.logger)
mock = MockMemcached()
memcache_client._client_cache['1.2.3.4:11211'] = MockedMemcachePool(
[(mock, mock)] * 2)
@@ -350,7 +362,8 @@ class TestMemcached(unittest.TestCase):
self.assertAlmostEqual(float(cache_timeout), esttimeout, delta=1)
def test_get_failed_connection_mid_request(self):
- memcache_client = memcached.MemcacheRing(['1.2.3.4:11211'])
+ memcache_client = memcached.MemcacheRing(['1.2.3.4:11211'],
+ logger=self.logger)
mock = MockMemcached()
memcache_client._client_cache['1.2.3.4:11211'] = MockedMemcachePool(
[(mock, mock)] * 2)
@@ -365,18 +378,17 @@ class TestMemcached(unittest.TestCase):
# force the logging through the DebugLogger instead of the nose
# handler. This will use stdout, so we can assert that no stack trace
# is logged.
- logger = debug_logger()
- with patch("sys.stdout", fake_stdout),\
- patch('swift.common.memcached.logging', logger):
+ with patch("sys.stdout", fake_stdout):
mock.read_return_empty_str = True
self.assertIsNone(memcache_client.get('some_key'))
- log_lines = logger.get_lines_for_level('error')
+ log_lines = self.logger.get_lines_for_level('error')
self.assertIn('Error talking to memcached', log_lines[0])
self.assertFalse(log_lines[1:])
self.assertNotIn("Traceback", fake_stdout.getvalue())
def test_incr(self):
- memcache_client = memcached.MemcacheRing(['1.2.3.4:11211'])
+ memcache_client = memcached.MemcacheRing(['1.2.3.4:11211'],
+ logger=self.logger)
mock = MockMemcached()
memcache_client._client_cache['1.2.3.4:11211'] = MockedMemcachePool(
[(mock, mock)] * 2)
@@ -396,7 +408,8 @@ class TestMemcached(unittest.TestCase):
self.assertTrue(mock.close_called)
def test_incr_failed_connection_mid_request(self):
- memcache_client = memcached.MemcacheRing(['1.2.3.4:11211'])
+ memcache_client = memcached.MemcacheRing(['1.2.3.4:11211'],
+ logger=self.logger)
mock = MockMemcached()
memcache_client._client_cache['1.2.3.4:11211'] = MockedMemcachePool(
[(mock, mock)] * 2)
@@ -411,19 +424,18 @@ class TestMemcached(unittest.TestCase):
# force the logging through the DebugLogger instead of the nose
# handler. This will use stdout, so we can assert that no stack trace
# is logged.
- logger = debug_logger()
- with patch("sys.stdout", fake_stdout), \
- patch('swift.common.memcached.logging', logger):
+ with patch("sys.stdout", fake_stdout):
mock.read_return_empty_str = True
self.assertRaises(memcached.MemcacheConnectionError,
memcache_client.incr, 'some_key', delta=1)
- log_lines = logger.get_lines_for_level('error')
+ log_lines = self.logger.get_lines_for_level('error')
self.assertIn('Error talking to memcached', log_lines[0])
self.assertFalse(log_lines[1:])
self.assertNotIn('Traceback', fake_stdout.getvalue())
def test_incr_w_timeout(self):
- memcache_client = memcached.MemcacheRing(['1.2.3.4:11211'])
+ memcache_client = memcached.MemcacheRing(['1.2.3.4:11211'],
+ logger=self.logger)
mock = MockMemcached()
memcache_client._client_cache['1.2.3.4:11211'] = MockedMemcachePool(
[(mock, mock)] * 2)
@@ -455,7 +467,8 @@ class TestMemcached(unittest.TestCase):
self.assertEqual(mock.cache, {cache_key: (b'0', b'0', b'10')})
def test_decr(self):
- memcache_client = memcached.MemcacheRing(['1.2.3.4:11211'])
+ memcache_client = memcached.MemcacheRing(['1.2.3.4:11211'],
+ logger=self.logger)
mock = MockMemcached()
memcache_client._client_cache['1.2.3.4:11211'] = MockedMemcachePool(
[(mock, mock)] * 2)
@@ -473,7 +486,7 @@ class TestMemcached(unittest.TestCase):
def test_retry(self):
memcache_client = memcached.MemcacheRing(
- ['1.2.3.4:11211', '1.2.3.5:11211'])
+ ['1.2.3.4:11211', '1.2.3.5:11211'], logger=self.logger)
mock1 = ExplodingMockMemcached()
mock2 = MockMemcached()
memcache_client._client_cache['1.2.3.4:11211'] = MockedMemcachePool(
@@ -500,7 +513,8 @@ class TestMemcached(unittest.TestCase):
[])
def test_delete(self):
- memcache_client = memcached.MemcacheRing(['1.2.3.4:11211'])
+ memcache_client = memcached.MemcacheRing(['1.2.3.4:11211'],
+ logger=self.logger)
mock = MockMemcached()
memcache_client._client_cache['1.2.3.4:11211'] = MockedMemcachePool(
[(mock, mock)] * 2)
@@ -510,7 +524,8 @@ class TestMemcached(unittest.TestCase):
self.assertIsNone(memcache_client.get('some_key'))
def test_multi(self):
- memcache_client = memcached.MemcacheRing(['1.2.3.4:11211'])
+ memcache_client = memcached.MemcacheRing(['1.2.3.4:11211'],
+ logger=self.logger)
mock = MockMemcached()
memcache_client._client_cache['1.2.3.4:11211'] = MockedMemcachePool(
[(mock, mock)] * 2)
@@ -560,7 +575,8 @@ class TestMemcached(unittest.TestCase):
def test_multi_delete(self):
memcache_client = memcached.MemcacheRing(['1.2.3.4:11211',
- '1.2.3.5:11211'])
+ '1.2.3.5:11211'],
+ logger=self.logger)
mock1 = MockMemcached()
mock2 = MockMemcached()
memcache_client._client_cache['1.2.3.4:11211'] = MockedMemcachePool(
@@ -598,7 +614,8 @@ class TestMemcached(unittest.TestCase):
def test_serialization(self):
memcache_client = memcached.MemcacheRing(['1.2.3.4:11211'],
- allow_pickle=True)
+ allow_pickle=True,
+ logger=self.logger)
mock = MockMemcached()
memcache_client._client_cache['1.2.3.4:11211'] = MockedMemcachePool(
[(mock, mock)] * 2)
@@ -643,7 +660,8 @@ class TestMemcached(unittest.TestCase):
mock_sock.connect = wait_connect
memcache_client = memcached.MemcacheRing(['1.2.3.4:11211'],
- connect_timeout=10)
+ connect_timeout=10,
+ logger=self.logger)
# sanity
self.assertEqual(1, len(memcache_client._client_cache))
for server, pool in memcache_client._client_cache.items():
@@ -702,7 +720,8 @@ class TestMemcached(unittest.TestCase):
memcache_client = memcached.MemcacheRing(['1.2.3.4:11211',
'1.2.3.5:11211'],
io_timeout=0.5,
- pool_timeout=0.1)
+ pool_timeout=0.1,
+ logger=self.logger)
# Hand out a couple slow connections to 1.2.3.5, leaving 1.2.3.4
# fast. All ten (10) clients should try to talk to .5 first, and
diff --git a/test/unit/common/test_swob.py b/test/unit/common/test_swob.py
index 2d645234a..ccbe6d2f9 100644
--- a/test/unit/common/test_swob.py
+++ b/test/unit/common/test_swob.py
@@ -914,6 +914,22 @@ class TestRequest(unittest.TestCase):
self.assertEqual(used_req[0].path, '/hi/there')
self.assertEqual(resp.status_int, 200)
+ def test_wsgify_method(self):
+ class _wsgi_class(object):
+ def __init__(self):
+ self.used_req = []
+
+ @swob.wsgify
+ def __call__(self, req):
+ self.used_req.append(req)
+ return swob.Response(b'200 OK')
+
+ req = swob.Request.blank('/hi/there')
+ handler = _wsgi_class()
+ resp = req.get_response(handler)
+ self.assertIs(handler.used_req[0].environ, req.environ)
+ self.assertEqual(resp.status_int, 200)
+
def test_wsgify_raise(self):
used_req = []
diff --git a/test/unit/common/test_utils.py b/test/unit/common/test_utils.py
index b91be1329..26f70656b 100644
--- a/test/unit/common/test_utils.py
+++ b/test/unit/common/test_utils.py
@@ -6080,6 +6080,136 @@ class TestAuditLocationGenerator(unittest.TestCase):
self.assertEqual(list(locations),
[(obj_path, "drive", "partition2")])
+ def test_hooks(self):
+ with temptree([]) as tmpdir:
+ logger = FakeLogger()
+ data = os.path.join(tmpdir, "drive", "data")
+ os.makedirs(data)
+ partition = os.path.join(data, "partition1")
+ os.makedirs(partition)
+ suffix = os.path.join(partition, "suffix1")
+ os.makedirs(suffix)
+ hash_path = os.path.join(suffix, "hash1")
+ os.makedirs(hash_path)
+ obj_path = os.path.join(hash_path, "obj1.dat")
+ with open(obj_path, "w"):
+ pass
+ meta_path = os.path.join(hash_path, "obj1.meta")
+ with open(meta_path, "w"):
+ pass
+ hook_pre_device = MagicMock()
+ hook_post_device = MagicMock()
+ hook_pre_partition = MagicMock()
+ hook_post_partition = MagicMock()
+ hook_pre_suffix = MagicMock()
+ hook_post_suffix = MagicMock()
+ hook_pre_hash = MagicMock()
+ hook_post_hash = MagicMock()
+ locations = utils.audit_location_generator(
+ tmpdir, "data", ".dat", mount_check=False, logger=logger,
+ hook_pre_device=hook_pre_device,
+ hook_post_device=hook_post_device,
+ hook_pre_partition=hook_pre_partition,
+ hook_post_partition=hook_post_partition,
+ hook_pre_suffix=hook_pre_suffix,
+ hook_post_suffix=hook_post_suffix,
+ hook_pre_hash=hook_pre_hash,
+ hook_post_hash=hook_post_hash
+ )
+ list(locations)
+ hook_pre_device.assert_called_once_with(os.path.join(tmpdir,
+ "drive"))
+ hook_post_device.assert_called_once_with(os.path.join(tmpdir,
+ "drive"))
+ hook_pre_partition.assert_called_once_with(partition)
+ hook_post_partition.assert_called_once_with(partition)
+ hook_pre_suffix.assert_called_once_with(suffix)
+ hook_post_suffix.assert_called_once_with(suffix)
+ hook_pre_hash.assert_called_once_with(hash_path)
+ hook_post_hash.assert_called_once_with(hash_path)
+
+ def test_filters(self):
+ with temptree([]) as tmpdir:
+ logger = FakeLogger()
+ data = os.path.join(tmpdir, "drive", "data")
+ os.makedirs(data)
+ partition = os.path.join(data, "partition1")
+ os.makedirs(partition)
+ suffix = os.path.join(partition, "suffix1")
+ os.makedirs(suffix)
+ hash_path = os.path.join(suffix, "hash1")
+ os.makedirs(hash_path)
+ obj_path = os.path.join(hash_path, "obj1.dat")
+ with open(obj_path, "w"):
+ pass
+ meta_path = os.path.join(hash_path, "obj1.meta")
+ with open(meta_path, "w"):
+ pass
+
+ def audit_location_generator(**kwargs):
+ return utils.audit_location_generator(
+ tmpdir, "data", ".dat", mount_check=False, logger=logger,
+ **kwargs)
+
+ # Return the list of devices
+
+ with patch('os.listdir', side_effect=os.listdir) as m_listdir:
+ # devices_filter
+ m_listdir.reset_mock()
+ devices_filter = MagicMock(return_value=["drive"])
+ list(audit_location_generator(devices_filter=devices_filter))
+ devices_filter.assert_called_once_with(tmpdir, ["drive"])
+ self.assertIn(((data,),), m_listdir.call_args_list)
+
+ m_listdir.reset_mock()
+ devices_filter = MagicMock(return_value=[])
+ list(audit_location_generator(devices_filter=devices_filter))
+ devices_filter.assert_called_once_with(tmpdir, ["drive"])
+ self.assertNotIn(((data,),), m_listdir.call_args_list)
+
+ # partitions_filter
+ m_listdir.reset_mock()
+ partitions_filter = MagicMock(return_value=["partition1"])
+ list(audit_location_generator(
+ partitions_filter=partitions_filter))
+ partitions_filter.assert_called_once_with(data,
+ ["partition1"])
+ self.assertIn(((partition,),), m_listdir.call_args_list)
+
+ m_listdir.reset_mock()
+ partitions_filter = MagicMock(return_value=[])
+ list(audit_location_generator(
+ partitions_filter=partitions_filter))
+ partitions_filter.assert_called_once_with(data,
+ ["partition1"])
+ self.assertNotIn(((partition,),), m_listdir.call_args_list)
+
+ # suffixes_filter
+ m_listdir.reset_mock()
+ suffixes_filter = MagicMock(return_value=["suffix1"])
+ list(audit_location_generator(suffixes_filter=suffixes_filter))
+ suffixes_filter.assert_called_once_with(partition, ["suffix1"])
+ self.assertIn(((suffix,),), m_listdir.call_args_list)
+
+ m_listdir.reset_mock()
+ suffixes_filter = MagicMock(return_value=[])
+ list(audit_location_generator(suffixes_filter=suffixes_filter))
+ suffixes_filter.assert_called_once_with(partition, ["suffix1"])
+ self.assertNotIn(((suffix,),), m_listdir.call_args_list)
+
+ # hashes_filter
+ m_listdir.reset_mock()
+ hashes_filter = MagicMock(return_value=["hash1"])
+ list(audit_location_generator(hashes_filter=hashes_filter))
+ hashes_filter.assert_called_once_with(suffix, ["hash1"])
+ self.assertIn(((hash_path,),), m_listdir.call_args_list)
+
+ m_listdir.reset_mock()
+ hashes_filter = MagicMock(return_value=[])
+ list(audit_location_generator(hashes_filter=hashes_filter))
+ hashes_filter.assert_called_once_with(suffix, ["hash1"])
+ self.assertNotIn(((hash_path,),), m_listdir.call_args_list)
+
class TestGreenAsyncPile(unittest.TestCase):
@@ -7224,7 +7354,8 @@ class TestShardRange(unittest.TestCase):
upper='', object_count=0, bytes_used=0,
meta_timestamp=ts_1.internal, deleted=0,
state=utils.ShardRange.FOUND,
- state_timestamp=ts_1.internal, epoch=None)
+ state_timestamp=ts_1.internal, epoch=None,
+ reported=0)
assert_initialisation_ok(dict(empty_run, name='a/c', timestamp=ts_1),
expect)
assert_initialisation_ok(dict(name='a/c', timestamp=ts_1), expect)
@@ -7233,11 +7364,13 @@ class TestShardRange(unittest.TestCase):
upper='u', object_count=2, bytes_used=10,
meta_timestamp=ts_2, deleted=0,
state=utils.ShardRange.CREATED,
- state_timestamp=ts_3.internal, epoch=ts_4)
+ state_timestamp=ts_3.internal, epoch=ts_4,
+ reported=0)
expect.update({'lower': 'l', 'upper': 'u', 'object_count': 2,
'bytes_used': 10, 'meta_timestamp': ts_2.internal,
'state': utils.ShardRange.CREATED,
- 'state_timestamp': ts_3.internal, 'epoch': ts_4})
+ 'state_timestamp': ts_3.internal, 'epoch': ts_4,
+ 'reported': 0})
assert_initialisation_ok(good_run.copy(), expect)
# obj count and bytes used as int strings
@@ -7255,6 +7388,11 @@ class TestShardRange(unittest.TestCase):
assert_initialisation_ok(good_deleted,
dict(expect, deleted=1))
+ good_reported = good_run.copy()
+ good_reported['reported'] = 1
+ assert_initialisation_ok(good_reported,
+ dict(expect, reported=1))
+
assert_initialisation_fails(dict(good_run, timestamp='water balloon'))
assert_initialisation_fails(
@@ -7293,7 +7431,7 @@ class TestShardRange(unittest.TestCase):
'upper': upper, 'object_count': 10, 'bytes_used': 100,
'meta_timestamp': ts_2.internal, 'deleted': 0,
'state': utils.ShardRange.FOUND, 'state_timestamp': ts_3.internal,
- 'epoch': ts_4}
+ 'epoch': ts_4, 'reported': 0}
self.assertEqual(expected, sr_dict)
self.assertIsInstance(sr_dict['lower'], six.string_types)
self.assertIsInstance(sr_dict['upper'], six.string_types)
@@ -7308,6 +7446,14 @@ class TestShardRange(unittest.TestCase):
for key in sr_dict:
bad_dict = dict(sr_dict)
bad_dict.pop(key)
+ if key == 'reported':
+ # This was added after the fact, and we need to be able to eat
+ # data from old servers
+ utils.ShardRange.from_dict(bad_dict)
+ utils.ShardRange(**bad_dict)
+ continue
+
+ # The rest were present from the beginning
with self.assertRaises(KeyError):
utils.ShardRange.from_dict(bad_dict)
# But __init__ still (generally) works!
diff --git a/test/unit/container/test_backend.py b/test/unit/container/test_backend.py
index 33fd5298e..37308c154 100644
--- a/test/unit/container/test_backend.py
+++ b/test/unit/container/test_backend.py
@@ -28,6 +28,7 @@ from contextlib import contextmanager
import sqlite3
import pickle
import json
+import itertools
import six
@@ -558,6 +559,98 @@ class TestContainerBroker(unittest.TestCase):
broker.reclaim(Timestamp.now().internal, time())
broker.delete_db(Timestamp.now().internal)
+ def test_batch_reclaim(self):
+ num_of_objects = 60
+ obj_specs = []
+ now = time()
+ top_of_the_minute = now - (now % 60)
+ c = itertools.cycle([True, False])
+ for m, is_deleted in six.moves.zip(range(num_of_objects), c):
+ offset = top_of_the_minute - (m * 60)
+ obj_specs.append((Timestamp(offset), is_deleted))
+ random.seed(now)
+ random.shuffle(obj_specs)
+ policy_indexes = list(p.idx for p in POLICIES)
+ broker = ContainerBroker(':memory:', account='test_account',
+ container='test_container')
+ broker.initialize(Timestamp('1').internal, 0)
+ for i, obj_spec in enumerate(obj_specs):
+ # with object12 before object2 and shuffled ts.internal we
+ # shouldn't be able to accidently rely on any implicit ordering
+ obj_name = 'object%s' % i
+ pidx = random.choice(policy_indexes)
+ ts, is_deleted = obj_spec
+ if is_deleted:
+ broker.delete_object(obj_name, ts.internal, pidx)
+ else:
+ broker.put_object(obj_name, ts.internal, 0, 'text/plain',
+ 'etag', storage_policy_index=pidx)
+
+ def count_reclaimable(conn, reclaim_age):
+ return conn.execute(
+ "SELECT count(*) FROM object "
+ "WHERE deleted = 1 AND created_at < ?", (reclaim_age,)
+ ).fetchone()[0]
+
+ # This is intended to divide the set of timestamps exactly in half
+ # regardless of the value of now
+ reclaim_age = top_of_the_minute + 1 - (num_of_objects / 2 * 60)
+ with broker.get() as conn:
+ self.assertEqual(count_reclaimable(conn, reclaim_age),
+ num_of_objects / 4)
+
+ orig__reclaim = broker._reclaim
+ trace = []
+
+ def tracing_reclaim(conn, age_timestamp, marker):
+ trace.append((age_timestamp, marker,
+ count_reclaimable(conn, age_timestamp)))
+ return orig__reclaim(conn, age_timestamp, marker)
+
+ with mock.patch.object(broker, '_reclaim', new=tracing_reclaim), \
+ mock.patch('swift.common.db.RECLAIM_PAGE_SIZE', 10):
+ broker.reclaim(reclaim_age, reclaim_age)
+
+ with broker.get() as conn:
+ self.assertEqual(count_reclaimable(conn, reclaim_age), 0)
+ self.assertEqual(3, len(trace), trace)
+ self.assertEqual([age for age, marker, reclaimable in trace],
+ [reclaim_age] * 3)
+ # markers are in-order
+ self.assertLess(trace[0][1], trace[1][1])
+ self.assertLess(trace[1][1], trace[2][1])
+ # reclaimable count gradually decreases
+ # generally, count1 > count2 > count3, but because of the randomness
+ # we may occassionally have count1 == count2 or count2 == count3
+ self.assertGreaterEqual(trace[0][2], trace[1][2])
+ self.assertGreaterEqual(trace[1][2], trace[2][2])
+ # technically, this might happen occasionally, but *really* rarely
+ self.assertTrue(trace[0][2] > trace[1][2] or
+ trace[1][2] > trace[2][2])
+
+ def test_reclaim_with_duplicate_names(self):
+ broker = ContainerBroker(':memory:', account='test_account',
+ container='test_container')
+ broker.initialize(Timestamp('1').internal, 0)
+ now = time()
+ ages_ago = Timestamp(now - (3 * 7 * 24 * 60 * 60))
+ for i in range(10):
+ for spidx in range(10):
+ obj_name = 'object%s' % i
+ broker.delete_object(obj_name, ages_ago.internal, spidx)
+ reclaim_age = now - (2 * 7 * 24 * 60 * 60)
+ with broker.get() as conn:
+ self.assertEqual(conn.execute(
+ "SELECT count(*) FROM object "
+ "WHERE created_at < ?", (reclaim_age,)
+ ).fetchone()[0], 100)
+ with mock.patch('swift.common.db.RECLAIM_PAGE_SIZE', 10):
+ broker.reclaim(reclaim_age, reclaim_age)
+ with broker.get() as conn:
+ self.assertEqual(conn.execute(
+ "SELECT count(*) FROM object "
+ ).fetchone()[0], 0)
+
@with_tempdir
def test_reclaim_deadlock(self, tempdir):
db_path = os.path.join(
@@ -642,10 +735,12 @@ class TestContainerBroker(unittest.TestCase):
self.assertEqual(info['put_timestamp'], start.internal)
self.assertTrue(Timestamp(info['created_at']) >= start)
self.assertEqual(info['delete_timestamp'], '0')
- if self.__class__ in (TestContainerBrokerBeforeMetadata,
- TestContainerBrokerBeforeXSync,
- TestContainerBrokerBeforeSPI,
- TestContainerBrokerBeforeShardRanges):
+ if self.__class__ in (
+ TestContainerBrokerBeforeMetadata,
+ TestContainerBrokerBeforeXSync,
+ TestContainerBrokerBeforeSPI,
+ TestContainerBrokerBeforeShardRanges,
+ TestContainerBrokerBeforeShardRangeReportedColumn):
self.assertEqual(info['status_changed_at'], '0')
else:
self.assertEqual(info['status_changed_at'],
@@ -932,6 +1027,8 @@ class TestContainerBroker(unittest.TestCase):
"SELECT object_count FROM shard_range").fetchone()[0], 0)
self.assertEqual(conn.execute(
"SELECT bytes_used FROM shard_range").fetchone()[0], 0)
+ self.assertEqual(conn.execute(
+ "SELECT reported FROM shard_range").fetchone()[0], 0)
# Reput same event
broker.merge_shard_ranges(
@@ -957,6 +1054,64 @@ class TestContainerBroker(unittest.TestCase):
"SELECT object_count FROM shard_range").fetchone()[0], 0)
self.assertEqual(conn.execute(
"SELECT bytes_used FROM shard_range").fetchone()[0], 0)
+ self.assertEqual(conn.execute(
+ "SELECT reported FROM shard_range").fetchone()[0], 0)
+
+ # Mark it as reported
+ broker.merge_shard_ranges(
+ ShardRange('"a/{<shardrange \'&\' name>}"', timestamp,
+ 'low', 'up', meta_timestamp=meta_timestamp,
+ reported=True))
+ with broker.get() as conn:
+ self.assertEqual(conn.execute(
+ "SELECT name FROM shard_range").fetchone()[0],
+ '"a/{<shardrange \'&\' name>}"')
+ self.assertEqual(conn.execute(
+ "SELECT timestamp FROM shard_range").fetchone()[0],
+ timestamp)
+ self.assertEqual(conn.execute(
+ "SELECT meta_timestamp FROM shard_range").fetchone()[0],
+ meta_timestamp)
+ self.assertEqual(conn.execute(
+ "SELECT lower FROM shard_range").fetchone()[0], 'low')
+ self.assertEqual(conn.execute(
+ "SELECT upper FROM shard_range").fetchone()[0], 'up')
+ self.assertEqual(conn.execute(
+ "SELECT deleted FROM shard_range").fetchone()[0], 0)
+ self.assertEqual(conn.execute(
+ "SELECT object_count FROM shard_range").fetchone()[0], 0)
+ self.assertEqual(conn.execute(
+ "SELECT bytes_used FROM shard_range").fetchone()[0], 0)
+ self.assertEqual(conn.execute(
+ "SELECT reported FROM shard_range").fetchone()[0], 1)
+
+ # Reporting latches it
+ broker.merge_shard_ranges(
+ ShardRange('"a/{<shardrange \'&\' name>}"', timestamp,
+ 'low', 'up', meta_timestamp=meta_timestamp,
+ reported=False))
+ with broker.get() as conn:
+ self.assertEqual(conn.execute(
+ "SELECT name FROM shard_range").fetchone()[0],
+ '"a/{<shardrange \'&\' name>}"')
+ self.assertEqual(conn.execute(
+ "SELECT timestamp FROM shard_range").fetchone()[0],
+ timestamp)
+ self.assertEqual(conn.execute(
+ "SELECT meta_timestamp FROM shard_range").fetchone()[0],
+ meta_timestamp)
+ self.assertEqual(conn.execute(
+ "SELECT lower FROM shard_range").fetchone()[0], 'low')
+ self.assertEqual(conn.execute(
+ "SELECT upper FROM shard_range").fetchone()[0], 'up')
+ self.assertEqual(conn.execute(
+ "SELECT deleted FROM shard_range").fetchone()[0], 0)
+ self.assertEqual(conn.execute(
+ "SELECT object_count FROM shard_range").fetchone()[0], 0)
+ self.assertEqual(conn.execute(
+ "SELECT bytes_used FROM shard_range").fetchone()[0], 0)
+ self.assertEqual(conn.execute(
+ "SELECT reported FROM shard_range").fetchone()[0], 1)
# Put new event
timestamp = next(self.ts).internal
@@ -984,11 +1139,14 @@ class TestContainerBroker(unittest.TestCase):
"SELECT object_count FROM shard_range").fetchone()[0], 1)
self.assertEqual(conn.execute(
"SELECT bytes_used FROM shard_range").fetchone()[0], 2)
+ self.assertEqual(conn.execute(
+ "SELECT reported FROM shard_range").fetchone()[0], 0)
# Put old event
broker.merge_shard_ranges(
ShardRange('"a/{<shardrange \'&\' name>}"', old_put_timestamp,
- 'lower', 'upper', 1, 2, meta_timestamp=meta_timestamp))
+ 'lower', 'upper', 1, 2, meta_timestamp=meta_timestamp,
+ reported=True))
with broker.get() as conn:
self.assertEqual(conn.execute(
"SELECT name FROM shard_range").fetchone()[0],
@@ -1009,6 +1167,8 @@ class TestContainerBroker(unittest.TestCase):
"SELECT object_count FROM shard_range").fetchone()[0], 1)
self.assertEqual(conn.execute(
"SELECT bytes_used FROM shard_range").fetchone()[0], 2)
+ self.assertEqual(conn.execute(
+ "SELECT reported FROM shard_range").fetchone()[0], 0)
# Put old delete event
broker.merge_shard_ranges(
@@ -1885,10 +2045,12 @@ class TestContainerBroker(unittest.TestCase):
self.assertEqual(info['hash'], '00000000000000000000000000000000')
self.assertEqual(info['put_timestamp'], Timestamp(1).internal)
self.assertEqual(info['delete_timestamp'], '0')
- if self.__class__ in (TestContainerBrokerBeforeMetadata,
- TestContainerBrokerBeforeXSync,
- TestContainerBrokerBeforeSPI,
- TestContainerBrokerBeforeShardRanges):
+ if self.__class__ in (
+ TestContainerBrokerBeforeMetadata,
+ TestContainerBrokerBeforeXSync,
+ TestContainerBrokerBeforeSPI,
+ TestContainerBrokerBeforeShardRanges,
+ TestContainerBrokerBeforeShardRangeReportedColumn):
self.assertEqual(info['status_changed_at'], '0')
else:
self.assertEqual(info['status_changed_at'],
@@ -3182,10 +3344,12 @@ class TestContainerBroker(unittest.TestCase):
self.assertEqual(0, info['storage_policy_index']) # sanity check
self.assertEqual(0, info['object_count'])
self.assertEqual(0, info['bytes_used'])
- if self.__class__ in (TestContainerBrokerBeforeMetadata,
- TestContainerBrokerBeforeXSync,
- TestContainerBrokerBeforeSPI,
- TestContainerBrokerBeforeShardRanges):
+ if self.__class__ in (
+ TestContainerBrokerBeforeMetadata,
+ TestContainerBrokerBeforeXSync,
+ TestContainerBrokerBeforeSPI,
+ TestContainerBrokerBeforeShardRanges,
+ TestContainerBrokerBeforeShardRangeReportedColumn):
self.assertEqual(info['status_changed_at'], '0')
else:
self.assertEqual(timestamp.internal, info['status_changed_at'])
@@ -5222,6 +5386,75 @@ class TestContainerBrokerBeforeShardRanges(ContainerBrokerMigrationMixin,
FROM shard_range''')
+def pre_reported_create_shard_range_table(self, conn):
+ """
+ Copied from ContainerBroker before the
+ reported column was added; used for testing with
+ TestContainerBrokerBeforeShardRangeReportedColumn.
+
+ Create a shard_range table with no 'reported' column.
+
+ :param conn: DB connection object
+ """
+ conn.execute("""
+ CREATE TABLE shard_range (
+ ROWID INTEGER PRIMARY KEY AUTOINCREMENT,
+ name TEXT,
+ timestamp TEXT,
+ lower TEXT,
+ upper TEXT,
+ object_count INTEGER DEFAULT 0,
+ bytes_used INTEGER DEFAULT 0,
+ meta_timestamp TEXT,
+ deleted INTEGER DEFAULT 0,
+ state INTEGER,
+ state_timestamp TEXT,
+ epoch TEXT
+ );
+ """)
+
+ conn.execute("""
+ CREATE TRIGGER shard_range_update BEFORE UPDATE ON shard_range
+ BEGIN
+ SELECT RAISE(FAIL, 'UPDATE not allowed; DELETE and INSERT');
+ END;
+ """)
+
+
+class TestContainerBrokerBeforeShardRangeReportedColumn(
+ ContainerBrokerMigrationMixin, TestContainerBroker):
+ """
+ Tests for ContainerBroker against databases created
+ before the shard_ranges table was added.
+ """
+ # *grumble grumble* This should include container_info/policy_stat :-/
+ expected_db_tables = {'outgoing_sync', 'incoming_sync', 'object',
+ 'sqlite_sequence', 'container_stat', 'shard_range'}
+
+ def setUp(self):
+ super(TestContainerBrokerBeforeShardRangeReportedColumn,
+ self).setUp()
+ ContainerBroker.create_shard_range_table = \
+ pre_reported_create_shard_range_table
+
+ broker = ContainerBroker(':memory:', account='a', container='c')
+ broker.initialize(Timestamp('1').internal, 0)
+ with self.assertRaises(sqlite3.DatabaseError) as raised, \
+ broker.get() as conn:
+ conn.execute('''SELECT reported
+ FROM shard_range''')
+ self.assertIn('no such column: reported', str(raised.exception))
+
+ def tearDown(self):
+ super(TestContainerBrokerBeforeShardRangeReportedColumn,
+ self).tearDown()
+ broker = ContainerBroker(':memory:', account='a', container='c')
+ broker.initialize(Timestamp('1').internal, 0)
+ with broker.get() as conn:
+ conn.execute('''SELECT reported
+ FROM shard_range''')
+
+
class TestUpdateNewItemFromExisting(unittest.TestCase):
# TODO: add test scenarios that have swift_bytes in content_type
t0 = '1234567890.00000'
diff --git a/test/unit/container/test_server.py b/test/unit/container/test_server.py
index fc55ff05d..4fd1fcf2e 100644
--- a/test/unit/container/test_server.py
+++ b/test/unit/container/test_server.py
@@ -2380,15 +2380,17 @@ class TestContainerController(unittest.TestCase):
'X-Container-Sysmeta-Test': 'set',
'X-Container-Meta-Test': 'persisted'}
- # PUT shard range to non-existent container with non-autocreate prefix
- req = Request.blank('/sda1/p/a/c', method='PUT', headers=headers,
- body=json.dumps([dict(shard_range)]))
+ # PUT shard range to non-existent container without autocreate flag
+ req = Request.blank(
+ '/sda1/p/.shards_a/shard_c', method='PUT', headers=headers,
+ body=json.dumps([dict(shard_range)]))
resp = req.get_response(self.controller)
self.assertEqual(404, resp.status_int)
- # PUT shard range to non-existent container with autocreate prefix,
+ # PUT shard range to non-existent container with autocreate flag,
# missing storage policy
headers['X-Timestamp'] = next(ts_iter).internal
+ headers['X-Backend-Auto-Create'] = 't'
req = Request.blank(
'/sda1/p/.shards_a/shard_c', method='PUT', headers=headers,
body=json.dumps([dict(shard_range)]))
@@ -2397,7 +2399,7 @@ class TestContainerController(unittest.TestCase):
self.assertIn(b'X-Backend-Storage-Policy-Index header is required',
resp.body)
- # PUT shard range to non-existent container with autocreate prefix
+ # PUT shard range to non-existent container with autocreate flag
headers['X-Timestamp'] = next(ts_iter).internal
policy_index = random.choice(POLICIES).idx
headers['X-Backend-Storage-Policy-Index'] = str(policy_index)
@@ -2407,7 +2409,7 @@ class TestContainerController(unittest.TestCase):
resp = req.get_response(self.controller)
self.assertEqual(201, resp.status_int)
- # repeat PUT of shard range to autocreated container - 204 response
+ # repeat PUT of shard range to autocreated container - 202 response
headers['X-Timestamp'] = next(ts_iter).internal
headers.pop('X-Backend-Storage-Policy-Index') # no longer required
req = Request.blank(
@@ -2416,7 +2418,7 @@ class TestContainerController(unittest.TestCase):
resp = req.get_response(self.controller)
self.assertEqual(202, resp.status_int)
- # regular PUT to autocreated container - 204 response
+ # regular PUT to autocreated container - 202 response
headers['X-Timestamp'] = next(ts_iter).internal
req = Request.blank(
'/sda1/p/.shards_a/shard_c', method='PUT',
@@ -4649,61 +4651,53 @@ class TestContainerController(unittest.TestCase):
"%d on param %s" % (resp.status_int, param))
def test_put_auto_create(self):
- headers = {'x-timestamp': Timestamp(1).internal,
- 'x-size': '0',
- 'x-content-type': 'text/plain',
- 'x-etag': 'd41d8cd98f00b204e9800998ecf8427e'}
-
- req = Request.blank('/sda1/p/a/c/o',
- environ={'REQUEST_METHOD': 'PUT'},
- headers=dict(headers))
- resp = req.get_response(self.controller)
- self.assertEqual(resp.status_int, 404)
-
- req = Request.blank('/sda1/p/.a/c/o',
- environ={'REQUEST_METHOD': 'PUT'},
- headers=dict(headers))
- resp = req.get_response(self.controller)
- self.assertEqual(resp.status_int, 201)
-
- req = Request.blank('/sda1/p/a/.c/o',
- environ={'REQUEST_METHOD': 'PUT'},
- headers=dict(headers))
- resp = req.get_response(self.controller)
- self.assertEqual(resp.status_int, 404)
+ def do_test(expected_status, path, extra_headers=None, body=None):
+ headers = {'x-timestamp': Timestamp(1).internal,
+ 'x-size': '0',
+ 'x-content-type': 'text/plain',
+ 'x-etag': 'd41d8cd98f00b204e9800998ecf8427e'}
+ if extra_headers:
+ headers.update(extra_headers)
+ req = Request.blank('/sda1/p/' + path,
+ environ={'REQUEST_METHOD': 'PUT'},
+ headers=headers, body=body)
+ resp = req.get_response(self.controller)
+ self.assertEqual(resp.status_int, expected_status)
- req = Request.blank('/sda1/p/a/c/.o',
- environ={'REQUEST_METHOD': 'PUT'},
- headers=dict(headers))
- resp = req.get_response(self.controller)
- self.assertEqual(resp.status_int, 404)
+ do_test(404, 'a/c/o')
+ do_test(404, '.a/c/o', {'X-Backend-Auto-Create': 'no'})
+ do_test(201, '.a/c/o')
+ do_test(404, 'a/.c/o')
+ do_test(404, 'a/c/.o')
+ do_test(201, 'a/c/o', {'X-Backend-Auto-Create': 'yes'})
+
+ do_test(404, '.shards_a/c/o')
+ create_shard_headers = {
+ 'X-Backend-Record-Type': 'shard',
+ 'X-Backend-Storage-Policy-Index': '0'}
+ do_test(404, '.shards_a/c', create_shard_headers, '[]')
+ create_shard_headers['X-Backend-Auto-Create'] = 't'
+ do_test(201, '.shards_a/c', create_shard_headers, '[]')
def test_delete_auto_create(self):
- headers = {'x-timestamp': Timestamp(1).internal}
-
- req = Request.blank('/sda1/p/a/c/o',
- environ={'REQUEST_METHOD': 'DELETE'},
- headers=dict(headers))
- resp = req.get_response(self.controller)
- self.assertEqual(resp.status_int, 404)
-
- req = Request.blank('/sda1/p/.a/c/o',
- environ={'REQUEST_METHOD': 'DELETE'},
- headers=dict(headers))
- resp = req.get_response(self.controller)
- self.assertEqual(resp.status_int, 204)
-
- req = Request.blank('/sda1/p/a/.c/o',
- environ={'REQUEST_METHOD': 'DELETE'},
- headers=dict(headers))
- resp = req.get_response(self.controller)
- self.assertEqual(resp.status_int, 404)
+ def do_test(expected_status, path, extra_headers=None):
+ headers = {'x-timestamp': Timestamp(1).internal}
+ if extra_headers:
+ headers.update(extra_headers)
+ req = Request.blank('/sda1/p/' + path,
+ environ={'REQUEST_METHOD': 'DELETE'},
+ headers=headers)
+ resp = req.get_response(self.controller)
+ self.assertEqual(resp.status_int, expected_status)
- req = Request.blank('/sda1/p/a/.c/.o',
- environ={'REQUEST_METHOD': 'DELETE'},
- headers=dict(headers))
- resp = req.get_response(self.controller)
- self.assertEqual(resp.status_int, 404)
+ do_test(404, 'a/c/o')
+ do_test(404, '.a/c/o', {'X-Backend-Auto-Create': 'false'})
+ do_test(204, '.a/c/o')
+ do_test(404, 'a/.c/o')
+ do_test(404, 'a/.c/.o')
+ do_test(404, '.shards_a/c/o')
+ do_test(204, 'a/c/o', {'X-Backend-Auto-Create': 'true'})
+ do_test(204, '.shards_a/c/o', {'X-Backend-Auto-Create': 'true'})
def test_content_type_on_HEAD(self):
Request.blank('/sda1/p/a/o',
diff --git a/test/unit/container/test_sharder.py b/test/unit/container/test_sharder.py
index 43730a5d9..a54ddb652 100644
--- a/test/unit/container/test_sharder.py
+++ b/test/unit/container/test_sharder.py
@@ -4189,6 +4189,7 @@ class TestSharder(BaseTestSharder):
def capture_send(conn, data):
bodies.append(data)
+ self.assertFalse(broker.get_own_shard_range().reported) # sanity
with self._mock_sharder() as sharder:
with mocked_http_conn(204, 204, 204,
give_send=capture_send) as mock_conn:
@@ -4198,6 +4199,7 @@ class TestSharder(BaseTestSharder):
self.assertEqual('PUT', req['method'])
self.assertEqual([expected_sent] * 3,
[json.loads(b) for b in bodies])
+ self.assertTrue(broker.get_own_shard_range().reported)
def test_update_root_container_own_range(self):
broker = self._make_broker()
@@ -4230,6 +4232,32 @@ class TestSharder(BaseTestSharder):
with annotate_failure(state):
check_only_own_shard_range_sent(state)
+ def test_update_root_container_already_reported(self):
+ broker = self._make_broker()
+
+ def check_already_reported_not_sent(state):
+ own_shard_range = broker.get_own_shard_range()
+
+ own_shard_range.reported = True
+ self.assertTrue(own_shard_range.update_state(
+ state, state_timestamp=next(self.ts_iter)))
+ # Check that updating state clears the flag
+ self.assertFalse(own_shard_range.reported)
+
+ # If we claim to have already updated...
+ own_shard_range.reported = True
+ broker.merge_shard_ranges([own_shard_range])
+
+ # ... then there's nothing to send
+ with self._mock_sharder() as sharder:
+ with mocked_http_conn() as mock_conn:
+ sharder._update_root_container(broker)
+ self.assertFalse(mock_conn.requests)
+
+ for state in ShardRange.STATES:
+ with annotate_failure(state):
+ check_already_reported_not_sent(state)
+
def test_update_root_container_all_ranges(self):
broker = self._make_broker()
other_shard_ranges = self._make_shard_ranges((('', 'h'), ('h', '')))
diff --git a/test/unit/proxy/controllers/test_obj.py b/test/unit/proxy/controllers/test_obj.py
index 12849962b..9b862f5d3 100644
--- a/test/unit/proxy/controllers/test_obj.py
+++ b/test/unit/proxy/controllers/test_obj.py
@@ -51,7 +51,8 @@ from swift.common.storage_policy import POLICIES, ECDriverError, \
from test.unit import FakeRing, FakeMemcache, fake_http_connect, \
debug_logger, patch_policies, SlowBody, FakeStatus, \
DEFAULT_TEST_EC_TYPE, encode_frag_archive_bodies, make_ec_object_stub, \
- fake_ec_node_response, StubResponse, mocked_http_conn
+ fake_ec_node_response, StubResponse, mocked_http_conn, \
+ quiet_eventlet_exceptions
from test.unit.proxy.test_server import node_error_count
@@ -1617,7 +1618,8 @@ class TestReplicatedObjController(CommonObjectControllerMixin,
# to the next node rather than hang the request
headers = [{'X-Backend-Timestamp': 'not-a-timestamp'}, {}]
codes = [200, 200]
- with set_http_connect(*codes, headers=headers):
+ with quiet_eventlet_exceptions(), set_http_connect(
+ *codes, headers=headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
diff --git a/test/unit/proxy/test_server.py b/test/unit/proxy/test_server.py
index 1d3257e34..0a4d9de77 100644
--- a/test/unit/proxy/test_server.py
+++ b/test/unit/proxy/test_server.py
@@ -7982,7 +7982,7 @@ class TestObjectDisconnectCleanup(unittest.TestCase):
continue
device_path = os.path.join(_testdir, dev)
for datadir in os.listdir(device_path):
- if 'object' not in datadir:
+ if any(p in datadir for p in ('account', 'container')):
continue
data_path = os.path.join(device_path, datadir)
rmtree(data_path, ignore_errors=True)
diff --git a/tools/playbooks/common/install_dependencies.yaml b/tools/playbooks/common/install_dependencies.yaml
index a99ead44f..d48905aab 100644
--- a/tools/playbooks/common/install_dependencies.yaml
+++ b/tools/playbooks/common/install_dependencies.yaml
@@ -14,12 +14,13 @@
# limitations under the License.
- hosts: all
become: true
+ roles:
+ - ensure-pip
tasks:
- name: installing dependencies
package: name={{ item }} state=present
with_items:
- python-pyeclib
- - python-pip
- python-nose
- python-swiftclient
diff --git a/tools/playbooks/dsvm/pre.yaml b/tools/playbooks/dsvm/pre.yaml
index 351e28097..23c6ec2d2 100644
--- a/tools/playbooks/dsvm/pre.yaml
+++ b/tools/playbooks/dsvm/pre.yaml
@@ -7,4 +7,6 @@
bindep_dir: "{{ zuul_work_dir }}"
- test-setup
- ensure-tox
+ - additional-tempauth-users
+ - additional-keystone-users
- dsvm-additional-middlewares
diff --git a/tools/playbooks/saio_single_node_setup/setup_saio.yaml b/tools/playbooks/saio_single_node_setup/setup_saio.yaml
index d27261e02..c6ba25b72 100644
--- a/tools/playbooks/saio_single_node_setup/setup_saio.yaml
+++ b/tools/playbooks/saio_single_node_setup/setup_saio.yaml
@@ -179,10 +179,12 @@
with_items: "{{ find_result.files }}"
- name: set the options in the proxy config file
- shell:
- cmd: |
- crudini --set /etc/swift/proxy-server.conf app:proxy-server node_timeout 20
- executable: /bin/bash
+ ini_file:
+ path: /etc/swift/proxy-server.conf
+ section: app:proxy-server
+ option: node_timeout
+ value: 20
+ create: no
- name: copy the SAIO scripts for resetting the environment
command: cp -r {{ zuul.project.src_dir }}/doc/saio/bin /home/{{ ansible_ssh_user }}/bin creates=/home/{{ ansible_ssh_user }}/bin
diff --git a/tox.ini b/tox.ini
index 59b74e47e..ed5c24503 100644
--- a/tox.ini
+++ b/tox.ini
@@ -58,24 +58,12 @@ commands = ./.functests {posargs}
setenv = SWIFT_TEST_IN_PROCESS=1
SWIFT_TEST_IN_PROCESS_CONF_LOADER=ec
-[testenv:func-s3api-py3]
-basepython = python3
-commands = ./.functests {posargs}
-setenv = SWIFT_TEST_IN_PROCESS=1
- SWIFT_TEST_IN_PROCESS_CONF_LOADER=s3api
-
[testenv:func-encryption-py3]
basepython = python3
commands = ./.functests {posargs}
setenv = SWIFT_TEST_IN_PROCESS=1
SWIFT_TEST_IN_PROCESS_CONF_LOADER=encryption
-[testenv:func-domain-remap-staticweb-py3]
-basepython = python3
-commands = ./.functests {posargs}
-setenv = SWIFT_TEST_IN_PROCESS=1
- SWIFT_TEST_IN_PROCESS_CONF_LOADER=domain_remap_staticweb
-
[testenv:func]
basepython = python2.7
deps = {[testenv:py27]deps}
@@ -88,13 +76,6 @@ commands = ./.functests {posargs}
setenv = SWIFT_TEST_IN_PROCESS=1
SWIFT_TEST_IN_PROCESS_CONF_LOADER=encryption
-[testenv:func-domain-remap-staticweb]
-basepython = python2.7
-deps = {[testenv:py27]deps}
-commands = ./.functests {posargs}
-setenv = SWIFT_TEST_IN_PROCESS=1
- SWIFT_TEST_IN_PROCESS_CONF_LOADER=domain_remap_staticweb
-
[testenv:func-ec]
basepython = python2.7
deps = {[testenv:py27]deps}
@@ -102,13 +83,6 @@ commands = ./.functests {posargs}
setenv = SWIFT_TEST_IN_PROCESS=1
SWIFT_TEST_IN_PROCESS_CONF_LOADER=ec
-[testenv:func-s3api]
-basepython = python2.7
-deps = {[testenv:py27]deps}
-commands = ./.functests {posargs}
-setenv = SWIFT_TEST_IN_PROCESS=1
- SWIFT_TEST_IN_PROCESS_CONF_LOADER=s3api
-
[testenv:func-losf]
commands = ./.functests {posargs}
setenv = SWIFT_TEST_IN_PROCESS=1