summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--.mailmap2
-rw-r--r--.zuul.yaml42
-rw-r--r--AUTHORS7
-rw-r--r--CHANGELOG137
-rw-r--r--Dockerfile4
-rw-r--r--Dockerfile-py34
-rw-r--r--doc/s3api/conf/ceph-known-failures-tempauth.yaml1
-rw-r--r--doc/source/admin_guide.rst8
-rw-r--r--doc/source/api/discoverability.rst4
-rw-r--r--doc/source/api/form_post_middleware.rst16
-rw-r--r--doc/source/api/large_objects.rst10
-rw-r--r--doc/source/api/object_api_v1_overview.rst8
-rw-r--r--doc/source/api/object_versioning.rst308
-rw-r--r--doc/source/api/pseudo-hierarchical-folders-directories.rst112
-rw-r--r--doc/source/api/serialized-response-formats.rst50
-rw-r--r--doc/source/api/temporary_url_middleware.rst145
-rw-r--r--doc/source/api/use_content-encoding_metadata.rst12
-rw-r--r--doc/source/api/use_the_content-disposition_metadata.rst20
-rw-r--r--doc/source/conf.py8
-rw-r--r--doc/source/config/account_server_config.rst25
-rw-r--r--doc/source/config/container_server_config.rst25
-rw-r--r--doc/source/cors.rst2
-rw-r--r--doc/source/development_auth.rst26
-rw-r--r--doc/source/development_guidelines.rst20
-rw-r--r--doc/source/development_ondisk_backends.rst2
-rw-r--r--doc/source/getting_started.rst15
-rw-r--r--doc/source/logs.rst1
-rw-r--r--doc/source/misc.rst11
-rw-r--r--doc/source/ops_runbook/diagnose.rst291
-rw-r--r--doc/source/ops_runbook/maintenance.rst24
-rw-r--r--doc/source/ops_runbook/procedures.rst78
-rw-r--r--doc/source/ops_runbook/troubleshooting.rst38
-rw-r--r--doc/source/policies_saio.rst184
-rwxr-xr-xdocker/install_scripts/20_apk_install_py2.sh11
-rwxr-xr-xdocker/install_scripts/99_apk_uninstall_dev.sh3
-rw-r--r--etc/account-server.conf-sample13
-rw-r--r--etc/container-server.conf-sample13
-rw-r--r--etc/object-server.conf-sample13
-rw-r--r--py36-constraints.txt88
-rw-r--r--releasenotes/notes/2_30_0_release-642778c3010848db.yaml167
-rw-r--r--releasenotes/source/conf.py6
-rw-r--r--releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po598
-rw-r--r--swift/cli/manage_shard_ranges.py243
-rw-r--r--swift/cli/ringbuilder.py8
-rw-r--r--swift/common/db_replicator.py35
-rw-r--r--swift/common/digest.py151
-rw-r--r--swift/common/internal_client.py5
-rw-r--r--swift/common/middleware/crypto/decrypter.py2
-rw-r--r--swift/common/middleware/crypto/keymaster.py8
-rw-r--r--swift/common/middleware/formpost.py45
-rw-r--r--swift/common/middleware/proxy_logging.py8
-rw-r--r--swift/common/middleware/s3api/acl_utils.py7
-rw-r--r--swift/common/middleware/s3api/controllers/multi_upload.py13
-rw-r--r--swift/common/middleware/s3api/s3request.py6
-rw-r--r--swift/common/middleware/s3api/subresource.py11
-rw-r--r--swift/common/middleware/slo.py14
-rw-r--r--swift/common/middleware/staticweb.py2
-rw-r--r--swift/common/middleware/tempurl.py121
-rw-r--r--swift/common/ring/builder.py2
-rw-r--r--swift/common/storage_policy.py4
-rw-r--r--swift/common/utils.py85
-rw-r--r--swift/common/wsgi.py7
-rw-r--r--swift/container/backend.py65
-rw-r--r--swift/container/replicator.py47
-rw-r--r--swift/container/sharder.py450
-rw-r--r--swift/container/sync.py44
-rw-r--r--swift/locale/de/LC_MESSAGES/swift.po764
-rw-r--r--swift/locale/en_GB/LC_MESSAGES/swift.po796
-rw-r--r--swift/locale/es/LC_MESSAGES/swift.po682
-rw-r--r--swift/locale/fr/LC_MESSAGES/swift.po589
-rw-r--r--swift/locale/it/LC_MESSAGES/swift.po575
-rw-r--r--swift/locale/ja/LC_MESSAGES/swift.po565
-rw-r--r--swift/locale/ko_KR/LC_MESSAGES/swift.po558
-rw-r--r--swift/locale/pt_BR/LC_MESSAGES/swift.po566
-rw-r--r--swift/locale/ru/LC_MESSAGES/swift.po576
-rw-r--r--swift/locale/tr_TR/LC_MESSAGES/swift.po533
-rw-r--r--swift/locale/zh_CN/LC_MESSAGES/swift.po535
-rw-r--r--swift/locale/zh_TW/LC_MESSAGES/swift.po539
-rw-r--r--swift/obj/diskfile.py4
-rw-r--r--swift/obj/reconstructor.py4
-rw-r--r--swift/obj/replicator.py32
-rw-r--r--swift/obj/ssync_receiver.py68
-rw-r--r--swift/obj/updater.py22
-rw-r--r--swift/proxy/controllers/info.py3
-rw-r--r--test/debug_logger.py13
-rw-r--r--test/functional/s3api/test_object.py10
-rw-r--r--test/functional/test_object_versioning.py4
-rwxr-xr-xtest/functional/test_symlink.py4
-rw-r--r--test/functional/test_tempurl.py37
-rw-r--r--test/probe/test_sharder.py496
-rw-r--r--test/s3api/__init__.py28
-rw-r--r--test/s3api/test_mpu.py100
-rw-r--r--test/unit/__init__.py1
-rw-r--r--test/unit/cli/test_ipv6_output.stub10
-rw-r--r--test/unit/cli/test_manage_shard_ranges.py565
-rw-r--r--test/unit/cli/test_ringbuilder.py2
-rw-r--r--test/unit/common/middleware/helpers.py16
-rw-r--r--test/unit/common/middleware/s3api/test_acl_utils.py51
-rw-r--r--test/unit/common/middleware/s3api/test_multi_upload.py38
-rw-r--r--test/unit/common/middleware/s3api/test_obj.py24
-rw-r--r--test/unit/common/middleware/s3api/test_s3_acl.py11
-rw-r--r--test/unit/common/middleware/s3api/test_s3request.py5
-rw-r--r--test/unit/common/middleware/test_formpost.py227
-rw-r--r--test/unit/common/middleware/test_proxy_logging.py6
-rw-r--r--test/unit/common/middleware/test_slo.py51
-rw-r--r--test/unit/common/middleware/test_staticweb.py20
-rw-r--r--test/unit/common/middleware/test_tempurl.py167
-rw-r--r--test/unit/common/test_db_replicator.py71
-rw-r--r--test/unit/common/test_digest.py191
-rw-r--r--test/unit/common/test_internal_client.py35
-rw-r--r--test/unit/common/test_utils.py20
-rw-r--r--test/unit/common/test_wsgi.py29
-rw-r--r--test/unit/container/test_backend.py131
-rw-r--r--test/unit/container/test_replicator.py78
-rw-r--r--test/unit/container/test_sharder.py486
-rw-r--r--test/unit/obj/test_diskfile.py53
-rw-r--r--test/unit/obj/test_reconstructor.py54
-rw-r--r--test/unit/obj/test_replicator.py223
-rw-r--r--test/unit/proxy/controllers/test_info.py22
-rw-r--r--tools/playbooks/ceph-s3tests/run.yaml9
-rw-r--r--tools/playbooks/common/restart_swift.yaml24
-rw-r--r--tools/playbooks/multinode_setup/run.yaml1
-rw-r--r--tools/playbooks/s3api-tests/run.yaml23
-rw-r--r--tox.ini19
124 files changed, 5942 insertions, 8759 deletions
diff --git a/.mailmap b/.mailmap
index c56451f19..aca9442b6 100644
--- a/.mailmap
+++ b/.mailmap
@@ -132,3 +132,5 @@ Takashi Kajinami <tkajinam@redhat.com> <kajinamit@nttdata.co.jp>
Yuxin Wang <wang.yuxin@ostorage.com.cn> Wang Yuxin
Gilles Biannic <gilles.biannic@corp.ovh.com> gillesbiannic
melissaml <ma.lei@99cloud.net> <malei@maleideMacBook-Pro.local>
+Ashwin Nair <nairashwin952013@gmail.com> indianwhocodes
+Romain de Joux <romain.de-joux@ovhcloud.com> <romain.de-joux@corp.ovh.com>
diff --git a/.zuul.yaml b/.zuul.yaml
index 378842921..e6de70f1e 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -317,12 +317,33 @@
- tools/playbooks/saio_single_node_setup/setup_saio.yaml
- tools/playbooks/saio_single_node_setup/add_s3api.yaml
- tools/playbooks/saio_single_node_setup/make_rings.yaml
+ - tools/playbooks/common/restart_swift.yaml
run: tools/playbooks/ceph-s3tests/run.yaml
post-run:
- tools/playbooks/probetests/post.yaml
- tools/playbooks/ceph-s3tests/post.yaml
- job:
+ name: swift-tox-func-s3api-tests-tempauth
+ parent: unittests
+ nodeset: centos-8-stream
+ description: |
+ Setup a SAIO dev environment and run our s3api test suite
+ timeout: 1800
+ vars:
+ s3_acl: yes
+ bindep_profile: test py36
+ pre-run:
+ - tools/playbooks/common/install_dependencies.yaml
+ - tools/playbooks/saio_single_node_setup/setup_saio.yaml
+ - tools/playbooks/saio_single_node_setup/add_s3api.yaml
+ - tools/playbooks/saio_single_node_setup/make_rings.yaml
+ - tools/playbooks/common/restart_swift.yaml
+ run: tools/playbooks/s3api-tests/run.yaml
+ post-run:
+ - tools/playbooks/probetests/post.yaml
+
+- job:
name: swift-probetests-centos-7
parent: unittests
nodeset: centos-7
@@ -570,6 +591,7 @@
description: |
Functional testing on a FIPS enabled Centos 8 system
vars:
+ nslookup_target: 'opendev.org'
enable_fips: true
- job:
@@ -580,6 +602,7 @@
Functional encryption testing on a FIPS enabled
Centos 8 system
vars:
+ nslookup_target: 'opendev.org'
enable_fips: true
- job:
@@ -589,6 +612,7 @@
description: |
Functional EC testing on a FIPS enabled Centos 8 system
vars:
+ nslookup_target: 'opendev.org'
enable_fips: true
- project-template:
@@ -616,7 +640,7 @@
- swift-tox-func-py36-centos-8-stream-fips:
irrelevant-files: &functest-irrelevant-files
- ^(api-ref|doc|releasenotes)/.*$
- - ^test/(cors|probe)/.*$
+ - ^test/(cors|probe|s3api)/.*$
- ^(.gitreview|.mailmap|AUTHORS|CHANGELOG|.*\.rst)$
- swift-tox-func-encryption-py36-centos-8-stream-fips:
irrelevant-files: *functest-irrelevant-files
@@ -633,7 +657,7 @@
- swift-tox-py27:
irrelevant-files: &unittest-irrelevant-files
- ^(api-ref|doc|releasenotes)/.*$
- - ^test/(cors|functional|probe)/.*$
+ - ^test/(cors|functional|probe|s3api)/.*$
- swift-tox-py36:
irrelevant-files: *unittest-irrelevant-files
- swift-tox-py39:
@@ -661,7 +685,7 @@
- ^(api-ref|releasenotes)/.*$
# Keep doc/saio -- we use those sample configs in the saio playbooks
- ^doc/(requirements.txt|(manpages|s3api|source)/.*)$
- - ^test/(unit|functional|probe)/.*$
+ - ^test/(unit|functional|probe|s3api)/.*$
- ^(.gitreview|.mailmap|AUTHORS|CHANGELOG)$
- swift-tox-func-s3api-ceph-s3tests-tempauth:
irrelevant-files:
@@ -669,14 +693,21 @@
# Keep doc/saio -- we use those sample configs in the saio playbooks
# Also keep doc/s3api -- it holds known failures for these tests
- ^doc/(requirements.txt|(manpages|source)/.*)$
- - ^test/(cors|unit|probe)/.*$
+ - ^test/.*$
+ - ^(.gitreview|.mailmap|AUTHORS|CHANGELOG|.*\.rst)$
+ - swift-tox-func-s3api-tests-tempauth:
+ irrelevant-files:
+ - ^(api-ref|releasenotes)/.*$
+ # Keep doc/saio -- we use those sample configs in the saio playbooks
+ - ^doc/(requirements.txt|(manpages|s3api|source)/.*)$
+ - ^test/(cors|unit|functional|probe)/.*$
- ^(.gitreview|.mailmap|AUTHORS|CHANGELOG|.*\.rst)$
- swift-probetests-centos-7:
irrelevant-files: &probetest-irrelevant-files
- ^(api-ref|releasenotes)/.*$
# Keep doc/saio -- we use those sample configs in the saio playbooks
- ^doc/(requirements.txt|(manpages|s3api|source)/.*)$
- - ^test/(cors|unit|functional)/.*$
+ - ^test/(cors|unit|functional|s3api)/.*$
- ^(.gitreview|.mailmap|AUTHORS|CHANGELOG|.*\.rst)$
- swift-probetests-centos-8-stream:
irrelevant-files: *probetest-irrelevant-files
@@ -722,6 +753,7 @@
- swift-tox-func-encryption-py38
- swift-tox-func-ec-py38
- swift-func-cors
+ - swift-tox-func-s3api-tests-tempauth
- swift-probetests-centos-7:
irrelevant-files: *probetest-irrelevant-files
- swift-probetests-centos-8-stream:
diff --git a/AUTHORS b/AUTHORS
index 3d2bc530a..f28613ad8 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -62,6 +62,7 @@ Anne Gentle (anne@openstack.org)
aolivo (aolivo@blizzard.com)
Arnaud JOST (arnaud.jost@ovh.net)
arzhna (arzhna@gmail.com)
+Ashwin Nair (nairashwin952013@gmail.com)
Atsushi Sakai (sakaia@jp.fujitsu.com)
Aymeric Ducroquetz (aymeric.ducroquetz@ovhcloud.com)
Azhagu Selvan SP (tamizhgeek@gmail.com)
@@ -226,6 +227,8 @@ Jesse Andrews (anotherjesse@gmail.com)
Ji-Wei (ji.wei3@zte.com.cn)
Jian Zhang (jian.zhang@intel.com)
Jiangmiao Gao (tolbkni@gmail.com)
+Jianjian Huo (jhuo@nvidia.com)
+jiaqi07 (wangjiaqi07@inspur.com)
Jing Liuqing (jing.liuqing@99cloud.net)
jinyuanliu (liujinyuan@inspur.com)
Joanna H. Huang (joanna.huitzu.huang@gmail.com)
@@ -331,6 +334,7 @@ Nicolas Helgeson (nh202b@att.com)
Nicolas Trangez (ikke@nicolast.be)
Ning Zhang (ning@zmanda.com)
Nirmal Thacker (nirmalthacker@gmail.com)
+niuke (niuke19970315@163.com)
npraveen35 (npraveen35@gmail.com)
Olga Saprycheva (osapryc@us.ibm.com)
Ondrej Novy (ondrej.novy@firma.seznam.cz)
@@ -363,7 +367,7 @@ Richard Hawkins (richard.hawkins@rackspace.com)
ricolin (ricolin@ricolky.com)
Robert Francis (robefran@ca.ibm.com)
Robin Naundorf (r.naundorf@fh-muenster.de)
-Romain de Joux (romain.de-joux@corp.ovh.com)
+Romain de Joux (romain.de-joux@ovhcloud.com)
Russ Nelson (russ@crynwr.com)
Russell Bryant (rbryant@redhat.com)
Sachin Patil (psachin@redhat.com)
@@ -398,6 +402,7 @@ Takashi Kajinami (tkajinam@redhat.com)
Takashi Natsume (natsume.takashi@lab.ntt.co.jp)
TheSriram (sriram@klusterkloud.com)
Thiago da Silva (thiagodasilva@gmail.com)
+Thibault Person (thibault.person@ovhcloud.com)
Thierry Carrez (thierry@openstack.org)
Thomas Goirand (thomas@goirand.fr)
Thomas Herve (therve@redhat.com)
diff --git a/CHANGELOG b/CHANGELOG
index 6aa4084da..08bed3d35 100644
--- a/CHANGELOG
+++ b/CHANGELOG
@@ -1,3 +1,140 @@
+swift (2.30.0)
+
+ * Sharding improvements
+
+ * The `swift-manage-shard-ranges` tool has a new mode to repair gaps
+ in the namespace.
+
+ * Misplaced tombstone records are now properly cleaved.
+
+ * Fixed a bug where the sharder could fail to find a device to use for
+ cleaving.
+
+ * Databases marked deleted are now processed by the sharder.
+
+ * More information is now synced to the fresh database when sharding.
+ Previously, a database could lose the fact that it had been marked
+ as deleted.
+
+ * Shard ranges with no rows to cleave could previously be left in the
+ CREATED state after cleaving. Now, they are advanced to CLEAVED.
+
+ * Metrics are now emitted for whether databases used for cleaving
+ were created or already existed, allowing a better understanding
+ of the reason for handoffs in the cluster.
+
+ * Misplaced-record stats are now also emitted to statsd. Previously,
+ these were only available in logs.
+
+ * S3 API improvements
+
+ * Constant-time string comparisons are now used when checking signatures.
+
+ * Fixed cross-policy object copies. Previously, copied data would
+ always be written using the source container's policy. Now, the
+ destination container's policy will be used, avoiding availability
+ issues and unnecessary container-reconciler work.
+
+ * More headers are now copied from multi-part upload markers to their
+ completed objects, including `Content-Encoding`.
+
+ * When running with `s3_acl` disabled, `bucket-owner-full-control` and
+ `bucket-owner-read` canned ACLs will be translated to the same Swift
+ ACLs as `private`.
+
+ * The S3 ACL and Delete Multiple APIs are now less case-sensitive.
+
+ * Improved the error message when deleting a bucket that's ever had
+ versioning enabled and still has versions in it.
+
+ * `LastModified` timestamps in listings are now rounded up to whole
+ seconds, like they are in responses from AWS.
+
+ * Proxy logging for Complete Multipart Upload requests is now more
+ consistent when requests have been retried.
+
+ * Logging improvements
+
+ * Signal handling is more consistently logged at notice level.
+ Previously, signal handling would sometimes be logged at info
+ or error levels.
+
+ * The message template for proxy logging may now include a
+ `{domain}` field for the client-provided `Host` header.
+
+ * The object-replicator now logs successful rsync transfers at debug
+ instead of info.
+
+ * Added a `log_rsync_transfers` option to the object-replicator.
+ Set it to false to disable logging rsync "send" lines; during
+ large rebalances, such logging can overwhelm log aggregation
+ while providing little useful information.
+
+ * Transaction IDs are now only included in daemon log lines
+ in a request/response context.
+
+ * Fixed a socket leak when clients try to delete a non-SLO as though
+ it were a Static Large Object.
+
+ * The formpost digest algorithm is now configurable via the new
+ `allowed_digests` option, and support is added for both SHA-256
+ and SHA-512. Supported formpost digests are exposed to clients in
+ `/info`. Additionally, formpost signatures can now be base64 encoded.
+
+ * Added metrics to the formpost and tempurl middlewares to monitor
+ digest usage in signatures.
+
+ * SHA-1 signatures are now deprecated for the formpost and tempurl
+ middlewares. At some point in the future, SHA-1 will no longer be
+ enabled by default; eventually, support for it will be removed
+ entirely.
+
+ * Improved compatibility with certain FIPS-mode-enabled systems.
+
+ * Added a `ring_ip` option for various object services. This may be
+ used to find own devices in the ring in a containerized environment
+ where the `bind_ip` may not appear in the ring at all.
+
+ * Account and container replicators can now be configured with a
+ `handoff_delete` option, similar to object replicators and
+ reconstructors. See the sample config for more information.
+
+ * Developers using Swift's memcache client may now opt in to having
+ a `MemcacheConnectionError` be raised when no connection succeeded
+ using a new `raise_on_error` keyword argument to `get`/`set`.
+
+ * The tempurl middleware has been updated to return a 503 if storing a
+ token in memcache fails. Third party authentication middlewares are
+ encouraged to also use the new `raise_on_error` keyword argument
+ when storing ephemeral tokens in memcache.
+
+ * Pickle support has been removed from Swift's memcache client. Support
+ had been deprecated since Swift 1.7.0.
+
+ * Device names are now included in new database IDs. This provides more
+ context when examining incoming/outgoing sync tables or sharding
+ CleaveContexts.
+
+ * Database replication connections are now closed following an error
+ or timeout. This prevents a traceback in some cases when the replicator
+ tries to reuse the connection.
+
+ * `ENOENT` and `ENODATA` errors are better handled in the object
+ replicator and auditor.
+
+ * Improved object update throughput by shifting some shard range
+ filtering from Python to SQL.
+
+ * Include `Vary: Origin` header when CORS responses vary by origin.
+
+ * The staticweb middleware now allows empty listings at the root of
+ a container. Previously, this would result in a 404 response.
+
+ * Ring builder output tables better display weights over 1000.
+
+ * Various other minor bug fixes and improvements.
+
+
swift (2.29.1, OpenStack Yoga)
* This is the final stable branch that will support Python 2.7.
diff --git a/Dockerfile b/Dockerfile
index 3584802c4..4eb1c8e2a 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,10 +1,10 @@
################################################
#
-# Alpine 3.10.1 Swift-All-In-One
+# Alpine 3.15.6 Swift-All-In-One
#
################################################
-FROM alpine:3.10.1
+FROM alpine:3.15.6
MAINTAINER Openstack Swift
ENV S6_LOGGING 1
diff --git a/Dockerfile-py3 b/Dockerfile-py3
index fd4185365..f4a652ba4 100644
--- a/Dockerfile-py3
+++ b/Dockerfile-py3
@@ -1,10 +1,10 @@
################################################
#
-# Alpine 3.10.1 Swift-All-In-One
+# Alpine 3.16.2 Swift-All-In-One
#
################################################
-FROM alpine:3.10.1
+FROM alpine:3.16.2
MAINTAINER Openstack Swift
ENV S6_LOGGING 1
diff --git a/doc/s3api/conf/ceph-known-failures-tempauth.yaml b/doc/s3api/conf/ceph-known-failures-tempauth.yaml
index d2922dc85..a30bf29c1 100644
--- a/doc/s3api/conf/ceph-known-failures-tempauth.yaml
+++ b/doc/s3api/conf/ceph-known-failures-tempauth.yaml
@@ -72,7 +72,6 @@ ceph_s3:
s3tests_boto3.functional.test_s3.test_bucket_head_extended: {status: KNOWN}
s3tests_boto3.functional.test_s3.test_bucket_list_objects_anonymous: {status: KNOWN}
s3tests_boto3.functional.test_s3.test_bucket_list_objects_anonymous_fail: {status: KNOWN}
- s3tests_boto3.functional.test_s3.test_bucket_list_return_data: {status: KNOWN}
s3tests_boto3.functional.test_s3.test_bucket_list_return_data_versioning: {status: KNOWN}
s3tests_boto3.functional.test_s3.test_bucket_list_unordered: {status: KNOWN}
s3tests_boto3.functional.test_s3.test_bucket_listv2_objects_anonymous: {status: KNOWN}
diff --git a/doc/source/admin_guide.rst b/doc/source/admin_guide.rst
index c3f4a1078..d8175e25c 100644
--- a/doc/source/admin_guide.rst
+++ b/doc/source/admin_guide.rst
@@ -324,14 +324,14 @@ In order to prevent rsync replication to specific drives, firstly
setup ``rsync_module`` per disk in your ``object-replicator``.
Set this in ``object-server.conf``:
-.. code::
+.. code:: cfg
[object-replicator]
rsync_module = {replication_ip}::object_{device}
Set the individual drives in ``rsync.conf``. For example:
-.. code::
+.. code:: cfg
[object_sda]
max connections = 4
@@ -387,7 +387,7 @@ monitoring solution to achieve this. The following is an example script:
For the above script to work, ensure ``/etc/rsync.d/`` conf files are
included, by specifying ``&include`` in your ``rsync.conf`` file:
-.. code::
+.. code:: cfg
&include /etc/rsync.d
@@ -395,7 +395,7 @@ Use this in conjunction with a cron job to periodically run the script, for exam
.. highlight:: none
-.. code::
+.. code:: cfg
# /etc/cron.d/devicecheck
* * * * * root /some/path/to/disable_rsync.py
diff --git a/doc/source/api/discoverability.rst b/doc/source/api/discoverability.rst
index 56912ca65..c086d16f7 100644
--- a/doc/source/api/discoverability.rst
+++ b/doc/source/api/discoverability.rst
@@ -12,13 +12,13 @@ that does not support the ``/info`` request.
To use the ``/info`` request, send a **GET** request using the ``/info``
path to the Object Store endpoint as shown in this example:
-.. code::
+.. code:: console
# curl https://storage.clouddrive.com/info
This example shows a truncated response body:
-.. code::
+.. code:: console
{
"swift":{
diff --git a/doc/source/api/form_post_middleware.rst b/doc/source/api/form_post_middleware.rst
index 891607a32..97921d41e 100644
--- a/doc/source/api/form_post_middleware.rst
+++ b/doc/source/api/form_post_middleware.rst
@@ -29,9 +29,8 @@ The format of the form **POST** request is:
**Example 1.14. Form POST format**
-.. code::
+.. code:: xml
- <![CDATA[
<form action="SWIFT_URL"
method="POST"
enctype="multipart/form-data">
@@ -44,7 +43,6 @@ The format of the form **POST** request is:
<br/>
<input type="submit"/>
</form>
- ]]>
**action="SWIFT_URL"**
@@ -53,14 +51,14 @@ Set to full URL where the objects are to be uploaded. The names of
uploaded files are appended to the specified *SWIFT_URL*. So, you
can upload directly to the root of a container with a URL like:
-.. code::
+.. code:: none
https://swift-cluster.example.com/v1/my_account/container/
Optionally, you can include an object prefix to separate uploads, such
as:
-.. code::
+.. code:: none
https://swift-cluster.example.com/v1/my_account/container/OBJECT_PREFIX
@@ -123,7 +121,7 @@ follow the file attributes are ignored.
Optionally, if you want the uploaded files to be temporary you can set x-delete-at or x-delete-after attributes by adding one of these as a form input:
-.. code::
+.. code:: xml
<input type="hidden" name="x_delete_at" value="<unix-timestamp>" />
<input type="hidden" name="x_delete_after" value="<seconds>" />
@@ -169,7 +167,7 @@ The following example code generates a signature for use with form
**Example 1.15. HMAC-SHA1 signature for form POST**
-.. code::
+.. code:: python
import hmac
from hashlib import sha1
@@ -198,13 +196,13 @@ being uploaded is called ``flower.jpg``.
This example uses the **swift-form-signature** script to compute the
``expires`` and ``signature`` values.
-.. code::
+.. code:: console
$ bin/swift-form-signature /v1/my_account/container/photos/ https://example.com/done.html 5373952000 1 200 MYKEY
Expires: 1390825338
Signature: 35129416ebda2f1a21b3c2b8939850dfc63d8f43
-.. code::
+.. code:: console
$ curl -i https://swift-cluster.example.com/v1/my_account/container/photos/ -X POST \
-F max_file_size=5373952000 -F max_file_count=1 -F expires=1390825338 \
diff --git a/doc/source/api/large_objects.rst b/doc/source/api/large_objects.rst
index e417e7467..f05f72ebd 100644
--- a/doc/source/api/large_objects.rst
+++ b/doc/source/api/large_objects.rst
@@ -82,7 +82,7 @@ This example shows three segment objects. You can use several containers
and the object names do not have to conform to a specific pattern, in
contrast to dynamic large objects.
-.. code::
+.. code:: json
[
{
@@ -192,7 +192,7 @@ manifest is still available to download the first set of segments.
**Example Upload segment of large object request: HTTP**
-.. code::
+.. code:: none
PUT /{api_version}/{account}/{container}/{object} HTTP/1.1
Host: storage.clouddrive.com
@@ -214,7 +214,7 @@ uploading the manifest.
**Example Upload next segment of large object request: HTTP**
-.. code::
+.. code:: none
PUT /{api_version}/{account}/{container}/{object} HTTP/1.1
Host: storage.clouddrive.com
@@ -232,7 +232,7 @@ subsequent additional segments.
**Example Upload manifest request: HTTP**
-.. code::
+.. code:: none
PUT /{api_version}/{account}/{container}/{object} HTTP/1.1
Host: storage.clouddrive.com
@@ -244,7 +244,7 @@ subsequent additional segments.
**Example Upload manifest response: HTTP**
-.. code::
+.. code:: none
[...]
diff --git a/doc/source/api/object_api_v1_overview.rst b/doc/source/api/object_api_v1_overview.rst
index c44fcc9b2..37fa28e40 100644
--- a/doc/source/api/object_api_v1_overview.rst
+++ b/doc/source/api/object_api_v1_overview.rst
@@ -97,14 +97,14 @@ interact with the Object Storage API.
Specifically, the resource path reflects this structure and has this
format:
-.. code::
+.. code:: none
/v1/{account}/{container}/{object}
For example, for the ``flowers/rose.jpg`` object in the ``images``
container in the ``12345678912345`` account, the resource path is:
-.. code::
+.. code:: none
/v1/12345678912345/images/flowers/rose.jpg
@@ -133,7 +133,7 @@ parameter ``reverse``, noting that your marker and end_markers should be
switched when applied to a reverse listing. I.e, for a list of objects
``[a, b, c, d, e]`` the non-reversed could be:
-.. code::
+.. code:: none
/v1/{account}/{container}/?marker=a&end_marker=d
b
@@ -141,7 +141,7 @@ switched when applied to a reverse listing. I.e, for a list of objects
However, when reversed marker and end_marker are applied to a reversed list:
-.. code::
+.. code:: none
/v1/{account}/{container}/?marker=d&end_marker=a&reverse=on
c
diff --git a/doc/source/api/object_versioning.rst b/doc/source/api/object_versioning.rst
index a676b7331..b3438a6e4 100644
--- a/doc/source/api/object_versioning.rst
+++ b/doc/source/api/object_versioning.rst
@@ -46,37 +46,37 @@ container, overwriting the current version.
Example Using ``X-Versions-Location``
-------------------------------------
-#. Create the ``current`` container:
+#. Create the ``current`` container:
- .. code::
+ .. code:: console
- # curl -i $publicURL/current -X PUT -H "Content-Length: 0" -H "X-Auth-Token: $token" -H "X-Versions-Location: archive"
+ # curl -i $publicURL/current -X PUT -H "Content-Length: 0" -H "X-Auth-Token: $token" -H "X-Versions-Location: archive"
- .. code::
+ .. code:: console
- HTTP/1.1 201 Created
- Content-Length: 0
- Content-Type: text/html; charset=UTF-8
- X-Trans-Id: txb91810fb717347d09eec8-0052e18997
- X-Openstack-Request-Id: txb91810fb717347d09eec8-0052e18997
- Date: Thu, 23 Jan 2014 21:28:55 GMT
+ HTTP/1.1 201 Created
+ Content-Length: 0
+ Content-Type: text/html; charset=UTF-8
+ X-Trans-Id: txb91810fb717347d09eec8-0052e18997
+ X-Openstack-Request-Id: txb91810fb717347d09eec8-0052e18997
+ Date: Thu, 23 Jan 2014 21:28:55 GMT
#. Create the first version of an object in the ``current`` container:
- .. code::
+ .. code:: console
- # curl -i $publicURL/current/my_object --data-binary 1 -X PUT -H "Content-Length: 0" -H "X-Auth-Token: $token"
+ # curl -i $publicURL/current/my_object --data-binary 1 -X PUT -H "Content-Length: 0" -H "X-Auth-Token: $token"
- .. code::
+ .. code:: console
- HTTP/1.1 201 Created
- Last-Modified: Thu, 23 Jan 2014 21:31:22 GMT
- Content-Length: 0
- Etag: d41d8cd98f00b204e9800998ecf8427e
- Content-Type: text/html; charset=UTF-8
- X-Trans-Id: tx5992d536a4bd4fec973aa-0052e18a2a
- X-Openstack-Request-Id: tx5992d536a4bd4fec973aa-0052e18a2a
- Date: Thu, 23 Jan 2014 21:31:22 GMT
+ HTTP/1.1 201 Created
+ Last-Modified: Thu, 23 Jan 2014 21:31:22 GMT
+ Content-Length: 0
+ Etag: d41d8cd98f00b204e9800998ecf8427e
+ Content-Type: text/html; charset=UTF-8
+ X-Trans-Id: tx5992d536a4bd4fec973aa-0052e18a2a
+ X-Openstack-Request-Id: tx5992d536a4bd4fec973aa-0052e18a2a
+ Date: Thu, 23 Jan 2014 21:31:22 GMT
Nothing is written to the non-current version container when you
initially **PUT** an object in the ``current`` container. However,
@@ -85,9 +85,9 @@ Example Using ``X-Versions-Location``
These non-current versions are named as follows:
- .. code::
+ .. code:: none
- <length><object_name>/<timestamp>
+ <length><object_name>/<timestamp>
Where ``length`` is the 3-character, zero-padded hexadecimal
character length of the object, ``<object_name>`` is the object name,
@@ -96,20 +96,20 @@ Example Using ``X-Versions-Location``
#. Create a second version of the object in the ``current`` container:
- .. code::
+ .. code:: console
- # curl -i $publicURL/current/my_object --data-binary 2 -X PUT -H "Content-Length: 0" -H "X-Auth-Token: $token"
+ # curl -i $publicURL/current/my_object --data-binary 2 -X PUT -H "Content-Length: 0" -H "X-Auth-Token: $token"
- .. code::
+ .. code:: console
- HTTP/1.1 201 Created
- Last-Modified: Thu, 23 Jan 2014 21:41:32 GMT
- Content-Length: 0
- Etag: d41d8cd98f00b204e9800998ecf8427e
- Content-Type: text/html; charset=UTF-8
- X-Trans-Id: tx468287ce4fc94eada96ec-0052e18c8c
- X-Openstack-Request-Id: tx468287ce4fc94eada96ec-0052e18c8c
- Date: Thu, 23 Jan 2014 21:41:32 GMT
+ HTTP/1.1 201 Created
+ Last-Modified: Thu, 23 Jan 2014 21:41:32 GMT
+ Content-Length: 0
+ Etag: d41d8cd98f00b204e9800998ecf8427e
+ Content-Type: text/html; charset=UTF-8
+ X-Trans-Id: tx468287ce4fc94eada96ec-0052e18c8c
+ X-Openstack-Request-Id: tx468287ce4fc94eada96ec-0052e18c8c
+ Date: Thu, 23 Jan 2014 21:41:32 GMT
#. Issue a **GET** request to a versioned object to get the current
version of the object. You do not have to do any request redirects or
@@ -117,24 +117,24 @@ Example Using ``X-Versions-Location``
List older versions of the object in the ``archive`` container:
- .. code::
+ .. code:: console
- # curl -i $publicURL/archive?prefix=009my_object -X GET -H "X-Auth-Token: $token"
+ # curl -i $publicURL/archive?prefix=009my_object -X GET -H "X-Auth-Token: $token"
- .. code::
+ .. code:: console
- HTTP/1.1 200 OK
- Content-Length: 30
- X-Container-Object-Count: 1
- Accept-Ranges: bytes
- X-Timestamp: 1390513280.79684
- X-Container-Bytes-Used: 0
- Content-Type: text/plain; charset=utf-8
- X-Trans-Id: tx9a441884997542d3a5868-0052e18d8e
- X-Openstack-Request-Id: tx9a441884997542d3a5868-0052e18d8e
- Date: Thu, 23 Jan 2014 21:45:50 GMT
+ HTTP/1.1 200 OK
+ Content-Length: 30
+ X-Container-Object-Count: 1
+ Accept-Ranges: bytes
+ X-Timestamp: 1390513280.79684
+ X-Container-Bytes-Used: 0
+ Content-Type: text/plain; charset=utf-8
+ X-Trans-Id: tx9a441884997542d3a5868-0052e18d8e
+ X-Openstack-Request-Id: tx9a441884997542d3a5868-0052e18d8e
+ Date: Thu, 23 Jan 2014 21:45:50 GMT
- 009my_object/1390512682.92052
+ 009my_object/1390512682.92052
.. note::
A **POST** request to a versioned object updates only the metadata
@@ -145,38 +145,38 @@ Example Using ``X-Versions-Location``
current version of the object and replace it with the next-most
current version in the non-current container.
- .. code::
+ .. code:: console
- # curl -i $publicURL/current/my_object -X DELETE -H "X-Auth-Token: $token"
+ # curl -i $publicURL/current/my_object -X DELETE -H "X-Auth-Token: $token"
- .. code::
+ .. code:: console
- HTTP/1.1 204 No Content
- Content-Length: 0
- Content-Type: text/html; charset=UTF-8
- X-Trans-Id: tx006d944e02494e229b8ee-0052e18edd
- X-Openstack-Request-Id: tx006d944e02494e229b8ee-0052e18edd
- Date: Thu, 23 Jan 2014 21:51:25 GMT
+ HTTP/1.1 204 No Content
+ Content-Length: 0
+ Content-Type: text/html; charset=UTF-8
+ X-Trans-Id: tx006d944e02494e229b8ee-0052e18edd
+ X-Openstack-Request-Id: tx006d944e02494e229b8ee-0052e18edd
+ Date: Thu, 23 Jan 2014 21:51:25 GMT
List objects in the ``archive`` container to show that the archived
object was moved back to the ``current`` container:
- .. code::
+ .. code:: console
- # curl -i $publicURL/archive?prefix=009my_object -X GET -H "X-Auth-Token: $token"
+ # curl -i $publicURL/archive?prefix=009my_object -X GET -H "X-Auth-Token: $token"
- .. code::
+ .. code:: console
- HTTP/1.1 204 No Content
- Content-Length: 0
- X-Container-Object-Count: 0
- Accept-Ranges: bytes
- X-Timestamp: 1390513280.79684
- X-Container-Bytes-Used: 0
- Content-Type: text/html; charset=UTF-8
- X-Trans-Id: tx044f2a05f56f4997af737-0052e18eed
- X-Openstack-Request-Id: tx044f2a05f56f4997af737-0052e18eed
- Date: Thu, 23 Jan 2014 21:51:41 GMT
+ HTTP/1.1 204 No Content
+ Content-Length: 0
+ X-Container-Object-Count: 0
+ Accept-Ranges: bytes
+ X-Timestamp: 1390513280.79684
+ X-Container-Bytes-Used: 0
+ Content-Type: text/html; charset=UTF-8
+ X-Trans-Id: tx044f2a05f56f4997af737-0052e18eed
+ X-Openstack-Request-Id: tx044f2a05f56f4997af737-0052e18eed
+ Date: Thu, 23 Jan 2014 21:51:41 GMT
This next-most current version carries with it any metadata last set
on it. If want to completely remove an object and you have five
@@ -185,37 +185,37 @@ Example Using ``X-Versions-Location``
Example Using ``X-History-Location``
------------------------------------
-#. Create the ``current`` container:
+#. Create the ``current`` container:
- .. code::
+ .. code:: console
- # curl -i $publicURL/current -X PUT -H "Content-Length: 0" -H "X-Auth-Token: $token" -H "X-History-Location: archive"
+ # curl -i $publicURL/current -X PUT -H "Content-Length: 0" -H "X-Auth-Token: $token" -H "X-History-Location: archive"
- .. code::
+ .. code:: console
- HTTP/1.1 201 Created
- Content-Length: 0
- Content-Type: text/html; charset=UTF-8
- X-Trans-Id: txb91810fb717347d09eec8-0052e18997
- X-Openstack-Request-Id: txb91810fb717347d09eec8-0052e18997
- Date: Thu, 23 Jan 2014 21:28:55 GMT
+ HTTP/1.1 201 Created
+ Content-Length: 0
+ Content-Type: text/html; charset=UTF-8
+ X-Trans-Id: txb91810fb717347d09eec8-0052e18997
+ X-Openstack-Request-Id: txb91810fb717347d09eec8-0052e18997
+ Date: Thu, 23 Jan 2014 21:28:55 GMT
#. Create the first version of an object in the ``current`` container:
- .. code::
+ .. code:: console
- # curl -i $publicURL/current/my_object --data-binary 1 -X PUT -H "Content-Length: 0" -H "X-Auth-Token: $token"
+ # curl -i $publicURL/current/my_object --data-binary 1 -X PUT -H "Content-Length: 0" -H "X-Auth-Token: $token"
- .. code::
+ .. code:: console
- HTTP/1.1 201 Created
- Last-Modified: Thu, 23 Jan 2014 21:31:22 GMT
- Content-Length: 0
- Etag: d41d8cd98f00b204e9800998ecf8427e
- Content-Type: text/html; charset=UTF-8
- X-Trans-Id: tx5992d536a4bd4fec973aa-0052e18a2a
- X-Openstack-Request-Id: tx5992d536a4bd4fec973aa-0052e18a2a
- Date: Thu, 23 Jan 2014 21:31:22 GMT
+ HTTP/1.1 201 Created
+ Last-Modified: Thu, 23 Jan 2014 21:31:22 GMT
+ Content-Length: 0
+ Etag: d41d8cd98f00b204e9800998ecf8427e
+ Content-Type: text/html; charset=UTF-8
+ X-Trans-Id: tx5992d536a4bd4fec973aa-0052e18a2a
+ X-Openstack-Request-Id: tx5992d536a4bd4fec973aa-0052e18a2a
+ Date: Thu, 23 Jan 2014 21:31:22 GMT
Nothing is written to the non-current version container when you
initially **PUT** an object in the ``current`` container. However,
@@ -224,9 +224,9 @@ Example Using ``X-History-Location``
These non-current versions are named as follows:
- .. code::
+ .. code:: none
- <length><object_name>/<timestamp>
+ <length><object_name>/<timestamp>
Where ``length`` is the 3-character, zero-padded hexadecimal
character length of the object, ``<object_name>`` is the object name,
@@ -235,20 +235,20 @@ Example Using ``X-History-Location``
#. Create a second version of the object in the ``current`` container:
- .. code::
+ .. code:: console
- # curl -i $publicURL/current/my_object --data-binary 2 -X PUT -H "Content-Length: 0" -H "X-Auth-Token: $token"
+ # curl -i $publicURL/current/my_object --data-binary 2 -X PUT -H "Content-Length: 0" -H "X-Auth-Token: $token"
- .. code::
+ .. code:: console
- HTTP/1.1 201 Created
- Last-Modified: Thu, 23 Jan 2014 21:41:32 GMT
- Content-Length: 0
- Etag: d41d8cd98f00b204e9800998ecf8427e
- Content-Type: text/html; charset=UTF-8
- X-Trans-Id: tx468287ce4fc94eada96ec-0052e18c8c
- X-Openstack-Request-Id: tx468287ce4fc94eada96ec-0052e18c8c
- Date: Thu, 23 Jan 2014 21:41:32 GMT
+ HTTP/1.1 201 Created
+ Last-Modified: Thu, 23 Jan 2014 21:41:32 GMT
+ Content-Length: 0
+ Etag: d41d8cd98f00b204e9800998ecf8427e
+ Content-Type: text/html; charset=UTF-8
+ X-Trans-Id: tx468287ce4fc94eada96ec-0052e18c8c
+ X-Openstack-Request-Id: tx468287ce4fc94eada96ec-0052e18c8c
+ Date: Thu, 23 Jan 2014 21:41:32 GMT
#. Issue a **GET** request to a versioned object to get the current
version of the object. You do not have to do any request redirects or
@@ -256,24 +256,24 @@ Example Using ``X-History-Location``
List older versions of the object in the ``archive`` container:
- .. code::
+ .. code:: console
- # curl -i $publicURL/archive?prefix=009my_object -X GET -H "X-Auth-Token: $token"
+ # curl -i $publicURL/archive?prefix=009my_object -X GET -H "X-Auth-Token: $token"
- .. code::
+ .. code:: console
- HTTP/1.1 200 OK
- Content-Length: 30
- X-Container-Object-Count: 1
- Accept-Ranges: bytes
- X-Timestamp: 1390513280.79684
- X-Container-Bytes-Used: 0
- Content-Type: text/plain; charset=utf-8
- X-Trans-Id: tx9a441884997542d3a5868-0052e18d8e
- X-Openstack-Request-Id: tx9a441884997542d3a5868-0052e18d8e
- Date: Thu, 23 Jan 2014 21:45:50 GMT
+ HTTP/1.1 200 OK
+ Content-Length: 30
+ X-Container-Object-Count: 1
+ Accept-Ranges: bytes
+ X-Timestamp: 1390513280.79684
+ X-Container-Bytes-Used: 0
+ Content-Type: text/plain; charset=utf-8
+ X-Trans-Id: tx9a441884997542d3a5868-0052e18d8e
+ X-Openstack-Request-Id: tx9a441884997542d3a5868-0052e18d8e
+ Date: Thu, 23 Jan 2014 21:45:50 GMT
- 009my_object/1390512682.92052
+ 009my_object/1390512682.92052
.. note::
A **POST** request to a versioned object updates only the metadata
@@ -285,41 +285,41 @@ Example Using ``X-History-Location``
the current container. Subsequent **GET** requests to the object in the
current container will return ``404 Not Found``.
- .. code::
+ .. code:: console
- # curl -i $publicURL/current/my_object -X DELETE -H "X-Auth-Token: $token"
+ # curl -i $publicURL/current/my_object -X DELETE -H "X-Auth-Token: $token"
- .. code::
+ .. code:: console
- HTTP/1.1 204 No Content
- Content-Length: 0
- Content-Type: text/html; charset=UTF-8
- X-Trans-Id: tx006d944e02494e229b8ee-0052e18edd
- X-Openstack-Request-Id: tx006d944e02494e229b8ee-0052e18edd
- Date: Thu, 23 Jan 2014 21:51:25 GMT
+ HTTP/1.1 204 No Content
+ Content-Length: 0
+ Content-Type: text/html; charset=UTF-8
+ X-Trans-Id: tx006d944e02494e229b8ee-0052e18edd
+ X-Openstack-Request-Id: tx006d944e02494e229b8ee-0052e18edd
+ Date: Thu, 23 Jan 2014 21:51:25 GMT
- List older versions of the object in the ``archive`` container::
+ List older versions of the object in the ``archive`` container:
- .. code::
+ .. code:: console
- # curl -i $publicURL/archive?prefix=009my_object -X GET -H "X-Auth-Token: $token"
+ # curl -i $publicURL/archive?prefix=009my_object -X GET -H "X-Auth-Token: $token"
- .. code::
+ .. code:: console
- HTTP/1.1 200 OK
- Content-Length: 90
- X-Container-Object-Count: 3
- Accept-Ranges: bytes
- X-Timestamp: 1390513280.79684
- X-Container-Bytes-Used: 0
- Content-Type: text/html; charset=UTF-8
- X-Trans-Id: tx044f2a05f56f4997af737-0052e18eed
- X-Openstack-Request-Id: tx044f2a05f56f4997af737-0052e18eed
- Date: Thu, 23 Jan 2014 21:51:41 GMT
+ HTTP/1.1 200 OK
+ Content-Length: 90
+ X-Container-Object-Count: 3
+ Accept-Ranges: bytes
+ X-Timestamp: 1390513280.79684
+ X-Container-Bytes-Used: 0
+ Content-Type: text/html; charset=UTF-8
+ X-Trans-Id: tx044f2a05f56f4997af737-0052e18eed
+ X-Openstack-Request-Id: tx044f2a05f56f4997af737-0052e18eed
+ Date: Thu, 23 Jan 2014 21:51:41 GMT
- 009my_object/1390512682.92052
- 009my_object/1390512692.23062
- 009my_object/1390513885.67732
+ 009my_object/1390512682.92052
+ 009my_object/1390512692.23062
+ 009my_object/1390513885.67732
In addition to the two previous versions of the object, the archive
container has a "delete marker" to record when the object was deleted.
@@ -334,18 +334,18 @@ To disable object versioning for the ``current`` container, remove
its ``X-Versions-Location`` metadata header by sending an empty key
value.
-.. code::
+.. code:: console
- # curl -i $publicURL/current -X PUT -H "Content-Length: 0" -H "X-Auth-Token: $token" -H "X-Versions-Location: "
+ # curl -i $publicURL/current -X PUT -H "Content-Length: 0" -H "X-Auth-Token: $token" -H "X-Versions-Location: "
-.. code::
+.. code:: console
- HTTP/1.1 202 Accepted
- Content-Length: 76
- Content-Type: text/html; charset=UTF-8
- X-Trans-Id: txe2476de217134549996d0-0052e19038
- X-Openstack-Request-Id: txe2476de217134549996d0-0052e19038
- Date: Thu, 23 Jan 2014 21:57:12 GMT
+ HTTP/1.1 202 Accepted
+ Content-Length: 76
+ Content-Type: text/html; charset=UTF-8
+ X-Trans-Id: txe2476de217134549996d0-0052e19038
+ X-Openstack-Request-Id: txe2476de217134549996d0-0052e19038
+ Date: Thu, 23 Jan 2014 21:57:12 GMT
- <html><h1>Accepted</h1><p>The request is accepted for processing.</p></html>
+ <html><h1>Accepted</h1><p>The request is accepted for processing.</p></html>
diff --git a/doc/source/api/pseudo-hierarchical-folders-directories.rst b/doc/source/api/pseudo-hierarchical-folders-directories.rst
index a46acd576..c7e764f2d 100644
--- a/doc/source/api/pseudo-hierarchical-folders-directories.rst
+++ b/doc/source/api/pseudo-hierarchical-folders-directories.rst
@@ -34,14 +34,14 @@ the requested list of the objects.
.. code-block:: console
- photos/animals/cats/persian.jpg
- photos/animals/cats/siamese.jpg
- photos/animals/dogs/corgi.jpg
- photos/animals/dogs/poodle.jpg
- photos/animals/dogs/terrier.jpg
- photos/me.jpg
- photos/plants/fern.jpg
- photos/plants/rose.jpg
+ photos/animals/cats/persian.jpg
+ photos/animals/cats/siamese.jpg
+ photos/animals/dogs/corgi.jpg
+ photos/animals/dogs/poodle.jpg
+ photos/animals/dogs/terrier.jpg
+ photos/me.jpg
+ photos/plants/fern.jpg
+ photos/plants/rose.jpg
Use the delimiter parameter to limit the displayed results. To use
``delimiter`` with pseudo-directories, you must use the parameter slash
@@ -63,20 +63,20 @@ For example:
.. code-block:: JSON
- [
- {
- "subdir": "photos/"
- }
- ]
+ [
+ {
+ "subdir": "photos/"
+ }
+ ]
.. code-block:: XML
- <?xml version="1.0" encoding="UTF-8"?>
- <container name="backups">
- <subdir name="photos/">
- <name>photos/</name>
- </subdir>
- </container>
+ <?xml version="1.0" encoding="UTF-8"?>
+ <container name="backups">
+ <subdir name="photos/">
+ <name>photos/</name>
+ </subdir>
+ </container>
Use the ``prefix`` and ``delimiter`` parameters to view the objects
inside a pseudo-directory, including further nested pseudo-directories.
@@ -92,46 +92,46 @@ pseudo-directory.
.. code-block:: console
- photos/animals/
- photos/me.jpg
- photos/plants/
+ photos/animals/
+ photos/me.jpg
+ photos/plants/
.. code-block:: JSON
- [
- {
- "subdir": "photos/animals/"
- },
- {
- "hash": "b249a153f8f38b51e92916bbc6ea57ad",
- "last_modified": "2015-12-03T17:31:28.187370",
- "bytes": 2906,
- "name": "photos/me.jpg",
- "content_type": "image/jpeg"
- },
- {
- "subdir": "photos/plants/"
- }
- ]
+ [
+ {
+ "subdir": "photos/animals/"
+ },
+ {
+ "hash": "b249a153f8f38b51e92916bbc6ea57ad",
+ "last_modified": "2015-12-03T17:31:28.187370",
+ "bytes": 2906,
+ "name": "photos/me.jpg",
+ "content_type": "image/jpeg"
+ },
+ {
+ "subdir": "photos/plants/"
+ }
+ ]
.. code-block:: XML
- <?xml version="1.0" encoding="UTF-8"?>
- <container name="backups">
- <subdir name="photos/animals/">
- <name>photos/animals/</name>
- </subdir>
- <object>
- <name>photos/me.jpg</name>
- <hash>b249a153f8f38b51e92916bbc6ea57ad</hash>
- <bytes>2906</bytes>
- <content_type>image/jpeg</content_type>
- <last_modified>2015-12-03T17:31:28.187370</last_modified>
- </object>
- <subdir name="photos/plants/">
- <name>photos/plants/</name>
- </subdir>
- </container>
+ <?xml version="1.0" encoding="UTF-8"?>
+ <container name="backups">
+ <subdir name="photos/animals/">
+ <name>photos/animals/</name>
+ </subdir>
+ <object>
+ <name>photos/me.jpg</name>
+ <hash>b249a153f8f38b51e92916bbc6ea57ad</hash>
+ <bytes>2906</bytes>
+ <content_type>image/jpeg</content_type>
+ <last_modified>2015-12-03T17:31:28.187370</last_modified>
+ </object>
+ <subdir name="photos/plants/">
+ <name>photos/plants/</name>
+ </subdir>
+ </container>
You can create an unlimited number of nested pseudo-directories. To
navigate through them, use a longer ``prefix`` parameter coupled with
@@ -150,6 +150,6 @@ the objects and pseudo-directories within the nested pseudo-directory.
.. code-block:: console
- photos/animals/dogs/corgi.jpg
- photos/animals/dogs/poodle.jpg
- photos/animals/dogs/terrier.jpg
+ photos/animals/dogs/corgi.jpg
+ photos/animals/dogs/poodle.jpg
+ photos/animals/dogs/terrier.jpg
diff --git a/doc/source/api/serialized-response-formats.rst b/doc/source/api/serialized-response-formats.rst
index b0bc7d728..8e60c7fcf 100644
--- a/doc/source/api/serialized-response-formats.rst
+++ b/doc/source/api/serialized-response-formats.rst
@@ -56,18 +56,18 @@ format:
.. code-block:: json
- [
- {
- "count":0,
- "bytes":0,
- "name":"janeausten"
- },
- {
- "count":1,
- "bytes":14,
- "name":"marktwain"
- }
- ]
+ [
+ {
+ "count":0,
+ "bytes":0,
+ "name":"janeausten"
+ },
+ {
+ "count":1,
+ "bytes":14,
+ "name":"marktwain"
+ }
+ ]
Example 2. XML example with Accept header
@@ -100,19 +100,19 @@ format:
.. code-block:: xml
- <?xml version="1.0" encoding="UTF-8"?>
- <account name="AUTH_73f0aa26640f4971864919d0eb0f0880">
- <container>
- <name>janeausten</name>
- <count>2</count>
- <bytes>33</bytes>
- </container>
- <container>
- <name>marktwain</name>
- <count>1</count>
- <bytes>14</bytes>
- </container>
- </account>
+ <?xml version="1.0" encoding="UTF-8"?>
+ <account name="AUTH_73f0aa26640f4971864919d0eb0f0880">
+ <container>
+ <name>janeausten</name>
+ <count>2</count>
+ <bytes>33</bytes>
+ </container>
+ <container>
+ <name>marktwain</name>
+ <count>1</count>
+ <bytes>14</bytes>
+ </container>
+ </account>
The remainder of the examples in this guide use standard, non-serialized
responses. However, all ``GET`` requests that perform list operations
diff --git a/doc/source/api/temporary_url_middleware.rst b/doc/source/api/temporary_url_middleware.rst
index 4e6216115..767b4722a 100644
--- a/doc/source/api/temporary_url_middleware.rst
+++ b/doc/source/api/temporary_url_middleware.rst
@@ -21,11 +21,10 @@ a common prefix. They are useful for sharing a set of objects.
Ask your cloud administrator to enable the temporary URL feature. For
information, see :ref:`tempurl` in the *Source Documentation*.
-Note
-~~~~
+.. note::
-To use **POST** requests to upload objects to specific Object Storage
-locations, use :doc:`form_post_middleware` instead of temporary URL middleware.
+ To use **POST** requests to upload objects to specific Object Storage
+ locations, use :doc:`form_post_middleware` instead of temporary URL middleware.
Temporary URL format
~~~~~~~~~~~~~~~~~~~~
@@ -35,21 +34,23 @@ parameters:
**Example Temporary URL format**
-.. code::
+.. code:: none
- https://swift-cluster.example.com/v1/my_account/container/object
- ?temp_url_sig=da39a3ee5e6b4b0d3255bfef95601890afd80709
- &temp_url_expires=1323479485
- &filename=My+Test+File.pdf
+ https://swift-cluster.example.com/v1/my_account/container/object
+ ?temp_url_sig=732fcac368abb10c78a4cbe95c3fab7f311584532bf779abd5074e13cbe8b88b
+ &temp_url_expires=1323479485
+ &filename=My+Test+File.pdf
The example shows these elements:
**Object URL**: Required. The full path URL to the object.
-**temp\_url\_sig**: Required. An HMAC-SHA1 cryptographic signature that defines
+**temp\_url\_sig**: Required. An HMAC cryptographic signature that defines
the allowed HTTP method, expiration date, full path to the object, and the
-secret key for the temporary URL.
+secret key for the temporary URL. The digest used (for example, SHA-256 or
+SHA-512) must be supported by the cluster; supported digests will be listed
+in the ``tempurl.allowed_digests`` key in the cluster's capabilities.
**temp\_url\_expires**: Required. An expiration date as a UNIX Epoch timestamp
or ISO 8601 UTC timestamp. For example, ``1390852007`` or
@@ -69,12 +70,12 @@ A prefix-based temporary URL is similar but requires the parameter
``temp_url_prefix``, which must be equal to the common prefix shared
by all object names for which the URL is valid.
-.. code::
+.. code:: none
- https://swift-cluster.example.com/v1/my_account/container/my_prefix/object
- ?temp_url_sig=da39a3ee5e6b4b0d3255bfef95601890afd80709
- &temp_url_expires=2011-12-10T01:11:25Z
- &temp_url_prefix=my_prefix
+ https://swift-cluster.example.com/v1/my_account/container/my_prefix/object
+ ?temp_url_sig=732fcac368abb10c78a4cbe95c3fab7f311584532bf779abd5074e13cbe8b88b
+ &temp_url_expires=2011-12-10T01:11:25Z
+ &temp_url_prefix=my_prefix
.. _secret_keys:
@@ -107,25 +108,24 @@ The arbitrary values serve as the secret keys.
For example, use the **swift post** command to set the secret key to
*``MYKEY``*:
-.. code::
+.. code:: console
- $ swift post -m "Temp-URL-Key:MYKEY"
+ $ swift post -m "Temp-URL-Key:MYKEY"
-Note
-~~~~
+.. note::
-Changing these headers invalidates any previously generated temporary
-URLs within 60 seconds, which is the memcache time for the key.
+ Changing these headers invalidates any previously generated temporary
+ URLs within 60 seconds, which is the memcache time for the key.
-HMAC-SHA1 signature for temporary URLs
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+HMAC signature for temporary URLs
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-Temporary URL middleware uses an HMAC-SHA1 cryptographic signature. This
+Temporary URL middleware uses an HMAC cryptographic signature. This
signature includes these elements:
- The allowed method. Typically, **GET** or **PUT**.
-- Expiry time. In the example for the HMAC-SHA1 signature for temporary
+- Expiry time. In the example for the HMAC-SHA256 signature for temporary
URLs below, the expiry time is set to ``86400`` seconds (or 1 day)
into the future. Please be aware that you have to use a UNIX timestamp
for generating the signature (in the API request it is also allowed to
@@ -141,37 +141,37 @@ signature includes these elements:
These sample Python codes show how to compute a signature for use with
temporary URLs:
-**Example HMAC-SHA1 signature for object-based temporary URLs**
-
-.. code::
-
- import hmac
- from hashlib import sha1
- from time import time
- method = 'GET'
- duration_in_seconds = 60*60*24
- expires = int(time() + duration_in_seconds)
- path = '/v1/my_account/container/object'
- key = 'MYKEY'
- hmac_body = '%s\n%s\n%s' % (method, expires, path)
- signature = hmac.new(key, hmac_body, sha1).hexdigest()
-
-**Example HMAC-SHA1 signature for prefix-based temporary URLs**
-
-.. code::
-
- import hmac
- from hashlib import sha1
- from time import time
- method = 'GET'
- duration_in_seconds = 60*60*24
- expires = int(time() + duration_in_seconds)
- path = 'prefix:/v1/my_account/container/my_prefix'
- key = 'MYKEY'
- hmac_body = '%s\n%s\n%s' % (method, expires, path)
- signature = hmac.new(key, hmac_body, sha1).hexdigest()
-
-Do not URL-encode the path when you generate the HMAC-SHA1 signature.
+**Example HMAC-SHA256 signature for object-based temporary URLs**
+
+.. code:: python
+
+ import hmac
+ from hashlib import sha256
+ from time import time
+ method = 'GET'
+ duration_in_seconds = 60*60*24
+ expires = int(time() + duration_in_seconds)
+ path = '/v1/my_account/container/object'
+ key = 'MYKEY'
+ hmac_body = '%s\n%s\n%s' % (method, expires, path)
+ signature = hmac.new(key, hmac_body, sha256).hexdigest()
+
+**Example HMAC-SHA512 signature for prefix-based temporary URLs**
+
+.. code:: python
+
+ import hmac
+ from hashlib import sha512
+ from time import time
+ method = 'GET'
+ duration_in_seconds = 60*60*24
+ expires = int(time() + duration_in_seconds)
+ path = 'prefix:/v1/my_account/container/my_prefix'
+ key = 'MYKEY'
+ hmac_body = '%s\n%s\n%s' % (method, expires, path)
+ signature = hmac.new(key, hmac_body, sha512).hexdigest()
+
+Do not URL-encode the path when you generate the HMAC signature.
However, when you make the actual HTTP request, you should properly
URL-encode the URL.
@@ -184,10 +184,10 @@ Authentication <http://www.ietf.org/rfc/rfc2104.txt>`__.
If you want to transform a UNIX timestamp into an ISO 8601 UTC timestamp,
you can use following code snippet:
-.. code::
+.. code:: python
- import time
- time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime(timestamp))
+ import time
+ time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime(timestamp))
Using the ``swift`` tool to generate a Temporary URL
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -196,27 +196,32 @@ The ``swift`` tool provides the tempurl_ option that
auto-generates the *``temp_url_sig``* and *``temp_url_expires``* query
parameters. For example, you might run this command:
-.. code::
+.. code:: console
- $ swift tempurl GET 3600 /v1/my_account/container/object MYKEY
+ $ swift tempurl GET 3600 /v1/my_account/container/object MYKEY
+
+.. note::
+
+ The ``swift`` tool is not yet updated and continues to use the
+   deprecated digest SHA-1.
This command returns the path:
-.. code::
+.. code:: none
- /v1/my_account/container/object
- ?temp_url_sig=5c4cc8886f36a9d0919d708ade98bf0cc71c9e91
- &temp_url_expires=1374497657
+ /v1/my_account/container/object
+ ?temp_url_sig=5c4cc8886f36a9d0919d708ade98bf0cc71c9e91
+ &temp_url_expires=1374497657
To create the temporary URL, prefix this path with the Object Storage
storage host name. For example, prefix the path with
``https://swift-cluster.example.com``, as follows:
-.. code::
+.. code:: none
- https://swift-cluster.example.com/v1/my_account/container/object
- ?temp_url_sig=5c4cc8886f36a9d0919d708ade98bf0cc71c9e91
- &temp_url_expires=1374497657
+ https://swift-cluster.example.com/v1/my_account/container/object
+ ?temp_url_sig=5c4cc8886f36a9d0919d708ade98bf0cc71c9e91
+ &temp_url_expires=1374497657
Note that if the above example is copied exactly, and used in a command
shell, then the ampersand is interpreted as an operator and the URL
diff --git a/doc/source/api/use_content-encoding_metadata.rst b/doc/source/api/use_content-encoding_metadata.rst
index 69b331472..18c94878e 100644
--- a/doc/source/api/use_content-encoding_metadata.rst
+++ b/doc/source/api/use_content-encoding_metadata.rst
@@ -12,11 +12,11 @@ underlying media type (``Content-Type``) of the file, such as a video.
This example assigns an attachment type to the ``Content-Encoding``
header that indicates how the file is downloaded:
-.. code::
+.. code:: none
- PUT /<api version>/<account>/<container>/<object> HTTP/1.1
- Host: storage.clouddrive.com
- X-Auth-Token: eaaafd18-0fed-4b3a-81b4-663c99ec1cbb
- Content-Type: video/mp4
- Content-Encoding: gzip
+ PUT /<api version>/<account>/<container>/<object> HTTP/1.1
+ Host: storage.clouddrive.com
+ X-Auth-Token: eaaafd18-0fed-4b3a-81b4-663c99ec1cbb
+ Content-Type: video/mp4
+ Content-Encoding: gzip
diff --git a/doc/source/api/use_the_content-disposition_metadata.rst b/doc/source/api/use_the_content-disposition_metadata.rst
index 8ee6287ff..fc6cf95fc 100644
--- a/doc/source/api/use_the_content-disposition_metadata.rst
+++ b/doc/source/api/use_the_content-disposition_metadata.rst
@@ -14,18 +14,18 @@ This example assigns an attachment type to the ``Content-Disposition``
header. This attachment type indicates that the file is to be downloaded
as ``goodbye.txt``:
-.. code::
+.. code:: console
- # curl -i $publicURL/marktwain/goodbye -X POST -H "X-Auth-Token: $token" -H "Content-Length: 14" -H "Content-Type: application/octet-stream" -H "Content-Disposition: attachment; filename=goodbye.txt"
+ # curl -i $publicURL/marktwain/goodbye -X POST -H "X-Auth-Token: $token" -H "Content-Length: 14" -H "Content-Type: application/octet-stream" -H "Content-Disposition: attachment; filename=goodbye.txt"
-.. code::
+.. code:: console
- HTTP/1.1 202 Accepted
- Content-Length: 76
- Content-Type: text/html; charset=UTF-8
- X-Trans-Id: txa9b5e57d7f354d7ea9f57-0052e17e13
- X-Openstack-Request-Id: txa9b5e57d7f354d7ea9f57-0052e17e13
- Date: Thu, 23 Jan 2014 20:39:47 GMT
+ HTTP/1.1 202 Accepted
+ Content-Length: 76
+ Content-Type: text/html; charset=UTF-8
+ X-Trans-Id: txa9b5e57d7f354d7ea9f57-0052e17e13
+ X-Openstack-Request-Id: txa9b5e57d7f354d7ea9f57-0052e17e13
+ Date: Thu, 23 Jan 2014 20:39:47 GMT
- <html><h1>Accepted</h1><p>The request is accepted for processing.</p></html>
+ <html><h1>Accepted</h1><p>The request is accepted for processing.</p></html>
diff --git a/doc/source/conf.py b/doc/source/conf.py
index 2ca5d8477..c9cba7b4d 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -71,13 +71,13 @@ source_suffix = '.rst'
master_doc = 'index'
# General information about the project.
-project = u'Swift'
+project = 'Swift'
if 'SOURCE_DATE_EPOCH' in os.environ:
now = float(os.environ.get('SOURCE_DATE_EPOCH'))
now = datetime.datetime.utcfromtimestamp(now)
else:
now = datetime.date.today()
-copyright = u'%d, OpenStack Foundation' % now.year
+copyright = '%d, OpenStack Foundation' % now.year
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
@@ -209,8 +209,8 @@ htmlhelp_basename = 'swiftdoc'
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
- ('index', 'doc-swift.tex', u'Swift Documentation',
- u'Swift Team', 'manual'),
+ ('index', 'doc-swift.tex', 'Swift Documentation',
+ 'Swift Team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
diff --git a/doc/source/config/account_server_config.rst b/doc/source/config/account_server_config.rst
index 241c3e9d6..f305c021b 100644
--- a/doc/source/config/account_server_config.rst
+++ b/doc/source/config/account_server_config.rst
@@ -170,9 +170,9 @@ ionice_priority None I/O scheduling priority of server
[account-replicator]
********************
-==================== ========================= ===============================
+==================== ========================= =====================================
Option Default Description
--------------------- ------------------------- -------------------------------
+-------------------- ------------------------- -------------------------------------
log_name account-replicator Label used when logging
log_facility LOG_LOCAL0 Syslog log facility
log_level INFO Logging level
@@ -256,7 +256,26 @@ ionice_priority None I/O scheduling priority of serve
Work only with ionice_class.
Ignored if IOPRIO_CLASS_IDLE
is set.
-==================== ========================= ===============================
+handoffs_only no When handoffs_only mode is enabled
+ the replicator will *only* replicate
+ from handoff nodes to primary nodes
+ and will not sync primary nodes
+ with other primary nodes.
+handoff_delete auto the number of replicas which are
+ ensured in swift. If the number
+ less than the number of replicas
+ is set, account-replicator
+ could delete local handoffs even
+ if all replicas are not ensured in
+ the cluster. The replicator would
+ remove local handoff account database
+ after syncing when the number of
+ successful responses is greater than
+ or equal to this number. By default
+ handoff partitions will be removed
+ when it has successfully replicated
+ to all the canonical nodes.
+==================== ========================= =====================================
*****************
[account-auditor]
diff --git a/doc/source/config/container_server_config.rst b/doc/source/config/container_server_config.rst
index 7961f50e8..6f7d6031a 100644
--- a/doc/source/config/container_server_config.rst
+++ b/doc/source/config/container_server_config.rst
@@ -175,9 +175,9 @@ ionice_priority None I/O scheduling priority of ser
[container-replicator]
**********************
-==================== =========================== =============================
+==================== =========================== =======================================
Option Default Description
--------------------- --------------------------- -----------------------------
+-------------------- --------------------------- ---------------------------------------
log_name container-replicator Label used when logging
log_facility LOG_LOCAL0 Syslog log facility
log_level INFO Logging level
@@ -266,7 +266,26 @@ ionice_priority None I/O scheduling priority of
Work only with ionice_class.
Ignored if IOPRIO_CLASS_IDLE
is set.
-==================== =========================== =============================
+handoffs_only no When handoffs_only mode is enabled
+ the replicator will *only* replicate
+ from handoff nodes to primary nodes
+ and will not sync primary nodes
+ with other primary nodes.
+handoff_delete auto the number of replicas which are
+ ensured in swift. If the number
+ less than the number of replicas
+ is set, container-replicator
+ could delete local handoffs even
+ if all replicas are not ensured in
+ the cluster. The replicator would
+ remove local handoff container database
+ after syncing when the number of
+ successful responses is greater than
+ or equal to this number. By default
+ handoff partitions will be removed
+ when it has successfully replicated
+ to all the canonical nodes.
+==================== =========================== =======================================
*******************
[container-sharder]
diff --git a/doc/source/cors.rst b/doc/source/cors.rst
index 4b60d68ac..91e1611b5 100644
--- a/doc/source/cors.rst
+++ b/doc/source/cors.rst
@@ -100,7 +100,7 @@ Test CORS Page
A sample cross-site test page is located in the project source tree
``doc/source/test-cors.html``.
- .. literalinclude:: test-cors.html
+.. literalinclude:: test-cors.html
.. _CORS: https://developer.mozilla.org/en-US/docs/HTTP/Access_control_CORS
.. _preflight request: https://developer.mozilla.org/en-US/docs/HTTP/Access_control_CORS#Preflighted_requests
diff --git a/doc/source/development_auth.rst b/doc/source/development_auth.rst
index 59be1adfe..53c50b674 100644
--- a/doc/source/development_auth.rst
+++ b/doc/source/development_auth.rst
@@ -37,7 +37,7 @@ will be omitted.
It is highly recommended that authentication server implementers prefix their
tokens and Swift storage accounts they create with a configurable reseller
-prefix (`AUTH_` by default with the included TempAuth). This prefix will avoid
+prefix (``AUTH_`` by default with the included TempAuth). This prefix will avoid
conflicts with other authentication servers that might be using the same
Swift cluster. Otherwise, the Swift cluster will have to try all the resellers
until one validates a token or all fail.
@@ -48,18 +48,18 @@ designations as you'll see later).
Example Authentication with TempAuth:
- * Token AUTH_tkabcd is given to the TempAuth middleware in a request's
- X-Auth-Token header.
- * The TempAuth middleware validates the token AUTH_tkabcd and discovers
- it matches the "tester" user within the "test" account for the storage
- account "AUTH_storage_xyz".
- * The TempAuth middleware sets the REMOTE_USER to
- "test:tester,test,AUTH_storage_xyz"
- * Now this user will have full access (via authorization procedures later)
- to the AUTH_storage_xyz Swift storage account and access to containers in
- other storage accounts, provided the storage account begins with the same
- `AUTH_` reseller prefix and the container has an ACL specifying at least
- one of those three groups.
+* Token AUTH_tkabcd is given to the TempAuth middleware in a request's
+ X-Auth-Token header.
+* The TempAuth middleware validates the token AUTH_tkabcd and discovers
+ it matches the "tester" user within the "test" account for the storage
+ account "AUTH_storage_xyz".
+* The TempAuth middleware sets the REMOTE_USER to
+ "test:tester,test,AUTH_storage_xyz"
+* Now this user will have full access (via authorization procedures later)
+ to the AUTH_storage_xyz Swift storage account and access to containers in
+ other storage accounts, provided the storage account begins with the same
+ ``AUTH_`` reseller prefix and the container has an ACL specifying at least
+ one of those three groups.
Authorization is performed through callbacks by the Swift Proxy server to the
WSGI environment's swift.authorize value, if one is set. The swift.authorize
diff --git a/doc/source/development_guidelines.rst b/doc/source/development_guidelines.rst
index d6cc90b55..979a728e0 100644
--- a/doc/source/development_guidelines.rst
+++ b/doc/source/development_guidelines.rst
@@ -184,6 +184,26 @@ using config files found in ``$HOME/my_tests`` and policy 'silver'::
SWIFT_TEST_POLICY=silver tox -e func
+S3 API cross-compatibility tests
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The cross-compatibility tests in directory `test/s3api` are intended to verify
+that the Swift S3 API behaves in the same way as the AWS S3 API. They should
+pass when run against either a Swift endpoint (with S3 API enabled) or an AWS
+S3 endpoint.
+
+To run against an AWS S3 endpoint, the `/etc/swift/test.conf` file must be
+edited to provide AWS key IDs and secrets. Alternatively, an AWS CLI style
+credentials file can be loaded by setting the ``SWIFT_TEST_AWS_CONFIG_FILE``
+environment variable, e.g.::
+
+ SWIFT_TEST_AWS_CONFIG_FILE=~/.aws/credentials nosetests ./test/s3api
+
+.. note::
+ When using ``SWIFT_TEST_AWS_CONFIG_FILE``, the region defaults to
+ ``us-east-1`` and only the default credentials are loaded.
+
+
------------
Coding Style
------------
diff --git a/doc/source/development_ondisk_backends.rst b/doc/source/development_ondisk_backends.rst
index 338073378..14934d7b6 100644
--- a/doc/source/development_ondisk_backends.rst
+++ b/doc/source/development_ondisk_backends.rst
@@ -12,7 +12,7 @@ from the details of how data is laid out and stored on-disk.
The APIs are documented in the reference implementations for all three
servers. For historical reasons, the object server backend reference
-implementation module is named `diskfile`, while the account and container
+implementation module is named ``diskfile``, while the account and container
server backend reference implementation modules are named appropriately.
This API is still under development and not yet finalized.
diff --git a/doc/source/getting_started.rst b/doc/source/getting_started.rst
index f6f044481..81d564e8f 100644
--- a/doc/source/getting_started.rst
+++ b/doc/source/getting_started.rst
@@ -11,11 +11,12 @@ most Linux platforms.
Swift is written in Python and has these dependencies:
-* Python (2.7, 3.6, or 3.7)
-* rsync 3.0
-* The Python packages listed in `the requirements file <https://github.com/openstack/swift/blob/master/requirements.txt>`_
-* Testing additionally requires `the test dependencies <https://github.com/openstack/swift/blob/master/test-requirements.txt>`_
-* Testing requires `these distribution packages <https://github.com/openstack/swift/blob/master/bindep.txt>`_
+* Python (2.7 or 3.6-3.9)
+* rsync 3.x
+* `liberasurecode <https://opendev.org/openstack/liberasurecode/>`__
+* The Python packages listed in `the requirements file <https://github.com/openstack/swift/blob/master/requirements.txt>`__
+* Testing additionally requires `the test dependencies <https://github.com/openstack/swift/blob/master/test-requirements.txt>`__
+* Testing requires `these distribution packages <https://github.com/openstack/swift/blob/master/bindep.txt>`__
-----------
Development
@@ -36,8 +37,8 @@ CLI client and SDK library
There are many clients in the :ref:`ecosystem <application-bindings>`. The official CLI
and SDK is python-swiftclient.
-* `Source code <https://github.com/openstack/python-swiftclient>`_
-* `Python Package Index <https://pypi.org/project/python-swiftclient>`_
+* `Source code <https://github.com/openstack/python-swiftclient>`__
+* `Python Package Index <https://pypi.org/project/python-swiftclient>`__
----------
Production
diff --git a/doc/source/logs.rst b/doc/source/logs.rst
index e70de1f8e..d7a64a802 100644
--- a/doc/source/logs.rst
+++ b/doc/source/logs.rst
@@ -59,6 +59,7 @@ remote_addr The IP address of the other end of the TCP connection.
(anonymizable)
end_time Timestamp of the request. (timestamp)
method The HTTP verb in the request.
+domain The domain in the request. (anonymizable)
path The path portion of the request. (anonymizable)
protocol The transport protocol used (currently one of http or
https).
diff --git a/doc/source/misc.rst b/doc/source/misc.rst
index c418dd5ed..4d4b3dcda 100644
--- a/doc/source/misc.rst
+++ b/doc/source/misc.rst
@@ -39,6 +39,17 @@ Container Sync Realms
:members:
:show-inheritance:
+
+.. _digest:
+
+Digest
+======
+
+.. automodule:: swift.common.digest
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
.. _direct_client:
Direct Client
diff --git a/doc/source/ops_runbook/diagnose.rst b/doc/source/ops_runbook/diagnose.rst
index 2de368128..976cdb70d 100644
--- a/doc/source/ops_runbook/diagnose.rst
+++ b/doc/source/ops_runbook/diagnose.rst
@@ -36,11 +36,11 @@ External monitoring
We use pingdom.com to monitor the external Swift API. We suggest the
following:
- - Do a GET on ``/healthcheck``
+- Do a GET on ``/healthcheck``
- - Create a container, make it public (x-container-read:
- .r*,.rlistings), create a small file in the container; do a GET
- on the object
+- Create a container, make it public (``x-container-read:
+ .r*,.rlistings``), create a small file in the container; do a GET
+ on the object
Diagnose: General approach
--------------------------
@@ -82,11 +82,11 @@ if any servers are down. We suggest you run it regularly
to the last report without having to wait for a long-running command
to complete.
-Diagnose: Is system responding to /healthcheck?
------------------------------------------------
+Diagnose: Is system responding to ``/healthcheck``?
+---------------------------------------------------
When you want to establish if a swift endpoint is running, run ``curl -k``
-against https://*[ENDPOINT]*/healthcheck.
+against ``https://$ENDPOINT/healthcheck``.
.. _swift_logs:
@@ -209,11 +209,11 @@ Diagnose: Parted reports the backup GPT table is corrupt
- If a GPT table is broken, a message like the following should be
observed when the following command is run:
- .. code::
+ .. code:: console
$ sudo parted -l
- .. code::
+ .. code:: console
Error: The backup GPT table is corrupt, but the primary appears OK,
so that will be used.
@@ -232,40 +232,40 @@ invalid filesystem label. In such cases proceed as follows:
#. Verify that the disk labels are correct:
- .. code::
+ .. code:: console
- FS=/dev/sd#1
+ $ FS=/dev/sd#1
- sudo parted -l | grep object
+ $ sudo parted -l | grep object
#. If partition labels are inconsistent then, resolve the disk label issues
before proceeding:
- .. code::
+ .. code:: console
- sudo parted -s ${FS} name ${PART_NO} ${PART_NAME} #Partition Label
- #PART_NO is 1 for object disks and 3 for OS disks
- #PART_NAME follows the convention seen in "sudo parted -l | grep object"
+ $ sudo parted -s ${FS} name ${PART_NO} ${PART_NAME} #Partition Label
+ $ # PART_NO is 1 for object disks and 3 for OS disks
+ $ # PART_NAME follows the convention seen in "sudo parted -l | grep object"
#. If the Filesystem label is missing then create it with care:
- .. code::
+ .. code:: console
- sudo xfs_admin -l ${FS} #Filesystem label (12 Char limit)
+ $ sudo xfs_admin -l ${FS} #Filesystem label (12 Char limit)
- #Check for the existence of a FS label
+ $ # Check for the existence of a FS label
- OBJNO=<3 Length Object No.>
+ $ OBJNO=<3 Length Object No.>
- #I.E OBJNO for sw-stbaz3-object0007 would be 007
+ $ # i.e. OBJNO for sw-stbaz3-object0007 would be 007
- DISKNO=<3 Length Disk No.>
+ $ DISKNO=<3 Length Disk No.>
- #I.E DISKNO for /dev/sdb would be 001, /dev/sdc would be 002 etc.
+ $ # i.e. DISKNO for /dev/sdb would be 001, /dev/sdc would be 002 etc.
- sudo xfs_admin -L "obj${OBJNO}dsk${DISKNO}" ${FS}
+ $ sudo xfs_admin -L "obj${OBJNO}dsk${DISKNO}" ${FS}
- #Create a FS Label
+ $ # Create a FS Label
Diagnose: Failed LUNs
---------------------
@@ -293,9 +293,9 @@ Otherwise the lun can be re-enabled as follows:
LUN. You will come back later and grep this file for more details, but
just generate it for now.
- .. code::
+ .. code:: console
- sudo hpssacli controller all diag file=/tmp/hpacu.diag ris=on xml=off zip=off
+ $ sudo hpssacli controller all diag file=/tmp/hpacu.diag ris=on xml=off zip=off
Export the following variables using the below instructions before
proceeding further.
@@ -304,16 +304,16 @@ proceeding further.
failed drive's number and array value (example output: "array A
logicaldrive 1..." would be exported as LDRIVE=1):
- .. code::
+ .. code:: console
- sudo hpssacli controller slot=1 ld all show
+ $ sudo hpssacli controller slot=1 ld all show
#. Export the number of the logical drive that was retrieved from the
previous command into the LDRIVE variable:
- .. code::
+ .. code:: console
- export LDRIVE=<LogicalDriveNumber>
+ $ export LDRIVE=<LogicalDriveNumber>
#. Print the array value and Port:Box:Bay for all drives and take note of
the Port:Box:Bay for the failed drive (example output: " array A
@@ -324,9 +324,9 @@ proceeding further.
in the case of "array c"), but we will run a different command to be sure
we are operating on the correct device.
- .. code::
+ .. code:: console
- sudo hpssacli controller slot=1 pd all show
+ $ sudo hpssacli controller slot=1 pd all show
.. note::
@@ -339,24 +339,24 @@ proceeding further.
#. Export the Port:Box:Bay for the failed drive into the PBOX variable:
- .. code::
+ .. code:: console
- export PBOX=<Port:Box:Bay>
+ $ export PBOX=<Port:Box:Bay>
#. Print the physical device information and take note of the Disk Name
(example output: "Disk Name: /dev/sdk" would be exported as
DEV=/dev/sdk):
- .. code::
+ .. code:: console
- sudo hpssacli controller slot=1 ld ${LDRIVE} show detail | grep -i "Disk Name"
+ $ sudo hpssacli controller slot=1 ld ${LDRIVE} show detail | grep -i "Disk Name"
#. Export the device name variable from the preceding command (example:
/dev/sdk):
- .. code::
+ .. code:: console
- export DEV=<Device>
+ $ export DEV=<Device>
#. Export the filesystem variable. Disks that are split between the
operating system and data storage, typically sda and sdb, should only
@@ -367,39 +367,39 @@ proceeding further.
data filesystem for the device in question as the export. For example:
/dev/sdk1.
- .. code::
+ .. code:: console
- export FS=<Filesystem>
+ $ export FS=<Filesystem>
#. Verify the LUN is failed, and the device is not:
- .. code::
+ .. code:: console
- sudo hpssacli controller slot=1 ld all show
- sudo hpssacli controller slot=1 pd all show
- sudo hpssacli controller slot=1 ld ${LDRIVE} show detail
- sudo hpssacli controller slot=1 pd ${PBOX} show detail
+ $ sudo hpssacli controller slot=1 ld all show
+ $ sudo hpssacli controller slot=1 pd all show
+ $ sudo hpssacli controller slot=1 ld ${LDRIVE} show detail
+ $ sudo hpssacli controller slot=1 pd ${PBOX} show detail
#. Stop the swift and rsync service:
- .. code::
+ .. code:: console
- sudo service rsync stop
- sudo swift-init shutdown all
+ $ sudo service rsync stop
+ $ sudo swift-init shutdown all
#. Unmount the problem drive, fix the LUN and the filesystem:
- .. code::
+ .. code:: console
- sudo umount ${FS}
+ $ sudo umount ${FS}
#. If umount fails, you should run lsof to search for the mountpoint and
kill any lingering processes before repeating the umount:
- .. code::
+ .. code:: console
- sudo hpacucli controller slot=1 ld ${LDRIVE} modify reenable
- sudo xfs_repair ${FS}
+ $ sudo hpacucli controller slot=1 ld ${LDRIVE} modify reenable
+ $ sudo xfs_repair ${FS}
#. If the ``xfs_repair`` complains about possible journal data, use the
``xfs_repair -L`` option to zeroise the journal log.
@@ -407,21 +407,21 @@ proceeding further.
#. Once complete test-mount the filesystem, and tidy up its lost and
found area.
- .. code::
+ .. code:: console
- sudo mount ${FS} /mnt
- sudo rm -rf /mnt/lost+found/
- sudo umount /mnt
+ $ sudo mount ${FS} /mnt
+ $ sudo rm -rf /mnt/lost+found/
+ $ sudo umount /mnt
#. Mount the filesystem and restart swift and rsync.
#. Run the following to determine if a DC ticket is needed to check the
cables on the node:
- .. code::
+ .. code:: console
- grep -y media.exchanged /tmp/hpacu.diag
- grep -y hot.plug.count /tmp/hpacu.diag
+ $ grep -y media.exchanged /tmp/hpacu.diag
+ $ grep -y hot.plug.count /tmp/hpacu.diag
#. If the output reports any non 0x00 values, it suggests that the cables
should be checked. For example, log a DC ticket to check the sas cables
@@ -440,7 +440,7 @@ If the diagnostics report a message such as ``sda: drive is slow``, you
should log onto the node and run the following command (remove ``-c 1`` option to continuously monitor
the data):
-.. code::
+.. code:: console
$ /usr/bin/collectl -s D -c 1
waiting for 1 second sample...
@@ -475,7 +475,7 @@ otherwise hardware replacement is needed.
Another way to look at the data is as follows:
-.. code::
+.. code:: console
$ /opt/hp/syseng/disk-anal.pl -d
Disk: sda Wait: 54580 371 65 25 12 6 6 0 1 2 0 46
@@ -524,7 +524,7 @@ historical data. You can look at recent data as follows. It only looks
at data from 13:15 to 14:15. As you can see, this is a relatively clean
system (few if any long wait or service times):
-.. code::
+.. code:: console
$ /opt/hp/syseng/disk-anal.pl -d -t 13:15-14:15
Disk: sda Wait: 3600 0 0 0 0 0 0 0 0 0 0 0
@@ -582,21 +582,21 @@ Running tests
#. Prepare the ``target`` node as follows:
- .. code::
+ .. code:: console
- sudo iptables -I INPUT -p tcp -j ACCEPT
+ $ sudo iptables -I INPUT -p tcp -j ACCEPT
Or, do:
- .. code::
+ .. code:: console
- sudo ufw allow 12866/tcp
+ $ sudo ufw allow 12866/tcp
#. On the ``source`` node, run the following command to check
throughput. Note the double-dash before the -P option.
The command takes 10 seconds to complete. The ``target`` node is 192.168.245.5.
- .. code::
+ .. code:: console
$ netperf -H 192.168.245.5 -- -P 12866
MIGRATED TCP STREAM TEST from 0.0.0.0 (0.0.0.0) port 12866 AF_INET to
@@ -609,7 +609,7 @@ Running tests
#. On the ``source`` node, run the following command to check latency:
- .. code::
+ .. code:: console
$ netperf -H 192.168.245.5 -t TCP_RR -- -P 12866
MIGRATED TCP REQUEST/RESPONSE TEST from 0.0.0.0 (0.0.0.0) port 12866
@@ -644,21 +644,21 @@ Diagnose: Remapping sectors experiencing UREs
#. Set the environment variables SEC, DEV & FS, for example:
- .. code::
+ .. code:: console
- SEC=2930954256
- DEV=/dev/sdi
- FS=/dev/sdi1
+ $ SEC=2930954256
+ $ DEV=/dev/sdi
+ $ FS=/dev/sdi1
#. Verify that the sector is bad:
- .. code::
+ .. code:: console
- sudo dd if=${DEV} of=/dev/null bs=512 count=1 skip=${SEC}
+ $ sudo dd if=${DEV} of=/dev/null bs=512 count=1 skip=${SEC}
#. If the sector is bad this command will output an input/output error:
- .. code::
+ .. code:: console
dd: reading `/dev/sdi`: Input/output error
0+0 records in
@@ -667,28 +667,28 @@ Diagnose: Remapping sectors experiencing UREs
#. Prevent chef from attempting to re-mount the filesystem while the
repair is in progress:
- .. code::
+ .. code:: console
- sudo mv /etc/chef/client.pem /etc/chef/xx-client.xx-pem
+ $ sudo mv /etc/chef/client.pem /etc/chef/xx-client.xx-pem
#. Stop the swift and rsync service:
- .. code::
+ .. code:: console
- sudo service rsync stop
- sudo swift-init shutdown all
+ $ sudo service rsync stop
+ $ sudo swift-init shutdown all
#. Unmount the problem drive:
- .. code::
+ .. code:: console
- sudo umount ${FS}
+ $ sudo umount ${FS}
#. Overwrite/remap the bad sector:
- .. code::
+ .. code:: console
- sudo dd_rescue -d -A -m8b -s ${SEC}b ${DEV} ${DEV}
+ $ sudo dd_rescue -d -A -m8b -s ${SEC}b ${DEV} ${DEV}
#. This command should report an input/output error the first time
it is run. Run the command a second time, if it successfully remapped
@@ -696,9 +696,9 @@ Diagnose: Remapping sectors experiencing UREs
#. Verify the sector is now readable:
- .. code::
+ .. code:: console
- sudo dd if=${DEV} of=/dev/null bs=512 count=1 skip=${SEC}
+ $ sudo dd if=${DEV} of=/dev/null bs=512 count=1 skip=${SEC}
#. If the sector is now readable this command should not report an
input/output error.
@@ -706,24 +706,24 @@ Diagnose: Remapping sectors experiencing UREs
#. If more than one problem sector is listed, set the SEC environment
variable to the next sector in the list:
- .. code::
+ .. code:: console
- SEC=123456789
+ $ SEC=123456789
#. Repeat from step 8.
#. Repair the filesystem:
- .. code::
+ .. code:: console
- sudo xfs_repair ${FS}
+ $ sudo xfs_repair ${FS}
#. If ``xfs_repair`` reports that the filesystem has valuable filesystem
changes:
- .. code::
+ .. code:: console
- sudo xfs_repair ${FS}
+ $ sudo xfs_repair ${FS}
Phase 1 - find and verify superblock...
Phase 2 - using internal log
- zero log...
@@ -739,11 +739,11 @@ Diagnose: Remapping sectors experiencing UREs
#. You should attempt to mount the filesystem, and clear the lost+found
area:
- .. code::
+ .. code:: console
- sudo mount $FS /mnt
- sudo rm -rf /mnt/lost+found/*
- sudo umount /mnt
+ $ sudo mount $FS /mnt
+ $ sudo rm -rf /mnt/lost+found/*
+ $ sudo umount /mnt
#. If the filesystem fails to mount then you will need to use the
``xfs_repair -L`` option to force log zeroing.
@@ -752,16 +752,16 @@ Diagnose: Remapping sectors experiencing UREs
#. If ``xfs_repair`` reports that an additional input/output error has been
encountered, get the sector details as follows:
- .. code::
+ .. code:: console
- sudo grep "I/O error" /var/log/kern.log | grep sector | tail -1
+ $ sudo grep "I/O error" /var/log/kern.log | grep sector | tail -1
#. If new input/output error is reported then set the SEC environment
variable to the problem sector number:
- .. code::
+ .. code:: console
- SEC=234567890
+ $ SEC=234567890
#. Repeat from step 8
@@ -806,31 +806,31 @@ errors, it may well indicate a cable, switch, or network issue.
Get an overview of the interface with:
-.. code::
+.. code:: console
- sudo ifconfig eth{n}
- sudo ethtool eth{n}
+ $ sudo ifconfig eth{n}
+ $ sudo ethtool eth{n}
The ``Link Detected:`` indicator will read ``yes`` if the nic is
cabled.
Establish the adapter type with:
-.. code::
+.. code:: console
- sudo ethtool -i eth{n}
+ $ sudo ethtool -i eth{n}
Gather the interface statistics with:
-.. code::
+.. code:: console
- sudo ethtool -S eth{n}
+ $ sudo ethtool -S eth{n}
If the nic supports self test, this can be performed with:
-.. code::
+.. code:: console
- sudo ethtool -t eth{n}
+ $ sudo ethtool -t eth{n}
Self tests should read ``PASS`` if the nic is operating correctly.
@@ -853,9 +853,9 @@ A replicator reports in its log that remaining time exceeds
making progress. Another useful way to check this is with the
'swift-recon -r' command on a swift proxy server:
-.. code::
+.. code:: console
- sudo swift-recon -r
+ $ sudo swift-recon -r
===============================================================================
--> Starting reconnaissance on 384 hosts
@@ -877,9 +877,9 @@ You can further check if the object replicator is stuck by logging on
the object server and checking the object replicator progress with
the following command:
-.. code::
+.. code:: console
- # sudo grep object-rep /var/log/swift/background.log | grep -e "Starting object replication" -e "Object replication complete" -e "partitions rep"
+ $ sudo grep object-rep /var/log/swift/background.log | grep -e "Starting object replication" -e "Object replication complete" -e "partitions rep"
Jul 16 06:25:46 192.168.245.4 object-replicator 15344/16450 (93.28%) partitions replicated in 69018.48s (0.22/sec, 22h remaining)
Jul 16 06:30:46 192.168.245.4object-replicator 15344/16450 (93.28%) partitions replicated in 69318.58s (0.22/sec, 22h remaining)
Jul 16 06:35:46 192.168.245.4 object-replicator 15344/16450 (93.28%) partitions replicated in 69618.63s (0.22/sec, 23h remaining)
@@ -912,9 +912,9 @@ One of the reasons for the object replicator hanging like this is
filesystem corruption on the drive. The following is a typical log entry
of a corrupted filesystem detected by the object replicator:
-.. code::
+.. code:: console
- # sudo bzgrep "Remote I/O error" /var/log/swift/background.log* |grep srv | - tail -1
+ $ sudo bzgrep "Remote I/O error" /var/log/swift/background.log* | grep srv | tail -1
Jul 12 03:33:30 192.168.245.4 object-replicator STDOUT: ERROR:root:Error hashing suffix#012Traceback (most recent call last):#012 File
"/usr/lib/python2.7/dist-packages/swift/obj/replicator.py", line 199, in get_hashes#012 hashes[suffix] = hash_suffix(suffix_dir,
reclaim_age)#012 File "/usr/lib/python2.7/dist-packages/swift/obj/replicator.py", line 84, in hash_suffix#012 path_contents =
@@ -922,9 +922,9 @@ of a corrupted filesystem detected by the object replicator:
An ``ls`` of the problem file or directory usually shows something like the following:
-.. code::
+.. code:: console
- # ls -l /srv/node/disk4/objects/1643763/b51
+ $ ls -l /srv/node/disk4/objects/1643763/b51
ls: cannot access /srv/node/disk4/objects/1643763/b51: Remote I/O error
If no entry with ``Remote I/O error`` occurs in the ``background.log`` it is
@@ -935,27 +935,27 @@ restart the object-replicator.
#. Stop the object-replicator:
- .. code::
+ .. code:: console
# sudo swift-init object-replicator stop
#. Make sure the object replicator has stopped, if it has hung, the stop
command will not stop the hung process:
- .. code::
+ .. code:: console
# ps auxww | - grep swift-object-replicator
#. If the previous ps shows the object-replicator is still running, kill
the process:
- .. code::
+ .. code:: console
# kill -9 <pid-of-swift-object-replicator>
#. Start the object-replicator:
- .. code::
+ .. code:: console
# sudo swift-init object-replicator start
@@ -964,14 +964,14 @@ to repair the problem filesystem.
#. Stop swift and rsync:
- .. code::
+ .. code:: console
# sudo swift-init all shutdown
# sudo service rsync stop
#. Make sure all swift process have stopped:
- .. code::
+ .. code:: console
# ps auxww | grep swift | grep python
@@ -979,13 +979,13 @@ to repair the problem filesystem.
#. Unmount the problem filesystem:
- .. code::
+ .. code:: console
# sudo umount /srv/node/disk4
#. Repair the filesystem:
- .. code::
+ .. code:: console
# sudo xfs_repair -P /dev/sde1
@@ -1002,7 +1002,7 @@ The CPU load average on an object server, as shown with the
'uptime' command, is typically under 10 when the server is
lightly-moderately loaded:
-.. code::
+.. code:: console
$ uptime
07:59:26 up 99 days, 5:57, 1 user, load average: 8.59, 8.39, 8.32
@@ -1014,7 +1014,7 @@ However, sometimes the CPU load average can increase significantly. The
following is an example of an object server that has extremely high CPU
load:
-.. code::
+.. code:: console
$ uptime
07:44:02 up 18:22, 1 user, load average: 407.12, 406.36, 404.59
@@ -1050,9 +1050,9 @@ Further issues and resolutions
given server.
- Run this command:
- .. code::
+ .. code:: console
- sudo swift-init all start
+ $ sudo swift-init all start
Examine messages in the swift log files to see if there are any
error messages related to any of the swift processes since the time you
@@ -1080,9 +1080,9 @@ Further issues and resolutions
- Restart the swift processes on the affected node:
- .. code::
+ .. code:: console
- % sudo swift-init all reload
+ $ sudo swift-init all reload
Urgency:
If known performance problem: Immediate
@@ -1135,18 +1135,18 @@ Further issues and resolutions
For example, it is running at 100 Mb/s and the NIC is a 1Ge NIC.
- 1. Try resetting the interface with:
- .. code::
+ .. code:: console
- sudo ethtool -s eth0 speed 1000
+ $ sudo ethtool -s eth0 speed 1000
- ... and then run:
+ ... and then run:
- .. code::
+ .. code:: console
- sudo lshw -class
+ $ sudo lshw -class
- See if size goes to the expected speed. Failing
- that, check hardware (NIC cable/switch port).
+ See if the speed goes to the expected value. Failing
+ that, check hardware (NIC cable/switch port).
2. If persistent, consider shutting down the server (especially if a proxy)
until the problem is identified and resolved. If you leave this server
@@ -1183,9 +1183,11 @@ Further issues and resolutions
- Urgency: Medium
This may have been triggered by a recent restart of the rsyslog daemon.
Restart the service with:
- .. code::
- sudo swift-init <service> reload
+ .. code:: console
+
+ $ sudo swift-init <service> reload
+
* - Object replicator: Reports the remaining time and that time is more than 100 hours.
- Each replication cycle the object replicator writes a log message to its log
reporting statistics about the current cycle. This includes an estimate for the
@@ -1193,9 +1195,10 @@ Further issues and resolutions
100 hours, there is a problem with the replication process.
- Urgency: Medium
Restart the service with:
- .. code::
- sudo swift-init object-replicator reload
+ .. code:: console
+
+ $ sudo swift-init object-replicator reload
Check that the remaining replication time is going down.
diff --git a/doc/source/ops_runbook/maintenance.rst b/doc/source/ops_runbook/maintenance.rst
index a2a9cbb10..c63feb7bd 100644
--- a/doc/source/ops_runbook/maintenance.rst
+++ b/doc/source/ops_runbook/maintenance.rst
@@ -27,9 +27,9 @@ if you wait a while things get better.
For example:
-.. code::
+.. code:: console
- sudo swift-recon -rla
+ $ sudo swift-recon -rla
===============================================================================
[2012-03-10 12:57:21] Checking async pendings on 384 hosts...
Async stats: low: 0, high: 1, avg: 0, total: 1
@@ -52,7 +52,7 @@ system. Rules-of-thumb for 'good' recon output are:
- Nodes that respond are up and running Swift. If all nodes respond,
that is a good sign. But some nodes may time out. For example:
- .. code::
+ .. code:: console
-> [http://<redacted>.29:6200/recon/load:] <urlopen error [Errno 111] ECONNREFUSED>
-> [http://<redacted>.31:6200/recon/load:] <urlopen error timed out>
@@ -83,7 +83,7 @@ system. Rules-of-thumb for 'good' recon output are:
For comparison here is the recon output for the same system above when
two entire racks of Swift are down:
-.. code::
+.. code:: console
[2012-03-10 16:56:33] Checking async pendings on 384 hosts...
-> http://<redacted>.22:6200/recon/async: <urlopen error timed out>
@@ -152,9 +152,9 @@ Here is an example of noting and tracking down a problem with recon.
Running recon shows some async pendings:
-.. code::
+.. code:: console
- bob@notso:~/swift-1.4.4/swift$ ssh -q <redacted>.132.7 sudo swift-recon -alr
+ $ ssh -q <redacted>.132.7 sudo swift-recon -alr
===============================================================================
[2012-03-14 17:25:55] Checking async pendings on 384 hosts...
Async stats: low: 0, high: 23, avg: 8, total: 3356
@@ -172,9 +172,9 @@ Why? Running recon again with -av swift (not shown here) tells us that
the node with the highest (23) is <redacted>.72.61. Looking at the log
files on <redacted>.72.61 we see:
-.. code::
+.. code:: console
- souzab@<redacted>:~$ sudo tail -f /var/log/swift/background.log | - grep -i ERROR
+ $ sudo tail -f /var/log/swift/background.log | grep -i ERROR
Mar 14 17:28:06 <redacted> container-replicator ERROR Remote drive not mounted
{'zone': 5, 'weight': 1952.0, 'ip': '<redacted>.204.119', 'id': 5481, 'meta': '', 'device': 'disk6', 'port': 6201}
Mar 14 17:28:06 <redacted> container-replicator ERROR Remote drive not mounted
@@ -235,7 +235,7 @@ Procedure
running the ring builder on a proxy node to determine which zones
the storage nodes are in. For example:
- .. code::
+ .. code:: console
% sudo swift-ring-builder /etc/swift/object.builder
/etc/swift/object.builder, build version 1467
@@ -258,7 +258,7 @@ Procedure
builder again, this time with the ``list_parts`` option and specify
the nodes under consideration. For example:
- .. code::
+ .. code:: console
% sudo swift-ring-builder /etc/swift/object.builder list_parts <redacted>.8 <redacted>.15 <redacted>.72.2
Partition Matches
@@ -283,7 +283,7 @@ Procedure
small, and is proportional to the number of entries that have a 3 in
the Matches column. For example:
- .. code::
+ .. code:: console
Partition Matches
26865 3
@@ -300,7 +300,7 @@ Procedure
#. A quick way to count the number of rows with 3 matches is:
- .. code::
+ .. code:: console
% sudo swift-ring-builder /etc/swift/object.builder list_parts <redacted>.8 <redacted>.15 <redacted>.72.2 | grep "3$" | wc -l
diff --git a/doc/source/ops_runbook/procedures.rst b/doc/source/ops_runbook/procedures.rst
index af28e020c..1d84d5969 100644
--- a/doc/source/ops_runbook/procedures.rst
+++ b/doc/source/ops_runbook/procedures.rst
@@ -10,13 +10,13 @@ Fix broken GPT table (broken disk partition)
- If a GPT table is broken, a message like the following should be
observed when the command...
- .. code::
+ .. code:: console
$ sudo parted -l
- ... is run.
- .. code::
+ .. code:: console
...
Error: The backup GPT table is corrupt, but the primary appears OK, so that will
@@ -25,13 +25,13 @@ Fix broken GPT table (broken disk partition)
#. To fix this, firstly install the ``gdisk`` program to fix this:
- .. code::
+ .. code:: console
$ sudo aptitude install gdisk
#. Run ``gdisk`` for the particular drive with the damaged partition:
- .. code:
+ .. code:: console
$ sudo gdisk /dev/sd*a-l*
GPT fdisk (gdisk) version 0.6.14
@@ -57,7 +57,7 @@ Fix broken GPT table (broken disk partition)
and finally ``w`` (write table to disk and exit). Will also need to
enter ``Y`` when prompted in order to confirm actions.
- .. code::
+ .. code:: console
Command (? for help): r
@@ -92,7 +92,7 @@ Fix broken GPT table (broken disk partition)
#. Running the command:
- .. code::
+ .. code:: console
$ sudo parted /dev/sd#
@@ -100,7 +100,7 @@ Fix broken GPT table (broken disk partition)
#. Finally, uninstall ``gdisk`` from the node:
- .. code::
+ .. code:: console
$ sudo aptitude remove gdisk
@@ -112,20 +112,20 @@ Procedure: Fix broken XFS filesystem
#. A filesystem may be corrupt or broken if the following output is
observed when checking its label:
- .. code::
+ .. code:: console
$ sudo xfs_admin -l /dev/sd#
- cache_node_purge: refcount was 1, not zero (node=0x25d5ee0)
- xfs_admin: cannot read root inode (117)
- cache_node_purge: refcount was 1, not zero (node=0x25d92b0)
- xfs_admin: cannot read realtime bitmap inode (117)
- bad sb magic # 0 in AG 1
- failed to read label in AG 1
+ cache_node_purge: refcount was 1, not zero (node=0x25d5ee0)
+ xfs_admin: cannot read root inode (117)
+ cache_node_purge: refcount was 1, not zero (node=0x25d92b0)
+ xfs_admin: cannot read realtime bitmap inode (117)
+ bad sb magic # 0 in AG 1
+ failed to read label in AG 1
#. Run the following commands to remove the broken/corrupt filesystem and replace.
(This example uses the filesystem ``/dev/sdb2``) Firstly need to replace the partition:
- .. code::
+ .. code:: console
$ sudo parted
GNU Parted 2.3
@@ -167,7 +167,7 @@ Procedure: Fix broken XFS filesystem
#. Next step is to scrub the filesystem and format:
- .. code::
+ .. code:: console
$ sudo dd if=/dev/zero of=/dev/sdb2 bs=$((1024*1024)) count=1
1+0 records in
@@ -175,19 +175,19 @@ Procedure: Fix broken XFS filesystem
1048576 bytes (1.0 MB) copied, 0.00480617 s, 218 MB/s
$ sudo /sbin/mkfs.xfs -f -i size=1024 /dev/sdb2
meta-data=/dev/sdb2 isize=1024 agcount=4, agsize=106811524 blks
- = sectsz=512 attr=2, projid32bit=0
- data = bsize=4096 blocks=427246093, imaxpct=5
- = sunit=0 swidth=0 blks
- naming =version 2 bsize=4096 ascii-ci=0
- log =internal log bsize=4096 blocks=208616, version=2
- = sectsz=512 sunit=0 blks, lazy-count=1
- realtime =none extsz=4096 blocks=0, rtextents=0
+ = sectsz=512 attr=2, projid32bit=0
+ data = bsize=4096 blocks=427246093, imaxpct=5
+ = sunit=0 swidth=0 blks
+ naming =version 2 bsize=4096 ascii-ci=0
+ log =internal log bsize=4096 blocks=208616, version=2
+ = sectsz=512 sunit=0 blks, lazy-count=1
+ realtime =none extsz=4096 blocks=0, rtextents=0
#. You should now label and mount your filesystem.
#. Can now check to see if the filesystem is mounted using the command:
- .. code::
+ .. code:: console
$ mount
@@ -204,7 +204,7 @@ Procedure: Checking if an account is okay
You must know the tenant/project ID. You can check if the account is okay as follows from a proxy.
-.. code::
+.. code:: console
$ sudo -u swift /opt/hp/swift/bin/swift-direct show AUTH_<project-id>
@@ -214,7 +214,7 @@ containers, or an error indicating that the resource could not be found.
Alternatively, you can use ``swift-get-nodes`` to find the account database
files. Run the following on a proxy:
-.. code::
+.. code:: console
$ sudo swift-get-nodes /etc/swift/account.ring.gz AUTH_<project-id>
@@ -239,7 +239,7 @@ Log onto one of the swift proxy servers.
Use swift-direct to show this accounts usage:
-.. code::
+.. code:: console
$ sudo -u swift /opt/hp/swift/bin/swift-direct show AUTH_<project-id>
Status: 200
@@ -288,7 +288,7 @@ re-create the account as follows:
servers). The output has been truncated so we can focus on the important pieces
of data:
- .. code::
+ .. code:: console
$ sudo swift-get-nodes /etc/swift/account.ring.gz AUTH_4ebe3039674d4864a11fe0864ae4d905
...
@@ -308,7 +308,7 @@ re-create the account as follows:
#. Before proceeding check that the account is really deleted by using curl. Execute the
commands printed by ``swift-get-nodes``. For example:
- .. code::
+ .. code:: console
$ curl -I -XHEAD "http://192.168.245.5:6202/disk1/3934/AUTH_4ebe3039674d4864a11fe0864ae4d905"
HTTP/1.1 404 Not Found
@@ -323,7 +323,7 @@ re-create the account as follows:
#. Use the ssh commands printed by ``swift-get-nodes`` to check if database
files exist. For example:
- .. code::
+ .. code:: console
$ ssh 192.168.245.5 "ls -lah ${DEVICE:-/srv/node*}/disk1/accounts/3934/052/f5ecf8b40de3e1b0adb0dbe576874052"
total 20K
@@ -344,7 +344,7 @@ re-create the account as follows:
#. Delete the database files. For example:
- .. code::
+ .. code:: console
$ ssh 192.168.245.5
$ cd /srv/node/disk1/accounts/3934/052/f5ecf8b40de3e1b0adb0dbe576874052
@@ -374,9 +374,9 @@ balancers, customer's are not impacted by the misbehaving proxy.
#. Shut down Swift as follows:
- .. code::
+ .. code:: console
- sudo swift-init proxy shutdown
+ $ sudo swift-init proxy shutdown
.. note::
@@ -384,15 +384,15 @@ balancers, customer's are not impacted by the misbehaving proxy.
#. Create the ``/etc/swift/disabled-by-file`` file. For example:
- .. code::
+ .. code:: console
- sudo touch /etc/swift/disabled-by-file
+ $ sudo touch /etc/swift/disabled-by-file
#. Optional, restart Swift:
- .. code::
+ .. code:: console
- sudo swift-init proxy start
+ $ sudo swift-init proxy start
It works because the healthcheck middleware looks for /etc/swift/disabled-by-file.
If it exists, the middleware will return 503/error instead of 200/OK. This means the load balancer
@@ -403,9 +403,9 @@ Procedure: Ad-Hoc disk performance test
You can get an idea whether a disk drive is performing as follows:
-.. code::
+.. code:: console
- sudo dd bs=1M count=256 if=/dev/zero conv=fdatasync of=/srv/node/disk11/remember-to-delete-this-later
+ $ sudo dd bs=1M count=256 if=/dev/zero conv=fdatasync of=/srv/node/disk11/remember-to-delete-this-later
You can expect ~600MB/sec. If you get a low number, repeat many times as
Swift itself may also read or write to the disk, hence giving a lower
diff --git a/doc/source/ops_runbook/troubleshooting.rst b/doc/source/ops_runbook/troubleshooting.rst
index cb7553fc6..75511010c 100644
--- a/doc/source/ops_runbook/troubleshooting.rst
+++ b/doc/source/ops_runbook/troubleshooting.rst
@@ -16,20 +16,20 @@ transactions from this user. The linux ``bzgrep`` command can be used to
search all the proxy log files on a node including the ``.bz2`` compressed
files. For example:
-.. code::
+.. code:: console
$ PDSH_SSH_ARGS_APPEND="-o StrictHostKeyChecking=no" pdsh -l <yourusername> -R ssh \
-w <redacted>.68.[4-11,132-139 4-11,132-139],<redacted>.132.[4-11,132-139] \
'sudo bzgrep -w AUTH_redacted-4962-4692-98fb-52ddda82a5af /var/log/swift/proxy.log*' | dshbak -c
- .
- .
- ----------------
- <redacted>.132.6
- ----------------
- Feb 29 08:51:57 sw-aw2az2-proxy011 proxy-server <redacted>.16.132
- <redacted>.66.8 29/Feb/2012/08/51/57 GET /v1.0/AUTH_redacted-4962-4692-98fb-52ddda82a5af
- /%3Fformat%3Djson HTTP/1.0 404 - - <REDACTED>_4f4d50c5e4b064d88bd7ab82 - - -
- tx429fc3be354f434ab7f9c6c4206c1dc3 - 0.0130
+ .
+ .
+ ----------------
+ <redacted>.132.6
+ ----------------
+ Feb 29 08:51:57 sw-aw2az2-proxy011 proxy-server <redacted>.16.132
+ <redacted>.66.8 29/Feb/2012/08/51/57 GET /v1.0/AUTH_redacted-4962-4692-98fb-52ddda82a5af
+ /%3Fformat%3Djson HTTP/1.0 404 - - <REDACTED>_4f4d50c5e4b064d88bd7ab82 - - -
+ tx429fc3be354f434ab7f9c6c4206c1dc3 - 0.0130
This shows a ``GET`` operation on the users account.
@@ -40,7 +40,7 @@ This shows a ``GET`` operation on the users account.
Using the transaction ID, ``tx429fc3be354f434ab7f9c6c4206c1dc3`` you can
search the swift object servers log files for this transaction ID:
-.. code::
+.. code:: console
$ PDSH_SSH_ARGS_APPEND="-o StrictHostKeyChecking=no" pdsh -l <yourusername> -R ssh \
-w <redacted>.72.[4-67|4-67],<redacted>.[4-67|4-67],<redacted>.[4-67|4-67],<redacted>.204.[4-131] \
@@ -79,7 +79,7 @@ search the swift object servers log files for this transaction ID:
Next, use the ``swift-get-nodes`` command to determine exactly where the
user's account data is stored:
-.. code::
+.. code:: console
$ sudo swift-get-nodes /etc/swift/account.ring.gz AUTH_redacted-4962-4692-98fb-52ddda82a5af
Account AUTH_redacted-4962-4692-98fb-52ddda82a5af
@@ -119,7 +119,7 @@ user's account data is stored:
Check each of the primary servers, <redacted>.31, <redacted>.204.70 and <redacted>.72.16, for
this users account. For example on <redacted>.72.16:
-.. code::
+.. code:: console
$ ls -lah /srv/node/disk9/accounts/198875/696/1846d99185f8a0edaf65cfbf37439696/
total 1.0M
@@ -131,7 +131,7 @@ this users account. For example on <redacted>.72.16:
So this users account db, an sqlite db is present. Use sqlite to
checkout the account:
-.. code::
+.. code:: console
$ sudo cp /srv/node/disk9/accounts/198875/696/1846d99185f8a0edaf65cfbf37439696/1846d99185f8a0edaf65cfbf37439696.db /tmp
$ sudo sqlite3 /tmp/1846d99185f8a0edaf65cfbf37439696.db
@@ -156,7 +156,7 @@ checkout the account:
why the GET operations are returning 404, not found. Check the account
delete date/time:
- .. code::
+ .. code:: console
$ python
@@ -167,7 +167,7 @@ checkout the account:
Next try and find the ``DELETE`` operation for this account in the proxy
server logs:
-.. code::
+.. code:: console
$ PDSH_SSH_ARGS_APPEND="-o StrictHostKeyChecking=no" pdsh -l <yourusername> -R ssh \
-w <redacted>.68.[4-11,132-139 4-11,132-139],<redacted>.132.[4-11,132-139|4-11,132-139] \
@@ -206,7 +206,7 @@ as follows:
Examine the object in question:
-.. code::
+.. code:: console
$ sudo -u swift /opt/hp/swift/bin/swift-direct head 132345678912345 container_name obj_name
@@ -219,14 +219,14 @@ name of the objects this means it is a DLO. For example,
if ``X-Object-Manifest`` is ``container2/seg-blah``, list the contents
of the container container2 as follows:
-.. code::
+.. code:: console
$ sudo -u swift /opt/hp/swift/bin/swift-direct show 132345678912345 container2
Pick out the objects whose names start with ``seg-blah``.
Delete the segment objects as follows:
-.. code::
+.. code:: console
$ sudo -u swift /opt/hp/swift/bin/swift-direct delete 132345678912345 container2 seg-blah01
$ sudo -u swift /opt/hp/swift/bin/swift-direct delete 132345678912345 container2 seg-blah02
diff --git a/doc/source/policies_saio.rst b/doc/source/policies_saio.rst
index 5efc4bf6d..9a8f6ce2e 100644
--- a/doc/source/policies_saio.rst
+++ b/doc/source/policies_saio.rst
@@ -22,39 +22,45 @@ to understand and adding a bunch of new devices isn't really required
to implement a usable set of policies.
1. To define your policies, add the following to your ``/etc/swift/swift.conf``
- file::
+ file:
- [storage-policy:0]
- name = gold
- aliases = yellow, orange
- default = yes
+ .. code:: ini
- [storage-policy:1]
- name = silver
+ [storage-policy:0]
+ name = gold
+ aliases = yellow, orange
+ default = yes
- See :doc:`overview_policies` for detailed information on ``swift.conf`` policy
- options.
+ [storage-policy:1]
+ name = silver
+
+ See :doc:`overview_policies` for detailed information on ``swift.conf`` policy
+ options.
2. To create the object ring for the silver policy (index 1), add the following
to your ``bin/remakerings`` script and re-run it (your script may already have
- these changes)::
+ these changes):
+
+ .. code:: shell
- swift-ring-builder object-1.builder create 10 2 1
- swift-ring-builder object-1.builder add r1z1-127.0.0.1:6210/sdb1 1
- swift-ring-builder object-1.builder add r1z2-127.0.0.1:6220/sdb2 1
- swift-ring-builder object-1.builder add r1z3-127.0.0.1:6230/sdb3 1
- swift-ring-builder object-1.builder add r1z4-127.0.0.1:6240/sdb4 1
- swift-ring-builder object-1.builder rebalance
+ swift-ring-builder object-1.builder create 10 2 1
+ swift-ring-builder object-1.builder add r1z1-127.0.0.1:6210/sdb1 1
+ swift-ring-builder object-1.builder add r1z2-127.0.0.1:6220/sdb2 1
+ swift-ring-builder object-1.builder add r1z3-127.0.0.1:6230/sdb3 1
+ swift-ring-builder object-1.builder add r1z4-127.0.0.1:6240/sdb4 1
+ swift-ring-builder object-1.builder rebalance
- Note that the reduced replication of the silver policy is only a function
- of the replication parameter in the ``swift-ring-builder create`` command
- and is not specified in ``/etc/swift/swift.conf``.
+ Note that the reduced replication of the silver policy is only a function
+ of the replication parameter in the ``swift-ring-builder create`` command
+ and is not specified in ``/etc/swift/swift.conf``.
3. Copy ``etc/container-reconciler.conf-sample`` to
- ``/etc/swift/container-reconciler.conf`` and fix the user option::
+ ``/etc/swift/container-reconciler.conf`` and fix the user option:
+
+ .. code:: shell
- cp etc/container-reconciler.conf-sample /etc/swift/container-reconciler.conf
- sed -i "s/# user.*/user = $USER/g" /etc/swift/container-reconciler.conf
+ cp etc/container-reconciler.conf-sample /etc/swift/container-reconciler.conf
+ sed -i "s/# user.*/user = $USER/g" /etc/swift/container-reconciler.conf
------------------
Using Policies
@@ -68,82 +74,104 @@ Storage Policies effect placement of data in Swift.
1. We will be using the list_endpoints middleware to confirm object locations,
so enable that now in your ``proxy-server.conf`` file by adding it to the pipeline
and including the filter section as shown below (be sure to restart your proxy
- after making these changes)::
+ after making these changes):
+
+ .. code:: ini
+
+ pipeline = catch_errors gatekeeper healthcheck proxy-logging cache bulk \
+ slo dlo ratelimit crossdomain list-endpoints tempurl tempauth staticweb \
+ container-quotas account-quotas proxy-logging proxy-server
+
+ [filter:list-endpoints]
+ use = egg:swift#list_endpoints
- pipeline = catch_errors gatekeeper healthcheck proxy-logging cache bulk \
- slo dlo ratelimit crossdomain list-endpoints tempurl tempauth staticweb \
- container-quotas account-quotas proxy-logging proxy-server
+2. Check to see that your policies are reported via /info:
- [filter:list-endpoints]
- use = egg:swift#list_endpoints
+ .. code:: shell
-2. Check to see that your policies are reported via /info::
+ swift -A http://127.0.0.1:8080/auth/v1.0 -U test:tester -K testing info
- swift -A http://127.0.0.1:8080/auth/v1.0 -U test:tester -K testing info
+ You should see this: (only showing the policy output here):
- You should see this: (only showing the policy output here)::
+ .. code:: none
- policies: [{'aliases': 'gold, yellow, orange', 'default': True,
- 'name': 'gold'}, {'aliases': 'silver', 'name': 'silver'}]
+ policies: [{'aliases': 'gold, yellow, orange', 'default': True,
+ 'name': 'gold'}, {'aliases': 'silver', 'name': 'silver'}]
3. Now create a container without specifying a policy, it will use the
default, 'gold' and then put a test object in it (create the file ``file0.txt``
- with your favorite editor with some content)::
+ with your favorite editor with some content):
+
+ .. code:: shell
+
+ curl -v -X PUT -H 'X-Auth-Token: <your auth token>' \
+ http://127.0.0.1:8080/v1/AUTH_test/myCont0
+ curl -X PUT -v -T file0.txt -H 'X-Auth-Token: <your auth token>' \
+ http://127.0.0.1:8080/v1/AUTH_test/myCont0/file0.txt
+
+4. Now confirm placement of the object with the :ref:`list_endpoints` middleware:
+
+ .. code:: shell
+
+ curl -X GET -v http://127.0.0.1:8080/endpoints/AUTH_test/myCont0/file0.txt
+
+ You should see this: (note placement on expected devices):
- curl -v -X PUT -H 'X-Auth-Token: <your auth token>' \
- http://127.0.0.1:8080/v1/AUTH_test/myCont0
- curl -X PUT -v -T file0.txt -H 'X-Auth-Token: <your auth token>' \
- http://127.0.0.1:8080/v1/AUTH_test/myCont0/file0.txt
+ .. code:: json
-4. Now confirm placement of the object with the :ref:`list_endpoints` middleware::
+ ["http://127.0.0.1:6230/sdb3/761/AUTH_test/myCont0/file0.txt",
+ "http://127.0.0.1:6210/sdb1/761/AUTH_test/myCont0/file0.txt",
+ "http://127.0.0.1:6220/sdb2/761/AUTH_test/myCont0/file0.txt"]
- curl -X GET -v http://127.0.0.1:8080/endpoints/AUTH_test/myCont0/file0.txt
+5. Create a container using policy 'silver' and put a different file in it:
- You should see this: (note placement on expected devices)::
+ .. code:: shell
- ["http://127.0.0.1:6230/sdb3/761/AUTH_test/myCont0/file0.txt",
- "http://127.0.0.1:6210/sdb1/761/AUTH_test/myCont0/file0.txt",
- "http://127.0.0.1:6220/sdb2/761/AUTH_test/myCont0/file0.txt"]
+ curl -v -X PUT -H 'X-Auth-Token: <your auth token>' -H \
+ "X-Storage-Policy: silver" \
+ http://127.0.0.1:8080/v1/AUTH_test/myCont1
+ curl -X PUT -v -T file1.txt -H 'X-Auth-Token: <your auth token>' \
+ http://127.0.0.1:8080/v1/AUTH_test/myCont1/
-5. Create a container using policy 'silver' and put a different file in it::
+6. Confirm placement of the object for policy 'silver':
- curl -v -X PUT -H 'X-Auth-Token: <your auth token>' -H \
- "X-Storage-Policy: silver" \
- http://127.0.0.1:8080/v1/AUTH_test/myCont1
- curl -X PUT -v -T file1.txt -H 'X-Auth-Token: <your auth token>' \
- http://127.0.0.1:8080/v1/AUTH_test/myCont1/
+ .. code:: shell
-6. Confirm placement of the object for policy 'silver'::
+ curl -X GET -v http://127.0.0.1:8080/endpoints/AUTH_test/myCont1/file1.txt
- curl -X GET -v http://127.0.0.1:8080/endpoints/AUTH_test/myCont1/file1.txt
+ You should see this: (note placement on expected devices):
- You should see this: (note placement on expected devices)::
+ .. code:: json
- ["http://127.0.0.1:6210/sdb1/32/AUTH_test/myCont1/file1.txt",
- "http://127.0.0.1:6240/sdb4/32/AUTH_test/myCont1/file1.txt"]
+ ["http://127.0.0.1:6210/sdb1/32/AUTH_test/myCont1/file1.txt",
+ "http://127.0.0.1:6240/sdb4/32/AUTH_test/myCont1/file1.txt"]
7. Confirm account information with HEAD, make sure that your container-updater
service is running and has executed once since you performed the PUTs or the
- account database won't be updated yet::
-
- curl -i -X HEAD -H 'X-Auth-Token: <your auth token>' \
- http://127.0.0.1:8080/v1/AUTH_test
-
- You should see something like this (note that total and per policy stats
- object sizes will vary)::
-
- HTTP/1.1 204 No Content
- Content-Length: 0
- X-Account-Object-Count: 2
- X-Account-Bytes-Used: 174
- X-Account-Container-Count: 2
- X-Account-Storage-Policy-Gold-Object-Count: 1
- X-Account-Storage-Policy-Gold-Bytes-Used: 84
- X-Account-Storage-Policy-Silver-Object-Count: 1
- X-Account-Storage-Policy-Silver-Bytes-Used: 90
- X-Timestamp: 1397230339.71525
- Content-Type: text/plain; charset=utf-8
- Accept-Ranges: bytes
- X-Trans-Id: tx96e7496b19bb44abb55a3-0053482c75
- X-Openstack-Request-Id: tx96e7496b19bb44abb55a3-0053482c75
- Date: Fri, 11 Apr 2014 17:55:01 GMT
+ account database won't be updated yet:
+
+ .. code:: shell
+
+ curl -i -X HEAD -H 'X-Auth-Token: <your auth token>' \
+ http://127.0.0.1:8080/v1/AUTH_test
+
+ You should see something like this (note that total and per policy stats
+ object sizes will vary):
+
+ .. code:: none
+
+ HTTP/1.1 204 No Content
+ Content-Length: 0
+ X-Account-Object-Count: 2
+ X-Account-Bytes-Used: 174
+ X-Account-Container-Count: 2
+ X-Account-Storage-Policy-Gold-Object-Count: 1
+ X-Account-Storage-Policy-Gold-Bytes-Used: 84
+ X-Account-Storage-Policy-Silver-Object-Count: 1
+ X-Account-Storage-Policy-Silver-Bytes-Used: 90
+ X-Timestamp: 1397230339.71525
+ Content-Type: text/plain; charset=utf-8
+ Accept-Ranges: bytes
+ X-Trans-Id: tx96e7496b19bb44abb55a3-0053482c75
+ X-Openstack-Request-Id: tx96e7496b19bb44abb55a3-0053482c75
+ Date: Fri, 11 Apr 2014 17:55:01 GMT
diff --git a/docker/install_scripts/20_apk_install_py2.sh b/docker/install_scripts/20_apk_install_py2.sh
index 32367cc8d..12c28e59c 100755
--- a/docker/install_scripts/20_apk_install_py2.sh
+++ b/docker/install_scripts/20_apk_install_py2.sh
@@ -2,8 +2,9 @@
set -e
apk add --update \
- python \
- python-dev \
- py-pip \
- py-cffi \
- py-cryptography
+ python2 \
+ python2-dev
+wget -O - https://bootstrap.pypa.io/pip/2.7/get-pip.py | python
+pip install \
+ cffi \
+ cryptography
diff --git a/docker/install_scripts/99_apk_uninstall_dev.sh b/docker/install_scripts/99_apk_uninstall_dev.sh
index 5109354e9..3e478c9a4 100755
--- a/docker/install_scripts/99_apk_uninstall_dev.sh
+++ b/docker/install_scripts/99_apk_uninstall_dev.sh
@@ -13,5 +13,6 @@ apk del g++
apk del libffi-dev
apk del libxslt-dev
apk del libxml2-dev
-apk del python-dev
+apk del python2-dev || :
+apk del python3-dev
rm -rf /var/cache/apk/*
diff --git a/etc/account-server.conf-sample b/etc/account-server.conf-sample
index 485fb45e2..ade806819 100644
--- a/etc/account-server.conf-sample
+++ b/etc/account-server.conf-sample
@@ -199,8 +199,8 @@ use = egg:swift#backend_ratelimit
# ionice_class =
# ionice_priority =
#
-# The handoffs_only mode option is for special-case emergency
-# situations such as full disks in the cluster. This option SHOULD NOT
+# The handoffs_only and handoff_delete options are for special-case emergency
+# situations such as full disks in the cluster. These options SHOULD NOT
# BE ENABLED except in emergencies. When handoffs_only mode is enabled
# the replicator will *only* replicate from handoff nodes to primary
# nodes and will not sync primary nodes with other primary nodes.
@@ -217,6 +217,15 @@ use = egg:swift#backend_ratelimit
# long-term use.
#
# handoffs_only = no
+#
+# handoff_delete is the number of replicas which are ensured in swift.
+# If a number less than the number of replicas is set, the account-replicator
+# could delete local handoffs even if not all replicas are ensured in the
+# cluster. The replicator would remove the local handoff account database after
+# syncing when the number of successful responses is greater than or equal to
+# this number. By default (auto), handoff partitions will be
+# removed when they have successfully replicated to all the canonical nodes.
+# handoff_delete = auto
[account-auditor]
# You can override the default log routing for this app here (don't use set!):
diff --git a/etc/container-server.conf-sample b/etc/container-server.conf-sample
index 5f949abc4..7184c1fb2 100644
--- a/etc/container-server.conf-sample
+++ b/etc/container-server.conf-sample
@@ -209,8 +209,8 @@ use = egg:swift#backend_ratelimit
# ionice_class =
# ionice_priority =
#
-# The handoffs_only mode option is for special-case emergency
-# situations such as full disks in the cluster. This option SHOULD NOT
+# The handoffs_only and handoff_delete options are for special-case emergency
+# situations such as full disks in the cluster. These options SHOULD NOT
# BE ENABLED except in emergencies. When handoffs_only mode is enabled
# the replicator will *only* replicate from handoff nodes to primary
# nodes and will not sync primary nodes with other primary nodes.
@@ -227,6 +227,15 @@ use = egg:swift#backend_ratelimit
# long-term use.
#
# handoffs_only = no
+#
+# handoff_delete is the number of replicas which are ensured in swift.
+# If a number less than the number of replicas is set, the container-replicator
+# could delete local handoffs even if not all replicas are ensured in the
+# cluster. The replicator would remove the local handoff container database after
+# syncing when the number of successful responses is greater than or equal to
+# this number. By default (auto), handoff partitions will be
+# removed when they have successfully replicated to all the canonical nodes.
+# handoff_delete = auto
[container-updater]
# You can override the default log routing for this app here (don't use set!):
diff --git a/etc/object-server.conf-sample b/etc/object-server.conf-sample
index c06f4365e..fd516f780 100644
--- a/etc/object-server.conf-sample
+++ b/etc/object-server.conf-sample
@@ -22,6 +22,12 @@ bind_port = 6200
# feature.
# servers_per_port = 0
#
+# If running in a container, servers_per_port may not be able to use the
+# bind_ip to lookup the ports in the ring. You may instead override the port
+# lookup in the ring using the ring_ip. Any devices/ports associated with the
+# ring_ip will be used when listening on the configured bind_ip address.
+# ring_ip = <bind_ip>
+#
# Maximum concurrent requests per worker
# max_clients = 1024
#
@@ -303,6 +309,13 @@ use = egg:swift#backend_ratelimit
# ring_check_interval = 15.0
# recon_cache_path = /var/cache/swift
#
+# By default, per-file rsync transfers are logged at debug if successful and
+# error on failure. During large rebalances (which both increase the number
+# of diskfiles transferred and increases the likelihood of failures), this
+# can overwhelm log aggregation while providing little useful insights.
+# Change this to false to disable per-file logging.
+# log_rsync_transfers = true
+#
# limits how long rsync error log lines are
# 0 means to log the entire line
# rsync_error_log_line_length = 0
diff --git a/py36-constraints.txt b/py36-constraints.txt
new file mode 100644
index 000000000..623746daa
--- /dev/null
+++ b/py36-constraints.txt
@@ -0,0 +1,88 @@
+# taken from last passing py36 gate job prior to requirements constraining
+# oslo-log to a version that dropped py36 support, 2022-05-19
+attrs==21.4.0
+autopage==0.5.0
+bandit==1.7.1
+boto==2.49.0
+boto3==1.23.0
+botocore==1.26.0
+certifi==2022.5.18
+cffi==1.15.0
+charset-normalizer==2.0.12
+cliff==3.10.1
+cmd2==2.4.1
+coverage==6.2
+cryptography==36.0.2
+debtcollector==2.5.0
+decorator==4.4.2
+dnspython==2.2.1
+docutils==0.17.1
+dogpile.cache==1.1.5
+eventlet==0.33.1
+extras==1.0.0
+fixtures==3.0.0
+flake8==3.9.2
+future==0.18.2
+gitdb==4.0.9
+GitPython==3.1.18
+greenlet==1.1.2
+hacking==2.0.0
+idna==3.3
+importlib-metadata==4.8.3
+importlib-resources==5.4.0
+iso8601==1.0.2
+jmespath==0.10.0
+keystoneauth1==4.5.0
+keystonemiddleware==9.4.0
+lxml==4.8.0
+mccabe==0.6.1
+mock==4.0.3
+msgpack==1.0.3
+netaddr==0.8.0
+netifaces==0.11.0
+nose==1.3.7
+nosehtmloutput==0.0.7
+nosexcover==1.0.11
+os-service-types==1.7.0
+oslo.cache==2.11.0
+oslo.config==8.8.0
+oslo.context==4.1.0
+oslo.i18n==5.1.0
+oslo.log==4.7.0
+oslo.serialization==4.3.0
+oslo.utils==4.13.0
+packaging==21.3
+PasteDeploy==2.1.1
+pbr==5.9.0
+prettytable==2.5.0
+pycadf==3.1.1
+pycodestyle==2.7.0
+pycparser==2.21
+pyeclib==1.6.1
+pyflakes==2.3.1
+pyinotify==0.9.6
+pyparsing==3.0.9
+pyperclip==1.8.2
+python-dateutil==2.8.2
+python-keystoneclient==4.4.0
+python-subunit==1.4.0
+python-swiftclient==3.13.1
+pytz==2022.1
+PyYAML==6.0
+requests==2.27.1
+requests-mock==1.9.3
+rfc3986==1.5.0
+s3transfer==0.5.2
+six==1.16.0
+smmap==5.0.0
+stestr==3.2.1
+stevedore==3.5.0
+testtools==2.5.0
+typing_extensions==4.1.1
+urllib3==1.26.9
+voluptuous==0.13.1
+wcwidth==0.2.5
+WebOb==1.8.7
+wrapt==1.14.1
+xattr==0.9.9
+zipp==3.6.0
diff --git a/releasenotes/notes/2_30_0_release-642778c3010848db.yaml b/releasenotes/notes/2_30_0_release-642778c3010848db.yaml
new file mode 100644
index 000000000..e918df16c
--- /dev/null
+++ b/releasenotes/notes/2_30_0_release-642778c3010848db.yaml
@@ -0,0 +1,167 @@
+---
+features:
+ - |
+ Sharding improvements
+
+ * The ``swift-manage-shard-ranges`` tool has a new mode to repair gaps
+ in the namespace.
+
+ * Metrics are now emitted for whether databases used for cleaving
+ were created or already existed, allowing a better understanding
+ of the reason for handoffs in the cluster.
+
+ * Misplaced-record stats are now also emitted to statsd. Previously,
+ these were only available in logs.
+
+ - |
+ Logging improvements
+
+ * The message template for proxy logging may now include a
+ ``{domain}`` field for the client-provided ``Host`` header.
+
+ * Added a ``log_rsync_transfers`` option to the object-replicator.
+ Set it to false to disable logging rsync "send" lines; during
+ large rebalances, such logging can overwhelm log aggregation
+ while providing little useful information.
+
+ - |
+ The formpost digest algorithm is now configurable via the new
+ ``allowed_digests`` option, and support is added for both SHA-256
+ and SHA-512. Supported formpost digests are exposed to clients in
+ ``/info``. Additionally, formpost signatures can now be base64 encoded.
+
+ - |
+ Added metrics to the formpost and tempurl middlewares to monitor
+ digest usage in signatures.
+
+ - |
+ Improved compatibility with certain FIPS-mode-enabled systems.
+
+ - |
+ Added a ``ring_ip`` option for various object services. This may be
+ used to find own devices in the ring in a containerized environment
+ where the ``bind_ip`` may not appear in the ring at all.
+
+ - |
+ Account and container replicators can now be configured with a
+ ``handoff_delete`` option, similar to object replicators and
+ reconstructors. See the sample config for more information.
+
+ - |
+ Developers using Swift's memcache client may now opt in to having
+ a ``MemcacheConnectionError`` be raised when no connection succeeded
+ using a new ``raise_on_error`` keyword argument to ``get``/``set``.
+
+ - |
+ Device names are now included in new database IDs. This provides more
+ context when examining incoming/outgoing sync tables or sharding
+ CleaveContexts.
+
+deprecations:
+ - |
+ SHA-1 signatures are now deprecated for the formpost and tempurl
+ middlewares. At some point in the future, SHA-1 will no longer be
+ enabled by default; eventually, support for it will be removed
+ entirely.
+
+security:
+ - |
+ Constant-time string comparisons are now used when checking S3 API signatures.
+
+ - |
+ Fixed a socket leak when clients try to delete a non-SLO as though
+ it were a Static Large Object.
+
+fixes:
+ - |
+ Sharding improvements
+
+ * Misplaced tombstone records are now properly cleaved.
+
+ * Fixed a bug where the sharder could fail to find a device to use for
+ cleaving.
+
+ * Databases marked deleted are now processed by the sharder.
+
+ * More information is now synced to the fresh database when sharding.
+ Previously, a database could lose the fact that it had been marked
+ as deleted.
+
+ * Shard ranges with no rows to cleave could previously be left in the
+ CREATED state after cleaving. Now, they are advanced to CLEAVED.
+
+ - |
+ S3 API improvements
+
+ * Fixed cross-policy object copies. Previously, copied data would
+ always be written using the source container's policy. Now, the
+ destination container's policy will be used, avoiding availability
+ issues and unnecessary container-reconciler work.
+
+ * More headers are now copied from multi-part upload markers to their
+ completed objects, including ``Content-Encoding``.
+
+ * When running with ``s3_acl`` disabled, ``bucket-owner-full-control`` and
+ ``bucket-owner-read`` canned ACLs will be translated to the same Swift
+ ACLs as ``private``.
+
+ * The S3 ACL and Delete Multiple APIs are now less case-sensitive.
+
+ * Improved the error message when deleting a bucket that's ever had
+ versioning enabled and still has versions in it.
+
+ * ``LastModified`` timestamps in listings are now rounded up to whole
+ seconds, like they are in responses from AWS.
+
+ * Proxy logging for Complete Multipart Upload requests is now more
+ consistent when requests have been retried.
+
+ - |
+ Logging improvements
+
+ * Signal handling is more consistently logged at notice level.
+ Previously, signal handling would sometimes be logged at info
+ or error levels.
+
+ * The object-replicator now logs successful rsync transfers at debug
+ instead of info.
+
+ * Transaction IDs are now only included in daemon log lines
+ in a request/response context.
+
+ - |
+ The tempurl middleware has been updated to return a 503 if storing a
+ token in memcache fails. Third party authentication middlewares are
+ encouraged to also use the new ``raise_on_error`` keyword argument
+ when storing ephemeral tokens in memcache.
+
+ - |
+ Database replication connections are now closed following an error
+ or timeout. This prevents a traceback in some cases when the replicator
+ tries to reuse the connection.
+
+ - |
+ ``ENOENT`` and ``ENODATA`` errors are better handled in the object
+ replicator and auditor.
+
+ - |
+ Improved object update throughput by shifting some shard range
+ filtering from Python to SQL.
+
+ - |
+ Include ``Vary: Origin`` header when CORS responses vary by origin.
+
+ - |
+ The staticweb middleware now allows empty listings at the root of
+ a container. Previously, this would result in a 404 response.
+
+ - |
+ Ring builder output tables better display weights over 1000.
+
+ - |
+ Various other minor bug fixes and improvements.
+
+other:
+ - |
+ Pickle support has been removed from Swift's memcache client. Support
+ had been deprecated since Swift 1.7.0.
diff --git a/releasenotes/source/conf.py b/releasenotes/source/conf.py
index 932a4cbbd..401a53ab9 100644
--- a/releasenotes/source/conf.py
+++ b/releasenotes/source/conf.py
@@ -65,8 +65,8 @@ source_suffix = '.rst'
master_doc = 'index'
# General information about the project.
-project = u'Swift Release Notes'
-copyright = u'%d, OpenStack Foundation' % datetime.datetime.now().year
+project = 'Swift Release Notes'
+copyright = '%d, OpenStack Foundation' % datetime.datetime.now().year
# Release notes do not need a version number in the title, they
# cover multiple releases.
@@ -80,7 +80,7 @@ release = ''
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
-language = None
+# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
diff --git a/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po b/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po
index 3f1c34c88..5eb3d4858 100644
--- a/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po
+++ b/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po
@@ -1,15 +1,16 @@
# Andi Chandler <andi@gowling.com>, 2017. #zanata
# Andi Chandler <andi@gowling.com>, 2018. #zanata
# Andi Chandler <andi@gowling.com>, 2020. #zanata
+# Andi Chandler <andi@gowling.com>, 2022. #zanata
msgid ""
msgstr ""
"Project-Id-Version: Swift Release Notes\n"
"Report-Msgid-Bugs-To: \n"
-"POT-Creation-Date: 2020-10-29 09:29+0000\n"
+"POT-Creation-Date: 2022-08-10 20:19+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
-"PO-Revision-Date: 2020-10-28 11:21+0000\n"
+"PO-Revision-Date: 2022-08-14 12:43+0000\n"
"Last-Translator: Andi Chandler <andi@gowling.com>\n"
"Language-Team: English (United Kingdom)\n"
"Language: en_GB\n"
@@ -85,6 +86,15 @@ msgstr ""
"Method Not Allowed`` messages during a rolling upgrade. To avoid this, "
"comment out the option and restart replication servers before upgrading."
+msgid "0 for success"
+msgstr "0 for success"
+
+msgid "1 for an unexpected outcome"
+msgstr "1 for an unexpected outcome"
+
+msgid "2 for invalid options"
+msgstr "2 for invalid options"
+
msgid "2.10.0"
msgstr "2.10.0"
@@ -163,6 +173,9 @@ msgstr "2.23.1"
msgid "2.23.2"
msgstr "2.23.2"
+msgid "2.23.3"
+msgstr "2.23.3"
+
msgid "2.24.0"
msgstr "2.24.0"
@@ -175,6 +188,30 @@ msgstr "2.25.1"
msgid "2.26.0"
msgstr "2.26.0"
+msgid "2.27.0"
+msgstr "2.27.0"
+
+msgid "2.28.0"
+msgstr "2.28.0"
+
+msgid "2.29.0"
+msgstr "2.29.0"
+
+msgid "2.29.1"
+msgstr "2.29.1"
+
+msgid "3 for user exit"
+msgstr "3 for user exit"
+
+msgid ""
+"A 'compact' command has been added to ``swift-manage-shard-ranges`` that "
+"enables sequences of contiguous shards with low object counts to be "
+"compacted into another existing shard, or into the root container."
+msgstr ""
+"A 'compact' command has been added to ``swift-manage-shard-ranges`` that "
+"enables sequences of contiguous shards with low object counts to be "
+"compacted into another existing shard, or into the root container."
+
msgid ""
"A PUT or POST to a container will now update the container's Last-Modified "
"time, and that value will be included in a GET/HEAD response."
@@ -183,6 +220,18 @@ msgstr ""
"time, and that value will be included in a GET/HEAD response."
msgid ""
+"A ``--no-auto-shard`` option has been added to ``swift-container-sharder``."
+msgstr ""
+"A ``--no-auto-shard`` option has been added to ``swift-container-sharder``."
+
+msgid ""
+"A comparable group, ``.reseller_reader``, is now available for development "
+"purposes when authenticating using tempauth."
+msgstr ""
+"A comparable group, ``.reseller_reader``, is now available for development "
+"purposes when authenticating using tempauth."
+
+msgid ""
"A composite ring comprises two or more component rings that are combined to "
"form a single ring with a replica count equal to the sum of the component "
"rings. The component rings are built independently, using distinct devices "
@@ -195,6 +244,15 @@ msgstr ""
"in distinct regions, which means that the dispersion of replicas between the "
"components can be guaranteed."
+msgid ""
+"A new ``item_size_warning_threshold`` option may be used to monitor for "
+"values that are approaching the limit of what can be stored in memcache. See "
+"the memcache sample config for more information."
+msgstr ""
+"A new ``item_size_warning_threshold`` option may be used to monitor for "
+"values that are approaching the limit of what can be stored in memcache. See "
+"the memcache sample config for more information."
+
msgid "ACLs now work with unicode in user/account names."
msgstr "ACLs now work with Unicode in user/account names."
@@ -296,6 +354,9 @@ msgstr ""
"shared between account/container and object rings to avoid getting 100% "
"full. The default value of 1% matches the existing default on object servers."
+msgid "Add root containers with compactible ranges to recon cache."
+msgstr "Add root containers with compactible ranges to recon cache."
+
msgid ""
"Add slo_manifest_hook callback to allow other middlewares to impose "
"additional constraints on or make edits to SLO manifests before being "
@@ -349,6 +410,17 @@ msgstr ""
"ring_partpower.html>`__ for more information."
msgid ""
+"Added \"audit watcher\" hooks to allow operators to run arbitrary code "
+"against every diskfile in a cluster. For more information, see `the "
+"documentation <https://docs.openstack.org/swift/latest/development_watchers."
+"html>`__."
+msgstr ""
+"Added \"audit watcher\" hooks to allow operators to run arbitrary code "
+"against every diskfile in a cluster. For more information, see `the "
+"documentation <https://docs.openstack.org/swift/latest/development_watchers."
+"html>`__."
+
+msgid ""
"Added \"emergency mode\" hooks in the account and container replicators. "
"These options may be used to prioritize moving handoff partitions to primary "
"locations more quickly. This helps when adding capacity to a ring."
@@ -380,6 +452,9 @@ msgstr ""
msgid "Added ``-d <devs>`` and ``-p <partitions>`` command line options."
msgstr "Added ``-d <devs>`` and ``-p <partitions>`` command line options."
+msgid "Added ``tasks_per_second`` option to rate-limit the object-expirer."
+msgstr "Added ``tasks_per_second`` option to rate-limit the object-expirer."
+
msgid ""
"Added ``ttfb`` (Time to First Byte) and ``pid`` (Process ID) to the set of "
"available proxy-server log fields. For more information, see `the "
@@ -390,12 +465,27 @@ msgstr ""
"documentation <https://docs.openstack.org/swift/latest/logs.html>`__."
msgid ""
+"Added ``usedforsecurity`` annotations for use on FIPS-compliant systems."
+msgstr ""
+"Added ``usedforsecurity`` annotations for use on FIPS-compliant systems."
+
+msgid ""
"Added a \"user\" option to the drive-audit config file. Its value is used to "
"set the owner of the drive-audit recon cache."
msgstr ""
"Added a \"user\" option to the drive-audit config file. Its value is used to "
"set the owner of the drive-audit recon cache."
+msgid "Added a ``--dry-run`` option for the 'compact' command."
+msgstr "Added a ``--dry-run`` option for the 'compact' command."
+
+msgid ""
+"Added a ``--includes`` option for the 'show' command to only output shard "
+"ranges that may include a given object name."
+msgstr ""
+"Added a ``--includes`` option for the 'show' command to only output shard "
+"ranges that may include a given object name."
+
msgid ""
"Added a ``keep_idle`` config option to configure KEEPIDLE time for TCP "
"sockets. The default value is the old constant of 600."
@@ -413,10 +503,93 @@ msgstr ""
msgid "Added a configurable URL base to staticweb."
msgstr "Added a configurable URL base to staticweb."
+msgid ""
+"Added a delay before deleting non-durable data. A new configuration option, "
+"``commit_window`` in the ``[DEFAULT]`` section of object-server.conf, "
+"adjusts this delay; the default is 60 seconds. This improves the durability "
+"of both back-dated PUTs (from the reconciler or container-sync, for example) "
+"and fresh writes to handoffs by preventing the reconstructor from deleting "
+"data that the object-server was still writing."
+msgstr ""
+"Added a delay before deleting non-durable data. A new configuration option, "
+"``commit_window`` in the ``[DEFAULT]`` section of object-server.conf, "
+"adjusts this delay; the default is 60 seconds. This improves the durability "
+"of both back-dated PUTs (from the reconciler or container-sync, for example) "
+"and fresh writes to handoffs by preventing the reconstructor from deleting "
+"data that the object-server was still writing."
+
msgid "Added a handoffs-only mode."
msgstr "Added a handoffs-only mode."
msgid ""
+"Added a new 'analyze' command to automatically identify overlapping shard "
+"ranges and recommend a resolution based on a JSON listing of shard ranges "
+"such as produced by the 'show' command."
+msgstr ""
+"Added a new 'analyze' command to automatically identify overlapping shard "
+"ranges and recommend a resolution based on a JSON listing of shard ranges "
+"such as produced by the 'show' command."
+
+msgid ""
+"Added a new 'repair' command to automatically identify and optionally "
+"resolve overlapping shard ranges."
+msgstr ""
+"Added a new 'repair' command to automatically identify and optionally "
+"resolve overlapping shard ranges."
+
+msgid ""
+"Added a new ``swift.common.registry`` module. This includes helper functions "
+"``register_sensitive_header`` and ``register_sensitive_param`` which third "
+"party middleware authors may use to flag headers and query parameters for "
+"redaction when logging. For more information, see `the documentation "
+"<https://docs.openstack.org/swift/latest/misc.html# module-swift.common."
+"registry>`__."
+msgstr ""
+"Added a new ``swift.common.registry`` module. This includes helper functions "
+"``register_sensitive_header`` and ``register_sensitive_param`` which third "
+"party middleware authors may use to flag headers and query parameters for "
+"redaction when logging. For more information, see `the documentation "
+"<https://docs.openstack.org/swift/latest/misc.html#module-swift.common."
+"registry>`__."
+
+msgid ""
+"Added a new config option, ``minimum_shard_size``. When scanning for shard "
+"ranges, if the final shard would otherwise contain fewer than this many "
+"objects, the previous shard will instead be expanded to the end of the "
+"namespace (and so may contain up to ``rows_per_shard + minimum_shard_size`` "
+"objects). This reduces the number of small shards generated. The default "
+"value is ``rows_per_shard / 5``."
+msgstr ""
+"Added a new config option, ``minimum_shard_size``. When scanning for shard "
+"ranges, if the final shard would otherwise contain fewer than this many "
+"objects, the previous shard will instead be expanded to the end of the "
+"namespace (and so may contain up to ``rows_per_shard + minimum_shard_size`` "
+"objects). This reduces the number of small shards generated. The default "
+"value is ``rows_per_shard / 5``."
+
+msgid ""
+"Added a new config option, ``rows_per_shard``, to specify how many objects "
+"should be in each shard when scanning for ranges. The default is "
+"``shard_container_threshold / 2``, preserving existing behavior."
+msgstr ""
+"Added a new config option, ``rows_per_shard``, to specify how many objects "
+"should be in each shard when scanning for ranges. The default is "
+"``shard_container_threshold / 2``, preserving existing behaviour."
+
+msgid ""
+"Added a new config option, ``shrink_threshold``, to specify the absolute "
+"size below which a shard will be considered for shrinking. This overrides "
+"the ``shard_shrink_point`` configuration option, which expressed this as a "
+"percentage of ``shard_container_threshold``. ``shard_shrink_point`` is now "
+"deprecated."
+msgstr ""
+"Added a new config option, ``shrink_threshold``, to specify the absolute "
+"size below which a shard will be considered for shrinking. This overrides "
+"the ``shard_shrink_point`` configuration option, which expressed this as a "
+"percentage of ``shard_container_threshold``. ``shard_shrink_point`` is now "
+"deprecated."
+
+msgid ""
"Added a new middleware that allows users and operators to configure accounts "
"and containers to use RFC-compliant (i.e., double-quoted) ETags. This may be "
"useful when using Swift as an origin for some content delivery networks. For "
@@ -456,6 +629,17 @@ msgstr ""
"versioned_writes.object_versioning>`__."
msgid ""
+"Added a new optional proxy-logging field ``{wire_status_int}`` for the "
+"status code returned to the client. For more information, see `the "
+"documentation <https://docs.openstack.org/swift/latest/logs.html#proxy-"
+"logs>`__."
+msgstr ""
+"Added a new optional proxy-logging field ``{wire_status_int}`` for the "
+"status code returned to the client. For more information, see `the "
+"documentation <https://docs.openstack.org/swift/latest/logs.html#proxy-"
+"logs>`__."
+
+msgid ""
"Added an experimental ``swift-ring-composer`` CLI tool to build composite "
"rings."
msgstr ""
@@ -469,6 +653,38 @@ msgstr ""
"Added an operator tool, ``swift-container-deleter``, to asynchronously "
"delete some or all objects in a container using the object expirers."
+msgid "Added an option to drop privileges when running the relinker as root."
+msgstr "Added an option to drop privileges when running the relinker as root."
+
+msgid ""
+"Added an option to rate-limit how quickly data files are relinked or cleaned "
+"up. This may be used to reduce I/O load during partition power increases, "
+"improving end-user performance."
+msgstr ""
+"Added an option to rate-limit how quickly data files are relinked or cleaned "
+"up. This may be used to reduce I/O load during partition power increases, "
+"improving end-user performance."
+
+msgid ""
+"Added an option to write EC fragments with legacy CRC to ensure a smooth "
+"upgrade from liberasurecode<=1.5.0 to >=1.6.2. For more information, see "
+"`bug 1886088 <https://bugs.launchpad.net/liberasurecode/+bug/1886088>`__."
+msgstr ""
+"Added an option to write EC fragments with legacy CRC to ensure a smooth "
+"upgrade from liberasurecode<=1.5.0 to >=1.6.2. For more information, see "
+"`bug 1886088 <https://bugs.launchpad.net/liberasurecode/+bug/1886088>`__."
+
+msgid ""
+"Added an option, ``ratelimit_as_client_error``, to return 429s for rate-"
+"limited responses. Several clients/SDKs have seem to support retries with "
+"backoffs on 429, and having it as a client error cleans up logging and "
+"metrics. By default, Swift will respond 503, matching AWS documentation."
+msgstr ""
+"Added an option, ``ratelimit_as_client_error``, to return 429s for rate-"
+"limited responses. Several clients/SDKs seem to support retries with "
+"backoffs on 429, and having it as a client error cleans up logging and "
+"metrics. By default, Swift will respond 503, matching AWS documentation."
+
msgid ""
"Added an optional ``read_only`` middleware to make an entire cluster or "
"individual accounts read only."
@@ -494,9 +710,18 @@ msgstr ""
msgid "Added container/object listing with prefix to InternalClient."
msgstr "Added container/object listing with prefix to InternalClient."
+msgid "Added object-reconstructor stats to recon."
+msgstr "Added object-reconstructor stats to recon."
+
+msgid "Added stats for shard range cache hits, misses, and skips."
+msgstr "Added stats for shard range cache hits, misses, and skips."
+
msgid "Added support for Python 3.8."
msgstr "Added support for Python 3.8."
+msgid "Added support for Python 3.9."
+msgstr "Added support for Python 3.9."
+
msgid "Added support for S3 versioning using the above new mode."
msgstr "Added support for S3 versioning using the above new mode."
@@ -527,9 +752,43 @@ msgstr ""
"Added support for retrieving the encryption root secret from an external key "
"management system. In practice, this is currently limited to Barbican."
+msgid ""
+"Added support for system-scoped \"reader\" roles when authenticating using "
+"Keystone. Operators may configure this using the ``system_reader_roles`` "
+"option in the ``[filter:keystoneauth]`` section of their proxy-server.conf."
+msgstr ""
+"Added support for system-scoped \"reader\" roles when authenticating using "
+"Keystone. Operators may configure this using the ``system_reader_roles`` "
+"option in the ``[filter:keystoneauth]`` section of their proxy-server.conf."
+
msgid "Added symlink objects support."
msgstr "Added symlink objects support."
+msgid ""
+"Added the ability to configure a chance to skip checking memcache when "
+"querying shard ranges. This allows some fraction of traffic to go to disk "
+"and refresh memcache before the key ages out. Recommended values for the new "
+"``container_updating_shard_ranges_skip_cache_pct`` and "
+"``container_listing_shard_ranges_skip_cache_pct`` options are in the range "
+"of 0.0 to 0.1."
+msgstr ""
+"Added the ability to configure a chance to skip checking Memcache when "
+"querying shard ranges. This allows some fraction of traffic to go to disk "
+"and refresh Memcache before the key ages out. Recommended values for the new "
+"``container_updating_shard_ranges_skip_cache_pct`` and "
+"``container_listing_shard_ranges_skip_cache_pct`` options are in the range "
+"of 0.0 to 0.1."
+
+msgid "Added the ability to configure auth region in s3token middleware."
+msgstr "Added the ability to configure auth region in s3token middleware."
+
+msgid ""
+"Added the ability to configure project-scope read-only roles for "
+"keystoneauth using the new ``project_reader_roles`` option."
+msgstr ""
+"Added the ability to configure project-scope read-only roles for "
+"keystoneauth using the new ``project_reader_roles`` option."
+
msgid "After upgrading, re-enable and restart the object-reconstructor."
msgstr "After upgrading, re-enable and restart the object-reconstructor."
@@ -1923,6 +2182,9 @@ msgstr ""
"SSC (server-side copy) Swift source field. See https://docs.openstack.org/"
"developer/swift/logs.html#swift-source for more information."
+msgid "Partition power increase improvements:"
+msgstr "Partition power increase improvements:"
+
msgid ""
"Per-service ``auto_create_account_prefix`` settings are now deprecated and "
"may be ignored in a future release; if you need to use this, please set it "
@@ -2231,6 +2493,9 @@ msgstr ""
msgid "Several utility scripts now work better on Python 3:"
msgstr "Several utility scripts now work better on Python 3:"
+msgid "Sharding fixes:"
+msgstr "Sharding fixes:"
+
msgid "Sharding improvements"
msgstr "Sharding improvements"
@@ -2252,6 +2517,22 @@ msgstr ""
"openstack.org/api-ref/object-storage/."
msgid ""
+"Similar to above, ``expansion_limit`` was added as an absolute-size "
+"replacement for the now-deprecated ``shard_shrink_merge_point`` "
+"configuration option."
+msgstr ""
+"Similar to above, ``expansion_limit`` was added as an absolute-size "
+"replacement for the now-deprecated ``shard_shrink_merge_point`` "
+"configuration option."
+
+msgid ""
+"Some client behaviors that the proxy previously logged at warning have been "
+"lowered to info."
+msgstr ""
+"Some client behaviours that the proxy previously logged at warning have been "
+"lowered to info."
+
+msgid ""
"Static Large Object (SLO) manifest may now (again) have zero-byte last "
"segments."
msgstr ""
@@ -2265,6 +2546,11 @@ msgstr ""
"Static Large Object sizes in listings for versioned containers are now more "
"accurate."
+msgid ""
+"Staticweb correctly handles listings when paths include non-ASCII characters."
+msgstr ""
+"Staticweb correctly handles listings when paths include non-ASCII characters."
+
msgid "Stein Series Release Notes"
msgstr "Stein Series Release Notes"
@@ -2387,6 +2673,11 @@ msgstr ""
"produce and consume."
msgid ""
+"The 'compact' command now outputs the total number of compactible sequences."
+msgstr ""
+"The 'compact' command now outputs the total number of compactible sequences."
+
+msgid ""
"The EC reconstructor process has been dramatically improved by adding "
"support for multiple concurrent workers. Multiple processes are required to "
"get high concurrency, and this change results in much faster rebalance times "
@@ -2410,6 +2701,21 @@ msgid "The ETag-quoting middleware no longer raises TypeErrors."
msgstr "The ETag-quoting middleware no longer raises TypeErrors."
msgid ""
+"The ``StatsdClient.set_prefix`` method is now deprecated and may be removed "
+"in a future release; by extension, so is the ``LogAdapter."
+"set_statsd_prefix`` method. Middleware developers should use the "
+"``statsd_tail_prefix`` argument to ``get_logger`` instead."
+msgstr ""
+"The ``StatsdClient.set_prefix`` method is now deprecated and may be removed "
+"in a future release; by extension, so is the ``LogAdapter."
+"set_statsd_prefix`` method. Middleware developers should use the "
+"``statsd_tail_prefix`` argument to ``get_logger`` instead."
+
+msgid "The ``cname_lookup`` middleware now works with dnspython 2.0 and later."
+msgstr ""
+"The ``cname_lookup`` middleware now works with dnspython 2.0 and later."
+
+msgid ""
"The ``container-replicator`` now correctly enqueues ``container-reconciler`` "
"work for sharded containers."
msgstr ""
@@ -2486,6 +2792,26 @@ msgstr ""
"requests."
msgid ""
+"The ``storage_domain`` option now accepts a comma-separated list of storage "
+"domains. This allows multiple storage domains to configured for use with "
+"virtual-host style addressing."
+msgstr ""
+"The ``storage_domain`` option now accepts a comma-separated list of storage "
+"domains. This allows multiple storage domains to be configured for use with "
+"virtual-host style addressing."
+
+msgid ""
+"The ``swift-recon`` tool now queries each object-server IP only once when "
+"reporting disk usage. Previously, each port in the ring would be queried; "
+"when using servers-per-port, this could dramatically overstate the disk "
+"capacity in the cluster."
+msgstr ""
+"The ``swift-recon`` tool now queries each object-server IP only once when "
+"reporting disk usage. Previously, each port in the ring would be queried; "
+"when using servers-per-port, this could dramatically overstate the disk "
+"capacity in the cluster."
+
+msgid ""
"The above bug was caused by a difference in string types that resulted in "
"ambiguity when decrypting. To prevent the ambiguity for new data, set "
"``meta_version_to_write = 3`` in your keymaster configuration *after* "
@@ -2497,6 +2823,13 @@ msgstr ""
"upgrading all proxy servers."
msgid ""
+"The account and container auditors now log and update recon before going to "
+"sleep."
+msgstr ""
+"The account and container auditors now log and update recon before going to "
+"sleep."
+
+msgid ""
"The bulk extract middleware once again allows clients to specify metadata "
"(including expiration timestamps) for all objects in the archive."
msgstr ""
@@ -2520,6 +2853,13 @@ msgstr ""
"their names."
msgid ""
+"The container-reconciler now scales out better with new ``processes``, "
+"``process``, and ``concurrency`` options, similar to the object-expirer."
+msgstr ""
+"The container-reconciler now scales out better with new ``processes``, "
+"``process``, and ``concurrency`` options, similar to the object-expirer."
+
+msgid ""
"The container-updater now reports zero objects and bytes used for child DBs "
"in sharded containers. This prevents double-counting in utilization reports."
msgstr ""
@@ -2527,6 +2867,38 @@ msgstr ""
"in sharded containers. This prevents double-counting in utilisation reports."
msgid ""
+"The container-updater will quarantine container databases if all replicas "
+"for the account respond 404."
+msgstr ""
+"The container-updater will quarantine container databases if all replicas "
+"for the account respond 404."
+
+msgid "The correct storage policy is now logged for S3 requests."
+msgstr "The correct storage policy is now logged for S3 requests."
+
+msgid ""
+"The dark-data audit watcher now requires that all primary locations for an "
+"object's container agree that the data does not appear in listings to "
+"consider data \"dark\". Previously, a network partition that left an object "
+"node isolated could cause it to quarantine or delete all of its data."
+msgstr ""
+"The dark-data audit watcher now requires that all primary locations for an "
+"object's container agree that the data does not appear in listings to "
+"consider data \"dark\". Previously, a network partition that left an object "
+"node isolated could cause it to quarantine or delete all of its data."
+
+msgid ""
+"The dark-data audit watcher now skips objects younger than a new "
+"configurable ``grace_age`` period. This avoids issues where data could be "
+"flagged, quarantined, or deleted because of listing consistency issues. The "
+"default is one week."
+msgstr ""
+"The dark-data audit watcher now skips objects younger than a new "
+"configurable ``grace_age`` period. This avoids issues where data could be "
+"flagged, quarantined, or deleted because of listing consistency issues. The "
+"default is one week."
+
+msgid ""
"The default for `object_post_as_copy` has been changed to False. The option "
"is now deprecated and will be removed in a future release. If your cluster "
"is still running with post-as-copy enabled, please update it to use the "
@@ -2578,6 +2950,13 @@ msgstr ""
"instead of going disk-by-disk. This eliminates single-disk I/O contention "
"and allows continued scaling as concurrency is increased."
+msgid ""
+"The formpost middleware now properly supports uploading multiple files with "
+"different content-types."
+msgstr ""
+"The formpost middleware now properly supports uploading multiple files with "
+"different content-types."
+
msgid "The formpost middleware now works with unicode file names."
msgstr "The formpost middleware now works with Unicode file names."
@@ -2591,6 +2970,21 @@ msgstr ""
"circumstances, resulting in faster recovery from failures."
msgid ""
+"The internal clients used by the container-reconciler, container-sharder, "
+"container-sync, and object-expirer daemons now use a more-descriptive "
+"``<daemon>-ic`` log name, rather than ``swift``. If you previously "
+"configured the ``log_name`` option in ``internal-client.conf``, you must now "
+"use the ``set log_name = <value>`` syntax to configure it, even if no value "
+"is set in the ``[DEFAULT]`` section. This may be done prior to upgrading."
+msgstr ""
+"The internal clients used by the container-reconciler, container-sharder, "
+"container-sync, and object-expirer daemons now use a more-descriptive "
+"``<daemon>-ic`` log name, rather than ``swift``. If you previously "
+"configured the ``log_name`` option in ``internal-client.conf``, you must now "
+"use the ``set log_name = <value>`` syntax to configure it, even if no value "
+"is set in the ``[DEFAULT]`` section. This may be done prior to upgrading."
+
+msgid ""
"The number of container updates on object PUTs (ie to update listings) has "
"been recomputed to be far more efficient while maintaining durability "
"guarantees. Specifically, object PUTs to erasure-coded policies will now "
@@ -2650,6 +3044,29 @@ msgstr ""
"performed by each worker process. This should speed the processing of "
"async_pendings."
+msgid "The object-expirer logs fewer client disconnects."
+msgstr "The object-expirer logs fewer client disconnects."
+
+msgid ""
+"The object-expirer now only cleans up empty containers. Previously, it would "
+"attempt to delete all processed containers, regardless of whether there were "
+"entries which were skipped or had errors."
+msgstr ""
+"The object-expirer now only cleans up empty containers. Previously, it would "
+"attempt to delete all processed containers, regardless of whether there were "
+"entries which were skipped or had errors."
+
+msgid ""
+"The object-updater now defers rate-limited updates to the end of its cycle; "
+"these deferred updates will be processed (at the limited rate) until the "
+"configured ``interval`` elapses. A new ``max_deferred_updates`` option may "
+"be used to bound the deferral queue."
+msgstr ""
+"The object-updater now defers rate-limited updates to the end of its cycle; "
+"these deferred updates will be processed (at the limited rate) until the "
+"configured ``interval`` elapses. A new ``max_deferred_updates`` option may "
+"be used to bound the deferral queue."
+
msgid ""
"The output of devices from ``swift-ring-builder`` has been reordered by "
"region, zone, ip, and device."
@@ -2658,6 +3075,117 @@ msgstr ""
"region, zone, ip, and device."
msgid ""
+"The post-rsync REPLICATE call no longer recalculates hashes immediately."
+msgstr ""
+"The post-rsync REPLICATE call no longer recalculates hashes immediately."
+
+msgid ""
+"The proxy-server now caches 'listing' shards, improving listing performance "
+"for sharded containers. A new config option, "
+"``recheck_listing_shard_ranges``, controls the cache time and defaults to 10 "
+"minutes; set it to 0 to disable caching (the previous behavior)."
+msgstr ""
+"The proxy-server now caches 'listing' shards, improving listing performance "
+"for sharded containers. A new config option, "
+"``recheck_listing_shard_ranges``, controls the cache time and defaults to 10 "
+"minutes; set it to 0 to disable caching (the previous behaviour)."
+
+msgid ""
+"The reconciler now defers working on policies that have a partition power "
+"increase in progress to avoid issues with concurrent writes."
+msgstr ""
+"The reconciler now defers working on policies that have a partition power "
+"increase in progress to avoid issues with concurrent writes."
+
+msgid ""
+"The reconstructor now uses the replication network to fetch fragments for "
+"reconstruction."
+msgstr ""
+"The reconstructor now uses the replication network to fetch fragments for "
+"reconstruction."
+
+msgid ""
+"The relinker better handles data found from earlier partition power "
+"increases."
+msgstr ""
+"The relinker better handles data found from earlier partition power "
+"increases."
+
+msgid ""
+"The relinker better handles tombstones found for the same object but with "
+"different inodes."
+msgstr ""
+"The relinker better handles tombstones found for the same object but with "
+"different inodes."
+
+msgid ""
+"The relinker can now target specific storage policies or partitions by using "
+"the new ``--policy`` and ``--partition`` options."
+msgstr ""
+"The relinker can now target specific storage policies or partitions by using "
+"the new ``--policy`` and ``--partition`` options."
+
+msgid ""
+"The relinker now performs eventlet-hub selection the same way as other "
+"daemons. In particular, ``epolls`` will no longer be selected, as it seemed "
+"to cause occassional hangs."
+msgstr ""
+"The relinker now performs eventlet-hub selection the same way as other "
+"daemons. In particular, ``epolls`` will no longer be selected, as it seemed "
+"to cause occasional hangs."
+
+msgid ""
+"The relinker now spawns multiple subprocesses to process disks in parallel. "
+"By default, one worker is spawned per disk; use the new ``--workers`` option "
+"to control how many subprocesses are used. Use ``--workers=0`` to maintain "
+"the previous behavior."
+msgstr ""
+"The relinker now spawns multiple subprocesses to process disks in parallel. "
+"By default, one worker is spawned per disk; use the new ``--workers`` option "
+"to control how many subprocesses are used. Use ``--workers=0`` to maintain "
+"the previous behaviour."
+
+msgid ""
+"The sharder and swift-manage-shard-ranges now consider total row count "
+"(instead of just object count) when deciding whether a shard is a candidate "
+"for shrinking."
+msgstr ""
+"The sharder and swift-manage-shard-ranges now consider total row count "
+"(instead of just object count) when deciding whether a shard is a candidate "
+"for shrinking."
+
+msgid ""
+"The sharder daemon has been enhanced to better support the shrinking of "
+"shards that are no longer required. Shard containers will now discover from "
+"their root container if they should be shrinking. They will also discover "
+"the shards into which they should shrink, which may include the root "
+"container itself."
+msgstr ""
+"The sharder daemon has been enhanced to better support the shrinking of "
+"shards that are no longer required. Shard containers will now discover from "
+"their root container if they should be shrinking. They will also discover "
+"the shards into which they should shrink, which may include the root "
+"container itself."
+
+msgid ""
+"The sharder now correctly identifies and fails audits for shard ranges that "
+"overlap exactly."
+msgstr ""
+"The sharder now correctly identifies and fails audits for shard ranges that "
+"overlap exactly."
+
+msgid ""
+"The sharding progress reports in recon cache now continue to be included for "
+"a period of time after sharding has completed. The time period may be "
+"configured using the ``recon_sharded_timeout`` option in the ``[container-"
+"sharder]`` section of container-server.conf, and defaults to 12 hours."
+msgstr ""
+"The sharding progress reports in recon cache now continue to be included for "
+"a period of time after sharding has completed. The time period may be "
+"configured using the ``recon_sharded_timeout`` option in the ``[container-"
+"sharder]`` section of container-server.conf, and defaults to 12 hours."
+
+msgid ""
"The tempurl digest algorithm is now configurable, and Swift added support "
"for both SHA-256 and SHA-512. Supported tempurl digests are exposed to "
"clients in ``/info``. Additionally, tempurl signatures can now be base64 "
@@ -2668,6 +3196,9 @@ msgstr ""
"clients in ``/info``. Additionally, tempurl signatures can now be base64 "
"encoded."
+msgid "This is the final stable branch that will support Python 2.7."
+msgstr "This is the final stable branch that will support Python 2.7."
+
msgid ""
"Throttle update_auditor_status calls so it updates no more than once per "
"minute."
@@ -2689,6 +3220,13 @@ msgid "Truncate error logs to prevent log handler from running out of buffer."
msgstr "Truncate error logs to prevent log handler from running out of buffer."
msgid ""
+"Turned off thread-logging when monkey-patching with eventlet. This addresses "
+"a potential hang in the proxy-server while logging client disconnects."
+msgstr ""
+"Turned off thread-logging when monkey-patching with eventlet. This addresses "
+"a potential hang in the proxy-server while logging client disconnects."
+
+msgid ""
"Ubuntu 18.04 and RDO's CentOS 7 repos package liberasurecode 1.5.0, while "
"Ubuntu 20.04 and RDO's CentOS 8 repos currently package liberasurecode 1.6.0 "
"or 1.6.1. Take care when upgrading major distro versions!"
@@ -2725,6 +3263,9 @@ msgstr ""
"race detection. Also simplified hashing logic to prevent race conditions and "
"optimise for the common case."
+msgid "Updates are now sent over the replication network."
+msgstr "Updates are now sent over the replication network."
+
msgid ""
"Upgrade Impact: If you upgrade and roll back, you must delete all `hashes."
"pkl` files."
@@ -2762,6 +3303,13 @@ msgstr ""
"these newly-written fragments but will instead respond ``500 Internal Server "
"Error``."
+msgid ""
+"Upon detecting a ring change, the reconstructor now only aborts the jobs for "
+"that ring and continues processing jobs for other rings."
+msgstr ""
+"Upon detecting a ring change, the reconstructor now only aborts the jobs for "
+"that ring and continues processing jobs for other rings."
+
msgid "Ussuri Series Release Notes"
msgstr "Ussuri Series Release Notes"
@@ -2785,6 +3333,12 @@ msgstr ""
msgid "WSGI server processes can now notify systemd when they are ready."
msgstr "WSGI server processes can now notify systemd when they are ready."
+msgid "Wallaby Series Release Notes"
+msgstr "Wallaby Series Release Notes"
+
+msgid "Warn when relinking/cleaning up and any disks are unmounted."
+msgstr "Warn when relinking/cleaning up and any disks are unmounted."
+
msgid ""
"We do not yet have CLI tools for creating composite rings, but the "
"functionality has been enabled in the ring modules to support this advanced "
@@ -2795,6 +3349,15 @@ msgstr ""
"functionality. CLI tools will be delivered in a subsequent release."
msgid ""
+"When building a listing from shards, any failure to retrieve listings will "
+"result in a 503 response. Previously, failures fetching a partiucular shard "
+"would result in a gap in listings."
+msgstr ""
+"When building a listing from shards, any failure to retrieve listings will "
+"result in a 503 response. Previously, failures fetching a particular shard "
+"would result in a gap in listings."
+
+msgid ""
"When listing objects in a container in json format, static large objects "
"(SLOs) will now include an additional new \"slo_etag\" key that matches the "
"etag returned when requesting the SLO. The existing \"hash\" key remains "
@@ -2889,9 +3452,23 @@ msgstr ""
"X-Delete-At computation now uses X-Timestamp instead of system time. This "
"prevents clock skew causing inconsistent expiry data."
+msgid "Xena Series Release Notes"
+msgstr "Xena Series Release Notes"
+
+msgid "Yoga Series Release Notes"
+msgstr "Yoga Series Release Notes"
+
msgid "``Content-Type`` can now be updated when copying an object."
msgstr "``Content-Type`` can now be updated when copying an object."
+msgid ""
+"``EIO`` errors during read now cause object diskfiles to be quarantined."
+msgstr ""
+"``EIO`` errors during read now cause object diskfiles to be quarantined."
+
+msgid "``EPIPE`` errors no longer log tracebacks."
+msgstr "``EPIPE`` errors no longer log tracebacks."
+
msgid "``fallocate_reserve`` may be specified as a percentage in more places."
msgstr "``fallocate_reserve`` may be specified as a percentage in more places."
@@ -2911,6 +3488,23 @@ msgstr "``swift-dispersion-populate``"
msgid "``swift-drive-recon``"
msgstr "``swift-drive-recon``"
+msgid ""
+"``swift-manage-shard-ranges`` can now accept a config file; this may be used "
+"to ensure consistency of threshold values with the container-sharder config."
+msgstr ""
+"``swift-manage-shard-ranges`` can now accept a config file; this may be used "
+"to ensure consistency of threshold values with the container-sharder config."
+
+msgid "``swift-manage-shard-ranges`` improvements:"
+msgstr "``swift-manage-shard-ranges`` improvements:"
+
+msgid ""
+"``swift-recon-cron`` now includes the last time it was run in the recon "
+"information."
+msgstr ""
+"``swift-recon-cron`` now includes the last time it was run in the recon "
+"information."
+
msgid "``swift-recon``"
msgstr "``swift-recon``"
diff --git a/swift/cli/manage_shard_ranges.py b/swift/cli/manage_shard_ranges.py
index 5f825c9c6..9bfd89e54 100644
--- a/swift/cli/manage_shard_ranges.py
+++ b/swift/cli/manage_shard_ranges.py
@@ -167,19 +167,21 @@ from six.moves import input
from swift.common.utils import Timestamp, get_logger, ShardRange, readconf, \
- ShardRangeList
+ ShardRangeList, non_negative_int, config_positive_int_value
from swift.container.backend import ContainerBroker, UNSHARDED
from swift.container.sharder import make_shard_ranges, sharding_enabled, \
CleavingContext, process_compactible_shard_sequences, \
find_compactible_shard_sequences, find_overlapping_ranges, \
find_paths, rank_paths, finalize_shrinking, DEFAULT_SHARDER_CONF, \
- ContainerSharderConf
+ ContainerSharderConf, find_paths_with_gaps
EXIT_SUCCESS = 0
EXIT_ERROR = 1
EXIT_INVALID_ARGS = 2 # consistent with argparse exit code for invalid args
EXIT_USER_QUIT = 3
+MIN_SHARD_RANGE_AGE_FOR_REPAIR = 4 * 3600
+
# Some CLI options derive their default values from DEFAULT_SHARDER_CONF if
# they have not been set. It is therefore important that the CLI parser
# provides None as a default so that we can detect that no value was set on the
@@ -206,6 +208,25 @@ class InvalidSolutionException(ManageShardRangesException):
self.overlapping_donors = overlapping_donors
def wrap_for_argparse(func, msg=None):
    """
    Build a wrapper around ``func`` that translates any ``ValueError`` it
    raises into an ``argparse.ArgumentTypeError``.

    :param func: a function.
    :param msg: an optional message to use for the raised
        ``ArgumentTypeError``; if not given, the string representation of
        the ``ValueError`` is used.
    :return: a function wrapper.
    """
    def _translating_wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except ValueError as err:
            error_text = msg if msg is not None else str(err)
            raise argparse.ArgumentTypeError(error_text)
    return _translating_wrapper
+
+
def _proceed(args):
if args.dry_run:
choice = 'no'
@@ -225,8 +246,8 @@ def _print_shard_range(sr, level=0):
print(indent + '%r' % sr.name)
print(indent + ' objects: %9d, tombstones: %9d, lower: %r'
% (sr.object_count, sr.tombstones, sr.lower_str))
- print(indent + ' state: %9s, upper: %r'
- % (sr.state_text, sr.upper_str))
+ print(indent + ' state: %9s, deleted: %d upper: %r'
+ % (sr.state_text, sr.deleted, sr.upper_str))
@contextmanager
@@ -543,6 +564,41 @@ def compact_shard_ranges(broker, args):
return EXIT_SUCCESS
def _remove_young_overlapping_ranges(acceptor_path, overlapping_donors, args):
    """
    Filter out overlapping donor shard ranges that are too young to repair.

    Donors created within ``args.min_shard_age`` seconds of now are dropped;
    so are donors that overlap an acceptor created within that age limit
    (such pairs may be a parent and its in-progress children rather than a
    genuine overlap — TODO confirm against sharder behavior).

    :param acceptor_path: a list of acceptor ShardRanges.
    :param overlapping_donors: a ShardRangeList of overlapping donor
        ShardRanges.
    :param args: an instance of argparse.Namespace; must have a
        ``min_shard_age`` attribute.
    :return: a tuple (acceptor_path, donors) where donors is a filtered
        ShardRangeList, the original list when filtering is disabled, or
        ``None`` when no donors survive the age filter.
    """
    # For range shard repair subcommand, check possible parent-children
    # relationship between acceptors and donors.
    if args.min_shard_age == 0:
        # age-based filtering is disabled
        return acceptor_path, overlapping_donors
    ts_now = Timestamp.now()
    # Remove overlapping donor shard ranges that were created recently,
    # within the 'min_shard_age' age limit.
    qualified_donors = ShardRangeList(
        [sr for sr in overlapping_donors
         if float(sr.timestamp) + args.min_shard_age < float(ts_now)])
    young_donors = len(overlapping_donors) - len(qualified_donors)
    if young_donors > 0:
        print('%d overlapping donor shards ignored due to minimum age '
              'limit' % young_donors)
    if not qualified_donors:
        return acceptor_path, None
    # Remove those overlapping donors whose overlapping acceptors were created
    # within age limit.
    possible_parent_donors = set()
    for acceptor_sr in acceptor_path:
        if float(acceptor_sr.timestamp) + args.min_shard_age < float(ts_now):
            # acceptor is old enough; its donors are not suspected parents
            continue
        possible_parent_donors.update([sr for sr in qualified_donors
                                       if acceptor_sr.overlaps(sr)])
    if possible_parent_donors:
        qualified_donors = ShardRangeList(
            [sr for sr in qualified_donors
             if sr not in possible_parent_donors])
        print('%d donor shards ignored due to existence of overlapping young '
              'acceptors' % len(possible_parent_donors))

    return acceptor_path, qualified_donors
+
+
def _find_overlapping_donors(shard_ranges, own_sr, args):
shard_ranges = ShardRangeList(shard_ranges)
if ShardRange.SHARDING in shard_ranges.states:
@@ -593,7 +649,80 @@ def _find_overlapping_donors(shard_ranges, own_sr, args):
'Isolated cleaved and/or active shard ranges in donor ranges',
acceptor_path, overlapping_donors)
- return acceptor_path, overlapping_donors
+ return _remove_young_overlapping_ranges(
+ acceptor_path, overlapping_donors, args)
+
+
def _fix_gaps(broker, args, paths_with_gaps):
    """
    Fix gaps in the broker's shard ranges by expanding an ACTIVE neighbor
    range over each gap.

    For each gap, the range immediately after the gap is preferred as the
    expanding range; otherwise the range immediately before the gap is used.
    Gaps with no ACTIVE neighbor cannot be fixed. The number of applied
    solutions may be capped by ``args.max_expanding``. No changes are made
    to the DB unless the user confirms (see ``_proceed``).

    :param broker: a ContainerBroker.
    :param args: an instance of argparse.Namespace; must have a
        ``max_expanding`` attribute plus whatever ``_proceed`` requires.
    :param paths_with_gaps: a list of (start_path, gap_range, end_path)
        tuples as returned by ``find_paths_with_gaps``.
    :return: an exit code.
    """
    timestamp = Timestamp.now()
    solutions = []
    print('Found %d gaps:' % len(paths_with_gaps))
    for start_path, gap_range, end_path in paths_with_gaps:
        # prefer to stretch the range above the gap downwards...
        if end_path[0].state == ShardRange.ACTIVE:
            expanding_range = end_path[0]
            solutions.append((gap_range, expanding_range))
        # ...otherwise stretch the range below the gap upwards
        elif start_path[-1].state == ShardRange.ACTIVE:
            expanding_range = start_path[-1]
            solutions.append((gap_range, expanding_range))
        else:
            # no ACTIVE neighbor: this gap cannot be fixed
            expanding_range = None
        print('    gap: %r - %r'
              % (gap_range.lower, gap_range.upper))
        print('    apparent gap contents:')
        for sr in broker.get_shard_ranges(marker=gap_range.lower,
                                          end_marker=gap_range.upper,
                                          include_deleted=True):
            _print_shard_range(sr, 3)
        if expanding_range:
            print('    gap can be fixed by expanding neighbor range:')
            _print_shard_range(expanding_range, 3)
        else:
            print('Warning: cannot fix gap: non-ACTIVE neighbors')

    if args.max_expanding >= 0:
        # cap the number of gaps that will be fixed in this pass
        solutions = solutions[:args.max_expanding]

    # it's possible that an expanding range is used twice, expanding both down
    # and up; if so, we only want one copy of it in our merged shard ranges
    expanding_ranges = {}
    for gap_range, expanding_range in solutions:
        expanding_range.expand([gap_range])
        expanding_range.timestamp = timestamp
        expanding_ranges[expanding_range.name] = expanding_range

    print('')
    print('Repairs necessary to fill gaps.')
    print('The following expanded shard range(s) will be applied to the DB:')
    for expanding_range in sorted(expanding_ranges.values(),
                                  key=lambda s: s.lower):
        _print_shard_range(expanding_range, 2)
    print('')
    print(
        'It is recommended that no other concurrent changes are made to the \n'
        'shard ranges while fixing gaps. If necessary, abort this change \n'
        'and stop any auto-sharding processes before repeating this command.'
    )
    print('')

    if not _proceed(args):
        return EXIT_USER_QUIT

    broker.merge_shard_ranges(list(expanding_ranges.values()))
    print('Run container-replicator to replicate the changes to other nodes.')
    print('Run container-sharder on all nodes to fill gaps.')
    return EXIT_SUCCESS
+
+
def repair_gaps(broker, args):
    """
    Find gaps in the broker's shard ranges and, if any exist, repair them.

    :param broker: a ContainerBroker.
    :param args: an instance of argparse.Namespace.
    :return: an exit code.
    """
    shard_ranges = broker.get_shard_ranges()
    paths_with_gaps = find_paths_with_gaps(shard_ranges)
    if not paths_with_gaps:
        # nothing to fix: the ranges form a single unbroken sequence
        print('Found one complete sequence of %d shard ranges with no gaps.'
              % len(shard_ranges))
        print('No repairs necessary.')
        return EXIT_SUCCESS
    return _fix_gaps(broker, args, paths_with_gaps)
def print_repair_solution(acceptor_path, overlapping_donors):
@@ -647,12 +776,7 @@ def find_repair_solution(shard_ranges, own_sr, args):
return acceptor_path, overlapping_donors
-def repair_shard_ranges(broker, args):
- if not broker.is_root_container():
- print('WARNING: Shard containers cannot be repaired.')
- print('This command should be used on a root container.')
- return EXIT_ERROR
-
+def repair_overlaps(broker, args):
shard_ranges = broker.get_shard_ranges()
if not shard_ranges:
print('No shards found, nothing to do.')
@@ -682,6 +806,17 @@ def repair_shard_ranges(broker, args):
return EXIT_SUCCESS
def repair_shard_ranges(broker, args):
    """
    Repair the broker's shard ranges, either by filling gaps or by fixing
    overlaps, according to ``args.gaps``. Only root containers can be
    repaired.

    :param broker: a ContainerBroker.
    :param args: an instance of argparse.Namespace.
    :return: an exit code.
    """
    if not broker.is_root_container():
        print('WARNING: Shard containers cannot be repaired.')
        print('This command should be used on a root container.')
        return EXIT_ERROR
    repairer = repair_gaps if args.gaps else repair_overlaps
    return repairer(broker, args)
+
+
def analyze_shard_ranges(args):
shard_data = _load_and_validate_shard_data(args, require_index=False)
for data in shard_data:
@@ -697,13 +832,6 @@ def analyze_shard_ranges(args):
return EXIT_SUCCESS
-def _positive_int(arg):
- val = int(arg)
- if val <= 0:
- raise argparse.ArgumentTypeError('must be > 0')
- return val
-
-
def _add_find_args(parser):
parser.add_argument(
'rows_per_shard', nargs='?', type=int, default=USE_SHARDER_DEFAULT,
@@ -712,7 +840,8 @@ def _add_find_args(parser):
'given in a conf file specified with --config, otherwise %s.'
% DEFAULT_SHARDER_CONF['rows_per_shard'])
parser.add_argument(
- '--minimum-shard-size', type=_positive_int,
+ '--minimum-shard-size',
+ type=wrap_for_argparse(config_positive_int_value, 'must be > 0'),
default=USE_SHARDER_DEFAULT,
help='Minimum size of the final shard range. If this is greater than '
'one then the final shard range may be extended to more than '
@@ -720,13 +849,17 @@ def _add_find_args(parser):
'than minimum-shard-size rows.')
-def _add_replace_args(parser):
+def _add_account_prefix_arg(parser):
parser.add_argument(
'--shards_account_prefix', metavar='shards_account_prefix', type=str,
required=False, default='.shards_',
help="Prefix for shards account. The default is '.shards_'. This "
"should only be changed if the auto_create_account_prefix option "
"has been similarly changed in swift.conf.")
+
+
+def _add_replace_args(parser):
+ _add_account_prefix_arg(parser)
parser.add_argument(
'--replace-timeout', type=int, default=600,
help='Minimum DB timeout to use when replacing shard ranges.')
@@ -756,6 +889,15 @@ def _add_prompt_args(parser):
'Cannot be used with --yes option.')
def _add_max_expanding_arg(parser):
    """
    Add the ``--max-expanding`` option to the given parser.

    :param parser: an argparse parser or sub-parser.
    """
    parser.add_argument(
        '--max-expanding', nargs='?',
        type=wrap_for_argparse(config_positive_int_value, 'must be > 0'),
        default=USE_SHARDER_DEFAULT,
        help='Maximum number of shards that should be '
             'expanded. Defaults to unlimited.')
+
+
def _make_parser():
parser = argparse.ArgumentParser(description='Manage shard ranges')
parser.add_argument('path_to_file',
@@ -850,37 +992,36 @@ def _make_parser():
'of rows. This command only works on root containers.')
_add_prompt_args(compact_parser)
compact_parser.add_argument(
- '--shrink-threshold', nargs='?', type=_positive_int,
+ '--shrink-threshold', nargs='?',
+ type=wrap_for_argparse(config_positive_int_value, 'must be > 0'),
default=USE_SHARDER_DEFAULT,
help='The number of rows below which a shard can qualify for '
- 'shrinking. '
- 'Defaults to %d' % DEFAULT_SHARDER_CONF['shrink_threshold'])
+ 'shrinking. '
+ 'Defaults to %d' % DEFAULT_SHARDER_CONF['shrink_threshold'])
compact_parser.add_argument(
- '--expansion-limit', nargs='?', type=_positive_int,
+ '--expansion-limit', nargs='?',
+ type=wrap_for_argparse(config_positive_int_value, 'must be > 0'),
default=USE_SHARDER_DEFAULT,
help='Maximum number of rows for an expanding shard to have after '
- 'compaction has completed. '
- 'Defaults to %d' % DEFAULT_SHARDER_CONF['expansion_limit'])
+ 'compaction has completed. '
+ 'Defaults to %d' % DEFAULT_SHARDER_CONF['expansion_limit'])
# If just one donor shard is chosen to shrink to an acceptor then the
# expanded acceptor will handle object listings as soon as the donor shard
# has shrunk. If more than one donor shard are chosen to shrink to an
# acceptor then the acceptor may not handle object listings for some donor
# shards that have shrunk until *all* donors have shrunk, resulting in
# temporary gap(s) in object listings where the shrunk donors are missing.
- compact_parser.add_argument('--max-shrinking', nargs='?',
- type=_positive_int,
- default=USE_SHARDER_DEFAULT,
- help='Maximum number of shards that should be '
- 'shrunk into each expanding shard. '
- 'Defaults to 1. Using values greater '
- 'than 1 may result in temporary gaps in '
- 'object listings until all selected '
- 'shards have shrunk.')
- compact_parser.add_argument('--max-expanding', nargs='?',
- type=_positive_int,
- default=USE_SHARDER_DEFAULT,
- help='Maximum number of shards that should be '
- 'expanded. Defaults to unlimited.')
+ compact_parser.add_argument(
+ '--max-shrinking', nargs='?',
+ type=wrap_for_argparse(config_positive_int_value, 'must be > 0'),
+ default=USE_SHARDER_DEFAULT,
+ help='Maximum number of shards that should be '
+ 'shrunk into each expanding shard. '
+ 'Defaults to 1. Using values greater '
+ 'than 1 may result in temporary gaps in '
+ 'object listings until all selected '
+ 'shards have shrunk.')
+ _add_max_expanding_arg(compact_parser)
compact_parser.set_defaults(func=compact_shard_ranges)
# repair
@@ -889,6 +1030,20 @@ def _make_parser():
help='Repair overlapping shard ranges. No action will be taken '
'without user confirmation unless the -y option is used.')
_add_prompt_args(repair_parser)
+ repair_parser.add_argument(
+ '--min-shard-age', nargs='?',
+ type=wrap_for_argparse(non_negative_int, 'must be >= 0'),
+ default=MIN_SHARD_RANGE_AGE_FOR_REPAIR,
+ help='Minimum age of a shard for it to be considered as an overlap '
+ 'that is due for repair. Overlapping shards younger than this '
+ 'age will be ignored. Value of 0 means no recent shards will be '
+ 'ignored. Defaults to %d.' % MIN_SHARD_RANGE_AGE_FOR_REPAIR)
+ # TODO: maybe this should be a separate subcommand given that it needs
+ # some extra options vs repairing overlaps?
+ repair_parser.add_argument(
+ '--gaps', action='store_true', default=False,
+ help='Repair gaps in shard ranges.')
+ _add_max_expanding_arg(repair_parser)
repair_parser.set_defaults(func=repair_shard_ranges)
# analyze
@@ -896,6 +1051,14 @@ def _make_parser():
'analyze',
help='Analyze shard range json data read from file. Use -v to see '
'more detailed analysis.')
+ analyze_parser.add_argument(
+ '--min-shard-age', nargs='?',
+ type=wrap_for_argparse(non_negative_int, 'must be >= 0'),
+ default=0,
+ help='Minimum age of a shard for it to be considered as an overlap '
+ 'that is due for repair. Overlapping shards younger than this '
+ 'age will be ignored. Value of 0 means no recent shards will be '
+ 'ignored. Defaults to 0.')
analyze_parser.set_defaults(func=analyze_shard_ranges)
return parser
diff --git a/swift/cli/ringbuilder.py b/swift/cli/ringbuilder.py
index 1b9910578..5ab6a6f3a 100644
--- a/swift/cli/ringbuilder.py
+++ b/swift/cli/ringbuilder.py
@@ -476,6 +476,7 @@ def _make_display_device_table(builder):
rep_ip_width = 14
rep_port_width = 4
ip_ipv6 = rep_ipv6 = False
+ weight_width = 6
for dev in builder._iter_devs():
if is_valid_ipv6(dev['ip']):
ip_ipv6 = True
@@ -486,6 +487,8 @@ def _make_display_device_table(builder):
port_width = max(len(str(dev['port'])), port_width)
rep_port_width = max(len(str(dev['replication_port'])),
rep_port_width)
+ weight_width = max(len('%6.02f' % dev['weight']),
+ weight_width)
if ip_ipv6:
ip_width += 2
if rep_ipv6:
@@ -493,7 +496,7 @@ def _make_display_device_table(builder):
header_line = ('Devices:%5s %6s %4s %' + str(ip_width)
+ 's:%-' + str(port_width) + 's %' +
str(rep_ip_width) + 's:%-' + str(rep_port_width) +
- 's %5s %6s %10s %7s %5s %s') % (
+ 's %5s %' + str(weight_width) + 's %10s %7s %5s %s') % (
'id', 'region', 'zone', 'ip address',
'port', 'replication ip', 'port', 'name',
'weight', 'partitions', 'balance', 'flags',
@@ -511,7 +514,8 @@ def _make_display_device_table(builder):
'%', str(ip_width), 's:%-',
str(port_width), 'd ', '%',
str(rep_ip_width), 's', ':%-',
- str(rep_port_width), 'd %5s %6.02f'
+ str(rep_port_width), 'd %5s %',
+ str(weight_width), '.02f'
' %10s %7.02f %5s %s'])
args = (dev['id'], dev['region'], dev['zone'], dev_ip, dev['port'],
dev_replication_ip, dev['replication_port'], dev['device'],
diff --git a/swift/common/db_replicator.py b/swift/common/db_replicator.py
index 47fb58c29..3e82e2dd8 100644
--- a/swift/common/db_replicator.py
+++ b/swift/common/db_replicator.py
@@ -34,7 +34,7 @@ from swift.common.utils import get_logger, whataremyips, storage_directory, \
renamer, mkdirs, lock_parent_directory, config_true_value, \
unlink_older_than, dump_recon_cache, rsync_module_interpolation, \
parse_override_options, round_robin_iter, Everything, get_db_files, \
- parse_db_filename, quote, RateLimitedIterator
+ parse_db_filename, quote, RateLimitedIterator, config_auto_int_value
from swift.common import ring
from swift.common.ring.utils import is_local_device
from swift.common.http import HTTP_NOT_FOUND, HTTP_INSUFFICIENT_STORAGE, \
@@ -174,6 +174,7 @@ class ReplConnection(BufferedHTTPConnection):
response.data = response.read()
return response
except (Exception, Timeout):
+ self.close()
self.logger.exception(
_('ERROR reading HTTP response from %s'), self.node)
return None
@@ -195,7 +196,7 @@ class Replicator(Daemon):
self.cpool = GreenPool(size=concurrency)
swift_dir = conf.get('swift_dir', '/etc/swift')
self.ring = ring.Ring(swift_dir, ring_name=self.server_type)
- self._local_device_ids = set()
+ self._local_device_ids = {}
self.per_diff = int(conf.get('per_diff', 1000))
self.max_diffs = int(conf.get('max_diffs') or 100)
self.interval = float(conf.get('interval') or
@@ -238,6 +239,8 @@ class Replicator(Daemon):
self.extract_device_re = re.compile('%s%s([^%s]+)' % (
self.root, os.path.sep, os.path.sep))
self.handoffs_only = config_true_value(conf.get('handoffs_only', 'no'))
+ self.handoff_delete = config_auto_int_value(
+ conf.get('handoff_delete', 'auto'), 0)
def _zero_stats(self):
"""Zero out the stats."""
@@ -554,7 +557,13 @@ class Replicator(Daemon):
reason = '%s new rows' % max_row_delta
self.logger.debug(log_template, reason)
return True
- if not (responses and all(responses)):
+ if self.handoff_delete:
+ # delete handoff if we have had handoff_delete successes
+ successes_count = len([resp for resp in responses if resp])
+ delete_handoff = successes_count >= self.handoff_delete
+ else:
+ delete_handoff = responses and all(responses)
+ if not delete_handoff:
reason = '%s/%s success' % (responses.count(True), len(responses))
self.logger.debug(log_template, reason)
return True
@@ -779,13 +788,14 @@ class Replicator(Daemon):
self.logger.error(_('ERROR Failed to get my own IPs?'))
return
- if self.handoffs_only:
+ if self.handoffs_only or self.handoff_delete:
self.logger.warning(
- 'Starting replication pass with handoffs_only enabled. '
- 'This mode is not intended for normal '
- 'operation; use handoffs_only with care.')
+ 'Starting replication pass with handoffs_only '
+ 'and/or handoffs_delete enabled. '
+ 'These modes are not intended for normal '
+ 'operation; use these options with care.')
- self._local_device_ids = set()
+ self._local_device_ids = {}
found_local = False
for node in self.ring.devs:
if node and is_local_device(ips, self.port,
@@ -812,7 +822,7 @@ class Replicator(Daemon):
time.time() - self.reclaim_age)
datadir = os.path.join(self.root, node['device'], self.datadir)
if os.path.isdir(datadir):
- self._local_device_ids.add(node['id'])
+ self._local_device_ids[node['id']] = node
part_filt = self._partition_dir_filter(
node['id'], partitions_to_replicate)
dirs.append((datadir, node['id'], part_filt))
@@ -826,10 +836,11 @@ class Replicator(Daemon):
self._replicate_object, part, object_file, node_id)
self.cpool.waitall()
self.logger.info(_('Replication run OVER'))
- if self.handoffs_only:
+ if self.handoffs_only or self.handoff_delete:
self.logger.warning(
- 'Finished replication pass with handoffs_only enabled. '
- 'If handoffs_only is no longer required, disable it.')
+ 'Finished replication pass with handoffs_only and/or '
+ 'handoffs_delete enabled. If these are no longer required, '
+ 'disable them.')
self._report_stats()
def run_forever(self, *args, **kwargs):
diff --git a/swift/common/digest.py b/swift/common/digest.py
new file mode 100644
index 000000000..34db19409
--- /dev/null
+++ b/swift/common/digest.py
@@ -0,0 +1,151 @@
+# Copyright (c) 2022 NVIDIA
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import binascii
+import hashlib
+import hmac
+import six
+
+from swift.common.utils import strict_b64decode
+
+
+DEFAULT_ALLOWED_DIGESTS = 'sha1 sha256 sha512'
+DEPRECATED_DIGESTS = {'sha1'}
+SUPPORTED_DIGESTS = set(DEFAULT_ALLOWED_DIGESTS.split()) | DEPRECATED_DIGESTS
+
+
def get_hmac(request_method, path, expires, key, digest="sha1",
             ip_range=None):
    """
    Returns the hexdigest string of the HMAC (see RFC 2104) for
    the request.

    :param request_method: Request method to allow.
    :param path: The path to the resource to allow access to.
    :param expires: Unix timestamp as an int for when the URL
        expires.
    :param key: HMAC shared secret.
    :param digest: constructor or the string name for the digest to use in
        calculating the HMAC; defaults to SHA1.
    :param ip_range: The ip range from which the resource is allowed
        to be accessed. We need to put the ip_range as the
        first argument to hmac to avoid manipulation of the path
        due to newlines being valid in paths
        e.g. /v1/a/c/o\\n127.0.0.1
    :returns: hexdigest str of the HMAC for the request using the specified
        digest algorithm.
    """
    def _to_bytes(value):
        # every message part must be bytes before joining
        if isinstance(value, six.binary_type):
            return value
        return value.encode("utf-8")

    # the three mandatory fields, in a fixed order
    parts = [_to_bytes(request_method), _to_bytes(str(expires)),
             _to_bytes(path)]
    if ip_range:
        # ip_range goes first so a path containing a newline cannot
        # impersonate an ip-restricted signature
        parts.insert(0, b"ip=" + _to_bytes(ip_range))

    if not isinstance(key, six.binary_type):
        key = key.encode('utf8')

    if six.PY2 and isinstance(digest, six.string_types):
        # py2's hmac needs a digest constructor rather than a name
        digest = getattr(hashlib, digest)

    return hmac.new(key, b'\n'.join(parts), digest).hexdigest()
+
+
def get_allowed_digests(conf_digests, logger=None):
    """
    Compare the configured digest algorithms against the supported and
    deprecated sets and return whatever remains.

    A warning is logged for anything unsupported or deprecated.

    :param conf_digests: iterable of allowed digests. If empty, defaults to
        DEFAULT_ALLOWED_DIGESTS.
    :param logger: optional logger; if provided, use it to issue deprecation
        warnings
    :returns: A set of allowed digests that are supported and a set of
        deprecated digests.
    :raises: ValueError, if there are no digests left to return.
    """
    allowed_digests = {digest.lower() for digest in conf_digests}
    if not allowed_digests:
        allowed_digests = SUPPORTED_DIGESTS

    not_supported = allowed_digests - SUPPORTED_DIGESTS
    if not_supported:
        if logger:
            logger.warning('The following digest algorithms are configured '
                           'but not supported: %s', ', '.join(not_supported))
        allowed_digests = allowed_digests - not_supported
    deprecated = allowed_digests & DEPRECATED_DIGESTS
    if deprecated and logger:
        if conf_digests:
            logger.warning('The following digest algorithms are configured '
                           'but deprecated: %s. Support will be removed in a '
                           'future release.', ', '.join(deprecated))
        else:
            logger.warning('The following digest algorithms are allowed by '
                           'default but deprecated: %s. Support will be '
                           'disabled by default in a future release, and '
                           'later removed entirely.', ', '.join(deprecated))
    if not allowed_digests:
        raise ValueError('No valid digest algorithms are configured')

    return allowed_digests, deprecated
+
+
def extract_digest_and_algorithm(value):
    """
    Returns a tuple of (digest_algorithm, hex_encoded_digest)
    from a client-provided string of the form::

        <hex-encoded digest>

    or::

        <algorithm>:<base64-encoded digest>

    Note that hex-encoded strings must use one of sha1, sha256, or sha512.

    :raises: ValueError on parse failures
    """
    if ':' in value:
        # explicit algorithm prefix; digest is base64-encoded
        algo, value = value.split(':', 1)
        # accept both standard and url-safe base64
        if ('-' in value or '_' in value) and not (
                '+' in value or '/' in value):
            value = value.replace('-', '+').replace('_', '/')
        # '==' tops up missing padding; strict_b64decode presumably
        # tolerates surplus padding -- confirm against its definition
        value = binascii.hexlify(strict_b64decode(value + '=='))
        if not six.PY2:
            # hexlify returns bytes on py3; normalize to str
            value = value.decode('ascii')
    else:
        # bare hex digest; infer the algorithm from its length
        try:
            binascii.unhexlify(value)  # make sure it decodes
        except TypeError:
            # This is just for py2
            raise ValueError('Non-hexadecimal digit found')
        algo = {
            40: 'sha1',
            64: 'sha256',
            128: 'sha512',
        }.get(len(value))
        if not algo:
            raise ValueError('Bad digest length')
    return algo, value
diff --git a/swift/common/internal_client.py b/swift/common/internal_client.py
index 47df42646..2c1c99cc0 100644
--- a/swift/common/internal_client.py
+++ b/swift/common/internal_client.py
@@ -13,7 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from eventlet import sleep, Timeout
+from eventlet import sleep, Timeout, spawn
from eventlet.green import httplib, socket
import json
import six
@@ -206,7 +206,8 @@ class InternalClient(object):
if params:
req.params = params
try:
- resp = req.get_response(self.app)
+            # execute in a separate greenthread to not pollute corolocals
+ resp = spawn(req.get_response, self.app).wait()
except (Exception, Timeout):
exc_type, exc_value, exc_traceback = exc_info()
else:
diff --git a/swift/common/middleware/crypto/decrypter.py b/swift/common/middleware/crypto/decrypter.py
index 4ff78b688..34dfef43b 100644
--- a/swift/common/middleware/crypto/decrypter.py
+++ b/swift/common/middleware/crypto/decrypter.py
@@ -92,7 +92,7 @@ class BaseDecrypterContext(CryptoWSGIContext):
the value itself, otherwise return the value unmodified.
A value should either be a string that does not contain the ';'
- character or should be of the form:
+ character or should be of the form::
<base64-encoded ciphertext>;swift_meta=<crypto meta>
diff --git a/swift/common/middleware/crypto/keymaster.py b/swift/common/middleware/crypto/keymaster.py
index 56a247296..42f723d3d 100644
--- a/swift/common/middleware/crypto/keymaster.py
+++ b/swift/common/middleware/crypto/keymaster.py
@@ -208,10 +208,10 @@ class BaseKeyMaster(object):
This provides some basic helpers for:
- - loading from a separate config path,
- - deriving keys based on path, and
- - installing a ``swift.callback.fetch_crypto_keys`` hook
- in the request environment.
+ - loading from a separate config path,
+ - deriving keys based on path, and
+ - installing a ``swift.callback.fetch_crypto_keys`` hook
+ in the request environment.
Subclasses should define ``log_route``, ``keymaster_opts``, and
``keymaster_conf_section`` attributes, and implement the
diff --git a/swift/common/middleware/formpost.py b/swift/common/middleware/formpost.py
index 84a8ee09b..b3dde1832 100644
--- a/swift/common/middleware/formpost.py
+++ b/swift/common/middleware/formpost.py
@@ -84,11 +84,11 @@ desired.
The expires attribute is the Unix timestamp before which the form
must be submitted before it is invalidated.
-The signature attribute is the HMAC-SHA1 signature of the form. Here is
+The signature attribute is the HMAC signature of the form. Here is
sample code for computing the signature::
import hmac
- from hashlib import sha1
+ from hashlib import sha512
from time import time
path = '/v1/account/container/object_prefix'
redirect = 'https://srv.com/some-page' # set to '' if redirect not in form
@@ -98,7 +98,7 @@ sample code for computing the signature::
key = 'mykey'
hmac_body = '%s\n%s\n%s\n%s\n%s' % (path, redirect,
max_file_size, max_file_count, expires)
- signature = hmac.new(key, hmac_body, sha1).hexdigest()
+ signature = hmac.new(key, hmac_body, sha512).hexdigest()
The key is the value of either the account (X-Account-Meta-Temp-URL-Key,
X-Account-Meta-Temp-Url-Key-2) or the container
@@ -123,7 +123,7 @@ the file are simply ignored).
__all__ = ['FormPost', 'filter_factory', 'READ_CHUNK_SIZE', 'MAX_VALUE_LENGTH']
import hmac
-from hashlib import sha1
+import hashlib
from time import time
import six
@@ -132,9 +132,11 @@ from six.moves.urllib.parse import quote
from swift.common.constraints import valid_api_version
from swift.common.exceptions import MimeInvalid
from swift.common.middleware.tempurl import get_tempurl_keys_from_metadata
+from swift.common.digest import get_allowed_digests, \
+ extract_digest_and_algorithm, DEFAULT_ALLOWED_DIGESTS
from swift.common.utils import streq_const_time, parse_content_disposition, \
parse_mime_headers, iter_multipart_mime_documents, reiterate, \
- close_if_possible
+ close_if_possible, get_logger
from swift.common.registry import register_swift_info
from swift.common.wsgi import make_pre_authed_env
from swift.common.swob import HTTPUnauthorized, wsgi_to_str, str_to_wsgi
@@ -205,11 +207,17 @@ class FormPost(object):
:param conf: The configuration dict for the middleware.
"""
- def __init__(self, app, conf):
+ def __init__(self, app, conf, logger=None):
#: The next WSGI application/filter in the paste.deploy pipeline.
self.app = app
#: The filter configuration dict.
self.conf = conf
+ self.logger = logger or get_logger(conf, log_route='formpost')
+ # NOTE(review): comment/code mismatch -- the code below defaults to
+ # DEFAULT_ALLOWED_DIGESTS (which excludes deprecated sha1); if the intent
+ # is to not completely deprecate sha1 yet, default to SUPPORTED_DIGESTS.
+ self.allowed_digests = conf.get(
+ 'allowed_digests', DEFAULT_ALLOWED_DIGESTS.split())
def __call__(self, env, start_response):
"""
@@ -405,16 +413,25 @@ class FormPost(object):
hmac_body = hmac_body.encode('utf-8')
has_valid_sig = False
+ signature = attributes.get('signature', '')
+ try:
+ hash_name, signature = extract_digest_and_algorithm(signature)
+ except ValueError:
+ raise FormUnauthorized('invalid signature')
+ if hash_name not in self.allowed_digests:
+ raise FormUnauthorized('invalid signature')
+ hash_algorithm = getattr(hashlib, hash_name) if six.PY2 else hash_name
+
for key in keys:
# Encode key like in swift.common.utls.get_hmac.
if not isinstance(key, six.binary_type):
key = key.encode('utf8')
- sig = hmac.new(key, hmac_body, sha1).hexdigest()
- if streq_const_time(sig, (attributes.get('signature') or
- 'invalid')):
+ sig = hmac.new(key, hmac_body, hash_algorithm).hexdigest()
+ if streq_const_time(sig, signature):
has_valid_sig = True
if not has_valid_sig:
raise FormUnauthorized('invalid signature')
+ self.logger.increment('formpost.digests.%s' % hash_name)
substatus = [None]
subheaders = [None]
@@ -467,6 +484,12 @@ def filter_factory(global_conf, **local_conf):
conf = global_conf.copy()
conf.update(local_conf)
- register_swift_info('formpost')
-
+ logger = get_logger(conf, log_route='formpost')
+ allowed_digests, deprecated_digests = get_allowed_digests(
+ conf.get('allowed_digests', '').split(), logger)
+ info = {'allowed_digests': sorted(allowed_digests)}
+ if deprecated_digests:
+ info['deprecated_digests'] = sorted(deprecated_digests)
+ register_swift_info('formpost', **info)
+ conf.update(info)
return lambda app: FormPost(app, conf)
diff --git a/swift/common/middleware/proxy_logging.py b/swift/common/middleware/proxy_logging.py
index 29920aaac..d89343479 100644
--- a/swift/common/middleware/proxy_logging.py
+++ b/swift/common/middleware/proxy_logging.py
@@ -151,6 +151,8 @@ class ProxyLoggingMiddleware(object):
self.anonymization_salt),
'remote_addr': StrAnonymizer('4.3.2.1', self.anonymization_method,
self.anonymization_salt),
+ 'domain': StrAnonymizer('', self.anonymization_method,
+ self.anonymization_salt),
'path': StrAnonymizer('/', self.anonymization_method,
self.anonymization_salt),
'referer': StrAnonymizer('ref', self.anonymization_method,
@@ -236,6 +238,10 @@ class ProxyLoggingMiddleware(object):
:param wire_status_int: the on the wire status int
"""
self.obscure_req(req)
+ domain = req.environ.get('HTTP_HOST',
+ req.environ.get('SERVER_NAME', None))
+ if ':' in domain:
+ domain, port = domain.rsplit(':', 1)
resp_headers = resp_headers or {}
logged_headers = None
if self.log_hdrs:
@@ -267,6 +273,8 @@ class ProxyLoggingMiddleware(object):
'remote_addr': StrAnonymizer(req.remote_addr,
self.anonymization_method,
self.anonymization_salt),
+ 'domain': StrAnonymizer(domain, self.anonymization_method,
+ self.anonymization_salt),
'path': StrAnonymizer(req.path_qs, self.anonymization_method,
self.anonymization_salt),
'referer': StrAnonymizer(req.referer, self.anonymization_method,
diff --git a/swift/common/middleware/s3api/acl_utils.py b/swift/common/middleware/s3api/acl_utils.py
index 4951038d1..b2821a3d2 100644
--- a/swift/common/middleware/s3api/acl_utils.py
+++ b/swift/common/middleware/s3api/acl_utils.py
@@ -41,6 +41,11 @@ def swift_acl_translate(acl, group='', user='', xml=False):
# ['HTTP_X_CONTAINER_READ', group + ':' + user]]
swift_acl['private'] = [['X-Container-Write', '.'],
['X-Container-Read', '.']]
+
+ # Swift doesn't have per-object ACLs, so this is best-effort
+ swift_acl['bucket-owner-full-control'] = swift_acl['private']
+ swift_acl['bucket-owner-read'] = swift_acl['private']
+
if xml:
# We are working with XML and need to parse it
try:
@@ -62,7 +67,7 @@ def swift_acl_translate(acl, group='', user='', xml=False):
else:
acl = 'unsupported'
- if acl == 'authenticated-read':
+ if acl in ('authenticated-read', 'log-delivery-write'):
raise S3NotImplemented()
elif acl not in swift_acl:
raise ACLError()
diff --git a/swift/common/middleware/s3api/controllers/multi_upload.py b/swift/common/middleware/s3api/controllers/multi_upload.py
index 3f23f25a1..fc7615e62 100644
--- a/swift/common/middleware/s3api/controllers/multi_upload.py
+++ b/swift/common/middleware/s3api/controllers/multi_upload.py
@@ -109,6 +109,7 @@ def _get_upload_info(req, app, upload_id):
try:
return req.get_response(app, 'HEAD', container=container, obj=obj)
except NoSuchKey:
+ upload_marker_path = req.environ.get('s3api.backend_path')
try:
resp = req.get_response(app, 'HEAD')
if resp.sysmeta_headers.get(sysmeta_header(
@@ -116,6 +117,11 @@ def _get_upload_info(req, app, upload_id):
return resp
except NoSuchKey:
pass
+ finally:
+ # Ops often find it more useful for us to log the upload marker
+ # path, so put it back
+ if upload_marker_path is not None:
+ req.environ['s3api.backend_path'] = upload_marker_path
raise NoSuchUpload(upload_id=upload_id)
finally:
# ...making sure to restore any copy-source before returning
@@ -326,7 +332,8 @@ class UploadsController(Controller):
'last_modified': object_info['last_modified']}
return obj_dict
- is_part = re.compile('/[0-9]+$')
+ is_segment = re.compile('.*/[0-9]+$')
+
while len(uploads) < maxuploads:
try:
resp = req.get_response(self.app, container=container,
@@ -338,8 +345,8 @@ class UploadsController(Controller):
if not objects:
break
- new_uploads = [object_to_upload(obj) for obj in objects if
- is_part.search(obj.get('name', '')) is None]
+ new_uploads = [object_to_upload(obj) for obj in objects
+ if not is_segment.match(obj.get('name', ''))]
new_prefixes = []
if 'delimiter' in req.params:
prefix = get_param(req, 'prefix', '')
diff --git a/swift/common/middleware/s3api/s3request.py b/swift/common/middleware/s3api/s3request.py
index f53d12b12..6f8963b5f 100644
--- a/swift/common/middleware/s3api/s3request.py
+++ b/swift/common/middleware/s3api/s3request.py
@@ -916,6 +916,8 @@ class S3Request(swob.Request):
src_resp = self.get_response(app, 'HEAD', src_bucket,
swob.str_to_wsgi(src_obj),
headers=headers, query=query)
+ # we can't let this HEAD req spoil our COPY
+ self.headers.pop('x-backend-storage-policy-index')
if src_resp.status_int == 304: # pylint: disable-msg=E1101
raise PreconditionFailed()
@@ -1483,8 +1485,10 @@ class S3Request(swob.Request):
info = get_container_info(sw_req.environ, app, swift_source='S3')
if is_success(info['status']):
return info
- elif info['status'] == 404:
+ elif info['status'] == HTTP_NOT_FOUND:
raise NoSuchBucket(self.container_name)
+ elif info['status'] == HTTP_SERVICE_UNAVAILABLE:
+ raise ServiceUnavailable()
else:
raise InternalError(
'unexpected status code %d' % info['status'])
diff --git a/swift/common/middleware/s3api/subresource.py b/swift/common/middleware/s3api/subresource.py
index c941040d8..1aa47b4b2 100644
--- a/swift/common/middleware/s3api/subresource.py
+++ b/swift/common/middleware/s3api/subresource.py
@@ -182,18 +182,19 @@ class Grantee(object):
"""
Convert a grantee string in the HTTP header to an Grantee instance.
"""
- type, value = grantee.split('=', 1)
+ grantee_type, value = grantee.split('=', 1)
+ grantee_type = grantee_type.lower()
value = value.strip('"\'')
- if type == 'id':
+ if grantee_type == 'id':
return User(value)
- elif type == 'emailAddress':
+ elif grantee_type == 'emailaddress':
raise S3NotImplemented()
- elif type == 'uri':
+ elif grantee_type == 'uri':
# return a subclass instance of Group class
subclass = get_group_subclass_from_uri(value)
return subclass()
else:
- raise InvalidArgument(type, value,
+ raise InvalidArgument(grantee_type, value,
'Argument format not recognized')
diff --git a/swift/common/middleware/slo.py b/swift/common/middleware/slo.py
index 857a52f22..b4493d4a6 100644
--- a/swift/common/middleware/slo.py
+++ b/swift/common/middleware/slo.py
@@ -1523,7 +1523,17 @@ class StaticLargeObject(object):
new_env['PATH_INFO'] = (
'/%s/%s/%s' % (vrs, account, str_to_wsgi(obj_name.lstrip('/')))
)
- resp = Request.blank('', new_env).get_response(self.app)
+ # Just request the last byte of non-SLO objects so we don't waste
+ # a bunch of resources in drain_and_close() below
+ manifest_req = Request.blank('', new_env, range='bytes=-1')
+ update_ignore_range_header(manifest_req, 'X-Static-Large-Object')
+ resp = manifest_req.get_response(self.app)
+
+ if resp.is_success and config_true_value(resp.headers.get(
+ 'X-Static-Large-Object')) and len(resp.body) == 1:
+ # pre-2.24.0 object-server
+ manifest_req = Request.blank('', new_env)
+ resp = manifest_req.get_response(self.app)
if resp.is_success:
if config_true_value(resp.headers.get('X-Static-Large-Object')):
@@ -1532,6 +1542,8 @@ class StaticLargeObject(object):
except ValueError:
raise HTTPServerError('Unable to load SLO manifest')
else:
+ # Drain and close GET request (prevents socket leaks)
+ drain_and_close(resp)
raise HTTPBadRequest('Not an SLO manifest')
elif resp.status_int == HTTP_NOT_FOUND:
raise HTTPNotFound('SLO manifest not found')
diff --git a/swift/common/middleware/staticweb.py b/swift/common/middleware/staticweb.py
index 23377dfae..c01f720f1 100644
--- a/swift/common/middleware/staticweb.py
+++ b/swift/common/middleware/staticweb.py
@@ -282,7 +282,7 @@ class _StaticWebContext(WSGIContext):
body = b''.join(resp)
if body:
listing = json.loads(body)
- if not listing:
+ if prefix and not listing:
resp = HTTPNotFound()(env, self._start_response)
return self._error_response(resp, env, start_response)
headers = {'Content-Type': 'text/html; charset=UTF-8'}
diff --git a/swift/common/middleware/tempurl.py b/swift/common/middleware/tempurl.py
index 86a6e91b3..ffb900d78 100644
--- a/swift/common/middleware/tempurl.py
+++ b/swift/common/middleware/tempurl.py
@@ -64,7 +64,7 @@ signature is generated using the HTTP method to allow (``GET``, ``PUT``,
the full path to the object, and the key set on the account.
The digest algorithm to be used may be configured by the operator. By default,
-HMAC-SHA1, HMAC-SHA256, and HMAC-SHA512 are supported. Check the
+HMAC-SHA256 and HMAC-SHA512 are supported. Check the
``tempurl.allowed_digests`` entry in the cluster's capabilities response to
see which algorithms are supported by your deployment; see
:doc:`api/discoverability` for more information. On older clusters,
@@ -75,24 +75,25 @@ For example, here is code generating the signature for a ``GET`` for 60
seconds on ``/v1/AUTH_account/container/object``::
import hmac
- from hashlib import sha1
+ from hashlib import sha256
from time import time
method = 'GET'
expires = int(time() + 60)
path = '/v1/AUTH_account/container/object'
key = 'mykey'
hmac_body = '%s\n%s\n%s' % (method, expires, path)
- sig = hmac.new(key, hmac_body, sha1).hexdigest()
+ sig = hmac.new(key, hmac_body, sha256).hexdigest()
Be certain to use the full path, from the ``/v1/`` onward.
Let's say ``sig`` ends up equaling
-``da39a3ee5e6b4b0d3255bfef95601890afd80709`` and ``expires`` ends up
-``1323479485``. Then, for example, the website could provide a link to::
+``732fcac368abb10c78a4cbe95c3fab7f311584532bf779abd5074e13cbe8b88b`` and
+``expires`` ends up ``1512508563``. Then, for example, the website could
+provide a link to::
https://swift-cluster.example.com/v1/AUTH_account/container/object?
- temp_url_sig=da39a3ee5e6b4b0d3255bfef95601890afd80709&
- temp_url_expires=1323479485
+ temp_url_sig=732fcac368abb10c78a4cbe95c3fab7f311584532bf779abd5074e13cbe8b88b&
+ temp_url_expires=1512508563
For longer hashes, a hex encoding becomes unwieldy. Base64 encoding is also
supported, and indicated by prefixing the signature with ``"<digest name>:"``.
@@ -124,11 +125,11 @@ Supposing that ``sig`` ends up equaling
You may also use ISO 8601 UTC timestamps with the format
``"%Y-%m-%dT%H:%M:%SZ"`` instead of UNIX timestamps in the URL
(but NOT in the code above for generating the signature!).
-So, the above HMAC-SHA1 URL could also be formulated as::
+So, the above HMAC-SHA256 URL could also be formulated as::
https://swift-cluster.example.com/v1/AUTH_account/container/object?
- temp_url_sig=da39a3ee5e6b4b0d3255bfef95601890afd80709&
- temp_url_expires=2011-12-10T01:11:25Z
+ temp_url_sig=732fcac368abb10c78a4cbe95c3fab7f311584532bf779abd5074e13cbe8b88b&
+ temp_url_expires=2017-12-05T21:16:03Z
If a prefix-based signature with the prefix ``pre`` is desired, set path to::
@@ -140,31 +141,31 @@ a query parameter called ``temp_url_prefix``. So, if ``sig`` and ``expires``
would end up like above, following URL would be valid::
https://swift-cluster.example.com/v1/AUTH_account/container/pre/object?
- temp_url_sig=da39a3ee5e6b4b0d3255bfef95601890afd80709&
- temp_url_expires=1323479485&
+ temp_url_sig=732fcac368abb10c78a4cbe95c3fab7f311584532bf779abd5074e13cbe8b88b&
+ temp_url_expires=1512508563&
temp_url_prefix=pre
Another valid URL::
https://swift-cluster.example.com/v1/AUTH_account/container/pre/
subfolder/another_object?
- temp_url_sig=da39a3ee5e6b4b0d3255bfef95601890afd80709&
- temp_url_expires=1323479485&
+ temp_url_sig=732fcac368abb10c78a4cbe95c3fab7f311584532bf779abd5074e13cbe8b88b&
+ temp_url_expires=1512508563&
temp_url_prefix=pre
If you wish to lock down the ip ranges from where the resource can be accessed
to the ip ``1.2.3.4``::
import hmac
- from hashlib import sha1
+ from hashlib import sha256
from time import time
method = 'GET'
expires = int(time() + 60)
path = '/v1/AUTH_account/container/object'
ip_range = '1.2.3.4'
- key = 'mykey'
+ key = b'mykey'
hmac_body = 'ip=%s\n%s\n%s\n%s' % (ip_range, method, expires, path)
- sig = hmac.new(key, hmac_body, sha1).hexdigest()
+ sig = hmac.new(key, hmac_body.encode('ascii'), sha256).hexdigest()
The generated signature would only be valid from the ip ``1.2.3.4``. The
middleware detects an ip-based temporary URL by a query parameter called
@@ -172,29 +173,29 @@ middleware detects an ip-based temporary URL by a query parameter called
above, following URL would be valid::
https://swift-cluster.example.com/v1/AUTH_account/container/object?
- temp_url_sig=da39a3ee5e6b4b0d3255bfef95601890afd80709&
- temp_url_expires=1323479485&
+ temp_url_sig=3f48476acaf5ec272acd8e99f7b5bad96c52ddba53ed27c60613711774a06f0c&
+ temp_url_expires=1648082711&
temp_url_ip_range=1.2.3.4
Similarly to lock down the ip to a range of ``1.2.3.X`` so starting
from the ip ``1.2.3.0`` to ``1.2.3.255``::
import hmac
- from hashlib import sha1
+ from hashlib import sha256
from time import time
method = 'GET'
expires = int(time() + 60)
path = '/v1/AUTH_account/container/object'
ip_range = '1.2.3.0/24'
- key = 'mykey'
+ key = b'mykey'
hmac_body = 'ip=%s\n%s\n%s\n%s' % (ip_range, method, expires, path)
- sig = hmac.new(key, hmac_body, sha1).hexdigest()
+ sig = hmac.new(key, hmac_body.encode('ascii'), sha256).hexdigest()
Then the following url would be valid::
https://swift-cluster.example.com/v1/AUTH_account/container/object?
- temp_url_sig=da39a3ee5e6b4b0d3255bfef95601890afd80709&
- temp_url_expires=1323479485&
+ temp_url_sig=6ff81256b8a3ba11d239da51a703b9c06a56ffddeb8caab74ca83af8f73c9c83&
+ temp_url_expires=1648082711&
temp_url_ip_range=1.2.3.0/24
@@ -222,16 +223,16 @@ can override this with a filename query parameter. Modifying the
above example::
https://swift-cluster.example.com/v1/AUTH_account/container/object?
- temp_url_sig=da39a3ee5e6b4b0d3255bfef95601890afd80709&
- temp_url_expires=1323479485&filename=My+Test+File.pdf
+ temp_url_sig=732fcac368abb10c78a4cbe95c3fab7f311584532bf779abd5074e13cbe8b88b&
+ temp_url_expires=1512508563&filename=My+Test+File.pdf
If you do not want the object to be downloaded, you can cause
``Content-Disposition: inline`` to be set on the response by adding the
``inline`` parameter to the query string, like so::
https://swift-cluster.example.com/v1/AUTH_account/container/object?
- temp_url_sig=da39a3ee5e6b4b0d3255bfef95601890afd80709&
- temp_url_expires=1323479485&inline
+ temp_url_sig=732fcac368abb10c78a4cbe95c3fab7f311584532bf779abd5074e13cbe8b88b&
+ temp_url_expires=1512508563&inline
In some cases, the client might not able to present the content of the object,
but you still want the content able to save to local with the specific
@@ -240,8 +241,8 @@ set on the response by adding the ``inline&filename=...`` parameter to the
query string, like so::
https://swift-cluster.example.com/v1/AUTH_account/container/object?
- temp_url_sig=da39a3ee5e6b4b0d3255bfef95601890afd80709&
- temp_url_expires=1323479485&inline&filename=My+Test+File.pdf
+ temp_url_sig=732fcac368abb10c78a4cbe95c3fab7f311584532bf779abd5074e13cbe8b88b&
+ temp_url_expires=1512508563&inline&filename=My+Test+File.pdf
---------------------
Cluster Configuration
@@ -288,7 +289,7 @@ This middleware understands the following configuration settings:
A whitespace delimited list of digest algorithms that are allowed
to be used when calculating the signature for a temporary URL.
- Default: ``sha1 sha256 sha512``
+ Default: ``sha256 sha512``
"""
__all__ = ['TempURL', 'filter_factory',
@@ -297,9 +298,7 @@ __all__ = ['TempURL', 'filter_factory',
'DEFAULT_OUTGOING_REMOVE_HEADERS',
'DEFAULT_OUTGOING_ALLOW_HEADERS']
-import binascii
from calendar import timegm
-import hashlib
import six
from os.path import basename
from time import time, strftime, strptime, gmtime
@@ -310,10 +309,12 @@ from six.moves.urllib.parse import urlencode
from swift.proxy.controllers.base import get_account_info, get_container_info
from swift.common.header_key_dict import HeaderKeyDict
+from swift.common.digest import get_allowed_digests, \
+ extract_digest_and_algorithm, DEFAULT_ALLOWED_DIGESTS, get_hmac
from swift.common.swob import header_to_environ_key, HTTPUnauthorized, \
HTTPBadRequest, wsgi_to_str
from swift.common.utils import split_path, get_valid_utf8_str, \
- get_hmac, streq_const_time, quote, get_logger, strict_b64decode
+ streq_const_time, quote, get_logger
from swift.common.registry import register_swift_info, register_sensitive_param
@@ -341,9 +342,6 @@ DEFAULT_OUTGOING_REMOVE_HEADERS = 'x-object-meta-*'
#: '*' to indicate a prefix match.
DEFAULT_OUTGOING_ALLOW_HEADERS = 'x-object-meta-public-*'
-DEFAULT_ALLOWED_DIGESTS = 'sha1 sha256 sha512'
-SUPPORTED_DIGESTS = set(DEFAULT_ALLOWED_DIGESTS.split())
-
CONTAINER_SCOPE = 'container'
ACCOUNT_SCOPE = 'account'
@@ -423,11 +421,12 @@ class TempURL(object):
:param conf: The configuration dict for the middleware.
"""
- def __init__(self, app, conf):
+ def __init__(self, app, conf, logger=None):
#: The next WSGI application/filter in the paste.deploy pipeline.
self.app = app
#: The filter configuration dict.
self.conf = conf
+ self.logger = logger or get_logger(conf, log_route='tempurl')
self.allowed_digests = conf.get(
'allowed_digests', DEFAULT_ALLOWED_DIGESTS.split())
@@ -504,23 +503,10 @@ class TempURL(object):
if not temp_url_sig or not temp_url_expires:
return self._invalid(env, start_response)
- if ':' in temp_url_sig:
- hash_algorithm, temp_url_sig = temp_url_sig.split(':', 1)
- if ('-' in temp_url_sig or '_' in temp_url_sig) and not (
- '+' in temp_url_sig or '/' in temp_url_sig):
- temp_url_sig = temp_url_sig.replace('-', '+').replace('_', '/')
- try:
- temp_url_sig = binascii.hexlify(strict_b64decode(
- temp_url_sig + '=='))
- if not six.PY2:
- temp_url_sig = temp_url_sig.decode('ascii')
- except ValueError:
- return self._invalid(env, start_response)
- elif len(temp_url_sig) == 40:
- hash_algorithm = 'sha1'
- elif len(temp_url_sig) == 64:
- hash_algorithm = 'sha256'
- else:
+ try:
+ hash_algorithm, temp_url_sig = extract_digest_and_algorithm(
+ temp_url_sig)
+ except ValueError:
return self._invalid(env, start_response)
if hash_algorithm not in self.allowed_digests:
return self._invalid(env, start_response)
@@ -573,6 +559,7 @@ class TempURL(object):
break
if not is_valid_hmac:
return self._invalid(env, start_response)
+ self.logger.increment('tempurl.digests.%s' % hash_algorithm)
# disallowed headers prevent accidentally allowing upload of a pointer
# to data that the PUT tempurl would not otherwise allow access for.
# It should be safe to provide a GET tempurl for data that an
@@ -749,12 +736,10 @@ class TempURL(object):
if not request_method:
request_method = env['REQUEST_METHOD']
- digest = getattr(hashlib, hash_algorithm)
-
return [
(get_hmac(
request_method, path, expires, key,
- digest=digest, ip_range=ip_range
+ digest=hash_algorithm, ip_range=ip_range
), scope)
for (key, scope) in scoped_keys]
@@ -846,32 +831,26 @@ def filter_factory(global_conf, **local_conf):
conf = global_conf.copy()
conf.update(local_conf)
+ logger = get_logger(conf, log_route='tempurl')
+
defaults = {
'methods': 'GET HEAD PUT POST DELETE',
'incoming_remove_headers': DEFAULT_INCOMING_REMOVE_HEADERS,
'incoming_allow_headers': DEFAULT_INCOMING_ALLOW_HEADERS,
'outgoing_remove_headers': DEFAULT_OUTGOING_REMOVE_HEADERS,
'outgoing_allow_headers': DEFAULT_OUTGOING_ALLOW_HEADERS,
- 'allowed_digests': DEFAULT_ALLOWED_DIGESTS,
}
info_conf = {k: conf.get(k, v).split() for k, v in defaults.items()}
- allowed_digests = set(digest.lower()
- for digest in info_conf['allowed_digests'])
- not_supported = allowed_digests - SUPPORTED_DIGESTS
- if not_supported:
- logger = get_logger(conf, log_route='tempurl')
- logger.warning('The following digest algorithms are configured but '
- 'not supported: %s', ', '.join(not_supported))
- allowed_digests -= not_supported
- if not allowed_digests:
- raise ValueError('No valid digest algorithms are configured '
- 'for tempurls')
+ allowed_digests, deprecated_digests = get_allowed_digests(
+ conf.get('allowed_digests', '').split(), logger)
info_conf['allowed_digests'] = sorted(allowed_digests)
+ if deprecated_digests:
+ info_conf['deprecated_digests'] = sorted(deprecated_digests)
register_swift_info('tempurl', **info_conf)
conf.update(info_conf)
register_sensitive_param('temp_url_sig')
- return lambda app: TempURL(app, conf)
+ return lambda app: TempURL(app, conf, logger)
diff --git a/swift/common/ring/builder.py b/swift/common/ring/builder.py
index 57d108b7a..e35f4979a 100644
--- a/swift/common/ring/builder.py
+++ b/swift/common/ring/builder.py
@@ -178,7 +178,7 @@ class RingBuilder(object):
@contextmanager
def debug(self):
"""
- Temporarily enables debug logging, useful in tests, e.g.
+ Temporarily enables debug logging, useful in tests, e.g.::
with rb.debug():
rb.rebalance()
diff --git a/swift/common/storage_policy.py b/swift/common/storage_policy.py
index 3045cc98d..88c892aa6 100644
--- a/swift/common/storage_policy.py
+++ b/swift/common/storage_policy.py
@@ -37,11 +37,11 @@ DEFAULT_EC_OBJECT_SEGMENT_SIZE = 1048576
class BindPortsCache(object):
- def __init__(self, swift_dir, bind_ip):
+ def __init__(self, swift_dir, ring_ip):
self.swift_dir = swift_dir
self.mtimes_by_ring_path = {}
self.portsets_by_ring_path = {}
- self.my_ips = set(whataremyips(bind_ip))
+ self.my_ips = set(whataremyips(ring_ip))
def all_bind_ports_for_node(self):
"""
diff --git a/swift/common/utils.py b/swift/common/utils.py
index fbbeaab11..b0fef4419 100644
--- a/swift/common/utils.py
+++ b/swift/common/utils.py
@@ -25,7 +25,6 @@ import errno
import fcntl
import grp
import hashlib
-import hmac
import json
import math
import operator
@@ -40,7 +39,6 @@ import uuid
import functools
import platform
import email.parser
-from hashlib import sha1
from random import random, shuffle
from contextlib import contextmanager, closing
import ctypes
@@ -283,46 +281,6 @@ except (InvalidHashPathConfigError, IOError):
pass
-def get_hmac(request_method, path, expires, key, digest=sha1,
- ip_range=None):
- """
- Returns the hexdigest string of the HMAC (see RFC 2104) for
- the request.
-
- :param request_method: Request method to allow.
- :param path: The path to the resource to allow access to.
- :param expires: Unix timestamp as an int for when the URL
- expires.
- :param key: HMAC shared secret.
- :param digest: constructor for the digest to use in calculating the HMAC
- Defaults to SHA1
- :param ip_range: The ip range from which the resource is allowed
- to be accessed. We need to put the ip_range as the
- first argument to hmac to avoid manipulation of the path
- due to newlines being valid in paths
- e.g. /v1/a/c/o\\n127.0.0.1
- :returns: hexdigest str of the HMAC for the request using the specified
- digest algorithm.
- """
- # These are the three mandatory fields.
- parts = [request_method, str(expires), path]
- formats = [b"%s", b"%s", b"%s"]
-
- if ip_range:
- parts.insert(0, ip_range)
- formats.insert(0, b"ip=%s")
-
- if not isinstance(key, six.binary_type):
- key = key.encode('utf8')
-
- message = b'\n'.join(
- fmt % (part if isinstance(part, six.binary_type)
- else part.encode("utf-8"))
- for fmt, part in zip(formats, parts))
-
- return hmac.new(key, message, digest).hexdigest()
-
-
def backward(f, blocksize=4096):
"""
A generator returning lines from a file starting with the last line,
@@ -1331,7 +1289,7 @@ class Timestamp(object):
"""
Get an isoformat string representation of the 'normal' part of the
Timestamp with microsecond precision and no trailing timezone, for
- example:
+ example::
1970-01-01T00:00:00.000000
@@ -2564,12 +2522,12 @@ def get_hub():
Another note about epoll: it's hard to use when forking. epoll works
like so:
- * create an epoll instance: efd = epoll_create(...)
+ * create an epoll instance: ``efd = epoll_create(...)``
- * register file descriptors of interest with epoll_ctl(efd,
- EPOLL_CTL_ADD, fd, ...)
+ * register file descriptors of interest with
+ ``epoll_ctl(efd, EPOLL_CTL_ADD, fd, ...)``
- * wait for events with epoll_wait(efd, ...)
+ * wait for events with ``epoll_wait(efd, ...)``
If you fork, you and all your child processes end up using the same
epoll instance, and everyone becomes confused. It is possible to use
@@ -2746,25 +2704,25 @@ def expand_ipv6(address):
return socket.inet_ntop(socket.AF_INET6, packed_ip)
-def whataremyips(bind_ip=None):
+def whataremyips(ring_ip=None):
"""
Get "our" IP addresses ("us" being the set of services configured by
one `*.conf` file). If our REST listens on a specific address, return it.
Otherwise, if listen on '0.0.0.0' or '::' return all addresses, including
the loopback.
- :param str bind_ip: Optional bind_ip from a config file; may be IP address
- or hostname.
+ :param str ring_ip: Optional ring_ip/bind_ip from a config file; may be
+ IP address or hostname.
:returns: list of Strings of ip addresses
"""
- if bind_ip:
+ if ring_ip:
# See if bind_ip is '0.0.0.0'/'::'
try:
_, _, _, _, sockaddr = socket.getaddrinfo(
- bind_ip, None, 0, socket.SOCK_STREAM, 0,
+ ring_ip, None, 0, socket.SOCK_STREAM, 0,
socket.AI_NUMERICHOST)[0]
if sockaddr[0] not in ('0.0.0.0', '::'):
- return [bind_ip]
+ return [ring_ip]
except socket.gaierror:
pass
@@ -5262,6 +5220,11 @@ class ShardRange(object):
MIN = MinBound()
MAX = MaxBound()
+ __slots__ = (
+ 'account', 'container',
+ '_timestamp', '_meta_timestamp', '_state_timestamp', '_epoch',
+ '_lower', '_upper', '_deleted', '_state', '_count', '_bytes',
+ '_tombstones', '_reported')
def __init__(self, name, timestamp, lower=MIN, upper=MAX,
object_count=0, bytes_used=0, meta_timestamp=None,
@@ -5398,10 +5361,9 @@ class ShardRange(object):
@lower.setter
def lower(self, value):
- with warnings.catch_warnings():
- warnings.simplefilter('ignore', UnicodeWarning)
- if value in (None, b'', u''):
- value = ShardRange.MIN
+ if value is None or (value == b"" if isinstance(value, bytes) else
+ value == u""):
+ value = ShardRange.MIN
try:
value = self._encode_bound(value)
except TypeError as err:
@@ -5426,10 +5388,9 @@ class ShardRange(object):
@upper.setter
def upper(self, value):
- with warnings.catch_warnings():
- warnings.simplefilter('ignore', UnicodeWarning)
- if value in (None, b'', u''):
- value = ShardRange.MAX
+ if value is None or (value == b"" if isinstance(value, bytes) else
+ value == u""):
+ value = ShardRange.MAX
try:
value = self._encode_bound(value)
except TypeError as err:
@@ -6483,7 +6444,7 @@ def make_db_file_path(db_path, epoch):
def get_db_files(db_path):
"""
Given the path to a db file, return a sorted list of all valid db files
- that actually exist in that path's dir. A valid db filename has the form:
+ that actually exist in that path's dir. A valid db filename has the form::
<hash>[_<epoch>].db
diff --git a/swift/common/wsgi.py b/swift/common/wsgi.py
index eb5c0ffe2..925d90359 100644
--- a/swift/common/wsgi.py
+++ b/swift/common/wsgi.py
@@ -866,8 +866,11 @@ class ServersPerPortStrategy(StrategyBase):
self.swift_dir = conf.get('swift_dir', '/etc/swift')
self.ring_check_interval = float(conf.get('ring_check_interval', 15))
- bind_ip = conf.get('bind_ip', '0.0.0.0')
- self.cache = BindPortsCache(self.swift_dir, bind_ip)
+ # typically ring_ip will be the same as bind_ip, but in a container the
+ # bind_ip might be different from the host IP address used to look up
+ # devices/ports in the ring
+ ring_ip = conf.get('ring_ip', conf.get('bind_ip', '0.0.0.0'))
+ self.cache = BindPortsCache(self.swift_dir, ring_ip)
def _reload_bind_ports(self):
self.bind_ports = self.cache.all_bind_ports_for_node()
diff --git a/swift/container/backend.py b/swift/container/backend.py
index 17546a887..eaabc2c52 100644
--- a/swift/container/backend.py
+++ b/swift/container/backend.py
@@ -322,20 +322,20 @@ class ContainerBroker(DatabaseBroker):
Note that this may involve multiple on-disk DB files if the container
becomes sharded:
- * :attr:`_db_file` is the path to the legacy container DB name, i.e.
- ``<hash>.db``. This file should exist for an initialised broker that
- has never been sharded, but will not exist once a container has been
- sharded.
- * :attr:`db_files` is a list of existing db files for the broker. This
- list should have at least one entry for an initialised broker, and
- should have two entries while a broker is in SHARDING state.
- * :attr:`db_file` is the path to whichever db is currently authoritative
- for the container. Depending on the container's state, this may not be
- the same as the ``db_file`` argument given to :meth:`~__init__`, unless
- ``force_db_file`` is True in which case :attr:`db_file` is always equal
- to the ``db_file`` argument given to :meth:`~__init__`.
- * :attr:`pending_file` is always equal to :attr:`_db_file` extended with
- ``.pending``, i.e. ``<hash>.db.pending``.
+ * :attr:`_db_file` is the path to the legacy container DB name, i.e.
+ ``<hash>.db``. This file should exist for an initialised broker that
+ has never been sharded, but will not exist once a container has been
+ sharded.
+ * :attr:`db_files` is a list of existing db files for the broker. This
+ list should have at least one entry for an initialised broker, and
+ should have two entries while a broker is in SHARDING state.
+ * :attr:`db_file` is the path to whichever db is currently authoritative
+ for the container. Depending on the container's state, this may not be
+ the same as the ``db_file`` argument given to :meth:`~__init__`, unless
+ ``force_db_file`` is True in which case :attr:`db_file` is always equal
+ to the ``db_file`` argument given to :meth:`~__init__`.
+ * :attr:`pending_file` is always equal to :attr:`_db_file` extended with
+ ``.pending``, i.e. ``<hash>.db.pending``.
"""
db_type = 'container'
db_contains_type = 'object'
@@ -377,7 +377,10 @@ class ContainerBroker(DatabaseBroker):
:param put_timestamp: initial timestamp if broker needs to be
initialized
:param storage_policy_index: the storage policy index
- :return: a :class:`swift.container.backend.ContainerBroker` instance
+ :return: a tuple of (``broker``, ``initialized``) where ``broker`` is
+ an instance of :class:`swift.container.backend.ContainerBroker` and
+ ``initialized`` is True if the db file was initialized, False
+ otherwise.
"""
hsh = hash_path(account, container)
db_dir = storage_directory(DATADIR, part, hsh)
@@ -385,12 +388,14 @@ class ContainerBroker(DatabaseBroker):
os.path.join(device_path, db_dir, hsh + '.db'), epoch)
broker = ContainerBroker(db_path, account=account, container=container,
logger=logger)
+ initialized = False
if not os.path.exists(broker.db_file):
try:
broker.initialize(put_timestamp, storage_policy_index)
+ initialized = True
except DatabaseAlreadyExists:
pass
- return broker
+ return broker, initialized
def get_db_state(self):
"""
@@ -868,7 +873,7 @@ class ContainerBroker(DatabaseBroker):
try:
data = conn.execute(('''
SELECT account, container, created_at, put_timestamp,
- delete_timestamp, status_changed_at,
+ delete_timestamp, status, status_changed_at,
object_count, bytes_used,
reported_put_timestamp, reported_delete_timestamp,
reported_object_count, reported_bytes_used, hash,
@@ -923,7 +928,7 @@ class ContainerBroker(DatabaseBroker):
Get global data for the container.
:returns: dict with keys: account, container, created_at,
- put_timestamp, delete_timestamp, status_changed_at,
+ put_timestamp, delete_timestamp, status, status_changed_at,
object_count, bytes_used, reported_put_timestamp,
reported_delete_timestamp, reported_object_count,
reported_bytes_used, hash, id, x_container_sync_point1,
@@ -2005,17 +2010,25 @@ class ContainerBroker(DatabaseBroker):
self.path, err)
return False
- # Set the created_at and hash in the container_info table the same
- # in both brokers
+        # sync the retiring container stat into the fresh db: at least the
+        # things that aren't covered through the normal broker api, and
+        # things that won't just be regenerated.
try:
- fresh_broker_conn.execute(
- 'UPDATE container_stat SET created_at=?',
- (info['created_at'],))
+ sql = 'UPDATE container_stat SET created_at=?, '
+ sql += 'delete_timestamp=?, status=?, status_changed_at=?'
+ sql_data = (info['created_at'], info['delete_timestamp'],
+ info['status'], info['status_changed_at'])
+ # 'reported_*' items are not sync'd because this is consistent
+ # with when a new DB is created after rsync'ing to another
+ # node (see _newid()). 'hash' should not be sync'd because
+ # this DB has no object rows.
+ fresh_broker_conn.execute(sql, sql_data)
fresh_broker_conn.commit()
except sqlite3.OperationalError as err:
- self.logger.error('Failed to set matching created_at time in '
- 'the fresh database for %s: %s',
- self.path, err)
+ self.logger.error(
+ 'Failed to sync the container_stat table/view with the '
+ 'fresh database for %s: %s',
+ self.path, err)
return False
# Rename to the new database
diff --git a/swift/container/replicator.py b/swift/container/replicator.py
index d45a5e8df..07c6f8bd3 100644
--- a/swift/container/replicator.py
+++ b/swift/container/replicator.py
@@ -14,10 +14,10 @@
# limitations under the License.
import os
-import itertools
import json
from collections import defaultdict
from eventlet import Timeout
+from random import choice
from swift.container.sync_store import ContainerSyncStore
from swift.container.backend import ContainerBroker, DATADIR, SHARDED
@@ -27,7 +27,6 @@ from swift.container.reconciler import (
from swift.common import db_replicator
from swift.common.storage_policy import POLICIES
from swift.common.swob import HTTPOk, HTTPAccepted
-from swift.common.exceptions import DeviceUnavailable
from swift.common.http import is_success
from swift.common.utils import Timestamp, majority_size, get_db_files
@@ -144,18 +143,37 @@ class ContainerReplicator(db_replicator.Replicator):
def find_local_handoff_for_part(self, part):
"""
- Look through devices in the ring for the first handoff device that was
- identified during job creation as available on this node.
+ Find a device in the ring that is on this node on which to place a
+ partition. Preference is given to a device that is a primary location
+ for the partition. If no such device is found then a local device with
+ weight is chosen, and failing that any local device.
+ :param part: a partition
:returns: a node entry from the ring
"""
- nodes = self.ring.get_part_nodes(part)
- more_nodes = self.ring.get_more_nodes(part)
+ if not self._local_device_ids:
+ raise RuntimeError('Cannot find local handoff; no local devices')
- for node in itertools.chain(nodes, more_nodes):
+ for node in self.ring.get_part_nodes(part):
if node['id'] in self._local_device_ids:
return node
- return None
+
+ # don't attempt to minimize handoff depth: just choose any local
+ # device, but start by only picking a device with a weight, just in
+ # case some devices are being drained...
+ local_devs_with_weight = [
+ dev for dev in self._local_device_ids.values()
+ if dev.get('weight', 0)]
+ if local_devs_with_weight:
+ return choice(local_devs_with_weight)
+
+        # we have to return something, so choose any local device.
+ node = choice(list(self._local_device_ids.values()))
+ self.logger.warning(
+ "Could not find a non-zero weight device for handoff partition "
+ "%d, falling back device %s" %
+ (part, node['device']))
+ return node
def get_reconciler_broker(self, timestamp):
"""
@@ -173,14 +191,12 @@ class ContainerReplicator(db_replicator.Replicator):
account = MISPLACED_OBJECTS_ACCOUNT
part = self.ring.get_part(account, container)
node = self.find_local_handoff_for_part(part)
- if not node:
- raise DeviceUnavailable(
- 'No mounted devices found suitable to Handoff reconciler '
- 'container %s in partition %s' % (container, part))
- broker = ContainerBroker.create_broker(
+ broker, initialized = ContainerBroker.create_broker(
os.path.join(self.root, node['device']), part, account, container,
logger=self.logger, put_timestamp=timestamp,
storage_policy_index=0)
+ self.logger.increment('reconciler_db_created' if initialized
+ else 'reconciler_db_exists')
if self.reconciler_containers is not None:
self.reconciler_containers[container] = part, broker, node['id']
return broker
@@ -198,8 +214,9 @@ class ContainerReplicator(db_replicator.Replicator):
try:
reconciler = self.get_reconciler_broker(container)
- except DeviceUnavailable as e:
- self.logger.warning('DeviceUnavailable: %s', e)
+ except Exception:
+ self.logger.exception('Failed to get reconciler broker for '
+ 'container %s', container)
return False
self.logger.debug('Adding %d objects to the reconciler at %s',
len(item_list), reconciler.db_file)
diff --git a/swift/container/sharder.py b/swift/container/sharder.py
index 2b99e819e..363853214 100644
--- a/swift/container/sharder.py
+++ b/swift/container/sharder.py
@@ -31,7 +31,6 @@ from swift.common import internal_client
from swift.common.constraints import check_drive, AUTO_CREATE_ACCOUNT_PREFIX
from swift.common.direct_client import (direct_put_container,
DirectClientException)
-from swift.common.exceptions import DeviceUnavailable
from swift.common.request_helpers import USE_REPLICATION_NETWORK_HEADER
from swift.common.ring.utils import is_local_device
from swift.common.swob import str_to_wsgi
@@ -80,25 +79,78 @@ def make_shard_ranges(broker, shard_data, shards_account_prefix):
return shard_ranges
-def find_missing_ranges(shard_ranges):
+def _find_discontinuity(paths, start):
+ # select the path that reaches furthest from start into the namespace
+ start_paths = [path for path in paths if path.lower == start]
+ start_paths.sort(key=lambda p: p.upper)
+ longest_start_path = start_paths[-1]
+    # search for paths that end further into the namespace (note: these must
+    # have a lower that differs from the start_path upper, otherwise they
+    # would be part of the start_path!)
+ end_paths = [path for path in paths
+ if path.upper > longest_start_path.upper]
+ if end_paths:
+ # select those that begin nearest the start of the namespace
+ end_paths.sort(key=lambda p: p.lower)
+ end_paths = [p for p in end_paths if p.lower == end_paths[0].lower]
+ # select the longest of those
+ end_paths.sort(key=lambda p: p.upper)
+ longest_end_path = end_paths[-1]
+ else:
+ longest_end_path = None
+ return longest_start_path, longest_end_path
+
+
+def find_paths_with_gaps(shard_ranges):
"""
- Find any ranges in the entire object namespace that are not covered by any
- shard range in the given list.
-
- :param shard_ranges: A list of :class:`~swift.utils.ShardRange`
- :return: a list of missing ranges
+ Find gaps in the shard ranges and pairs of shard range paths that lead to
+ and from those gaps. For each gap a single pair of adjacent paths is
+ selected. The concatenation of all selected paths and gaps will span the
+ entire namespace with no overlaps.
+
+ :param shard_ranges: a list of instances of ShardRange.
+ :return: A list of tuples of ``(start_path, gap_range, end_path)`` where
+ ``start_path`` is a list of ShardRanges leading to the gap,
+ ``gap_range`` is a ShardRange synthesized to describe the namespace
+ gap, and ``end_path`` is a list of ShardRanges leading from the gap.
+ When gaps start or end at the namespace minimum or maximum bounds,
+ ``start_path`` and ``end_path`` may be 'null' paths that contain a
+ single ShardRange covering either the minimum or maximum of the
+ namespace.
"""
- gaps = []
- if not shard_ranges:
- return ((ShardRange.MIN, ShardRange.MAX),)
- if shard_ranges[0].lower > ShardRange.MIN:
- gaps.append((ShardRange.MIN, shard_ranges[0].lower))
- for first, second in zip(shard_ranges, shard_ranges[1:]):
- if first.upper < second.lower:
- gaps.append((first.upper, second.lower))
- if shard_ranges[-1].upper < ShardRange.MAX:
- gaps.append((shard_ranges[-1].upper, ShardRange.MAX))
- return gaps
+ timestamp = Timestamp.now()
+ shard_ranges = ShardRangeList(shard_ranges)
+ # note: find_paths results do not include shrinking ranges
+ paths = find_paths(shard_ranges)
+ # add paths covering no namespace at start and end of namespace to ensure
+ # that a start_path and end_path is always found even when there is a gap
+ # at the start or end of the namespace
+ null_start = ShardRange('null/start', timestamp,
+ lower=ShardRange.MIN,
+ upper=ShardRange.MIN,
+ state=ShardRange.FOUND)
+ null_end = ShardRange('null/end', timestamp,
+ lower=ShardRange.MAX,
+ upper=ShardRange.MAX,
+ state=ShardRange.FOUND)
+ paths.extend([ShardRangeList([null_start]), ShardRangeList([null_end])])
+ paths_with_gaps = []
+ start = null_start.lower
+ while True:
+ start_path, end_path = _find_discontinuity(paths, start)
+ if end_path is None:
+ # end of namespace reached
+ break
+ start = end_path.lower
+ if start_path.upper > end_path.lower:
+ # overlap
+ continue
+ gap_range = ShardRange('gap/index_%06d' % len(paths_with_gaps),
+ timestamp,
+ lower=start_path.upper,
+ upper=end_path.lower)
+ paths_with_gaps.append((start_path, gap_range, end_path))
+ return paths_with_gaps
def find_overlapping_ranges(shard_ranges):
@@ -446,6 +498,48 @@ def rank_paths(paths, shard_range_to_span):
class CleavingContext(object):
+ """
+ Encapsulates metadata associated with the process of cleaving a retiring
+ DB. This metadata includes:
+
+ * ``ref``: The unique part of the key that is used when persisting a
+ serialized ``CleavingContext`` as sysmeta in the DB. The unique part of
+ the key is based off the DB id. This ensures that each context is
+ associated with a specific DB file. The unique part of the key is
+ included in the ``CleavingContext`` but should not be modified by any
+ caller.
+
+ * ``cursor``: the upper bound of the last shard range to have been
+ cleaved from the retiring DB.
+
+ * ``max_row``: the retiring DB's max row; this is updated to the value of
+ the retiring DB's ``max_row`` every time a ``CleavingContext`` is
+ loaded for that DB, and may change during the process of cleaving the
+ DB.
+
+ * ``cleave_to_row``: the value of ``max_row`` at the moment when cleaving
+ starts for the DB. When cleaving completes (i.e. the cleave cursor has
+ reached the upper bound of the cleaving namespace), ``cleave_to_row``
+ is compared to the current ``max_row``: if the two values are not equal
+ then rows have been added to the DB which may not have been cleaved, in
+ which case the ``CleavingContext`` is ``reset`` and cleaving is
+ re-started.
+
+ * ``last_cleave_to_row``: the minimum DB row from which cleaving should
+ select objects to cleave; this is initially set to None i.e. all rows
+ should be cleaved. If the ``CleavingContext`` is ``reset`` then the
+ ``last_cleave_to_row`` is set to the current value of
+ ``cleave_to_row``, which in turn is set to the current value of
+ ``max_row`` by a subsequent call to ``start``. The repeated cleaving
+ therefore only selects objects in rows greater than the
+ ``last_cleave_to_row``, rather than cleaving the whole DB again.
+
+ * ``ranges_done``: the number of shard ranges that have been cleaved from
+ the retiring DB.
+
+ * ``ranges_todo``: the number of shard ranges that are yet to be
+ cleaved from the retiring DB.
+ """
def __init__(self, ref, cursor='', max_row=None, cleave_to_row=None,
last_cleave_to_row=None, cleaving_done=False,
misplaced_done=False, ranges_done=0, ranges_todo=0):
@@ -499,9 +593,9 @@ class CleavingContext(object):
@classmethod
def load_all(cls, broker):
"""
- Returns all cleaving contexts stored in the broker.
+ Returns all cleaving contexts stored in the broker's DB.
- :param broker:
+ :param broker: an instance of :class:`ContainerBroker`
:return: list of tuples of (CleavingContext, timestamp)
"""
brokers = broker.get_brokers()
@@ -521,17 +615,11 @@ class CleavingContext(object):
@classmethod
def load(cls, broker):
"""
- Returns a context dict for tracking the progress of cleaving this
- broker's retiring DB. The context is persisted in sysmeta using a key
- that is based off the retiring db id and max row. This form of
- key ensures that a cleaving context is only loaded for a db that
- matches the id and max row when the context was created; if a db is
- modified such that its max row changes then a different context, or no
- context, will be loaded.
-
- :return: A dict to which cleave progress metadata may be added. The
- dict initially has a key ``ref`` which should not be modified by
- any caller.
+ Returns a CleavingContext tracking the cleaving progress of the given
+ broker's DB.
+
+        :param broker: an instance of :class:`ContainerBroker`
+ :return: An instance of :class:`CleavingContext`.
"""
brokers = broker.get_brokers()
ref = cls._make_ref(brokers[0])
@@ -542,6 +630,12 @@ class CleavingContext(object):
return cls(**data)
def store(self, broker):
+ """
+ Persists the serialized ``CleavingContext`` as sysmeta in the given
+ broker's DB.
+
+        :param broker: an instance of :class:`ContainerBroker`
+ """
broker.set_sharding_sysmeta('Context-' + self.ref,
json.dumps(dict(self)))
@@ -758,11 +852,15 @@ class ContainerSharder(ContainerSharderConf, ContainerReplicator):
else:
self.stats['sharding'][category][key] = max(current, value)
- def _increment_stat(self, category, key, step=1, statsd=False):
- self.stats['sharding'][category][key] += step
- if statsd:
- statsd_key = '%s_%s' % (category, key)
- self.logger.increment(statsd_key)
+ def _increment_stat(self, category, key, statsd=False):
+ self._update_stat(category, key, step=1, statsd=statsd)
+
+ def _update_stat(self, category, key, step=1, statsd=False):
+ if step:
+ self.stats['sharding'][category][key] += step
+ if statsd:
+ statsd_key = '%s_%s' % (category, key)
+ self.logger.update_stats(statsd_key, step)
def _make_stats_info(self, broker, node, own_shard_range):
try:
@@ -917,34 +1015,31 @@ class ContainerSharder(ContainerSharderConf, ContainerReplicator):
if newest:
headers['X-Newest'] = 'true'
try:
- try:
- resp = self.int_client.make_request(
- 'GET', path, headers, acceptable_statuses=(2,),
- params=params)
- except internal_client.UnexpectedResponse as err:
- self.logger.warning("Failed to get shard ranges from %s: %s",
- quote(broker.root_path), err)
- return None
- record_type = resp.headers.get('x-backend-record-type')
- if record_type != 'shard':
- err = 'unexpected record type %r' % record_type
- self.logger.error("Failed to get shard ranges from %s: %s",
- quote(broker.root_path), err)
- return None
-
- try:
- data = json.loads(resp.body)
- if not isinstance(data, list):
- raise ValueError('not a list')
- return [ShardRange.from_dict(shard_range)
- for shard_range in data]
- except (ValueError, TypeError, KeyError) as err:
- self.logger.error(
- "Failed to get shard ranges from %s: invalid data: %r",
- quote(broker.root_path), err)
+ resp = self.int_client.make_request(
+ 'GET', path, headers, acceptable_statuses=(2,),
+ params=params)
+ except internal_client.UnexpectedResponse as err:
+ self.logger.warning("Failed to get shard ranges from %s: %s",
+ quote(broker.root_path), err)
+ return None
+ record_type = resp.headers.get('x-backend-record-type')
+ if record_type != 'shard':
+ err = 'unexpected record type %r' % record_type
+ self.logger.error("Failed to get shard ranges from %s: %s",
+ quote(broker.root_path), err)
return None
- finally:
- self.logger.txn_id = None
+
+ try:
+ data = json.loads(resp.body)
+ if not isinstance(data, list):
+ raise ValueError('not a list')
+ return [ShardRange.from_dict(shard_range)
+ for shard_range in data]
+ except (ValueError, TypeError, KeyError) as err:
+ self.logger.error(
+ "Failed to get shard ranges from %s: invalid data: %r",
+ quote(broker.root_path), err)
+ return None
def _put_container(self, node, part, account, container, headers, body):
try:
@@ -954,12 +1049,14 @@ class ContainerSharder(ContainerSharderConf, ContainerReplicator):
headers=headers, contents=body)
except DirectClientException as err:
self.logger.warning(
- 'Failed to put shard ranges to %s:%s/%s: %s',
- node['ip'], node['port'], node['device'], err.http_status)
+ 'Failed to put shard ranges to %s:%s/%s %s/%s: %s',
+ node['ip'], node['port'], node['device'],
+ quote(account), quote(container), err.http_status)
except (Exception, Timeout) as err:
self.logger.exception(
- 'Failed to put shard ranges to %s:%s/%s: %s',
- node['ip'], node['port'], node['device'], err)
+ 'Failed to put shard ranges to %s:%s/%s %s/%s: %s',
+ node['ip'], node['port'], node['device'],
+ quote(account), quote(container), err)
else:
return True
return False
@@ -995,20 +1092,19 @@ class ContainerSharder(ContainerSharderConf, ContainerReplicator):
:param shard_range: a :class:`~swift.common.utils.ShardRange`
:param root_path: the path of the shard's root container
:param policy_index: the storage policy index
- :returns: a tuple of ``(part, broker, node_id)`` where ``part`` is the
- shard container's partition, ``broker`` is an instance of
+ :returns: a tuple of ``(part, broker, node_id, put_timestamp)`` where
+ ``part`` is the shard container's partition,
+ ``broker`` is an instance of
:class:`~swift.container.backend.ContainerBroker`,
- ``node_id`` is the id of the selected node.
+ ``node_id`` is the id of the selected node,
+            ``put_timestamp`` is the put_timestamp used to initialize the
+            broker, or None if the broker's DB already existed.
"""
part = self.ring.get_part(shard_range.account, shard_range.container)
node = self.find_local_handoff_for_part(part)
- put_timestamp = Timestamp.now().internal
- if not node:
- raise DeviceUnavailable(
- 'No mounted devices found suitable for creating shard broker '
- 'for %s in partition %s' % (quote(shard_range.name), part))
- shard_broker = ContainerBroker.create_broker(
+ put_timestamp = Timestamp.now().internal
+ shard_broker, initialized = ContainerBroker.create_broker(
os.path.join(self.root, node['device']), part, shard_range.account,
shard_range.container, epoch=shard_range.epoch,
storage_policy_index=policy_index, put_timestamp=put_timestamp)
@@ -1028,6 +1124,7 @@ class ContainerSharder(ContainerSharderConf, ContainerReplicator):
'X-Container-Sysmeta-Sharding':
('True', Timestamp.now().internal)})
+ put_timestamp = put_timestamp if initialized else None
return part, shard_broker, node['id'], put_timestamp
def _audit_root_container(self, broker):
@@ -1040,12 +1137,12 @@ class ContainerSharder(ContainerSharderConf, ContainerReplicator):
if own_shard_range.state in (ShardRange.SHARDING, ShardRange.SHARDED):
shard_ranges = [sr for sr in broker.get_shard_ranges()
if sr.state != ShardRange.SHRINKING]
- missing_ranges = find_missing_ranges(shard_ranges)
- if missing_ranges:
+ paths_with_gaps = find_paths_with_gaps(shard_ranges)
+ if paths_with_gaps:
warnings.append(
'missing range(s): %s' %
- ' '.join(['%s-%s' % (lower, upper)
- for lower, upper in missing_ranges]))
+ ' '.join(['%s-%s' % (gap.lower, gap.upper)
+ for (_, gap, _) in paths_with_gaps]))
for state in ShardRange.STATES:
if state == ShardRange.SHRINKING:
@@ -1056,8 +1153,8 @@ class ContainerSharder(ContainerSharderConf, ContainerReplicator):
overlaps = find_overlapping_ranges(shard_ranges)
if overlaps:
self._increment_stat('audit_root', 'has_overlap')
- self._increment_stat('audit_root', 'num_overlap',
- step=len(overlaps))
+ self._update_stat('audit_root', 'num_overlap',
+ step=len(overlaps))
all_overlaps = ', '.join(
[' '.join(['%s-%s' % (sr.lower, sr.upper)
for sr in overlapping_ranges])
@@ -1370,8 +1467,11 @@ class ContainerSharder(ContainerSharderConf, ContainerReplicator):
continue
if dest_shard_range not in dest_brokers:
- part, dest_broker, node_id, _junk = self._get_shard_broker(
- dest_shard_range, src_broker.root_path, policy_index)
+ part, dest_broker, node_id, put_timestamp = \
+ self._get_shard_broker(
+ dest_shard_range, src_broker.root_path, policy_index)
+ stat = 'db_exists' if put_timestamp is None else 'db_created'
+ self._increment_stat('misplaced', stat, statsd=True)
# save the broker info that was sampled prior to the *first*
# yielded objects for this destination
destination = {'part': part,
@@ -1396,8 +1496,8 @@ class ContainerSharder(ContainerSharderConf, ContainerReplicator):
success &= self._replicate_and_delete(
src_broker, dest_shard_range, **dest_args)
- self._increment_stat('misplaced', 'placed', step=placed)
- self._increment_stat('misplaced', 'unplaced', step=unplaced)
+ self._update_stat('misplaced', 'placed', step=placed, statsd=True)
+ self._update_stat('misplaced', 'unplaced', step=unplaced, statsd=True)
return success, placed, unplaced
def _make_shard_range_fetcher(self, broker, src_shard_range):
@@ -1499,7 +1599,8 @@ class ContainerSharder(ContainerSharderConf, ContainerReplicator):
self._increment_stat('misplaced', 'found', statsd=True)
self.logger.debug('Placed %s misplaced objects (%s unplaced)',
num_placed, num_unplaced)
- self._increment_stat('misplaced', 'success' if success else 'failure')
+ self._increment_stat('misplaced', 'success' if success else 'failure',
+ statsd=True)
self.logger.debug('Finished handling misplaced objects')
return success
@@ -1547,7 +1648,7 @@ class ContainerSharder(ContainerSharderConf, ContainerReplicator):
num_found = len(shard_ranges)
self.logger.info(
"Completed scan for shard ranges: %d found", num_found)
- self._increment_stat('scanned', 'found', step=num_found)
+ self._update_stat('scanned', 'found', step=num_found)
self._min_stat('scanned', 'min_time', round(elapsed / num_found, 3))
self._max_stat('scanned', 'max_time', round(elapsed / num_found, 3))
@@ -1610,6 +1711,7 @@ class ContainerSharder(ContainerSharderConf, ContainerReplicator):
def _cleave_shard_broker(self, broker, cleaving_context, shard_range,
own_shard_range, shard_broker, put_timestamp,
shard_part, node_id):
+ result = CLEAVE_SUCCESS
start = time.time()
# only cleave from the retiring db - misplaced objects handler will
# deal with any objects in the fresh db
@@ -1635,15 +1737,9 @@ class ContainerSharder(ContainerSharderConf, ContainerReplicator):
# This was just created; don't need to replicate this
# SR because there was nothing there. So cleanup and
# remove the shard_broker from its hand off location.
- self.delete_db(shard_broker)
- cleaving_context.range_done(shard_range.upper_str)
- if shard_range.upper >= own_shard_range.upper:
- # cleaving complete
- cleaving_context.cleaving_done = True
- cleaving_context.store(broker)
# Because nothing was here we wont count it in the shard
# batch count.
- return CLEAVE_EMPTY
+ result = CLEAVE_EMPTY
# Else, it wasn't newly created by us, and
# we don't know what's in it or why. Let it get
# replicated and counted in the batch count.
@@ -1692,40 +1788,46 @@ class ContainerSharder(ContainerSharderConf, ContainerReplicator):
shard_broker.merge_shard_ranges(shard_range)
replication_quorum = self.shard_replication_quorum
- self.logger.info(
- 'Replicating new shard container %s for %s',
- quote(shard_broker.path), own_shard_range)
-
- success, responses = self._replicate_object(
- shard_part, shard_broker.db_file, node_id)
-
- replication_successes = responses.count(True)
- if (not success and (not responses or
- replication_successes < replication_quorum)):
- # insufficient replication or replication not even attempted;
- # break because we don't want to progress the cleave cursor
- # until each shard range has been successfully cleaved
- self.logger.warning(
- 'Failed to sufficiently replicate cleaved shard %s for %s: '
- '%s successes, %s required.', shard_range, quote(broker.path),
- replication_successes, replication_quorum)
- self._increment_stat('cleaved', 'failure', statsd=True)
- return CLEAVE_FAILED
-
- elapsed = round(time.time() - start, 3)
- self._min_stat('cleaved', 'min_time', elapsed)
- self._max_stat('cleaved', 'max_time', elapsed)
- broker.merge_shard_ranges(shard_range)
- cleaving_context.range_done(shard_range.upper_str)
- if shard_range.upper >= own_shard_range.upper:
- # cleaving complete
- cleaving_context.cleaving_done = True
- cleaving_context.store(broker)
- self.logger.info(
- 'Cleaved %s for shard range %s in %gs.',
- quote(broker.path), shard_range, elapsed)
- self._increment_stat('cleaved', 'success', statsd=True)
- return CLEAVE_SUCCESS
+ if result == CLEAVE_EMPTY:
+ self.delete_db(shard_broker)
+ else: # result == CLEAVE_SUCCESS:
+ self.logger.info(
+ 'Replicating new shard container %s for %s',
+ quote(shard_broker.path), own_shard_range)
+
+ success, responses = self._replicate_object(
+ shard_part, shard_broker.db_file, node_id)
+
+ replication_successes = responses.count(True)
+ if (not success and (not responses or
+ replication_successes < replication_quorum)):
+ # insufficient replication or replication not even attempted;
+ # break because we don't want to progress the cleave cursor
+ # until each shard range has been successfully cleaved
+ self.logger.warning(
+ 'Failed to sufficiently replicate cleaved shard %s for %s:'
+ ' %s successes, %s required.', shard_range,
+ quote(broker.path),
+ replication_successes, replication_quorum)
+ self._increment_stat('cleaved', 'failure', statsd=True)
+ result = CLEAVE_FAILED
+ else:
+ elapsed = round(time.time() - start, 3)
+ self._min_stat('cleaved', 'min_time', elapsed)
+ self._max_stat('cleaved', 'max_time', elapsed)
+ self.logger.info(
+ 'Cleaved %s for shard range %s in %gs.',
+ quote(broker.path), shard_range, elapsed)
+ self._increment_stat('cleaved', 'success', statsd=True)
+
+ if result in (CLEAVE_SUCCESS, CLEAVE_EMPTY):
+ broker.merge_shard_ranges(shard_range)
+ cleaving_context.range_done(shard_range.upper_str)
+ if shard_range.upper >= own_shard_range.upper:
+ # cleaving complete
+ cleaving_context.cleaving_done = True
+ cleaving_context.store(broker)
+ return result
def _cleave_shard_range(self, broker, cleaving_context, shard_range,
own_shard_range):
@@ -1735,18 +1837,14 @@ class ContainerSharder(ContainerSharderConf, ContainerReplicator):
quote(shard_range.name), shard_range)
self._increment_stat('cleaved', 'attempted')
policy_index = broker.storage_policy_index
- try:
- shard_part, shard_broker, node_id, put_timestamp = \
- self._get_shard_broker(shard_range, broker.root_path,
- policy_index)
- except DeviceUnavailable as duex:
- self.logger.warning(str(duex))
- self._increment_stat('cleaved', 'failure', statsd=True)
- return CLEAVE_FAILED
- else:
- return self._cleave_shard_broker(
- broker, cleaving_context, shard_range, own_shard_range,
- shard_broker, put_timestamp, shard_part, node_id)
+ shard_part, shard_broker, node_id, put_timestamp = \
+ self._get_shard_broker(shard_range, broker.root_path,
+ policy_index)
+ stat = 'db_exists' if put_timestamp is None else 'db_created'
+ self._increment_stat('cleaved', stat, statsd=True)
+ return self._cleave_shard_broker(
+ broker, cleaving_context, shard_range, own_shard_range,
+ shard_broker, put_timestamp, shard_part, node_id)
def _cleave(self, broker):
# Returns True if misplaced objects have been moved and the entire
@@ -1970,12 +2068,18 @@ class ContainerSharder(ContainerSharderConf, ContainerReplicator):
# hammering the root
own_shard_range.reported = True
broker.merge_shard_ranges(own_shard_range)
+ self.logger.debug(
+ 'updated root objs=%d, tombstones=%s (%s)',
+ own_shard_range.object_count, own_shard_range.tombstones,
+ quote(broker.path))
def _process_broker(self, broker, node, part):
broker.get_info() # make sure account/container are populated
state = broker.get_db_state()
- self.logger.debug('Starting processing %s state %s',
- quote(broker.path), state)
+ is_deleted = broker.is_deleted()
+ self.logger.debug('Starting processing %s state %s%s',
+ quote(broker.path), state,
+ ' (deleted)' if is_deleted else '')
if not self._audit_container(broker):
return
@@ -1983,13 +2087,7 @@ class ContainerSharder(ContainerSharderConf, ContainerReplicator):
# now look and deal with misplaced objects.
self._move_misplaced_objects(broker)
- if broker.is_deleted():
- # This container is deleted so we can skip it. We still want
- # deleted containers to go via misplaced items because they may
- # have new objects sitting in them that may need to move.
- return
-
- is_leader = node['index'] == 0 and self.auto_shard
+ is_leader = node['index'] == 0 and self.auto_shard and not is_deleted
if state in (UNSHARDED, COLLAPSED):
if is_leader and broker.is_root_container():
# bootstrap sharding of root container
@@ -2041,29 +2139,31 @@ class ContainerSharder(ContainerSharderConf, ContainerReplicator):
self.logger.debug('Remaining in sharding state %s',
quote(broker.path))
- if state == SHARDED and broker.is_root_container():
- # look for shrink stats
- self._identify_shrinking_candidate(broker, node)
- if is_leader:
- self._find_and_enable_shrinking_candidates(broker)
- self._find_and_enable_sharding_candidates(broker)
- for shard_range in broker.get_shard_ranges(
- states=[ShardRange.SHARDING]):
- self._send_shard_ranges(
- shard_range.account, shard_range.container,
- [shard_range])
+ if not broker.is_deleted():
+ if state == SHARDED and broker.is_root_container():
+ # look for shrink stats
+ self._identify_shrinking_candidate(broker, node)
+ if is_leader:
+ self._find_and_enable_shrinking_candidates(broker)
+ self._find_and_enable_sharding_candidates(broker)
+ for shard_range in broker.get_shard_ranges(
+ states=[ShardRange.SHARDING]):
+ self._send_shard_ranges(
+ shard_range.account, shard_range.container,
+ [shard_range])
- if not broker.is_root_container():
- # Update the root container with this container's shard range
- # info; do this even when sharded in case previous attempts
- # failed; don't do this if there is no own shard range. When
- # sharding a shard, this is when the root will see the new
- # shards move to ACTIVE state and the sharded shard
- # simultaneously become deleted.
- self._update_root_container(broker)
-
- self.logger.debug('Finished processing %s state %s',
- quote(broker.path), broker.get_db_state())
+ if not broker.is_root_container():
+ # Update the root container with this container's shard range
+ # info; do this even when sharded in case previous attempts
+ # failed; don't do this if there is no own shard range. When
+ # sharding a shard, this is when the root will see the new
+ # shards move to ACTIVE state and the sharded shard
+ # simultaneously become deleted.
+ self._update_root_container(broker)
+
+ self.logger.debug('Finished processing %s state %s%s',
+ quote(broker.path), broker.get_db_state(),
+ ' (deleted)' if is_deleted else '')
def _one_shard_cycle(self, devices_to_shard, partitions_to_shard):
"""
@@ -2087,9 +2187,9 @@ class ContainerSharder(ContainerSharderConf, ContainerReplicator):
self.logger.info('(Override partitions: %s)',
', '.join(str(p) for p in partitions_to_shard))
self._zero_stats()
- self._local_device_ids = set()
+ self._local_device_ids = {}
dirs = []
- self.ips = whataremyips(bind_ip=self.bind_ip)
+ self.ips = whataremyips(self.bind_ip)
for node in self.ring.devs:
device_path = self._check_node(node)
if not device_path:
@@ -2098,7 +2198,7 @@ class ContainerSharder(ContainerSharderConf, ContainerReplicator):
if os.path.isdir(datadir):
# Populate self._local_device_ids so we can find devices for
# shard containers later
- self._local_device_ids.add(node['id'])
+ self._local_device_ids[node['id']] = node
if node['device'] not in devices_to_shard:
continue
part_filt = self._partition_dir_filter(
diff --git a/swift/container/sync.py b/swift/container/sync.py
index 874edccd7..55a7a77ec 100644
--- a/swift/container/sync.py
+++ b/swift/container/sync.py
@@ -96,28 +96,28 @@ class ContainerSync(Daemon):
An example may help. Assume replica count is 3 and perfectly matching
ROWIDs starting at 1.
- First sync run, database has 6 rows:
-
- * SyncPoint1 starts as -1.
- * SyncPoint2 starts as -1.
- * No rows between points, so no "all updates" rows.
- * Six rows newer than SyncPoint1, so a third of the rows are sent
- by node 1, another third by node 2, remaining third by node 3.
- * SyncPoint1 is set as 6 (the newest ROWID known).
- * SyncPoint2 is left as -1 since no "all updates" rows were synced.
-
- Next sync run, database has 12 rows:
-
- * SyncPoint1 starts as 6.
- * SyncPoint2 starts as -1.
- * The rows between -1 and 6 all trigger updates (most of which
- should short-circuit on the remote end as having already been
- done).
- * Six more rows newer than SyncPoint1, so a third of the rows are
- sent by node 1, another third by node 2, remaining third by node
- 3.
- * SyncPoint1 is set as 12 (the newest ROWID known).
- * SyncPoint2 is set as 6 (the newest "all updates" ROWID).
+ First sync run, database has 6 rows:
+
+ * SyncPoint1 starts as -1.
+ * SyncPoint2 starts as -1.
+ * No rows between points, so no "all updates" rows.
+ * Six rows newer than SyncPoint1, so a third of the rows are sent
+ by node 1, another third by node 2, remaining third by node 3.
+ * SyncPoint1 is set as 6 (the newest ROWID known).
+ * SyncPoint2 is left as -1 since no "all updates" rows were synced.
+
+ Next sync run, database has 12 rows:
+
+ * SyncPoint1 starts as 6.
+ * SyncPoint2 starts as -1.
+ * The rows between -1 and 6 all trigger updates (most of which
+ should short-circuit on the remote end as having already been
+ done).
+ * Six more rows newer than SyncPoint1, so a third of the rows are
+ sent by node 1, another third by node 2, remaining third by node
+ 3.
+ * SyncPoint1 is set as 12 (the newest ROWID known).
+ * SyncPoint2 is set as 6 (the newest "all updates" ROWID).
In this way, under normal circumstances each node sends its share of
updates each run and just sends a batch of older updates to ensure nothing
diff --git a/swift/locale/de/LC_MESSAGES/swift.po b/swift/locale/de/LC_MESSAGES/swift.po
index 6d02c03a5..1985a1d29 100644
--- a/swift/locale/de/LC_MESSAGES/swift.po
+++ b/swift/locale/de/LC_MESSAGES/swift.po
@@ -8,16 +8,15 @@
# Jonas John <jonas.john@e-werkzeug.eu>, 2015
# Andreas Jaeger <jaegerandi@gmail.com>, 2016. #zanata
# Andreas Jaeger <jaegerandi@gmail.com>, 2019. #zanata
-# Andreas Jaeger <jaegerandi@gmail.com>, 2020. #zanata
msgid ""
msgstr ""
"Project-Id-Version: swift VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2020-10-07 00:36+0000\n"
+"POT-Creation-Date: 2022-05-27 18:57+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
-"PO-Revision-Date: 2020-04-25 11:39+0000\n"
+"PO-Revision-Date: 2019-10-03 06:41+0000\n"
"Last-Translator: Andreas Jaeger <jaegerandi@gmail.com>\n"
"Language: de\n"
"Plural-Forms: nplurals=2; plural=(n != 1);\n"
@@ -33,37 +32,6 @@ msgstr ""
"Durch Benutzer beendet"
#, python-format
-msgid " - %s"
-msgstr " - %s"
-
-#, python-format
-msgid " - parallel, %s"
-msgstr " - parallel, %s"
-
-#, python-format
-msgid ""
-"%(checked)d suffixes checked - %(hashed).2f%% hashed, %(synced).2f%% synced"
-msgstr ""
-"%(checked)d Suffixe überprüft - %(hashed).2f%% hashverschlüsselt, "
-"%(synced).2f%% synchronisiert"
-
-#, python-format
-msgid "%(msg)s %(ip)s:%(port)s/%(device)s"
-msgstr "%(msg)s %(ip)s:%(port)s/%(device)s"
-
-#, python-format
-msgid ""
-"%(replicated)d/%(total)d (%(percentage).2f%%) partitions replicated in "
-"%(time).2fs (%(rate).2f/sec, %(remaining)s remaining)"
-msgstr ""
-"%(replicated)d/%(total)d (%(percentage).2f%%) Partitionen repliziert in "
-"%(time).2fs (%(rate).2f/s, %(remaining)s verbleibend)"
-
-#, python-format
-msgid "%(replication_ip)s/%(device)s responded as unmounted"
-msgstr "%(replication_ip)s/%(device)s antwortet als unmounted"
-
-#, python-format
msgid "%(server)s #%(number)d not running (%(conf)s)"
msgstr "%(server)s #%(number)d wird nicht ausgeführt (%(conf)s)"
@@ -84,10 +52,6 @@ msgid "%(success)s successes, %(failure)s failures"
msgstr "%(success)s Erfolge, %(failure)s Fehlschläge"
#, python-format
-msgid "%(type)s returning 503 for %(statuses)s"
-msgstr "%(type)s gab 503 für %(statuses)s zurück"
-
-#, python-format
msgid "%(type)s: %(value)s"
msgstr "%(type)s: %(value)s"
@@ -100,35 +64,9 @@ msgid "%s does not exist"
msgstr "%s existiert nicht"
#, python-format
-msgid "%s is not mounted"
-msgstr "%s ist nicht eingehängt"
-
-#, python-format
-msgid "%s responded as unmounted"
-msgstr "%s zurückgemeldet als ausgehängt"
-
-#, python-format
msgid "%s: Connection reset by peer"
msgstr "%s: Verbindung zurückgesetzt durch Peer"
-msgid "Account"
-msgstr "Konto"
-
-#, python-format
-msgid "Account audit \"once\" mode completed: %.02fs"
-msgstr "Kontoprüfungsmodus \"once\" abgeschlossen: %.02fs"
-
-#, python-format
-msgid "Account audit pass completed: %.02fs"
-msgstr "Kontoprüfungsdurchlauf abgeschlossen: %.02fs"
-
-#, python-format
-msgid ""
-"Adding required filter %(filter_name)s to pipeline at position %(insert_at)d"
-msgstr ""
-"Füge erforderlichen Filter %(filter_name)s zu Pipeline an Position "
-"%(insert_at)d hinzu"
-
#, python-format
msgid ""
"Attempted to replicate %(count)d dbs in %(time).5f seconds (%(rate).5f/s)"
@@ -141,44 +79,9 @@ msgid "Audit Failed for %(path)s: %(err)s"
msgstr "Prüfung fehlgeschlagen für %(path)s: %(err)s"
#, python-format
-msgid "Audit passed for %s"
-msgstr "Prüfung für %s erfolgt"
-
-#, python-format
msgid "Bad key for %(name)r: %(err)s"
msgstr "Schlechter Schlüssel für %(name)r: %(err)s"
-#, python-format
-msgid "Bad rsync return code: %(ret)d <- %(args)s"
-msgstr "Falscher rsync-Rückgabecode: %(ret)d <- %(args)s"
-
-msgid "Begin account audit \"once\" mode"
-msgstr "Kontoprüfungsmodus \"once\" wird gestartet"
-
-msgid "Begin account audit pass."
-msgstr "Kontoprüfungsdurchlauf wird gestartet."
-
-msgid "Begin container audit \"once\" mode"
-msgstr "Containerprüfungsmodus \"once\" wird gestartet"
-
-msgid "Begin container audit pass."
-msgstr "Containerprüfungsdurchlauf wird gestartet."
-
-msgid "Begin container sync \"once\" mode"
-msgstr "Containersynchronisationsmodus \"once\" wird gestartet"
-
-msgid "Begin container update single threaded sweep"
-msgstr "Einzelthread-Scanvorgang für Containeraktualisierung wird gestartet"
-
-msgid "Begin container update sweep"
-msgstr "Scanvorgang für Containeraktualisierung wird gestartet"
-
-msgid "Begin object update single threaded sweep"
-msgstr "Einzelthread-Scanvorgang für Objektaktualisierung wird gestartet"
-
-msgid "Begin object update sweep"
-msgstr "Scanvorgang für Objektaktualisierung wird gestartet"
-
msgid "Beginning replication run"
msgstr "Replizierungsdurchlauf wird gestartet"
@@ -195,41 +98,6 @@ msgstr "Kann nicht auf die Datei %s zugreifen."
msgid "Can not load profile data from %s."
msgstr "Die Profildaten von %s können nicht geladen werden."
-#, python-format
-msgid "Cannot read %(auditor_status)s (%(err)s)"
-msgstr "%(auditor_status)s (%(err)s) kann nicht gelesen werden."
-
-#, python-format
-msgid "Cannot write %(auditor_status)s (%(err)s)"
-msgstr "Schreiben von %(auditor_status)s (%(err)s) nicht möglich."
-
-#, python-format
-msgid "Client did not read from proxy within %ss"
-msgstr "Client konnte nicht innerhalb von %ss vom Proxy lesen"
-
-msgid "Client disconnected without sending enough data"
-msgstr "Client getrennt ohne dem Senden von genügend Daten"
-
-msgid "Client disconnected without sending last chunk"
-msgstr ""
-"Die Verbindung zum Client wurde getrennt, bevor der letzte Chunk gesendet "
-"wurde. "
-
-#, python-format
-msgid ""
-"Client path %(client)s does not match path stored in object metadata %(meta)s"
-msgstr ""
-"Clientpfad %(client)s entspricht nicht dem in den Objektmetadaten "
-"gespeicherten Pfad %(meta)s"
-
-msgid ""
-"Configuration option internal_client_conf_path not defined. Using default "
-"configuration, See internal-client.conf-sample for options"
-msgstr ""
-"Konfigurationsoption internal_client_conf_path nicht definiert. "
-"Standardkonfiguration wird verwendet. Informationen zu den Optionen finden "
-"Sie in internal-client.conf-sample."
-
msgid "Connection refused"
msgstr "Verbindung abgelehnt"
@@ -239,59 +107,6 @@ msgstr "Verbindung zurückgesetzt"
msgid "Connection timeout"
msgstr "Verbindungszeitüberschreitung"
-msgid "Container"
-msgstr "Container"
-
-#, python-format
-msgid "Container audit \"once\" mode completed: %.02fs"
-msgstr "Containerprüfungsmodus \"once\" abgeschlossen: %.02fs"
-
-#, python-format
-msgid "Container audit pass completed: %.02fs"
-msgstr "Containerprüfungsdurchlauf abgeschlossen: %.02fs"
-
-#, python-format
-msgid "Container sync \"once\" mode completed: %.02fs"
-msgstr "Containersynchronisationsmodus \"once\" abgeschlossen: %.02fs"
-
-#, python-format
-msgid ""
-"Container sync report: %(container)s, time window start: %(start)s, time "
-"window end: %(end)s, puts: %(puts)s, posts: %(posts)s, deletes: %(deletes)s, "
-"bytes: %(bytes)s, sync_point1: %(point1)s, sync_point2: %(point2)s, "
-"total_rows: %(total)s"
-msgstr ""
-"Container Synchronisierungsbericht: %(container)s, Beginn Zeitfenster: "
-"%(start)s, Ende Zeitfenster: %(end)s, puts: %(puts)s, posts: %(posts)s, "
-"deletes: %(deletes)s, bytes: %(bytes)s, sync_point1: %(point1)s, "
-"sync_point2: %(point2)s, total_rows: %(total)s"
-
-#, python-format
-msgid ""
-"Container update single threaded sweep completed: %(elapsed).02fs, "
-"%(success)s successes, %(fail)s failures, %(no_change)s with no changes"
-msgstr ""
-"Einzelthread-Scanvorgang für Containeraktualisierung abgeschlossen: "
-"%(elapsed).02fs, %(success)s Erfolge, %(fail)s Fehler, %(no_change)s ohne "
-"Änderungen"
-
-#, python-format
-msgid "Container update sweep completed: %.02fs"
-msgstr "Scanvorgang für Containeraktualisierung abgeschlossen: %.02fs"
-
-#, python-format
-msgid ""
-"Container update sweep of %(path)s completed: %(elapsed).02fs, %(success)s "
-"successes, %(fail)s failures, %(no_change)s with no changes"
-msgstr ""
-"Scanvorgang für Containeraktualisierung von %(path)s abgeschlossen: "
-"%(elapsed).02fs, %(success)s Erfolge, %(fail)s Fehler, %(no_change)s ohne "
-"Änderungen"
-
-#, python-format
-msgid "Could not autocreate account %r"
-msgstr "Kann das Konto %r nicht automatisch erstellen"
-
#, python-format
msgid ""
"Could not bind to %(addr)s:%(port)s after trying for %(timeout)s seconds"
@@ -307,166 +122,17 @@ msgstr "%(conf)r konnte nicht geladen werden: %(error)s"
msgid "Data download error: %s"
msgstr "Fehler beim Downloaden von Daten: %s"
-#, python-format
-msgid "Directory %(directory)r does not map to a valid policy (%(error)s)"
-msgstr ""
-"Das Verzeichnis %(directory)r kann keiner gültigen Richtlinie (%(error)s) "
-"zugeordnet werden."
-
-#, python-format
-msgid "ERROR %(db_file)s: %(validate_sync_to_err)s"
-msgstr "FEHLER %(db_file)s: %(validate_sync_to_err)s"
-
-#, python-format
-msgid "ERROR %(status)d %(body)s From %(type)s Server"
-msgstr "FEHLER %(status)d %(body)s von %(type)s Server"
-
-#, python-format
-msgid "ERROR %(status)d %(body)s From Object Server re: %(path)s"
-msgstr "FEHLER %(status)d %(body)s Vom Objektserver bezüglich: %(path)s"
-
-#, python-format
-msgid "ERROR %(status)d Expect: 100-continue From Object Server"
-msgstr "FEHLER %(status)d Erwartet: 100-continue von Objektserver"
-
-#, python-format
-msgid "ERROR %(status)d Trying to %(method)s %(path)s From %(type)s Server"
-msgstr "FEHLER %(status)d Versuch, %(method)s %(path)s von %(type)s Server"
-
-#, python-format
-msgid ""
-"ERROR Account update failed with %(ip)s:%(port)s/%(device)s (will retry "
-"later): Response %(status)s %(reason)s"
-msgstr ""
-"FEHLER Kontoaktualisierung fehlgeschlagen mit %(ip)s:%(port)s/%(device)s "
-"(wird zu einem späteren Zeitpunkt erneut versucht): Antwort %(status)s "
-"%(reason)s"
-
-#, python-format
-msgid ""
-"ERROR Account update failed: different numbers of hosts and devices in "
-"request: \"%(hosts)s\" vs \"%(devices)s\""
-msgstr ""
-"FEHLER Kontoaktualisierung fehlgeschlagen: Unterschiedliche Anzahl von Hosts "
-"und Einheiten in der Anforderung: \"%(hosts)s\" contra \"%(devices)s\""
-
-#, python-format
-msgid "ERROR Client read timeout (%ss)"
-msgstr "FEHLER Client-Lesezeitüberschreitung (%ss)"
-
-#, python-format
-msgid ""
-"ERROR Container update failed (saving for async update later): %(status)d "
-"response from %(ip)s:%(port)s/%(dev)s"
-msgstr ""
-"FEHLER Containeraktualisierung fehlgeschlagen (wird für asynchrone "
-"Aktualisierung zu einem späteren Zeitpunkt gespeichert): %(status)d Antwort "
-"von %(ip)s:%(port)s/%(dev)s"
-
-#, python-format
-msgid ""
-"ERROR Container update failed: different numbers of hosts and devices in "
-"request: \"%(hosts)s\" vs \"%(devices)s\""
-msgstr ""
-"FEHLER Container Aktualisierung fehlgeschlagen: Unterschiedliche Anzahl von "
-"Hosts und Geräten in der Anfrage: \"%(hosts)s\" vs \"%(devices)s\""
-
-#, python-format
-msgid "ERROR Could not get account info %s"
-msgstr "FEHLER Kontoinfo %s konnte nicht abgerufen werden"
-
-#, python-format
-msgid "ERROR Could not get container info %s"
-msgstr "FEHLER Containerinformation %s konnte nicht geholt werden"
-
-#, python-format
-msgid "ERROR DiskFile %(data_file)s close failure: %(exc)s : %(stack)s"
-msgstr ""
-"FEHLER Fehler beim Schließen von DiskFile %(data_file)s: %(exc)s : %(stack)s"
-
-msgid "ERROR Exception causing client disconnect"
-msgstr ""
-"FEHLER Ausnahme, die zu einer Unterbrechung der Verbindung zum Client führt"
-
-#, python-format
-msgid "ERROR Exception transferring data to object servers %s"
-msgstr "FEHLER: Ausnahme bei der Übertragung von Daten an die Ojektserver %s"
-
msgid "ERROR Failed to get my own IPs?"
msgstr "FEHLER Eigene IPs konnten nicht abgerufen werden?"
-msgid "ERROR Insufficient Storage"
-msgstr "FEHLER Nicht genügend Speicher"
-
-#, python-format
-msgid "ERROR Object %(obj)s failed audit and was quarantined: %(err)s"
-msgstr ""
-"FEHLER Objekt %(obj)s hat die Prüfung nicht bestanden und wurde unter "
-"Quarantäne gestellt: %(err)s"
-
-#, python-format
-msgid "ERROR Pickle problem, quarantining %s"
-msgstr "FEHLER Pickle-Problem, %s wird unter Quarantäne gestellt"
-
#, python-format
msgid "ERROR Remote drive not mounted %s"
msgstr "FEHLER Entferntes Laufwerk nicht eingehängt %s"
-#, python-format
-msgid "ERROR Syncing %(db_file)s %(row)s"
-msgstr "FEHLER beim Synchronisieren %(db_file)s %(row)s"
-
-#, python-format
-msgid "ERROR Syncing %s"
-msgstr "FEHLER beim Synchronisieren %s"
-
-#, python-format
-msgid "ERROR Trying to audit %s"
-msgstr "FEHLER beim Versuch, %s zu prüfen"
-
-msgid "ERROR Unhandled exception in request"
-msgstr "FEHLER Nicht behandelte Ausnahme in Anforderung"
-
-#, python-format
-msgid "ERROR __call__ error with %(method)s %(path)s "
-msgstr "FEHLER __call__-Fehler mit %(method)s %(path)s "
-
-#, python-format
-msgid ""
-"ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry "
-"later)"
-msgstr ""
-"FEHLER Containeraktualisierung fehlgeschlagen mit %(ip)s:%(port)s/%(device)s "
-"(wird zu einem späteren Zeitpunkt erneut versucht)"
-
-#, python-format
-msgid ""
-"ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry "
-"later): "
-msgstr ""
-"FEHLER Kontoaktualisierung fehlgeschlagen mit %(ip)s:%(port)s/%(device)s "
-"(wird später erneut versucht): "
-
-#, python-format
-msgid "ERROR async pending file with unexpected name %s"
-msgstr "FEHLER asynchrone anstehende Datei mit unerwartetem Namen %s"
-
msgid "ERROR auditing"
msgstr "FEHLER bei der Prüfung"
#, python-format
-msgid "ERROR auditing: %s"
-msgstr "FEHLER bei der Prüfung: %s"
-
-#, python-format
-msgid ""
-"ERROR container update failed with %(ip)s:%(port)s/%(dev)s (saving for async "
-"update later)"
-msgstr ""
-"FEHLER Containeraktualisierung fehlgeschlagen mit %(ip)s:%(port)s/%(dev)s "
-"(wird für asynchrone Aktualisierung zu einem späteren Zeitpunkt gespeichert)"
-
-#, python-format
msgid "ERROR reading HTTP response from %s"
msgstr "FEHLER beim Lesen der HTTP-Antwort von %s"
@@ -487,47 +153,6 @@ msgid "ERROR trying to replicate"
msgstr "FEHLER beim Versuch zu replizieren"
#, python-format
-msgid "ERROR while trying to clean up %s"
-msgstr "FEHLER beim Versuch, %s zu bereinigen"
-
-#, python-format
-msgid "ERROR with %(type)s server %(ip)s:%(port)s/%(device)s re: %(info)s"
-msgstr "FEHLER mit %(type)s Server %(ip)s:%(port)s/%(device)s AW: %(info)s"
-
-#, python-format
-msgid "ERROR with loading suppressions from %s: "
-msgstr "FEHLER beim Laden von Unterdrückungen von %s: "
-
-#, python-format
-msgid "ERROR with remote server %(ip)s:%(port)s/%(device)s"
-msgstr "FEHLER mit entferntem Server %(ip)s:%(port)s/%(device)s"
-
-#, python-format
-msgid "ERROR: Failed to get paths to drive partitions: %s"
-msgstr ""
-"FEHLER: Pfade zu Laufwerkpartitionen konnten nicht abgerufen werden: %s"
-
-#, python-format
-msgid "ERROR: Unable to access %(path)s: %(error)s"
-msgstr "FEHLER: Auf %(path)s kann nicht zugegriffen werden: %(error)s"
-
-#, python-format
-msgid "ERROR: Unable to run auditing: %s"
-msgstr "FEHLER: Prüfung konnte nicht durchgeführt werden: %s"
-
-#, python-format
-msgid ""
-"Error code %(status)d is returned from remote server %(ip)s: %(port)s / "
-"%(device)s"
-msgstr ""
-"Fehlercode %(status)d wurde vom entfernten Server %(ip)s:%(port)s / "
-"%(device)s zurück gegeben"
-
-#, python-format
-msgid "Error decoding fragments for %r"
-msgstr "Fehler beim Dekodieren von Fragmenten für %r"
-
-#, python-format
msgid "Error decrypting %(resp_type)s: %(reason)s"
msgstr "Fehler beim Entschlüsseln %(resp_type)s: %(reason)s"
@@ -543,9 +168,6 @@ msgstr "Fehler beim Entschlüsseln des header %(header)s: %(error)s"
msgid "Error decrypting object: %s"
msgstr "Fehler beim Entschlüsseln object: %s"
-msgid "Error hashing suffix"
-msgstr "Fehler beim Hashing des Suffix"
-
#, python-format
msgid "Error in %(conf)r with mtime_check_interval: %(error)s"
msgstr "Fehler in %(conf)r mit mtime_check_interval: %(error)s"
@@ -576,22 +198,6 @@ msgstr "Fehler beim Abrufen der recon-Daten"
msgid "Error sending UDP message to %(target)r: %(err)s"
msgstr "Fehler beim Senden von UDP Nachricht zu %(target)r: %(err)s"
-msgid "Error syncing handoff partition"
-msgstr "Fehler bei der Synchronisierung der Übergabepartition"
-
-msgid "Error syncing partition"
-msgstr "Fehler beim Syncen der Partition"
-
-#, python-format
-msgid "Error syncing with node: %s"
-msgstr "Fehler beim Synchronisieren mit Knoten: %s"
-
-#, python-format
-msgid "Error trying to rebuild %(path)s policy#%(policy)d frag#%(frag_index)s"
-msgstr ""
-"Fehler bei Versuch, erneuten Build zu erstellen für %(path)s policy#"
-"%(policy)d frag#%(frag_index)s"
-
msgid "Error: An error occurred"
msgstr "Fehler: Ein Fehler ist aufgetreten"
@@ -603,49 +209,12 @@ msgid "Error: unable to locate %s"
msgstr "Fehler: %s kann nicht lokalisiert werden"
#, python-format
-msgid "Exception fetching fragments for %r"
-msgstr "Ausnahme beim Abrufen von Fragmenten für %r"
-
-msgid "Exception in top-level reconstruction loop"
-msgstr "Ausnahme in Replizierungsloop der höchsten Ebene"
-
-#, python-format
-msgid "Exception in top-level replication loop: %s"
-msgstr "Ausnahme in Replizierungsschleife der höchsten Ebene: %s"
-
-#, python-format
-msgid "Exception while deleting container %(account)s %(container)s %(err)s"
-msgstr "Ausnahme beim Löschen von Container %(account)s %(container)s %(err)s"
-
-#, python-format
-msgid "Exception with %(ip)s:%(port)s/%(device)s"
-msgstr "Ausnahme bei %(ip)s:%(port)s/%(device)s"
-
-#, python-format
-msgid "Expect: 100-continue on %s"
-msgstr "Erwartet: 100-continue auf %s"
-
-#, python-format
msgid "Following CNAME chain for %(given_domain)s to %(found_domain)s"
msgstr "CNAME-Kette für %(given_domain)s bis %(found_domain)s wird gefolgt"
msgid "Found configs:"
msgstr "Gefundene Konfigurationen:"
-msgid ""
-"Handoffs first mode still has handoffs remaining. Aborting current "
-"replication pass."
-msgstr ""
-"Der Modus 'handoffs_first' ist noch nicht abgeschlossen. Der aktuelle "
-"Replikationsdurchgang wird abgebrochen."
-
-msgid ""
-"Handoffs only mode still has handoffs remaining. Next pass will continue to "
-"revert handoffs."
-msgstr ""
-"Der Modus 'handoffs_first' ist noch nicht abgeschlossen. Der nächste "
-"Durchgang setzt die Übergaben fort."
-
msgid "Host unreachable"
msgstr "Host nicht erreichbar"
@@ -662,14 +231,6 @@ msgid "Invalid pending entry %(file)s: %(entry)s"
msgstr "Ungültiger ausstehender Eintrag %(file)s: %(entry)s"
#, python-format
-msgid "Invalid response %(resp)s from %(full_path)s"
-msgstr "Ungültige Rückmeldung %(resp)s von %(full_path)s"
-
-#, python-format
-msgid "Invalid response %(resp)s from %(ip)s"
-msgstr "Ungültige Rückmeldung %(resp)s von %(ip)s"
-
-#, python-format
msgid ""
"Invalid scheme %r in X-Container-Sync-To, must be \"//\", \"http\", or "
"\"https\"."
@@ -681,17 +242,6 @@ msgid "Invalid swift_bytes"
msgstr "Ungültige swift_bytes"
#, python-format
-msgid "Killing long-running rsync: %s"
-msgstr "Lange laufendes rsync wird gekillt: %s"
-
-#, python-format
-msgid "Loading JSON from %(auditor_status)s failed (%(err)s)"
-msgstr "Laden von JSON aus %(auditor_status)s fehlgeschlagen: (%(err)s)"
-
-msgid "Lockup detected.. killing live coros."
-msgstr "Suche erkannt. Live-Coros werden gelöscht."
-
-#, python-format
msgid "Mapped %(given_domain)s to %(found_domain)s"
msgstr "%(given_domain)s zugeordnet zu %(found_domain)s"
@@ -722,166 +272,14 @@ msgstr "Keine Richtlinie mit Index %s"
msgid "No realm key for %r"
msgstr "Kein Bereichsschlüssel für %r"
-#, python-format
-msgid "No space left on device for %(file)s (%(err)s)"
-msgstr "Kein freier Speicherplatz im Gerät für %(file)s (%(err)s) vorhanden."
-
-#, python-format
-msgid "Node error limited %(ip)s:%(port)s (%(device)s)"
-msgstr "Knotenfehler begrenzt %(ip)s:%(port)s (%(device)s)"
-
-#, python-format
-msgid "Not enough object servers ack'ed (got %d)"
-msgstr "Es wurden nicht genügend Objektserver bestätigt (got %d)."
-
-#, python-format
-msgid ""
-"Not found %(sync_from)r => %(sync_to)r - object "
-"%(obj_name)r"
-msgstr ""
-"Nicht gefunden %(sync_from)r => %(sync_to)r - Objekt "
-"%(obj_name)r"
-
-#, python-format
-msgid "Nothing reconstructed for %s seconds."
-msgstr "Für %s Sekunden nichts rekonstruiert."
-
-#, python-format
-msgid "Nothing replicated for %s seconds."
-msgstr "Für %s Sekunden nichts repliziert."
-
-msgid "Object"
-msgstr "Objekt"
-
-msgid "Object PUT"
-msgstr "Objekt PUT"
-
-#, python-format
-msgid ""
-"Object PUT exceptions after last send, %(conns)s/%(nodes)s required "
-"connections"
-msgstr ""
-"Objekt PUT Ausnahme nach letztem Senden, %(conns)s/%(nodes)s erfordert eine "
-"Verbindung"
-
-#, python-format
-msgid ""
-"Object PUT exceptions during send, %(conns)s/%(nodes)s required connections"
-msgstr ""
-"Objekt PUT Ausnahme beim Senden %(conns)s/%(nodes)s erfordern eine Verbindung"
-
-#, python-format
-msgid "Object PUT returning 202 for 409: %(req_timestamp)s <= %(timestamps)r"
-msgstr ""
-"PUT-Operation für ein Objekt gibt 202 für 409 zurück: %(req_timestamp)s <= "
-"%(timestamps)r"
-
-#, python-format
-msgid "Object PUT returning 412, %(statuses)r"
-msgstr "Objekt PUT Rückgabe 412, %(statuses)r"
-
-#, python-format
-msgid "Object PUT returning 503, %(conns)s/%(nodes)s required connections"
-msgstr ""
-"Objekt PUT gibt 503 zurück, %(conns)s/%(nodes)s erfordert eine Verbindung"
-
-#, python-format
-msgid ""
-"Object audit (%(type)s) \"%(mode)s\" mode completed: %(elapsed).02fs. Total "
-"quarantined: %(quars)d, Total errors: %(errors)d, Total files/sec: "
-"%(frate).2f, Total bytes/sec: %(brate).2f, Auditing time: %(audit).2f, Rate: "
-"%(audit_rate).2f"
-msgstr ""
-"Objektprüfung (%(type)s) \"%(mode)s\" Modus abgeschlossen: %(elapsed).02fs. "
-"Unter Quarantäne gestellt insgesamt: %(quars)d, Fehler insgesamt: "
-"%(errors)d, Dateien/s insgesamt: %(frate).2f, Bytes/s insgesamt: "
-"%(brate).2f, Prüfungszeit: %(audit).2f, Geschwindigkeit: %(audit_rate).2f"
-
-#, python-format
-msgid ""
-"Object audit (%(type)s). Since %(start_time)s: Locally: %(passes)d passed, "
-"%(quars)d quarantined, %(errors)d errors, files/sec: %(frate).2f, bytes/sec: "
-"%(brate).2f, Total time: %(total).2f, Auditing time: %(audit).2f, Rate: "
-"%(audit_rate).2f"
-msgstr ""
-"Objektprüfung (%(type)s). Seit %(start_time)s: Lokal: %(passes)d übergeben, "
-"%(quars)d unter Quarantäne gestellt, %(errors)d Fehler, Dateien/s: "
-"%(frate).2f, Bytes/s: %(brate).2f, Zeit insgesamt: %(total).2f, "
-"Prüfungszeit: %(audit).2f, Geschwindigkeit: %(audit_rate).2f"
-
-#, python-format
-msgid "Object audit stats: %s"
-msgstr "Objektprüfungsstatistik: %s"
-
-#, python-format
-msgid "Object reconstruction complete (once). (%.02f minutes)"
-msgstr "Objektrekonstruktion vollständig (einmal). (%.02f Minuten)"
-
-#, python-format
-msgid "Object reconstruction complete. (%.02f minutes)"
-msgstr "Objektrekonstruktion vollständig. (%.02f Minuten)"
-
-#, python-format
-msgid "Object replication complete (once). (%.02f minutes)"
-msgstr "Objektreplizierung abgeschlossen (einmal). (%.02f Minuten)"
-
-#, python-format
-msgid "Object replication complete. (%.02f minutes)"
-msgstr "Objektreplikation vollständig. (%.02f Minuten)"
-
-#, python-format
-msgid "Object servers returned %s mismatched etags"
-msgstr "Objektserver haben %s nicht übereinstimmende Etags zurückgegeben"
-
-#, python-format
-msgid "Object update sweep completed: %.02fs"
-msgstr "Scanvorgang für Objektaktualisierung abgeschlossen: %.02fs"
-
msgid "Params, queries, and fragments not allowed in X-Container-Sync-To"
msgstr ""
"Parameter, Abfragen und Fragmente nicht zulässig in X-Container-Sync-To"
-#, python-format
-msgid "Partition times: max %(max).4fs, min %(min).4fs, med %(med).4fs"
-msgstr ""
-"Partitionszeiten: max. %(max).4fs, min. %(min).4fs, durchschnittl. %(med).4fs"
-
-#, python-format
-msgid "Pass completed in %(time)ds; %(objects)d objects expired"
-msgstr "Durchgänge abgeschlossen in %(time)ds; %(objects)d Objekte abgelaufen"
-
-#, python-format
-msgid "Pass so far %(time)ds; %(objects)d objects expired"
-msgstr "Bisherige Durchgänge %(time)ds; %(objects)d Objekte abgelaufen"
-
msgid "Path required in X-Container-Sync-To"
msgstr "Pfad in X-Container-Sync-To ist erforderlich"
#, python-format
-msgid "Pipeline is \"%s\""
-msgstr "Pipeline ist \"%s\""
-
-#, python-format
-msgid "Pipeline was modified. New pipeline is \"%s\"."
-msgstr "Pipeline wurde geändert. Neue Pipeline ist \"%s\"."
-
-#, python-format
-msgid "Problem checking EC fragment %(datadir)s: %(err)s"
-msgstr "Problemüberprüfung EC Fragment %(datadir)s: %(err)s"
-
-#, python-format
-msgid "Problem cleaning up %(datadir)s (%(err)s)"
-msgstr "Problem bei der Bereinigung von %(datadir)s (%(err)s)"
-
-#, python-format
-msgid "Problem cleaning up %s"
-msgstr "Problem bei der Bereinigung von %s"
-
-#, python-format
-msgid "Problem with fragment response: %s"
-msgstr "Problem mit Antwort von Fragment: %s"
-
-#, python-format
msgid "Profiling Error: %s"
msgstr "Fehler bei der Profilerstellung: %s"
@@ -892,19 +290,6 @@ msgstr ""
"verschoben."
#, python-format
-msgid "Quarantined %(hsh_path)s to %(quar_path)s because it is not a directory"
-msgstr ""
-"%(hsh_path)s bis %(quar_path)s wurden unter Quarantäne gestellt, da es sich "
-"nicht um ein Verzeichnis handelt"
-
-#, python-format
-msgid ""
-"Quarantined %(object_path)s to %(quar_path)s because it is not a directory"
-msgstr ""
-"%(object_path)s bis %(quar_path)s wurden unter Quarantäne gestellt, da es "
-"sich nicht um ein Verzeichnis handelt"
-
-#, python-format
msgid "Quarantining DB %s"
msgstr "Datenbank %s wird unter Quarantäne gestellt"
@@ -919,14 +304,6 @@ msgid "Removed %(remove)d dbs"
msgstr "%(remove)d Datenbanken entfernt"
#, python-format
-msgid "Removing %s objects"
-msgstr "%s Objekte werden entfernt"
-
-#, python-format
-msgid "Removing partition: %s"
-msgstr "Partition wird entfernt: %s"
-
-#, python-format
msgid "Removing pid file %(pid_file)s with wrong pid %(pid)d"
msgstr "PID-Datei %(pid_file)s mit falscher PID %(pid)d wird entfernt"
@@ -946,60 +323,14 @@ msgid "Returning 497 because of blacklisting: %s"
msgstr "497 wird aufgrund von Blacklisting zurückgegeben: %s"
#, python-format
-msgid ""
-"Returning 498 for %(meth)s to %(acc)s/%(cont)s/%(obj)s . Ratelimit (Max "
-"Sleep) %(e)s"
-msgstr ""
-"498 wird für %(meth)s auf %(acc)s/%(cont)s/%(obj)s zurückgegeben. "
-"Geschwindigkeitsbegrenzung (Max. Inaktivität) %(e)s"
-
-msgid "Ring change detected. Aborting current reconstruction pass."
-msgstr ""
-"Ringänderung erkannt. Aktueller Rekonstruktionsdurchgang wird abgebrochen."
-
-msgid "Ring change detected. Aborting current replication pass."
-msgstr ""
-"Ringänderung erkannt. Aktueller Replizierungsdurchlauf wird abgebrochen."
-
-#, python-format
msgid "Running %s once"
msgstr "%s läuft einmal"
-msgid "Running object reconstructor in script mode."
-msgstr "Objektrekonstruktor läuft im Skriptmodus."
-
-msgid "Running object replicator in script mode."
-msgstr "Objektreplikator läuft im Skriptmodus."
-
#, python-format
msgid "Signal %(server)s pid: %(pid)s signal: %(signal)s"
msgstr "Signal %(server)s PID: %(pid)s Signal: %(signal)s"
#, python-format
-msgid ""
-"Since %(time)s: %(sync)s synced [%(delete)s deletes, %(put)s puts], %(skip)s "
-"skipped, %(fail)s failed"
-msgstr ""
-"Seit %(time)s: %(sync)s synchronisiert [%(delete)s Löschungen, %(put)s "
-"Puts], %(skip)s übersprungen, %(fail)s fehlgeschlagen"
-
-#, python-format
-msgid ""
-"Since %(time)s: Account audits: %(passed)s passed audit,%(failed)s failed "
-"audit"
-msgstr ""
-"Seit %(time)s: Kontoprüfungen: %(passed)s bestandene Prüfung,%(failed)s "
-"nicht bestandene Prüfung"
-
-#, python-format
-msgid ""
-"Since %(time)s: Container audits: %(pass)s passed audit, %(fail)s failed "
-"audit"
-msgstr ""
-"Seit %(time)s: Containerprüfungen: %(pass)s bestandene Prüfung, %(fail)s "
-"nicht bestandene Prüfung"
-
-#, python-format
msgid "Skipping %(datadir)s because %(err)s"
msgstr "Überspringe %(datadir)s aufgrund %(err)s"
@@ -1011,67 +342,9 @@ msgstr "%s wird übersprungen, weil es nicht eingehängt ist"
msgid "Starting %s"
msgstr "%s wird gestartet"
-msgid "Starting object reconstruction pass."
-msgstr "Objektrekonstruktionsdurchgang wird gestartet."
-
-msgid "Starting object reconstructor in daemon mode."
-msgstr "Objektrekonstruktor wird im Daemon-Modus gestartet."
-
-msgid "Starting object replication pass."
-msgstr "Objektreplikationsdurchgang wird gestartet."
-
-msgid "Starting object replicator in daemon mode."
-msgstr "Objektreplikator wird im Dämonmodus gestartet."
-
-#, python-format
-msgid "Successful rsync of %(src)s at %(dst)s (%(time).03f)"
-msgstr "Erfolgreiches rsync von %(src)s um %(dst)s (%(time).03f)"
-
msgid "The file type are forbidden to access!"
msgstr "Auf den Dateityp darf nicht zugegriffen werden!"
-#, python-format
-msgid ""
-"The total %(key)s for the container (%(total)s) does not match the sum of "
-"%(key)s across policies (%(sum)s)"
-msgstr ""
-"Die Gesamtsumme an %(key)s für den Container (%(total)s) entspricht nicht "
-"der Summe der %(key)s für alle Richtlinien (%(sum)s)"
-
-#, python-format
-msgid "Timeout fetching fragments for %r"
-msgstr "Zeitüberschreitung beim Abrufen von Fragmenten für %r"
-
-#, python-format
-msgid "Trying to %(method)s %(path)s"
-msgstr "Versuch, %(method)s %(path)s"
-
-#, python-format
-msgid "Trying to GET %(full_path)s"
-msgstr "Versuch, %(full_path)s mit GET abzurufen"
-
-#, python-format
-msgid "Trying to get %(status_type)s status of PUT to %(path)s"
-msgstr ""
-"Es wird versucht, %(status_type)s Status von PUT für %(path)s abzurufen"
-
-msgid "Trying to read during GET"
-msgstr "Versuch, während des GET-Vorgangs zu lesen"
-
-msgid "Trying to read object during GET (retrying)"
-msgstr "Versuch, während des GET-Vorgangs zu lesen (Wiederholung)"
-
-msgid "Trying to send to client"
-msgstr "Versuch, an den Client zu senden"
-
-#, python-format
-msgid "Trying to sync suffixes with %s"
-msgstr "Es wird versucht, Suffixe mit %s zu synchronisieren."
-
-#, python-format
-msgid "Trying to write to %s"
-msgstr "Versuch, an %s zu schreiben"
-
msgid "UNCAUGHT EXCEPTION"
msgstr "NICHT ABGEFANGENE AUSNAHME"
@@ -1081,12 +354,6 @@ msgstr ""
"%(section)s-Konfigurationsabschnitt in %(conf)s kann nicht gefunden werden"
#, python-format
-msgid "Unable to load internal client from config: %(conf)r (%(error)s)"
-msgstr ""
-"Interner Client konnte nicht aus der Konfiguration geladen werden: %(conf)r "
-"(%(error)s)"
-
-#, python-format
msgid "Unable to locate %s in libc. Leaving as a no-op."
msgstr ""
"%s konnte nicht in libc gefunden werden. Wird als Nullbefehl verlassen."
@@ -1113,29 +380,6 @@ msgstr "fsync() kann für Verzeichnis %(dir)s nicht ausgeführt werden: %(err)s"
msgid "Unable to read config from %s"
msgstr "Konfiguration aus %s kann nicht gelesen werden"
-#, python-format
-msgid "Unauth %(sync_from)r => %(sync_to)r"
-msgstr "Nicht genehmigte %(sync_from)r => %(sync_to)r"
-
-#, python-format
-msgid ""
-"Unexpected fragment data type (not quarantined) %(datadir)s: %(type)s at "
-"offset 0x%(offset)x"
-msgstr ""
-"Unerwarteter Fragment Datentyp (nicht unter Quarantäne) %(datadir)s: "
-"%(type)s at offset 0x%(offset)x"
-
-msgid "Unhandled exception"
-msgstr "Nicht behandelte Exception"
-
-#, python-format
-msgid "Update report failed for %(container)s %(dbfile)s"
-msgstr "Aktualisierungsbericht fehlgeschlagen für %(container)s %(dbfile)s"
-
-#, python-format
-msgid "Update report sent for %(container)s %(dbfile)s"
-msgstr "Aktualisierungsbericht gesendet für %(container)s %(dbfile)s"
-
msgid ""
"WARNING: SSL should only be enabled for testing purposes. Use external SSL "
"termination for a production deployment."
@@ -1191,10 +435,6 @@ msgstr ""
"durchgeführt werden"
#, python-format
-msgid "autocreate account %r"
-msgstr "Automatisch erstelltes Konto %r"
-
-#, python-format
msgid "method %s is not allowed."
msgstr "Methode %s ist nicht erlaubt."
diff --git a/swift/locale/en_GB/LC_MESSAGES/swift.po b/swift/locale/en_GB/LC_MESSAGES/swift.po
index d8c92ce4a..232f9ead0 100644
--- a/swift/locale/en_GB/LC_MESSAGES/swift.po
+++ b/swift/locale/en_GB/LC_MESSAGES/swift.po
@@ -4,19 +4,18 @@
#
# Translators:
# Andi Chandler <andi@gowling.com>, 2016. #zanata
-# Andreas Jaeger <jaegerandi@gmail.com>, 2016. #zanata
# Andi Chandler <andi@gowling.com>, 2018. #zanata
# Andi Chandler <andi@gowling.com>, 2019. #zanata
-# Andi Chandler <andi@gowling.com>, 2020. #zanata
+# Andi Chandler <andi@gowling.com>, 2022. #zanata
msgid ""
msgstr ""
"Project-Id-Version: swift VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2020-10-07 00:36+0000\n"
+"POT-Creation-Date: 2022-05-27 18:57+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
-"PO-Revision-Date: 2020-10-10 02:32+0000\n"
+"PO-Revision-Date: 2022-06-01 06:49+0000\n"
"Last-Translator: Andi Chandler <andi@gowling.com>\n"
"Language: en_GB\n"
"Plural-Forms: nplurals=2; plural=(n != 1);\n"
@@ -32,44 +31,6 @@ msgstr ""
"user quit"
#, python-format
-msgid " - %s"
-msgstr " - %s"
-
-#, python-format
-msgid " - parallel, %s"
-msgstr " - parallel, %s"
-
-#, python-format
-msgid ""
-"%(checked)d suffixes checked - %(hashed).2f%% hashed, %(synced).2f%% synced"
-msgstr ""
-"%(checked)d suffixes checked - %(hashed).2f%% hashed, %(synced).2f%% synced"
-
-#, python-format
-msgid "%(msg)s %(ip)s:%(port)s/%(device)s"
-msgstr "%(msg)s %(ip)s:%(port)s/%(device)s"
-
-#, python-format
-msgid ""
-"%(reconstructed)d/%(total)d (%(percentage).2f%%) partitions reconstructed in "
-"%(time).2fs (%(rate).2f/sec, %(remaining)s remaining)"
-msgstr ""
-"%(reconstructed)d/%(total)d (%(percentage).2f%%) partitions reconstructed in "
-"%(time).2fs (%(rate).2f/sec, %(remaining)s remaining)"
-
-#, python-format
-msgid ""
-"%(replicated)d/%(total)d (%(percentage).2f%%) partitions replicated in "
-"%(time).2fs (%(rate).2f/sec, %(remaining)s remaining)"
-msgstr ""
-"%(replicated)d/%(total)d (%(percentage).2f%%) partitions replicated in "
-"%(time).2fs (%(rate).2f/sec, %(remaining)s remaining)"
-
-#, python-format
-msgid "%(replication_ip)s/%(device)s responded as unmounted"
-msgstr "%(replication_ip)s/%(device)s responded as unmounted"
-
-#, python-format
msgid "%(server)s #%(number)d not running (%(conf)s)"
msgstr "%(server)s #%(number)d not running (%(conf)s)"
@@ -86,12 +47,16 @@ msgid "%(server)s running (%(pid)s - %(pid_file)s)"
msgstr "%(server)s running (%(pid)s - %(pid_file)s)"
#, python-format
-msgid "%(success)s successes, %(failure)s failures"
-msgstr "%(success)s successes, %(failure)s failures"
+msgid "%(server_type)s audit \"once\" mode completed: %(elapsed).02fs"
+msgstr "%(server_type)s audit \"once\" mode completed: %(elapsed).02fs"
+
+#, python-format
+msgid "%(server_type)s audit pass completed: %(elapsed).02fs"
+msgstr "%(server_type)s audit pass completed: %(elapsed).02fs"
#, python-format
-msgid "%(type)s returning 503 for %(statuses)s"
-msgstr "%(type)s returning 503 for %(statuses)s"
+msgid "%(success)s successes, %(failure)s failures"
+msgstr "%(success)s successes, %(failure)s failures"
#, python-format
msgid "%(type)s: %(value)s"
@@ -106,34 +71,9 @@ msgid "%s does not exist"
msgstr "%s does not exist"
#, python-format
-msgid "%s is not mounted"
-msgstr "%s is not mounted"
-
-#, python-format
-msgid "%s responded as unmounted"
-msgstr "%s responded as unmounted"
-
-#, python-format
msgid "%s: Connection reset by peer"
msgstr "%s: Connection reset by peer"
-msgid "Account"
-msgstr "Account"
-
-#, python-format
-msgid "Account audit \"once\" mode completed: %.02fs"
-msgstr "Account audit \"once\" mode completed: %.02fs"
-
-#, python-format
-msgid "Account audit pass completed: %.02fs"
-msgstr "Account audit pass completed: %.02fs"
-
-#, python-format
-msgid ""
-"Adding required filter %(filter_name)s to pipeline at position %(insert_at)d"
-msgstr ""
-"Adding required filter %(filter_name)s to pipeline at position %(insert_at)d"
-
#, python-format
msgid ""
"Attempted to replicate %(count)d dbs in %(time).5f seconds (%(rate).5f/s)"
@@ -145,51 +85,21 @@ msgid "Audit Failed for %(path)s: %(err)s"
msgstr "Audit Failed for %(path)s: %(err)s"
#, python-format
-msgid "Audit passed for %s"
-msgstr "Audit passed for %s"
-
-#, python-format
msgid "Bad key for %(name)r: %(err)s"
msgstr "Bad key for %(name)r: %(err)s"
-#, python-format
-msgid "Bad rsync return code: %(ret)d <- %(args)s"
-msgstr "Bad rsync return code: %(ret)d <- %(args)s"
-
-msgid "Begin account audit \"once\" mode"
-msgstr "Begin account audit \"once\" mode"
-
-msgid "Begin account audit pass."
-msgstr "Begin account audit pass."
-
-msgid "Begin container audit \"once\" mode"
-msgstr "Begin container audit \"once\" mode"
-
-msgid "Begin container audit pass."
-msgstr "Begin container audit pass."
+msgid "Begin {} audit \"once\" mode"
+msgstr "Begin {} audit \"once\" mode"
-msgid "Begin container sync \"once\" mode"
-msgstr "Begin container sync \"once\" mode"
-
-msgid "Begin container update single threaded sweep"
-msgstr "Begin container update single threaded sweep"
-
-msgid "Begin container update sweep"
-msgstr "Begin container update sweep"
-
-#, python-format
-msgid "Begin object audit \"%(mode)s\" mode (%(audi_type)s%(description)s)"
-msgstr "Begin object audit \"%(mode)s\" mode (%(audi_type)s%(description)s)"
-
-msgid "Begin object update single threaded sweep"
-msgstr "Begin object update single threaded sweep"
-
-msgid "Begin object update sweep"
-msgstr "Begin object update sweep"
+msgid "Begin {} audit pass."
+msgstr "Begin {} audit pass."
msgid "Beginning replication run"
msgstr "Beginning replication run"
+msgid "Broken pipe"
+msgstr "Broken pipe"
+
msgid "Broker error trying to rollback locked connection"
msgstr "Broker error trying to rollback locked connection"
@@ -201,37 +111,6 @@ msgstr "Can not access the file %s."
msgid "Can not load profile data from %s."
msgstr "Can not load profile data from %s."
-#, python-format
-msgid "Cannot read %(auditor_status)s (%(err)s)"
-msgstr "Cannot read %(auditor_status)s (%(err)s)"
-
-#, python-format
-msgid "Cannot write %(auditor_status)s (%(err)s)"
-msgstr "Cannot write %(auditor_status)s (%(err)s)"
-
-#, python-format
-msgid "Client did not read from proxy within %ss"
-msgstr "Client did not read from proxy within %ss"
-
-msgid "Client disconnected without sending enough data"
-msgstr "Client disconnected without sending enough data"
-
-msgid "Client disconnected without sending last chunk"
-msgstr "Client disconnected without sending last chunk"
-
-#, python-format
-msgid ""
-"Client path %(client)s does not match path stored in object metadata %(meta)s"
-msgstr ""
-"Client path %(client)s does not match path stored in object metadata %(meta)s"
-
-msgid ""
-"Configuration option internal_client_conf_path not defined. Using default "
-"configuration, See internal-client.conf-sample for options"
-msgstr ""
-"Configuration option internal_client_conf_path not defined. Using default "
-"configuration, See internal-client.conf-sample for options"
-
msgid "Connection refused"
msgstr "Connection refused"
@@ -241,57 +120,6 @@ msgstr "Connection reset"
msgid "Connection timeout"
msgstr "Connection timeout"
-msgid "Container"
-msgstr "Container"
-
-#, python-format
-msgid "Container audit \"once\" mode completed: %.02fs"
-msgstr "Container audit \"once\" mode completed: %.02fs"
-
-#, python-format
-msgid "Container audit pass completed: %.02fs"
-msgstr "Container audit pass completed: %.02fs"
-
-#, python-format
-msgid "Container sync \"once\" mode completed: %.02fs"
-msgstr "Container sync \"once\" mode completed: %.02fs"
-
-#, python-format
-msgid ""
-"Container sync report: %(container)s, time window start: %(start)s, time "
-"window end: %(end)s, puts: %(puts)s, posts: %(posts)s, deletes: %(deletes)s, "
-"bytes: %(bytes)s, sync_point1: %(point1)s, sync_point2: %(point2)s, "
-"total_rows: %(total)s"
-msgstr ""
-"Container sync report: %(container)s, time window start: %(start)s, time "
-"window end: %(end)s, puts: %(puts)s, posts: %(posts)s, deletes: %(deletes)s, "
-"bytes: %(bytes)s, sync_point1: %(point1)s, sync_point2: %(point2)s, "
-"total_rows: %(total)s"
-
-#, python-format
-msgid ""
-"Container update single threaded sweep completed: %(elapsed).02fs, "
-"%(success)s successes, %(fail)s failures, %(no_change)s with no changes"
-msgstr ""
-"Container update single threaded sweep completed: %(elapsed).02fs, "
-"%(success)s successes, %(fail)s failures, %(no_change)s with no changes"
-
-#, python-format
-msgid "Container update sweep completed: %.02fs"
-msgstr "Container update sweep completed: %.02fs"
-
-#, python-format
-msgid ""
-"Container update sweep of %(path)s completed: %(elapsed).02fs, %(success)s "
-"successes, %(fail)s failures, %(no_change)s with no changes"
-msgstr ""
-"Container update sweep of %(path)s completed: %(elapsed).02fs, %(success)s "
-"successes, %(fail)s failures, %(no_change)s with no changes"
-
-#, python-format
-msgid "Could not autocreate account %r"
-msgstr "Could not autocreate account %r"
-
#, python-format
msgid ""
"Could not bind to %(addr)s:%(port)s after trying for %(timeout)s seconds"
@@ -310,115 +138,17 @@ msgid "Did not get a keys dict"
msgstr "Did not get a keys dict"
#, python-format
-msgid "Directory %(directory)r does not map to a valid policy (%(error)s)"
-msgstr "Directory %(directory)r does not map to a valid policy (%(error)s)"
-
-#, python-format
-msgid "ERROR %(db_file)s: %(validate_sync_to_err)s"
-msgstr "ERROR %(db_file)s: %(validate_sync_to_err)s"
-
-#, python-format
-msgid "ERROR %(status)d %(body)s From %(type)s Server"
-msgstr "ERROR %(status)d %(body)s From %(type)s Server"
-
-#, python-format
-msgid "ERROR %(status)d %(body)s From Object Server"
-msgstr "ERROR %(status)d %(body)s From Object Server"
-
-#, python-format
-msgid "ERROR %(status)d %(body)s From Object Server re: %(path)s"
-msgstr "ERROR %(status)d %(body)s From Object Server re: %(path)s"
-
-#, python-format
-msgid "ERROR %(status)d Expect: 100-continue From Object Server"
-msgstr "ERROR %(status)d Expect: 100-continue From Object Server"
-
-#, python-format
-msgid "ERROR %(status)d Trying to %(method)s %(path)s From %(type)s Server"
-msgstr "ERROR %(status)d Trying to %(method)s %(path)s From %(type)s Server"
-
-#, python-format
-msgid ""
-"ERROR Account update failed with %(ip)s:%(port)s/%(device)s (will retry "
-"later): Response %(status)s %(reason)s"
-msgstr ""
-"ERROR Account update failed with %(ip)s:%(port)s/%(device)s (will retry "
-"later): Response %(status)s %(reason)s"
-
-#, python-format
-msgid ""
-"ERROR Account update failed: different numbers of hosts and devices in "
-"request: \"%(hosts)s\" vs \"%(devices)s\""
-msgstr ""
-"ERROR Account update failed: different numbers of hosts and devices in "
-"request: \"%(hosts)s\" vs \"%(devices)s\""
-
-#, python-format
-msgid "ERROR Client read timeout (%ss)"
-msgstr "ERROR Client read timeout (%ss)"
-
-#, python-format
-msgid ""
-"ERROR Container update failed (saving for async update later): %(status)d "
-"response from %(ip)s:%(port)s/%(dev)s"
-msgstr ""
-"ERROR Container update failed (saving for async update later): %(status)d "
-"response from %(ip)s:%(port)s/%(dev)s"
-
-#, python-format
-msgid ""
-"ERROR Container update failed: different numbers of hosts and devices in "
-"request: \"%(hosts)s\" vs \"%(devices)s\""
-msgstr ""
-"ERROR Container update failed: different numbers of hosts and devices in "
-"request: \"%(hosts)s\" vs \"%(devices)s\""
-
-#, python-format
-msgid "ERROR Could not get account info %s"
-msgstr "ERROR Could not get account info %s"
-
-#, python-format
-msgid "ERROR Could not get container info %s"
-msgstr "ERROR Could not get container info %s"
-
-#, python-format
-msgid "ERROR DiskFile %(data_file)s close failure: %(exc)s : %(stack)s"
-msgstr "ERROR DiskFile %(data_file)s close failure: %(exc)s : %(stack)s"
-
-msgid "ERROR Exception causing client disconnect"
-msgstr "ERROR Exception causing client disconnect"
-
-#, python-format
-msgid "ERROR Exception transferring data to object servers %s"
-msgstr "ERROR Exception transferring data to object servers %s"
+msgid "ERROR Could not get %(server_type)s info %(path)s"
+msgstr "ERROR Could not get %(server_type)s info %(path)s"
msgid "ERROR Failed to get my own IPs?"
msgstr "ERROR Failed to get my own IPs?"
-msgid "ERROR Insufficient Storage"
-msgstr "ERROR Insufficient Storage"
-
-#, python-format
-msgid "ERROR Object %(obj)s failed audit and was quarantined: %(err)s"
-msgstr "ERROR Object %(obj)s failed audit and was quarantined: %(err)s"
-
-#, python-format
-msgid "ERROR Pickle problem, quarantining %s"
-msgstr "ERROR Pickle problem, quarantining %s"
-
#, python-format
msgid "ERROR Remote drive not mounted %s"
msgstr "ERROR Remote drive not mounted %s"
#, python-format
-msgid "ERROR Syncing %(db_file)s %(row)s"
-msgstr "ERROR Syncing %(db_file)s %(row)s"
-
-#, python-format
-msgid "ERROR Syncing %s"
-msgstr "ERROR Syncing %s"
-
-#, python-format
msgid ""
"ERROR There are not enough handoff nodes to reach replica count for "
"partition %s"
@@ -426,52 +156,9 @@ msgstr ""
"ERROR There are not enough hand-off nodes to reach replica count for "
"partition %s"
-#, python-format
-msgid "ERROR Trying to audit %s"
-msgstr "ERROR Trying to audit %s"
-
-msgid "ERROR Unhandled exception in request"
-msgstr "ERROR Unhandled exception in request"
-
-#, python-format
-msgid "ERROR __call__ error with %(method)s %(path)s "
-msgstr "ERROR __call__ error with %(method)s %(path)s "
-
-#, python-format
-msgid ""
-"ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry "
-"later)"
-msgstr ""
-"ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry "
-"later)"
-
-#, python-format
-msgid ""
-"ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry "
-"later): "
-msgstr ""
-"ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry "
-"later): "
-
-#, python-format
-msgid "ERROR async pending file with unexpected name %s"
-msgstr "ERROR async pending file with unexpected name %s"
-
msgid "ERROR auditing"
msgstr "ERROR auditing"
-#, python-format
-msgid "ERROR auditing: %s"
-msgstr "ERROR auditing: %s"
-
-#, python-format
-msgid ""
-"ERROR container update failed with %(ip)s:%(port)s/%(dev)s (saving for async "
-"update later)"
-msgstr ""
-"ERROR container update failed with %(ip)s:%(port)s/%(dev)s (saving for async "
-"update later)"
-
msgid "ERROR get_keys() missing callback"
msgstr "ERROR get_keys() missing callback"
@@ -495,46 +182,6 @@ msgid "ERROR trying to replicate"
msgstr "ERROR trying to replicate"
#, python-format
-msgid "ERROR while trying to clean up %s"
-msgstr "ERROR while trying to clean up %s"
-
-#, python-format
-msgid "ERROR with %(type)s server %(ip)s:%(port)s/%(device)s re: %(info)s"
-msgstr "ERROR with %(type)s server %(ip)s:%(port)s/%(device)s re: %(info)s"
-
-#, python-format
-msgid "ERROR with loading suppressions from %s: "
-msgstr "ERROR with loading suppressions from %s: "
-
-#, python-format
-msgid "ERROR with remote server %(ip)s:%(port)s/%(device)s"
-msgstr "ERROR with remote server %(ip)s:%(port)s/%(device)s"
-
-#, python-format
-msgid "ERROR: Failed to get paths to drive partitions: %s"
-msgstr "ERROR: Failed to get paths to drive partitions: %s"
-
-#, python-format
-msgid "ERROR: Unable to access %(path)s: %(error)s"
-msgstr "ERROR: Unable to access %(path)s: %(error)s"
-
-#, python-format
-msgid "ERROR: Unable to run auditing: %s"
-msgstr "ERROR: Unable to run auditing: %s"
-
-#, python-format
-msgid ""
-"Error code %(status)d is returned from remote server %(ip)s: %(port)s / "
-"%(device)s"
-msgstr ""
-"Error code %(status)d is returned from remote server %(ip)s: %(port)s / "
-"%(device)s"
-
-#, python-format
-msgid "Error decoding fragments for %r"
-msgstr "Error decoding fragments for %r"
-
-#, python-format
msgid "Error decrypting %(resp_type)s: %(reason)s"
msgstr "Error decrypting %(resp_type)s: %(reason)s"
@@ -550,9 +197,6 @@ msgstr "Error decrypting header %(header)s: %(error)s"
msgid "Error decrypting object: %s"
msgstr "Error decrypting object: %s"
-msgid "Error hashing suffix"
-msgstr "Error hashing suffix"
-
#, python-format
msgid "Error in %(conf)r with mtime_check_interval: %(error)s"
msgstr "Error in %(conf)r with mtime_check_interval: %(error)s"
@@ -583,20 +227,6 @@ msgstr "Error retrieving recon data"
msgid "Error sending UDP message to %(target)r: %(err)s"
msgstr "Error sending UDP message to %(target)r: %(err)s"
-msgid "Error syncing handoff partition"
-msgstr "Error syncing hand-off partition"
-
-msgid "Error syncing partition"
-msgstr "Error syncing partition"
-
-#, python-format
-msgid "Error syncing with node: %s"
-msgstr "Error syncing with node: %s"
-
-#, python-format
-msgid "Error trying to rebuild %(path)s policy#%(policy)d frag#%(frag_index)s"
-msgstr "Error trying to rebuild %(path)s policy#%(policy)d frag#%(frag_index)s"
-
msgid "Error: An error occurred"
msgstr "Error: An error occurred"
@@ -608,56 +238,12 @@ msgid "Error: unable to locate %s"
msgstr "Error: unable to locate %s"
#, python-format
-msgid "Exception fetching fragments for %r"
-msgstr "Exception fetching fragments for %r"
-
-msgid "Exception in top-level reconstruction loop"
-msgstr "Exception in top-level reconstruction loop"
-
-#, python-format
-msgid "Exception in top-level replication loop: %s"
-msgstr "Exception in top-level replication loop: %s"
-
-#, python-format
-msgid "Exception while deleting container %(account)s %(container)s %(err)s"
-msgstr "Exception while deleting container %(account)s %(container)s %(err)s"
-
-#, python-format
-msgid "Exception with %(ip)s:%(port)s/%(device)s"
-msgstr "Exception with %(ip)s:%(port)s/%(device)s"
-
-#, python-format
-msgid "Expect: 100-continue on %s"
-msgstr "Expect: 100-continue on %s"
-
-#, python-format
msgid "Following CNAME chain for %(given_domain)s to %(found_domain)s"
msgstr "Following CNAME chain for %(given_domain)s to %(found_domain)s"
msgid "Found configs:"
msgstr "Found configs:"
-msgid ""
-"Handoffs first mode still has handoffs remaining. Aborting current "
-"replication pass."
-msgstr ""
-"Hand-offs first mode still has hand-offs remaining. Aborting current "
-"replication pass."
-
-msgid ""
-"Handoffs only mode found no handoffs remaining. You should disable "
-"handoffs_only once all nodes are reporting no handoffs remaining."
-msgstr ""
-"Hand-offs only mode found no hand-offs remaining. You should disable "
-"handoffs_only once all nodes are reporting no hand-offs remaining."
-
-msgid ""
-"Handoffs only mode still has handoffs remaining. Next pass will continue to "
-"revert handoffs."
-msgstr ""
-"Hand-offs only mode still has hand-offs remaining. Next pass will continue "
-"to revert hand-offs."
-
msgid "Host unreachable"
msgstr "Host unreachable"
@@ -674,14 +260,6 @@ msgid "Invalid pending entry %(file)s: %(entry)s"
msgstr "Invalid pending entry %(file)s: %(entry)s"
#, python-format
-msgid "Invalid response %(resp)s from %(full_path)s"
-msgstr "Invalid response %(resp)s from %(full_path)s"
-
-#, python-format
-msgid "Invalid response %(resp)s from %(ip)s"
-msgstr "Invalid response %(resp)s from %(ip)s"
-
-#, python-format
msgid ""
"Invalid scheme %r in X-Container-Sync-To, must be \"//\", \"http\", or "
"\"https\"."
@@ -693,17 +271,6 @@ msgid "Invalid swift_bytes"
msgstr "Invalid swift_bytes"
#, python-format
-msgid "Killing long-running rsync: %s"
-msgstr "Killing long-running rsync: %s"
-
-#, python-format
-msgid "Loading JSON from %(auditor_status)s failed (%(err)s)"
-msgstr "Loading JSON from %(auditor_status)s failed (%(err)s)"
-
-msgid "Lockup detected.. killing live coros."
-msgstr "Lockup detected.. killing live coros."
-
-#, python-format
msgid "Mapped %(given_domain)s to %(found_domain)s"
msgstr "Mapped %(given_domain)s to %(found_domain)s"
@@ -737,173 +304,13 @@ msgstr "No policy with index %s"
msgid "No realm key for %r"
msgstr "No realm key for %r"
-#, python-format
-msgid "No space left on device for %(file)s (%(err)s)"
-msgstr "No space left on device for %(file)s (%(err)s)"
-
-#, python-format
-msgid "Node error limited %(ip)s:%(port)s (%(device)s)"
-msgstr "Node error limited %(ip)s:%(port)s (%(device)s)"
-
-#, python-format
-msgid "Not enough object servers ack'ed (got %d)"
-msgstr "Not enough object servers ack'ed (got %d)"
-
-#, python-format
-msgid ""
-"Not found %(sync_from)r => %(sync_to)r - object "
-"%(obj_name)r"
-msgstr ""
-"Not found %(sync_from)r => %(sync_to)r - object "
-"%(obj_name)r"
-
-#, python-format
-msgid "Nothing reconstructed for %s seconds."
-msgstr "Nothing reconstructed for %s seconds."
-
-#, python-format
-msgid "Nothing replicated for %s seconds."
-msgstr "Nothing replicated for %s seconds."
-
-msgid "Object"
-msgstr "Object"
-
-msgid "Object PUT"
-msgstr "Object PUT"
-
-#, python-format
-msgid ""
-"Object PUT exceptions after last send, %(conns)s/%(nodes)s required "
-"connections"
-msgstr ""
-"Object PUT exceptions after last send, %(conns)s/%(nodes)s required "
-"connections"
-
-#, python-format
-msgid ""
-"Object PUT exceptions during send, %(conns)s/%(nodes)s required connections"
-msgstr ""
-"Object PUT exceptions during send, %(conns)s/%(nodes)s required connections"
-
-#, python-format
-msgid "Object PUT returning 202 for 409: %(req_timestamp)s <= %(timestamps)r"
-msgstr "Object PUT returning 202 for 409: %(req_timestamp)s <= %(timestamps)r"
-
-#, python-format
-msgid "Object PUT returning 412, %(statuses)r"
-msgstr "Object PUT returning 412, %(statuses)r"
-
-#, python-format
-msgid "Object PUT returning 503, %(conns)s/%(nodes)s required connections"
-msgstr "Object PUT returning 503, %(conns)s/%(nodes)s required connections"
-
-#, python-format
-msgid ""
-"Object audit (%(type)s) \"%(mode)s\" mode completed: %(elapsed).02fs. Total "
-"quarantined: %(quars)d, Total errors: %(errors)d, Total files/sec: "
-"%(frate).2f, Total bytes/sec: %(brate).2f, Auditing time: %(audit).2f, Rate: "
-"%(audit_rate).2f"
-msgstr ""
-"Object audit (%(type)s) \"%(mode)s\" mode completed: %(elapsed).02fs. Total "
-"quarantined: %(quars)d, Total errors: %(errors)d, Total files/sec: "
-"%(frate).2f, Total bytes/sec: %(brate).2f, Auditing time: %(audit).2f, Rate: "
-"%(audit_rate).2f"
-
-#, python-format
-msgid ""
-"Object audit (%(type)s). Since %(start_time)s: Locally: %(passes)d passed, "
-"%(quars)d quarantined, %(errors)d errors, files/sec: %(frate).2f, bytes/sec: "
-"%(brate).2f, Total time: %(total).2f, Auditing time: %(audit).2f, Rate: "
-"%(audit_rate).2f"
-msgstr ""
-"Object audit (%(type)s). Since %(start_time)s: Locally: %(passes)d passed, "
-"%(quars)d quarantined, %(errors)d errors, files/sec: %(frate).2f, bytes/sec: "
-"%(brate).2f, Total time: %(total).2f, Auditing time: %(audit).2f, Rate: "
-"%(audit_rate).2f"
-
-#, python-format
-msgid "Object audit stats: %s"
-msgstr "Object audit stats: %s"
-
-#, python-format
-msgid "Object reconstruction complete (once). (%.02f minutes)"
-msgstr "Object reconstruction complete (once). (%.02f minutes)"
-
-#, python-format
-msgid "Object reconstruction complete. (%.02f minutes)"
-msgstr "Object reconstruction complete. (%.02f minutes)"
-
-#, python-format
-msgid "Object replication complete (once). (%.02f minutes)"
-msgstr "Object replication complete (once). (%.02f minutes)"
-
-#, python-format
-msgid "Object replication complete. (%.02f minutes)"
-msgstr "Object replication complete. (%.02f minutes)"
-
-#, python-format
-msgid "Object servers returned %s mismatched etags"
-msgstr "Object servers returned %s mismatched etags"
-
-#, python-format
-msgid "Object update sweep completed: %.02fs"
-msgstr "Object update sweep completed: %.02fs"
-
msgid "Params, queries, and fragments not allowed in X-Container-Sync-To"
msgstr "Params, queries, and fragments not allowed in X-Container-Sync-To"
-#, python-format
-msgid "Partition times: max %(max).4fs, min %(min).4fs, med %(med).4fs"
-msgstr "Partition times: max %(max).4fs, min %(min).4fs, med %(med).4fs"
-
-#, python-format
-msgid ""
-"Pass beginning for task account %(account)s; %(container_count)s possible "
-"containers; %(obj_count)s possible objects"
-msgstr ""
-"Pass beginning for task account %(account)s; %(container_count)s possible "
-"containers; %(obj_count)s possible objects"
-
-#, python-format
-msgid "Pass completed in %(time)ds; %(objects)d objects expired"
-msgstr "Pass completed in %(time)ds; %(objects)d objects expired"
-
-#, python-format
-msgid "Pass so far %(time)ds; %(objects)d objects expired"
-msgstr "Pass so far %(time)ds; %(objects)d objects expired"
-
msgid "Path required in X-Container-Sync-To"
msgstr "Path required in X-Container-Sync-To"
#, python-format
-msgid "Pipeline is \"%s\""
-msgstr "Pipeline is \"%s\""
-
-#, python-format
-msgid "Pipeline was modified. New pipeline is \"%s\"."
-msgstr "Pipeline was modified. New pipeline is \"%s\"."
-
-#, python-format
-msgid "Problem checking EC fragment %(datadir)s: %(err)s"
-msgstr "Problem checking EC fragment %(datadir)s: %(err)s"
-
-#, python-format
-msgid "Problem cleaning up %(datadir)s (%(err)s)"
-msgstr "Problem cleaning up %(datadir)s (%(err)s)"
-
-#, python-format
-msgid "Problem cleaning up %s"
-msgstr "Problem cleaning up %s"
-
-#, python-format
-msgid "Problem making data file durable %(file)s (%(err)s)"
-msgstr "Problem making data file durable %(file)s (%(err)s)"
-
-#, python-format
-msgid "Problem with fragment response: %s"
-msgstr "Problem with fragment response: %s"
-
-#, python-format
msgid "Profiling Error: %s"
msgstr "Profiling Error: %s"
@@ -912,17 +319,6 @@ msgid "Quarantined %(db_dir)s to %(quar_path)s due to %(reason)s"
msgstr "Quarantined %(db_dir)s to %(quar_path)s due to %(reason)s"
#, python-format
-msgid "Quarantined %(hsh_path)s to %(quar_path)s because it is not a directory"
-msgstr ""
-"Quarantined %(hsh_path)s to %(quar_path)s because it is not a directory"
-
-#, python-format
-msgid ""
-"Quarantined %(object_path)s to %(quar_path)s because it is not a directory"
-msgstr ""
-"Quarantined %(object_path)s to %(quar_path)s because it is not a directory"
-
-#, python-format
msgid "Quarantining DB %s"
msgstr "Quarantining DB %s"
@@ -936,14 +332,6 @@ msgid "Removed %(remove)d dbs"
msgstr "Removed %(remove)d dbs"
#, python-format
-msgid "Removing %s objects"
-msgstr "Removing %s objects"
-
-#, python-format
-msgid "Removing partition: %s"
-msgstr "Removing partition: %s"
-
-#, python-format
msgid "Removing pid file %(pid_file)s with wrong pid %(pid)d"
msgstr "Removing pid file %(pid_file)s with wrong pid %(pid)d"
@@ -963,56 +351,24 @@ msgid "Returning 497 because of blacklisting: %s"
msgstr "Returning 497 because of blacklisting: %s"
#, python-format
-msgid ""
-"Returning 498 for %(meth)s to %(acc)s/%(cont)s/%(obj)s . Ratelimit (Max "
-"Sleep) %(e)s"
-msgstr ""
-"Returning 498 for %(meth)s to %(acc)s/%(cont)s/%(obj)s . Ratelimit (Max "
-"Sleep) %(e)s"
-
-msgid "Ring change detected. Aborting current reconstruction pass."
-msgstr "Ring change detected. Aborting current reconstruction pass."
-
-msgid "Ring change detected. Aborting current replication pass."
-msgstr "Ring change detected. Aborting current replication pass."
+msgid "Returning 498 for %(meth)s to %(path)s. Ratelimit (Max Sleep) %(e)s"
+msgstr "Returning 498 for %(meth)s to %(path)s. Ratelimit (Max Sleep) %(e)s"
#, python-format
msgid "Running %s once"
msgstr "Running %s once"
-msgid "Running object reconstructor in script mode."
-msgstr "Running object reconstructor in script mode."
-
-msgid "Running object replicator in script mode."
-msgstr "Running object replicator in script mode."
-
#, python-format
msgid "Signal %(server)s pid: %(pid)s signal: %(signal)s"
msgstr "Signal %(server)s pid: %(pid)s signal: %(signal)s"
#, python-format
msgid ""
-"Since %(time)s: %(sync)s synced [%(delete)s deletes, %(put)s puts], %(skip)s "
-"skipped, %(fail)s failed"
-msgstr ""
-"Since %(time)s: %(sync)s synced [%(delete)s deletes, %(put)s puts], %(skip)s "
-"skipped, %(fail)s failed"
-
-#, python-format
-msgid ""
-"Since %(time)s: Account audits: %(passed)s passed audit,%(failed)s failed "
-"audit"
+"Since %(time)s: %(server_type)s audits: %(pass)s passed audit, %(fail)s "
+"failed audit"
msgstr ""
-"Since %(time)s: Account audits: %(passed)s passed audit,%(failed)s failed "
-"audit"
-
-#, python-format
-msgid ""
-"Since %(time)s: Container audits: %(pass)s passed audit, %(fail)s failed "
-"audit"
-msgstr ""
-"Since %(time)s: Container audits: %(pass)s passed audit, %(fail)s failed "
-"audit"
+"Since %(time)s: %(server_type)s audits: %(pass)s passed audit, %(fail)s "
+"failed audit"
#, python-format
msgid "Skipping %(datadir)s because %(err)s"
@@ -1026,66 +382,9 @@ msgstr "Skipping %s as it is not mounted"
msgid "Starting %s"
msgstr "Starting %s"
-msgid "Starting object reconstruction pass."
-msgstr "Starting object reconstruction pass."
-
-msgid "Starting object reconstructor in daemon mode."
-msgstr "Starting object reconstructor in daemon mode."
-
-msgid "Starting object replication pass."
-msgstr "Starting object replication pass."
-
-msgid "Starting object replicator in daemon mode."
-msgstr "Starting object replicator in daemon mode."
-
-#, python-format
-msgid "Successful rsync of %(src)s at %(dst)s (%(time).03f)"
-msgstr "Successful rsync of %(src)s at %(dst)s (%(time).03f)"
-
msgid "The file type are forbidden to access!"
msgstr "The file type are forbidden to access!"
-#, python-format
-msgid ""
-"The total %(key)s for the container (%(total)s) does not match the sum of "
-"%(key)s across policies (%(sum)s)"
-msgstr ""
-"The total %(key)s for the container (%(total)s) does not match the sum of "
-"%(key)s across policies (%(sum)s)"
-
-#, python-format
-msgid "Timeout fetching fragments for %r"
-msgstr "Timeout fetching fragments for %r"
-
-#, python-format
-msgid "Trying to %(method)s %(path)s"
-msgstr "Trying to %(method)s %(path)s"
-
-#, python-format
-msgid "Trying to GET %(full_path)s"
-msgstr "Trying to GET %(full_path)s"
-
-#, python-format
-msgid "Trying to get %(status_type)s status of PUT to %(path)s"
-msgstr "Trying to get %(status_type)s status of PUT to %(path)s"
-
-msgid "Trying to read during GET"
-msgstr "Trying to read during GET"
-
-msgid "Trying to read object during GET (retrying)"
-msgstr "Trying to read object during GET (retrying)"
-
-msgid "Trying to send to client"
-msgstr "Trying to send to client"
-
-#, python-format
-msgid "Trying to sync suffixes with %s"
-msgstr "Trying to sync suffixes with %s"
-
-#, python-format
-msgid "Trying to write to %s"
-msgstr "Trying to write to %s"
-
msgid "UNCAUGHT EXCEPTION"
msgstr "UNCAUGHT EXCEPTION"
@@ -1094,10 +393,6 @@ msgid "Unable to find %(section)s config section in %(conf)s"
msgstr "Unable to find %(section)s config section in %(conf)s"
#, python-format
-msgid "Unable to load internal client from config: %(conf)r (%(error)s)"
-msgstr "Unable to load internal client from config: %(conf)r (%(error)s)"
-
-#, python-format
msgid "Unable to locate %s in libc. Leaving as a no-op."
msgstr "Unable to locate %s in libc. Leaving as a no-op."
@@ -1122,29 +417,6 @@ msgstr "Unable to perform fsync() on directory %(dir)s: %(err)s"
msgid "Unable to read config from %s"
msgstr "Unable to read config from %s"
-#, python-format
-msgid "Unauth %(sync_from)r => %(sync_to)r"
-msgstr "Unauth %(sync_from)r => %(sync_to)r"
-
-#, python-format
-msgid ""
-"Unexpected fragment data type (not quarantined) %(datadir)s: %(type)s at "
-"offset 0x%(offset)x"
-msgstr ""
-"Unexpected fragment data type (not quarantined) %(datadir)s: %(type)s at "
-"offset 0x%(offset)x"
-
-msgid "Unhandled exception"
-msgstr "Unhandled exception"
-
-#, python-format
-msgid "Update report failed for %(container)s %(dbfile)s"
-msgstr "Update report failed for %(container)s %(dbfile)s"
-
-#, python-format
-msgid "Update report sent for %(container)s %(dbfile)s"
-msgstr "Update report sent for %(container)s %(dbfile)s"
-
msgid ""
"WARNING: SSL should only be enabled for testing purposes. Use external SSL "
"termination for a production deployment."
@@ -1194,17 +466,9 @@ msgid "Warning: Cannot ratelimit without a memcached client"
msgstr "Warning: Cannot ratelimit without a memcached client"
#, python-format
-msgid "autocreate account %r"
-msgstr "autocreate account %r"
-
-#, python-format
msgid "method %s is not allowed."
msgstr "method %s is not allowed."
-#, python-format
-msgid "next_part_power set in policy '%s'. Skipping"
-msgstr "next_part_power set in policy '%s'. Skipping"
-
msgid "no log file found"
msgstr "no log file found"
@@ -1217,11 +481,3 @@ msgstr "plotting results failed due to %s"
msgid "python-matplotlib not installed."
msgstr "python-matplotlib not installed."
-
-#, python-format
-msgid ""
-"sorting_method is set to '%(method)s', not 'affinity'; %(label)s "
-"read_affinity setting will have no effect."
-msgstr ""
-"sorting_method is set to '%(method)s', not 'affinity'; %(label)s "
-"read_affinity setting will have no effect."
diff --git a/swift/locale/es/LC_MESSAGES/swift.po b/swift/locale/es/LC_MESSAGES/swift.po
index ff606a2ef..2d8fc7134 100644
--- a/swift/locale/es/LC_MESSAGES/swift.po
+++ b/swift/locale/es/LC_MESSAGES/swift.po
@@ -9,11 +9,11 @@ msgid ""
msgstr ""
"Project-Id-Version: swift VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2020-10-07 00:36+0000\n"
+"POT-Creation-Date: 2022-05-27 18:57+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
-"PO-Revision-Date: 2016-07-18 10:57+0000\n"
+"PO-Revision-Date: 2016-07-18 10:20+0000\n"
"Last-Translator: Pablo Caruana <pcaruana@redhat.com>\n"
"Language: es\n"
"Plural-Forms: nplurals=2; plural=(n != 1);\n"
@@ -29,33 +29,6 @@ msgstr ""
"salida del usuario"
#, python-format
-msgid " - %s"
-msgstr " - %s"
-
-#, python-format
-msgid " - parallel, %s"
-msgstr " - paralelo, %s"
-
-#, python-format
-msgid ""
-"%(checked)d suffixes checked - %(hashed).2f%% hashed, %(synced).2f%% synced"
-msgstr ""
-"%(checked)d sufijos comprobados - %(hashed).2f%% con hash, %(synced).2f%% "
-"sincronizados"
-
-#, python-format
-msgid "%(msg)s %(ip)s:%(port)s/%(device)s"
-msgstr "%(msg)s %(ip)s:%(port)s/%(device)s"
-
-#, python-format
-msgid ""
-"%(replicated)d/%(total)d (%(percentage).2f%%) partitions replicated in "
-"%(time).2fs (%(rate).2f/sec, %(remaining)s remaining)"
-msgstr ""
-"%(replicated)d/%(total)d (%(percentage).2f%%) particiones replicadas en "
-"%(time).2fs (%(rate).2f/segundo, %(remaining)s restantes)"
-
-#, python-format
msgid "%(server)s #%(number)d not running (%(conf)s)"
msgstr "%(server)s #%(number)d not running (%(conf)s)"
@@ -76,10 +49,6 @@ msgid "%(success)s successes, %(failure)s failures"
msgstr "%(success)s éxitos, %(failure)s fallos"
#, python-format
-msgid "%(type)s returning 503 for %(statuses)s"
-msgstr "%(type)s devuelve 503 para %(statuses)s"
-
-#, python-format
msgid "%(type)s: %(value)s"
msgstr "%(type)s: %(value)s"
@@ -92,28 +61,9 @@ msgid "%s does not exist"
msgstr "%s no existe"
#, python-format
-msgid "%s is not mounted"
-msgstr "%s no está montado"
-
-#, python-format
-msgid "%s responded as unmounted"
-msgstr "%s ha respondido como desmontado"
-
-#, python-format
msgid "%s: Connection reset by peer"
msgstr "%s: Restablecimiento de conexión por igual"
-msgid "Account"
-msgstr "Cuenta"
-
-#, python-format
-msgid "Account audit \"once\" mode completed: %.02fs"
-msgstr "Auditoría de cuenta en modalidad de \"una vez\" finalizada: %.02fs"
-
-#, python-format
-msgid "Account audit pass completed: %.02fs"
-msgstr "Paso de auditoría de cuenta finalizado: %.02fs"
-
#, python-format
msgid ""
"Attempted to replicate %(count)d dbs in %(time).5f seconds (%(rate).5f/s)"
@@ -129,43 +79,6 @@ msgstr "Ha fallado la auditoría para %(path)s: %(err)s"
msgid "Bad key for %(name)r: %(err)s"
msgstr "Clave errónea para %(name)r: %(err)s"
-#, python-format
-msgid "Bad rsync return code: %(ret)d <- %(args)s"
-msgstr "Código de retorno de resincronización erróneo: %(ret)d <- %(args)s"
-
-msgid "Begin account audit \"once\" mode"
-msgstr "Comenzar auditoría de cuenta en modalidad de \"una vez\""
-
-msgid "Begin account audit pass."
-msgstr "Comenzar a pasar la auditoría de cuenta."
-
-msgid "Begin container audit \"once\" mode"
-msgstr "Comenzar auditoría de contenedor en modalidad de \"una vez\""
-
-msgid "Begin container audit pass."
-msgstr "Comenzar a pasar la auditoría de contenedor."
-
-msgid "Begin container sync \"once\" mode"
-msgstr "Comenzar sincronización de contenedor en modalidad de \"una vez\""
-
-msgid "Begin container update single threaded sweep"
-msgstr "Comenzar el barrido de hebra única de actualización del contenedor"
-
-msgid "Begin container update sweep"
-msgstr "Comenzar el barrido de actualización del contenedor"
-
-#, python-format
-msgid "Begin object audit \"%(mode)s\" mode (%(audi_type)s%(description)s)"
-msgstr ""
-"Comenzar auditoría de objetos en modalidad \"%(mode)s\" mode (%(audi_type)s"
-"%(description)s)"
-
-msgid "Begin object update single threaded sweep"
-msgstr "Comenzar el barrido de hebra única de actualización del objeto"
-
-msgid "Begin object update sweep"
-msgstr "Comenzar el barrido de actualización del objeto"
-
msgid "Beginning replication run"
msgstr "Iniciando la ejecución de la replicación"
@@ -180,96 +93,12 @@ msgstr "No se puede acceder al archivo %s."
msgid "Can not load profile data from %s."
msgstr "No se pueden cargar los datos de perfil desde %s."
-#, python-format
-msgid "Cannot read %(auditor_status)s (%(err)s)"
-msgstr "No se puede leer %(auditor_status)s (%(err)s)"
-
-#, python-format
-msgid "Cannot write %(auditor_status)s (%(err)s)"
-msgstr "No se puede escribir %(auditor_status)s (%(err)s)"
-
-#, python-format
-msgid "Client did not read from proxy within %ss"
-msgstr "El cliente pudo realizar la lectura desde el proxy en %ss"
-
-msgid "Client disconnected without sending enough data"
-msgstr "El cliente se ha desconectado sin enviar suficientes datos"
-
-msgid "Client disconnected without sending last chunk"
-msgstr "El cliente se ha desconectado sin enviar el último fragmento"
-
-#, python-format
-msgid ""
-"Client path %(client)s does not match path stored in object metadata %(meta)s"
-msgstr ""
-"La vía de acceso de cliente %(client)s no coincide con la vía de acceso "
-"almacenada en los metadatos de objeto %(meta)s"
-
-msgid ""
-"Configuration option internal_client_conf_path not defined. Using default "
-"configuration, See internal-client.conf-sample for options"
-msgstr ""
-"La opción de configuración internal_client_conf_path no está definida. Se "
-"utilizará la configuración predeterminada, Consulte internal-client.conf-"
-"sample para ver las opciones"
-
msgid "Connection refused"
msgstr "Conexión rechazada"
msgid "Connection timeout"
msgstr "Tiempo de espera de conexión agotado"
-msgid "Container"
-msgstr "Contenedor"
-
-#, python-format
-msgid "Container audit \"once\" mode completed: %.02fs"
-msgstr "Auditoría de contenedor en modalidad de \"una vez\" finalizada: %.02fs"
-
-#, python-format
-msgid "Container audit pass completed: %.02fs"
-msgstr "Paso de auditoría de contenedor finalizado: %.02fs"
-
-#, python-format
-msgid "Container sync \"once\" mode completed: %.02fs"
-msgstr ""
-"Sincronización de contenedor en modalidad de \"una vez\" finalizada: %.02fs"
-
-#, python-format
-msgid ""
-"Container sync report: %(container)s, time window start: %(start)s, time "
-"window end: %(end)s, puts: %(puts)s, posts: %(posts)s, deletes: %(deletes)s, "
-"bytes: %(bytes)s, sync_point1: %(point1)s, sync_point2: %(point2)s, "
-"total_rows: %(total)s"
-msgstr ""
-"Informe de sincronización de contenedores: %(container)s, inicio de la "
-"ventana de tiempo: %(start)s, extremo ventana de tiempo: %(end)s, "
-"colocaciones: %(puts)s, publicaciones:: %(posts)s, eliminados: %(deletes)s, "
-"bytes: %(bytes)s, sync_point1: %(point1)s, sync_point2: %(point2)s, "
-"total_filas: %(total)s"
-
-#, python-format
-msgid ""
-"Container update single threaded sweep completed: %(elapsed).02fs, "
-"%(success)s successes, %(fail)s failures, %(no_change)s with no changes"
-msgstr ""
-"Barrido de hebra única de actualización del contenedor finalizado: "
-"%(elapsed).02fs, %(success)s con éxito, %(fail)s fallos, %(no_change)s sin "
-"cambios"
-
-#, python-format
-msgid "Container update sweep completed: %.02fs"
-msgstr "Barrido de actualización del contenedor finalizado: %.02fs"
-
-#, python-format
-msgid ""
-"Container update sweep of %(path)s completed: %(elapsed).02fs, %(success)s "
-"successes, %(fail)s failures, %(no_change)s with no changes"
-msgstr ""
-"Barrido de actualización del contenedor de %(path)s finalizado: "
-"%(elapsed).02fs, %(success)s con éxito, %(fail)s fallos, %(no_change)s sin "
-"cambios"
-
#, python-format
msgid ""
"Could not bind to %(addr)s:%(port)s after trying for %(timeout)s seconds"
@@ -288,166 +117,16 @@ msgstr "Error de descarga de datos: %s"
msgid "Did not get a keys dict"
msgstr "No tuvimos un diccionario de claves"
-#, python-format
-msgid "Directory %(directory)r does not map to a valid policy (%(error)s)"
-msgstr ""
-"El directorio %(directory)r no está correlacionado con una política válida "
-"(%(error)s)"
-
-#, python-format
-msgid "ERROR %(db_file)s: %(validate_sync_to_err)s"
-msgstr "ERROR %(db_file)s: %(validate_sync_to_err)s"
-
-#, python-format
-msgid "ERROR %(status)d %(body)s From %(type)s Server"
-msgstr "ERROR %(status)d %(body)s Desde el servidor %(type)s"
-
-#, python-format
-msgid "ERROR %(status)d %(body)s From Object Server re: %(path)s"
-msgstr "ERROR %(status)d %(body)s Desde el servidor de objeto re: %(path)s"
-
-#, python-format
-msgid "ERROR %(status)d Expect: 100-continue From Object Server"
-msgstr "ERROR %(status)d Esperado: 100-continuo Desde el servidor de objeto"
-
-#, python-format
-msgid "ERROR %(status)d Trying to %(method)s %(path)s From %(type)s Server"
-msgstr ""
-"ERROR %(status)d Intentando %(method)s %(path)s Desde %(type)s de "
-"Servidor"
-
-#, python-format
-msgid ""
-"ERROR Account update failed with %(ip)s:%(port)s/%(device)s (will retry "
-"later): Response %(status)s %(reason)s"
-msgstr ""
-"ERROR La actualización de la cuenta ha fallado con %(ip)s:%(port)s/"
-"%(device)s (se volverá a intentar más tarde): Respuesta %(status)s %(reason)s"
-
-#, python-format
-msgid ""
-"ERROR Account update failed: different numbers of hosts and devices in "
-"request: \"%(hosts)s\" vs \"%(devices)s\""
-msgstr ""
-"ERROR La actualización de la cuenta ha fallado: hay números distintos de "
-"hosts y dispositivos en la solicitud: %(hosts)s\" frente a %(devices)s\""
-
-#, python-format
-msgid "ERROR Client read timeout (%ss)"
-msgstr "ERROR Tiempo de espera de lectura de cliente agotado (%ss)"
-
-#, python-format
-msgid ""
-"ERROR Container update failed (saving for async update later): %(status)d "
-"response from %(ip)s:%(port)s/%(dev)s"
-msgstr ""
-"ERROR La actualización del contenedor ha fallado (guardando para una "
-"actualización asíncrona posterior): %(status)d respuesta desde %(ip)s:"
-"%(port)s/%(dev)s"
-
-#, python-format
-msgid ""
-"ERROR Container update failed: different numbers of hosts and devices in "
-"request: \"%(hosts)s\" vs \"%(devices)s\""
-msgstr ""
-"ERROR La actualización de la cuenta ha fallado: hay números distintos de "
-"hosts y dispositivos en la solicitud: %(hosts)s\" frente a %(devices)s\""
-
-#, python-format
-msgid "ERROR Could not get account info %s"
-msgstr "ERROR No se ha podido obtener la información de cuenta %s"
-
-#, python-format
-msgid "ERROR Could not get container info %s"
-msgstr "ERROR No se ha podido obtener la información de contenedor %s"
-
-#, python-format
-msgid "ERROR DiskFile %(data_file)s close failure: %(exc)s : %(stack)s"
-msgstr ""
-"ERROR Fallo al cerrar el archivo de disco %(data_file)s: %(exc)s : %(stack)s"
-
-msgid "ERROR Exception causing client disconnect"
-msgstr "ERROR Excepción que provoca la desconexión del cliente"
-
-#, python-format
-msgid "ERROR Exception transferring data to object servers %s"
-msgstr "ERROR Excepción al transferir datos a los servidores de objetos %s"
-
msgid "ERROR Failed to get my own IPs?"
msgstr "ERROR ¿No puedo obtener mis propias IP?"
-msgid "ERROR Insufficient Storage"
-msgstr "ERROR No hay suficiente almacenamiento"
-
-#, python-format
-msgid "ERROR Object %(obj)s failed audit and was quarantined: %(err)s"
-msgstr ""
-"ERROR La auditoría del objeto %(obj)s ha fallado y se ha puesto en "
-"cuarentena: %(err)s"
-
-#, python-format
-msgid "ERROR Pickle problem, quarantining %s"
-msgstr "ERROR Problema de desorden, poniendo %s en cuarentena"
-
#, python-format
msgid "ERROR Remote drive not mounted %s"
msgstr "ERROR Unidad remota no montada %s"
-#, python-format
-msgid "ERROR Syncing %(db_file)s %(row)s"
-msgstr "ERROR al sincronizar %(db_file)s %(row)s"
-
-#, python-format
-msgid "ERROR Syncing %s"
-msgstr "ERROR al sincronizar %s"
-
-#, python-format
-msgid "ERROR Trying to audit %s"
-msgstr "ERROR al intentar la auditoría de %s"
-
-msgid "ERROR Unhandled exception in request"
-msgstr "ERROR Excepción no controlada en la solicitud"
-
-#, python-format
-msgid "ERROR __call__ error with %(method)s %(path)s "
-msgstr "ERROR Error de __call__ con %(method)s %(path)s "
-
-#, python-format
-msgid ""
-"ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry "
-"later)"
-msgstr ""
-"ERROR La actualización de la cuenta ha fallado con %(ip)s:%(port)s/"
-"%(device)s (se volverá a intentar más tarde)"
-
-#, python-format
-msgid ""
-"ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry "
-"later): "
-msgstr ""
-"ERROR La actualización de la cuenta ha fallado con %(ip)s:%(port)s/"
-"%(device)s (se volverá a intentar más tarde): "
-
-#, python-format
-msgid "ERROR async pending file with unexpected name %s"
-msgstr ""
-"ERROR Archivo pendiente de sincronización asíncrona con nombre inesperado %s"
-
msgid "ERROR auditing"
msgstr "ERROR de auditoría"
-#, python-format
-msgid "ERROR auditing: %s"
-msgstr "ERROR en la auditoría: %s"
-
-#, python-format
-msgid ""
-"ERROR container update failed with %(ip)s:%(port)s/%(dev)s (saving for async "
-"update later)"
-msgstr ""
-"ERROR La actualización del contenedor ha fallado con %(ip)s:%(port)s/%(dev)s "
-"(guardando para una actualización asíncrona posterior)"
-
msgid "ERROR get_keys() missing callback"
msgstr "ERROR get_keys() No se proporciona devolución de llamada "
@@ -471,39 +150,6 @@ msgid "ERROR trying to replicate"
msgstr "ERROR al intentar la replicación"
#, python-format
-msgid "ERROR while trying to clean up %s"
-msgstr "ERROR al intentar limpiar %s"
-
-#, python-format
-msgid "ERROR with %(type)s server %(ip)s:%(port)s/%(device)s re: %(info)s"
-msgstr "ERROR con el servidor %(type)s %(ip)s:%(port)s/%(device)s re: %(info)s"
-
-#, python-format
-msgid "ERROR with loading suppressions from %s: "
-msgstr "ERROR con las supresiones de carga desde %s: "
-
-#, python-format
-msgid "ERROR with remote server %(ip)s:%(port)s/%(device)s"
-msgstr "ERROR con el servidor remoto %(ip)s:%(port)s/%(device)s"
-
-#, python-format
-msgid "ERROR: Failed to get paths to drive partitions: %s"
-msgstr ""
-"ERROR: No se han podido obtener las vías de acceso a las particiones de "
-"unidad: %s"
-
-#, python-format
-msgid "ERROR: Unable to access %(path)s: %(error)s"
-msgstr "ERROR: no se ha podido acceder a %(path)s: %(error)s"
-
-#, python-format
-msgid "ERROR: Unable to run auditing: %s"
-msgstr "ERROR: no se ha podido ejecutar la auditoría: %s"
-
-msgid "Error hashing suffix"
-msgstr "Error en el hash del sufijo"
-
-#, python-format
msgid "Error in %(conf)r with mtime_check_interval: %(error)s"
msgstr "Error en %(conf)r con mtime_check_interval: %(error)s"
@@ -529,21 +175,6 @@ msgstr "Error al leer swift.conf"
msgid "Error retrieving recon data"
msgstr "Error al recuperar los datos de recon"
-msgid "Error syncing handoff partition"
-msgstr "Error al sincronizar la partición de transferencia"
-
-msgid "Error syncing partition"
-msgstr "Error al sincronizar la partición"
-
-#, python-format
-msgid "Error syncing with node: %s"
-msgstr "Error en la sincronización con el nodo: %s"
-
-#, python-format
-msgid "Error trying to rebuild %(path)s policy#%(policy)d frag#%(frag_index)s"
-msgstr ""
-"Error al intentar reconstruir %(path)s policy#%(policy)d frag#%(frag_index)s"
-
msgid "Error: An error occurred"
msgstr "Error: se ha producido un error"
@@ -555,27 +186,12 @@ msgid "Error: unable to locate %s"
msgstr "Error: no se ha podido localizar %s"
#, python-format
-msgid "Exception with %(ip)s:%(port)s/%(device)s"
-msgstr "Excepción con %(ip)s:%(port)s/%(device)s"
-
-#, python-format
-msgid "Expect: 100-continue on %s"
-msgstr "Esperado: 100-continuo en %s"
-
-#, python-format
msgid "Following CNAME chain for %(given_domain)s to %(found_domain)s"
msgstr "Siguiente cadena CNAME de %(given_domain)s a %(found_domain)s"
msgid "Found configs:"
msgstr "Configuraciones encontradas:"
-msgid ""
-"Handoffs first mode still has handoffs remaining. Aborting current "
-"replication pass."
-msgstr ""
-"El modo de transferencias primero aún tiene transferencias restantes. "
-"Abortando el pase de réplica actual."
-
msgid "Host unreachable"
msgstr "Host no alcanzable"
@@ -592,14 +208,6 @@ msgid "Invalid pending entry %(file)s: %(entry)s"
msgstr "Entrada pendiente no válida %(file)s: %(entry)s"
#, python-format
-msgid "Invalid response %(resp)s from %(full_path)s"
-msgstr "Respuesta no válida %(resp)s de %(full_path)s"
-
-#, python-format
-msgid "Invalid response %(resp)s from %(ip)s"
-msgstr "Respuesta no válida %(resp)s desde %(ip)s"
-
-#, python-format
msgid ""
"Invalid scheme %r in X-Container-Sync-To, must be \"//\", \"http\", or "
"\"https\"."
@@ -608,17 +216,6 @@ msgstr ""
"\"https\"."
#, python-format
-msgid "Killing long-running rsync: %s"
-msgstr "Interrumpiendo resincronización (rsync) de larga duración: %s"
-
-#, python-format
-msgid "Loading JSON from %(auditor_status)s failed (%(err)s)"
-msgstr "Error al cargar JSON desde %(auditor_status)s falla (%(err)s)"
-
-msgid "Lockup detected.. killing live coros."
-msgstr "Bloqueo detectado. Interrumpiendo coros activos."
-
-#, python-format
msgid "Mapped %(given_domain)s to %(found_domain)s"
msgstr "Se ha correlacionado %(given_domain)s con %(found_domain)s"
@@ -646,168 +243,18 @@ msgstr "No hay ninguna política que tenga el índice %s"
msgid "No realm key for %r"
msgstr "No hay clave de dominio para %r"
-#, python-format
-msgid "No space left on device for %(file)s (%(err)s)"
-msgstr "No queda espacio libre en el dispositivo para %(file)s (%(err)s)"
-
-#, python-format
-msgid "Node error limited %(ip)s:%(port)s (%(device)s)"
-msgstr "Error de nodo limitado %(ip)s:%(port)s (%(device)s)"
-
-#, python-format
-msgid "Not enough object servers ack'ed (got %d)"
-msgstr "No hay suficientes servidores de objetos reconocidos (constan %d)"
-
-#, python-format
-msgid ""
-"Not found %(sync_from)r => %(sync_to)r - object "
-"%(obj_name)r"
-msgstr ""
-"No se ha encontrado %(sync_from)r => %(sync_to)r - "
-"objeto %(obj_name)rd"
-
-#, python-format
-msgid "Nothing reconstructed for %s seconds."
-msgstr "No se ha reconstruido nada durante %s segundos."
-
-#, python-format
-msgid "Nothing replicated for %s seconds."
-msgstr "No se ha replicado nada durante %s segundos."
-
-msgid "Object"
-msgstr "Objeto"
-
-msgid "Object PUT"
-msgstr "Objeto PUT"
-
-#, python-format
-msgid ""
-"Object PUT exceptions after last send, %(conns)s/%(nodes)s required "
-"connections"
-msgstr ""
-"excepciones de objeto PUT después de la última emisión, %(conns)s/%(nodes)s "
-"conexiones requeridas"
-
-#, python-format
-msgid ""
-"Object PUT exceptions during send, %(conns)s/%(nodes)s required connections"
-msgstr ""
-"excepciones de objeto PUT después de la última emisión, %(conns)s/%(nodes)s "
-"conexiones requeridas"
-
-#, python-format
-msgid "Object PUT returning 202 for 409: %(req_timestamp)s <= %(timestamps)r"
-msgstr ""
-"El objeto PUT devuelve 202 para 409: %(req_timestamp)s <= %(timestamps)r"
-
-#, python-format
-msgid "Object PUT returning 412, %(statuses)r"
-msgstr "El objeto PUT devuelve 412, %(statuses)r"
-
-#, python-format
-msgid "Object PUT returning 503, %(conns)s/%(nodes)s required connections"
-msgstr "Retorno de objecto PUT 503, %(conns)s/%(nodes)s conexiones requeridas"
-
-#, python-format
-msgid ""
-"Object audit (%(type)s) \"%(mode)s\" mode completed: %(elapsed).02fs. Total "
-"quarantined: %(quars)d, Total errors: %(errors)d, Total files/sec: "
-"%(frate).2f, Total bytes/sec: %(brate).2f, Auditing time: %(audit).2f, Rate: "
-"%(audit_rate).2f"
-msgstr ""
-"Auditoría de objetos (%(type)s) en modalidad \"%(mode)s\" finalizada: "
-"%(elapsed).02fs. Total en cuarentena: %(quars)d, Errores totales: "
-"%(errors)d, Archivos totales por segundo: %(frate).2f, Bytes totales por "
-"segundo: %(brate).2f, Tiempo de auditoría: %(audit).2f, Velocidad: "
-"%(audit_rate).2f"
-
-#, python-format
-msgid ""
-"Object audit (%(type)s). Since %(start_time)s: Locally: %(passes)d passed, "
-"%(quars)d quarantined, %(errors)d errors, files/sec: %(frate).2f, bytes/sec: "
-"%(brate).2f, Total time: %(total).2f, Auditing time: %(audit).2f, Rate: "
-"%(audit_rate).2f"
-msgstr ""
-"Auditoría de objetos (%(type)s). Desde %(start_time)s: Localmente: "
-"%(passes)d han pasado, %(quars)d en cuarentena, %(errors)d errores, archivos "
-"por segundo: %(frate).2f , bytes por segundo: %(brate).2f, Tiempo total: "
-"%(total).2f, Tiempo de auditoría: %(audit).2f, Velocidad: %(audit_rate).2f"
-
-#, python-format
-msgid "Object audit stats: %s"
-msgstr "Estadísticas de auditoría de objetos: %s"
-
-#, python-format
-msgid "Object reconstruction complete (once). (%.02f minutes)"
-msgstr "Reconstrucción de objeto finalizada (una vez). (%.02f minutos)"
-
-#, python-format
-msgid "Object reconstruction complete. (%.02f minutes)"
-msgstr "Reconstrucción de objeto finalizada. (%.02f minutos)"
-
-#, python-format
-msgid "Object replication complete (once). (%.02f minutes)"
-msgstr "Réplica de objeto finalizada (una vez). (%.02f minutos)"
-
-#, python-format
-msgid "Object replication complete. (%.02f minutes)"
-msgstr "Réplica de objeto finalizada. (%.02f minutos)"
-
-#, python-format
-msgid "Object servers returned %s mismatched etags"
-msgstr ""
-"Los servidores de objeto han devuelvo %s etiquetas (etags) no coincidentes"
-
-#, python-format
-msgid "Object update sweep completed: %.02fs"
-msgstr "Barrido de actualización del objeto finalizado: %.02fs"
-
msgid "Params, queries, and fragments not allowed in X-Container-Sync-To"
msgstr ""
"Parámetros, consultas y fragmentos no permitidos en X-Container-Sync-To"
-#, python-format
-msgid "Partition times: max %(max).4fs, min %(min).4fs, med %(med).4fs"
-msgstr ""
-"Tiempos de partición: máximo %(max).4fs, mínimo %(min).4fs, medio %(med).4fs"
-
-#, python-format
-msgid "Pass completed in %(time)ds; %(objects)d objects expired"
-msgstr "Paso completado en %(time)ds; %(objects)d objetos caducados"
-
-#, python-format
-msgid "Pass so far %(time)ds; %(objects)d objects expired"
-msgstr "Paso hasta ahora%(time)ds; %(objects)d objetos caducados"
-
msgid "Path required in X-Container-Sync-To"
msgstr "Vía de acceso necesaria en X-Container-Sync-To"
#, python-format
-msgid "Problem cleaning up %(datadir)s (%(err)s)"
-msgstr "Problema al limpiar %(datadir)s (%(err)s)"
-
-#, python-format
-msgid "Problem cleaning up %s"
-msgstr "Problema al limpiar %s"
-
-#, python-format
msgid "Profiling Error: %s"
msgstr "Error de perfil: %s"
#, python-format
-msgid "Quarantined %(hsh_path)s to %(quar_path)s because it is not a directory"
-msgstr ""
-"Se ha puesto en cuarentena %(hsh_path)s en %(quar_path)s debido a que no es "
-"un directorio"
-
-#, python-format
-msgid ""
-"Quarantined %(object_path)s to %(quar_path)s because it is not a directory"
-msgstr ""
-"Se ha puesto en cuarentena %(object_path)s en %(quar_path)s debido a que no "
-"es un directorio"
-
-#, python-format
msgid "Quarantining DB %s"
msgstr "Poniendo en cuarentena la base de datos %s"
@@ -822,14 +269,6 @@ msgid "Removed %(remove)d dbs"
msgstr "Se han eliminado %(remove)d bases de datos"
#, python-format
-msgid "Removing %s objects"
-msgstr "Eliminando %s objetos"
-
-#, python-format
-msgid "Removing partition: %s"
-msgstr "Eliminando partición: %s"
-
-#, python-format
msgid "Removing pid file %(pid_file)s with wrong pid %(pid)d"
msgstr ""
"Eliminando el archivo PID %(pid_file)s que tiene el PID no válido %(pid)d"
@@ -850,59 +289,14 @@ msgid "Returning 497 because of blacklisting: %s"
msgstr "Se devuelven 497 debido a las listas negras: %s"
#, python-format
-msgid ""
-"Returning 498 for %(meth)s to %(acc)s/%(cont)s/%(obj)s . Ratelimit (Max "
-"Sleep) %(e)s"
-msgstr ""
-"Se devuelven 498 de %(meth)s a %(acc)s/%(cont)s/%(obj)s. Ajuste de límite "
-"(suspensión máxima) %(e)s"
-
-msgid "Ring change detected. Aborting current reconstruction pass."
-msgstr ""
-"Cambio de anillo detectado. Abortando el pase de reconstrucción actual."
-
-msgid "Ring change detected. Aborting current replication pass."
-msgstr "Cambio de anillo detectado. Abortando el pase de réplica actual."
-
-#, python-format
msgid "Running %s once"
msgstr "Ejecutando %s una vez"
-msgid "Running object reconstructor in script mode."
-msgstr "Ejecutando reconstructor de objeto en modo script."
-
-msgid "Running object replicator in script mode."
-msgstr "Ejecutando replicador de objeto en modalidad de script."
-
#, python-format
msgid "Signal %(server)s pid: %(pid)s signal: %(signal)s"
msgstr "Señal %(server)s pid: %(pid)s Señal : %(signal)s"
#, python-format
-msgid ""
-"Since %(time)s: %(sync)s synced [%(delete)s deletes, %(put)s puts], %(skip)s "
-"skipped, %(fail)s failed"
-msgstr ""
-"Desde %(time)s: %(sync)s se han sincronizado [%(delete)s supresiones, "
-"%(put)s colocaciones], %(skip)s se han omitido, %(fail)s han fallado"
-
-#, python-format
-msgid ""
-"Since %(time)s: Account audits: %(passed)s passed audit,%(failed)s failed "
-"audit"
-msgstr ""
-"Desde %(time)s: Auditorías de cuenta: %(passed)s han pasado la auditoría,"
-"%(failed)s han fallado la auditoría"
-
-#, python-format
-msgid ""
-"Since %(time)s: Container audits: %(pass)s passed audit, %(fail)s failed "
-"audit"
-msgstr ""
-"Desde %(time)s: Auditorías de contenedor: %(pass)s han pasado la auditoría,"
-"%(fail)s han fallado la auditoría"
-
-#, python-format
msgid "Skipping %s as it is not mounted"
msgstr "Omitiendo %s, ya que no está montado"
@@ -910,60 +304,9 @@ msgstr "Omitiendo %s, ya que no está montado"
msgid "Starting %s"
msgstr "Iniciando %s"
-msgid "Starting object reconstruction pass."
-msgstr "Iniciando el paso de reconstrucción de objeto."
-
-msgid "Starting object reconstructor in daemon mode."
-msgstr "Iniciando reconstructor de objeto en modo daemon."
-
-msgid "Starting object replication pass."
-msgstr "Iniciando el paso de réplica de objeto."
-
-msgid "Starting object replicator in daemon mode."
-msgstr "Iniciando replicador de objeto en modalidad de daemon."
-
-#, python-format
-msgid "Successful rsync of %(src)s at %(dst)s (%(time).03f)"
-msgstr ""
-"Resincronización de %(src)s realizada con éxito en %(dst)s (%(time).03f)"
-
msgid "The file type are forbidden to access!"
msgstr "El acceso al tipo de archivo está prohibido."
-#, python-format
-msgid ""
-"The total %(key)s for the container (%(total)s) does not match the sum of "
-"%(key)s across policies (%(sum)s)"
-msgstr ""
-"El total de %(key)s del contenedor (%(total)s) no coincide con la suma de "
-"%(key)s en las políticas (%(sum)s)"
-
-#, python-format
-msgid "Trying to %(method)s %(path)s"
-msgstr "Intentando %(method)s %(path)s"
-
-#, python-format
-msgid "Trying to GET %(full_path)s"
-msgstr "Intentando hacer un GET de %(full_path)s"
-
-#, python-format
-msgid "Trying to get %(status_type)s status of PUT to %(path)s"
-msgstr "Intentando obtener %(status_type)s el estado de PUT a %(path)s"
-
-msgid "Trying to read during GET"
-msgstr "Intentado leer durante GET"
-
-msgid "Trying to send to client"
-msgstr "Intentando enviar al cliente"
-
-#, python-format
-msgid "Trying to sync suffixes with %s"
-msgstr "Intentando sincronizar los sufijos con %s"
-
-#, python-format
-msgid "Trying to write to %s"
-msgstr "Intentando escribir en %s"
-
msgid "UNCAUGHT EXCEPTION"
msgstr "UNCAUGHT EXCEPTION"
@@ -972,12 +315,6 @@ msgid "Unable to find %(section)s config section in %(conf)s"
msgstr "No se ha podido encontrar %(section)s de la configuración en %(conf)s"
#, python-format
-msgid "Unable to load internal client from config: %(conf)r (%(error)s)"
-msgstr ""
-"No se puede cargar el cliente interno a partir de la configuración: %(conf)r "
-"(%(error)s)"
-
-#, python-format
msgid "Unable to locate %s in libc. Leaving as a no-op."
msgstr "No se ha podido localizar %s en libc. Se dejará como no operativo."
@@ -1004,21 +341,6 @@ msgstr "No se puede realizar fsync() en el directorio %(dir)s: %(err)s"
msgid "Unable to read config from %s"
msgstr "No se ha podido leer la configuración de %s"
-#, python-format
-msgid "Unauth %(sync_from)r => %(sync_to)r"
-msgstr "%(sync_from)r => %(sync_to)r sin autorización"
-
-msgid "Unhandled exception"
-msgstr "Excepción no controlada"
-
-#, python-format
-msgid "Update report failed for %(container)s %(dbfile)s"
-msgstr "Informe de actualización fallido para %(container)s %(dbfile)s"
-
-#, python-format
-msgid "Update report sent for %(container)s %(dbfile)s"
-msgstr "Informe de actualización enviado para %(container)s %(dbfile)s"
-
msgid ""
"WARNING: SSL should only be enabled for testing purposes. Use external SSL "
"termination for a production deployment."
diff --git a/swift/locale/fr/LC_MESSAGES/swift.po b/swift/locale/fr/LC_MESSAGES/swift.po
index d8253d7f1..8b5ec6473 100644
--- a/swift/locale/fr/LC_MESSAGES/swift.po
+++ b/swift/locale/fr/LC_MESSAGES/swift.po
@@ -9,7 +9,7 @@ msgid ""
msgstr ""
"Project-Id-Version: swift VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2020-10-07 00:36+0000\n"
+"POT-Creation-Date: 2022-05-27 18:57+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
@@ -29,41 +29,10 @@ msgstr ""
"l'utilisateur quitte le programme"
#, python-format
-msgid " - %s"
-msgstr "- %s"
-
-#, python-format
-msgid " - parallel, %s"
-msgstr "- parallel, %s"
-
-#, python-format
-msgid ""
-"%(checked)d suffixes checked - %(hashed).2f%% hashed, %(synced).2f%% synced"
-msgstr ""
-"%(checked)d suffixe(s) vérifié(s) - %(hashed).2f%% haché(s), %(synced).2f%% "
-"synchronisé(s)"
-
-#, python-format
-msgid "%(msg)s %(ip)s:%(port)s/%(device)s"
-msgstr "%(msg)s %(ip)s:%(port)s/%(device)s"
-
-#, python-format
-msgid ""
-"%(replicated)d/%(total)d (%(percentage).2f%%) partitions replicated in "
-"%(time).2fs (%(rate).2f/sec, %(remaining)s remaining)"
-msgstr ""
-"%(replicated)d/%(total)d (%(percentage).2f%%) partitions répliquées en "
-"%(time).2fs (%(rate).2f/sec ; %(remaining)s restante(s))"
-
-#, python-format
msgid "%(success)s successes, %(failure)s failures"
msgstr "%(success)s succès, %(failure)s échec(s)"
#, python-format
-msgid "%(type)s returning 503 for %(statuses)s"
-msgstr "%(type)s : renvoi de l'erreur 503 pour %(statuses)s"
-
-#, python-format
msgid "%s already started..."
msgstr "%s déjà démarré..."
@@ -72,28 +41,9 @@ msgid "%s does not exist"
msgstr "%s n'existe pas"
#, python-format
-msgid "%s is not mounted"
-msgstr "%s n'est pas monté"
-
-#, python-format
-msgid "%s responded as unmounted"
-msgstr "%s ont été identifié(es) comme étant démonté(es)"
-
-#, python-format
msgid "%s: Connection reset by peer"
msgstr "%s : Connexion réinitialisée par l'homologue"
-msgid "Account"
-msgstr "Compte"
-
-#, python-format
-msgid "Account audit \"once\" mode completed: %.02fs"
-msgstr "Audit de compte en mode \"Once\" terminé : %.02fs"
-
-#, python-format
-msgid "Account audit pass completed: %.02fs"
-msgstr "Session d'audit de compte terminée : %.02fs"
-
#, python-format
msgid ""
"Attempted to replicate %(count)d dbs in %(time).5f seconds (%(rate).5f/s)"
@@ -101,39 +51,6 @@ msgstr ""
"Tentative de réplication de %(count)d bases de données en %(time).5f "
"secondes (%(rate).5f/s)"
-#, python-format
-msgid "Bad rsync return code: %(ret)d <- %(args)s"
-msgstr "Code retour Rsync non valide : %(ret)d <- %(args)s"
-
-msgid "Begin account audit \"once\" mode"
-msgstr "Démarrer l'audit de compte en mode \"Once\" (une fois)"
-
-msgid "Begin account audit pass."
-msgstr "Démarrer la session d'audit de compte."
-
-msgid "Begin container audit \"once\" mode"
-msgstr "Démarrer l'audit de conteneur en mode \"Once\" (une fois)"
-
-msgid "Begin container audit pass."
-msgstr "Démarrer la session d'audit de conteneur."
-
-msgid "Begin container sync \"once\" mode"
-msgstr "Démarrer la synchronisation de conteneurs en mode \"Once\" (une fois)"
-
-msgid "Begin container update single threaded sweep"
-msgstr ""
-"Démarrer le balayage des mises à jour du conteneur (unité d'exécution unique)"
-
-msgid "Begin container update sweep"
-msgstr "Démarrer le balayage des mises à jour du conteneur"
-
-msgid "Begin object update single threaded sweep"
-msgstr ""
-"Démarrer le balayage des mises à jour d'objet (unité d'exécution unique)"
-
-msgid "Begin object update sweep"
-msgstr "Démarrer le balayage des mises à jour d'objet"
-
msgid "Beginning replication run"
msgstr "Démarrage du cycle de réplication"
@@ -150,215 +67,27 @@ msgstr "Ne peut pas accéder au fichier %s."
msgid "Can not load profile data from %s."
msgstr "Impossible de charger des données de profil depuis %s."
-#, python-format
-msgid "Client did not read from proxy within %ss"
-msgstr "Le client n'a pas lu les données du proxy en %s s"
-
-msgid "Client disconnected without sending enough data"
-msgstr "Client déconnecté avant l'envoi de toutes les données requises"
-
-msgid "Client disconnected without sending last chunk"
-msgstr "Le client a été déconnecté avant l'envoi du dernier bloc"
-
-#, python-format
-msgid ""
-"Client path %(client)s does not match path stored in object metadata %(meta)s"
-msgstr ""
-"Le chemin d'accès au client %(client)s ne correspond pas au chemin stocké "
-"dans les métadonnées d'objet %(meta)s"
-
-msgid ""
-"Configuration option internal_client_conf_path not defined. Using default "
-"configuration, See internal-client.conf-sample for options"
-msgstr ""
-"L'option de configuration internal_client_conf_path n'a pas été définie. La "
-"configuration par défaut est utilisée. Consultez les options dans internal-"
-"client.conf-sample."
-
msgid "Connection refused"
msgstr "Connexion refusée"
msgid "Connection timeout"
msgstr "Dépassement du délai d'attente de connexion"
-msgid "Container"
-msgstr "Conteneur"
-
-#, python-format
-msgid "Container audit \"once\" mode completed: %.02fs"
-msgstr "Audit de conteneur en mode \"Once\" terminé : %.02fs"
-
-#, python-format
-msgid "Container audit pass completed: %.02fs"
-msgstr "Session d'audit de conteneur terminée : %.02fs"
-
-#, python-format
-msgid "Container sync \"once\" mode completed: %.02fs"
-msgstr "Synchronisation de conteneurs en mode \"Once\" terminée : %.02fs"
-
-#, python-format
-msgid ""
-"Container update single threaded sweep completed: %(elapsed).02fs, "
-"%(success)s successes, %(fail)s failures, %(no_change)s with no changes"
-msgstr ""
-"Le balayage des mises à jour du conteneur (unité d'exécution unique) est "
-"terminé : %(elapsed).02fs, %(success)s succès, %(fail)s échec(s), "
-"%(no_change)s inchangé(s)"
-
-#, python-format
-msgid "Container update sweep completed: %.02fs"
-msgstr "Le balayage des mises à jour du conteneur est terminé : %.02fs"
-
-#, python-format
-msgid ""
-"Container update sweep of %(path)s completed: %(elapsed).02fs, %(success)s "
-"successes, %(fail)s failures, %(no_change)s with no changes"
-msgstr ""
-"Le balayage des mises à jour du conteneur (%(path)s) est terminé : "
-"%(elapsed).02fs, %(success)s succès, %(fail)s échec(s), %(no_change)s "
-"inchangé(s)"
-
#, python-format
msgid "Data download error: %s"
msgstr "Erreur de téléchargement des données: %s"
-#, python-format
-msgid "ERROR %(db_file)s: %(validate_sync_to_err)s"
-msgstr "ERREUR %(db_file)s : %(validate_sync_to_err)s"
-
-#, python-format
-msgid "ERROR %(status)d %(body)s From %(type)s Server"
-msgstr "ERREUR %(status)d %(body)s depuis le serveur %(type)s"
-
-#, python-format
-msgid "ERROR %(status)d %(body)s From Object Server re: %(path)s"
-msgstr "ERREUR %(status)d %(body)s depuis le serveur d'objets. Réf. : %(path)s"
-
-#, python-format
-msgid "ERROR %(status)d Expect: 100-continue From Object Server"
-msgstr ""
-"ERREUR %(status)d Attendu(s) : 100 - poursuivre depuis le serveur d'objets"
-
-#, python-format
-msgid ""
-"ERROR Account update failed with %(ip)s:%(port)s/%(device)s (will retry "
-"later): Response %(status)s %(reason)s"
-msgstr ""
-"ERREUR Echec de la mise à jour du compte avec %(ip)s:%(port)s/%(device)s "
-"(une nouvelle tentative sera effectuée ultérieurement). Réponse %(status)s "
-"%(reason)s"
-
-#, python-format
-msgid "ERROR Client read timeout (%ss)"
-msgstr "ERREUR Dépassement du délai de lecture du client (%ss)"
-
-#, python-format
-msgid ""
-"ERROR Container update failed (saving for async update later): %(status)d "
-"response from %(ip)s:%(port)s/%(dev)s"
-msgstr ""
-"ERREUR Echec de la mise à jour du conteneur (sauvegarde pour mise à jour "
-"asynchrone ultérieure) : réponse %(status)d renvoyée par %(ip)s:%(port)s/"
-"%(dev)s"
-
-#, python-format
-msgid "ERROR Could not get account info %s"
-msgstr "ERREUR Impossible d'obtenir les infos de compte %s"
-
-#, python-format
-msgid "ERROR Could not get container info %s"
-msgstr "ERREUR Impossible d'obtenir les infos de conteneur %s"
-
-#, python-format
-msgid "ERROR DiskFile %(data_file)s close failure: %(exc)s : %(stack)s"
-msgstr ""
-"ERREUR Incident de fermeture du fichier disque %(data_file)s : %(exc)s : "
-"%(stack)s"
-
-msgid "ERROR Exception causing client disconnect"
-msgstr "ERREUR Exception entraînant la déconnexion du client"
-
-#, python-format
-msgid "ERROR Exception transferring data to object servers %s"
-msgstr ""
-"ERREUR Exception lors du transfert de données vers des serveurs d'objets %s"
-
msgid "ERROR Failed to get my own IPs?"
msgstr "ERREUR Obtention impossible de mes propres adresses IP ?"
-msgid "ERROR Insufficient Storage"
-msgstr "ERREUR Stockage insuffisant"
-
-#, python-format
-msgid "ERROR Object %(obj)s failed audit and was quarantined: %(err)s"
-msgstr ""
-"ERREUR L'objet %(obj)s a échoué à l'audit et a été en quarantaine : %(err)s"
-
-#, python-format
-msgid "ERROR Pickle problem, quarantining %s"
-msgstr "ERREUR Problème lié à Pickle. Mise en quarantaine de %s"
-
#, python-format
msgid "ERROR Remote drive not mounted %s"
msgstr "ERREUR Unité distante %s non montée"
-#, python-format
-msgid "ERROR Syncing %(db_file)s %(row)s"
-msgstr "ERREUR lors de la synchronisation de %(db_file)s %(row)s"
-
-#, python-format
-msgid "ERROR Syncing %s"
-msgstr "ERREUR lors de la synchronisation de %s"
-
-#, python-format
-msgid "ERROR Trying to audit %s"
-msgstr "ERREUR lors de la tentative d'audit de %s"
-
-msgid "ERROR Unhandled exception in request"
-msgstr "ERREUR Exception non gérée dans la demande"
-
-#, python-format
-msgid "ERROR __call__ error with %(method)s %(path)s "
-msgstr "ERROR __call__ error sur %(method)s %(path)s "
-
-#, python-format
-msgid ""
-"ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry "
-"later)"
-msgstr ""
-"ERREUR Echec de la mise à jour du compte avec %(ip)s:%(port)s/%(device)s "
-"(une nouvelle tentative sera effectuée ultérieurement)"
-
-#, python-format
-msgid ""
-"ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry "
-"later): "
-msgstr ""
-"ERREUR Echec de la mise à jour du compte avec %(ip)s:%(port)s/%(device)s "
-"(une nouvelle tentative sera effectuée ultérieurement) : "
-
-#, python-format
-msgid "ERROR async pending file with unexpected name %s"
-msgstr ""
-"ERREUR Le fichier des mises à jour asynchrones en attente porte un nom "
-"inattendu %s"
-
msgid "ERROR auditing"
msgstr "Erreur d'audit"
#, python-format
-msgid "ERROR auditing: %s"
-msgstr "ERREUR d'audit : %s"
-
-#, python-format
-msgid ""
-"ERROR container update failed with %(ip)s:%(port)s/%(dev)s (saving for async "
-"update later)"
-msgstr ""
-"ERREUR Echec de la mise à jour du conteneur avec %(ip)s:%(port)s/%(dev)s "
-"(sauvegarde pour mise à jour asynchrone ultérieure)"
-
-#, python-format
msgid "ERROR reading HTTP response from %s"
msgstr "Erreur de lecture de la réponse HTTP depuis %s"
@@ -377,39 +106,6 @@ msgstr "ERREUR de synchronisation de %(file)s avec le noeud %(node)s"
msgid "ERROR trying to replicate"
msgstr "ERREUR lors de la tentative de réplication"
-#, python-format
-msgid "ERROR while trying to clean up %s"
-msgstr "ERREUR pendant le nettoyage %s"
-
-#, python-format
-msgid "ERROR with %(type)s server %(ip)s:%(port)s/%(device)s re: %(info)s"
-msgstr ""
-"ERREUR liée au serveur %(type)s %(ip)s:%(port)s/%(device)s. Réf. : %(info)s"
-
-#, python-format
-msgid "ERROR with loading suppressions from %s: "
-msgstr "ERREUR de chargement des suppressions de %s : "
-
-#, python-format
-msgid "ERROR with remote server %(ip)s:%(port)s/%(device)s"
-msgstr "ERREUR liée au serveur distant %(ip)s:%(port)s/%(device)s"
-
-#, python-format
-msgid "ERROR: Failed to get paths to drive partitions: %s"
-msgstr ""
-"ERREUR : Echec de l'obtention des chemins d'accès aux partitions d'unité : %s"
-
-#, python-format
-msgid "ERROR: Unable to access %(path)s: %(error)s"
-msgstr "ERREUR : Impossible d'accéder à %(path)s : %(error)s"
-
-#, python-format
-msgid "ERROR: Unable to run auditing: %s"
-msgstr "ERREUR : Impossible d'exécuter l'audit : %s"
-
-msgid "Error hashing suffix"
-msgstr "Erreur suffixe hashing"
-
msgid "Error listing devices"
msgstr "Erreur lors du listage des unités"
@@ -432,22 +128,6 @@ msgstr "Erreur de lecture de swift.conf"
msgid "Error retrieving recon data"
msgstr "Erreur lors de l'extraction des données Recon"
-msgid "Error syncing handoff partition"
-msgstr "Erreur lors de la synchronisation de la partition de transfert"
-
-msgid "Error syncing partition"
-msgstr "Erreur de synchronisation de la partition"
-
-#, python-format
-msgid "Error syncing with node: %s"
-msgstr "Erreur de synchronisation avec le noeud : %s"
-
-#, python-format
-msgid "Error trying to rebuild %(path)s policy#%(policy)d frag#%(frag_index)s"
-msgstr ""
-"Une erreur est survenue lors de la tentative de régénération de %(path)s "
-"policy#%(policy)d frag#%(frag_index)s"
-
msgid "Error: An error occurred"
msgstr "Erreur : une erreur s'est produite"
@@ -459,14 +139,6 @@ msgid "Error: unable to locate %s"
msgstr "Erreur: impossible de localiser %s"
#, python-format
-msgid "Exception with %(ip)s:%(port)s/%(device)s"
-msgstr "Exception liée à %(ip)s:%(port)s/%(device)s"
-
-#, python-format
-msgid "Expect: 100-continue on %s"
-msgstr "Attendus(s) : 100 - poursuivre sur %s"
-
-#, python-format
msgid "Following CNAME chain for %(given_domain)s to %(found_domain)s"
msgstr ""
"Suivi de la chaîne CNAME pour %(given_domain)s jusqu'à %(found_domain)s"
@@ -474,13 +146,6 @@ msgstr ""
msgid "Found configs:"
msgstr "Configurations trouvées :"
-msgid ""
-"Handoffs first mode still has handoffs remaining. Aborting current "
-"replication pass."
-msgstr ""
-"Le premier mode de transferts contient d'autres transferts. Abandon de la "
-"session de réplication en cours."
-
msgid "Host unreachable"
msgstr "Hôte inaccessible"
@@ -497,14 +162,6 @@ msgid "Invalid pending entry %(file)s: %(entry)s"
msgstr "Entrée en attente non valide %(file)s : %(entry)s"
#, python-format
-msgid "Invalid response %(resp)s from %(full_path)s"
-msgstr "Réponse %(resp)s non valide de %(full_path)s"
-
-#, python-format
-msgid "Invalid response %(resp)s from %(ip)s"
-msgstr "Réponse %(resp)s non valide de %(ip)s"
-
-#, python-format
msgid ""
"Invalid scheme %r in X-Container-Sync-To, must be \"//\", \"http\", or "
"\"https\"."
@@ -513,13 +170,6 @@ msgstr ""
"\"https\"."
#, python-format
-msgid "Killing long-running rsync: %s"
-msgstr "Arrêt de l'opération Rsync à exécution longue : %s"
-
-msgid "Lockup detected.. killing live coros."
-msgstr "Blocage détecté. Arrêt des coroutines actives."
-
-#, python-format
msgid "Mapped %(given_domain)s to %(found_domain)s"
msgstr "%(given_domain)s mappé avec %(found_domain)s"
@@ -539,140 +189,17 @@ msgstr "Aucune statégie avec un index de type %s"
msgid "No realm key for %r"
msgstr "Aucune clé de domaine pour %r"
-#, python-format
-msgid "Node error limited %(ip)s:%(port)s (%(device)s)"
-msgstr ""
-"Noeud marqué avec limite d'erreurs (error_limited) %(ip)s:%(port)s "
-"(%(device)s)"
-
-#, python-format
-msgid "Not enough object servers ack'ed (got %d)"
-msgstr ""
-"Le nombre de serveurs d'objets reconnus n'est pas suffisant (%d obtenus)"
-
-#, python-format
-msgid ""
-"Not found %(sync_from)r => %(sync_to)r - object "
-"%(obj_name)r"
-msgstr ""
-"Introuvable : %(sync_from)r => %(sync_to)r - objet "
-"%(obj_name)r"
-
-#, python-format
-msgid "Nothing reconstructed for %s seconds."
-msgstr "Aucun élément reconstruit pendant %s secondes."
-
-#, python-format
-msgid "Nothing replicated for %s seconds."
-msgstr "Aucun élément répliqué pendant %s secondes."
-
-msgid "Object"
-msgstr "Objet"
-
-msgid "Object PUT"
-msgstr "Object PUT"
-
-#, python-format
-msgid "Object PUT returning 202 for 409: %(req_timestamp)s <= %(timestamps)r"
-msgstr ""
-"L'opération d'insertion (PUT) d'objet a renvoyé l'erreur 202 pour 409 : "
-"%(req_timestamp)s <= %(timestamps)r"
-
-#, python-format
-msgid "Object PUT returning 412, %(statuses)r"
-msgstr ""
-"L'opération d'insertion (PUT) d'objet a renvoyé l'erreur 412. %(statuses)r"
-
-#, python-format
-msgid ""
-"Object audit (%(type)s) \"%(mode)s\" mode completed: %(elapsed).02fs. Total "
-"quarantined: %(quars)d, Total errors: %(errors)d, Total files/sec: "
-"%(frate).2f, Total bytes/sec: %(brate).2f, Auditing time: %(audit).2f, Rate: "
-"%(audit_rate).2f"
-msgstr ""
-"L'audit d'objet (%(type)s) en mode \"%(mode)s\" est terminé : "
-"%(elapsed).02fs. Nombre total mis en quarantaine : %(quars)d. Nombre total "
-"d'erreurs : %(errors)d. Nombre total de fichiers/sec : %(frate).2f. Nombre "
-"total d'octets/sec : %(brate).2f. Durée d'audit : %(audit).2f. Taux : "
-"%(audit_rate).2f"
-
-#, python-format
-msgid ""
-"Object audit (%(type)s). Since %(start_time)s: Locally: %(passes)d passed, "
-"%(quars)d quarantined, %(errors)d errors, files/sec: %(frate).2f, bytes/sec: "
-"%(brate).2f, Total time: %(total).2f, Auditing time: %(audit).2f, Rate: "
-"%(audit_rate).2f"
-msgstr ""
-"Audit d'objet (%(type)s). Depuis %(start_time)s, localement : %(passes)d "
-"succès. %(quars)d en quarantaine. %(errors)d erreurs. Fichiers/sec : "
-"%(frate).2f. Octets/sec : %(brate).2f. Durée totale : %(total).2f. Durée "
-"d'audit : %(audit).2f. Taux : %(audit_rate).2f"
-
-#, python-format
-msgid "Object audit stats: %s"
-msgstr "Statistiques de l'audit d'objet : %s"
-
-#, python-format
-msgid "Object reconstruction complete (once). (%.02f minutes)"
-msgstr ""
-"La reconstruction d'objet en mode Once (une fois) est terminée. (%.02f "
-"minutes)"
-
-#, python-format
-msgid "Object reconstruction complete. (%.02f minutes)"
-msgstr "Reconstruction d'objet terminée. (%.02f minutes)"
-
-#, python-format
-msgid "Object replication complete (once). (%.02f minutes)"
-msgstr ""
-"La réplication d'objet en mode Once (une fois) est terminée. (%.02f minutes)"
-
-#, python-format
-msgid "Object replication complete. (%.02f minutes)"
-msgstr "Réplication d'objet terminée. (%.02f minutes)"
-
-#, python-format
-msgid "Object servers returned %s mismatched etags"
-msgstr "Des serveurs d'objets ont renvoyé %s en-têtes Etag non concordants"
-
-#, python-format
-msgid "Object update sweep completed: %.02fs"
-msgstr "Le balayage des mises à jour d'objet est terminé : %.02fs"
-
msgid "Params, queries, and fragments not allowed in X-Container-Sync-To"
msgstr "Paramètres, requêtes et fragments interdits dans X-Container-Sync-To"
-#, python-format
-msgid "Partition times: max %(max).4fs, min %(min).4fs, med %(med).4fs"
-msgstr ""
-"Temps de partition : maximum %(max).4fs, minimum %(min).4fs, moyenne "
-"%(med).4fs"
-
msgid "Path required in X-Container-Sync-To"
msgstr "Chemin requis dans X-Container-Sync-To"
#, python-format
-msgid "Problem cleaning up %s"
-msgstr "Problème lors du nettoyage de %s"
-
-#, python-format
msgid "Profiling Error: %s"
msgstr "Erreur de profilage : %s"
#, python-format
-msgid "Quarantined %(hsh_path)s to %(quar_path)s because it is not a directory"
-msgstr ""
-"%(hsh_path)s n'est pas un répertoire et a donc été mis en quarantaine dans "
-"%(quar_path)s"
-
-#, python-format
-msgid ""
-"Quarantined %(object_path)s to %(quar_path)s because it is not a directory"
-msgstr ""
-"%(object_path)s n'est pas un répertoire et a donc été mis en quarantaine "
-"dans %(quar_path)s"
-
-#, python-format
msgid "Quarantining DB %s"
msgstr "Mise en quarantaine de la base de données %s"
@@ -687,14 +214,6 @@ msgid "Removed %(remove)d dbs"
msgstr "%(remove)d bases de données ont été retirées"
#, python-format
-msgid "Removing %s objects"
-msgstr "Suppression de %s objets"
-
-#, python-format
-msgid "Removing partition: %s"
-msgstr "Suppression partition: %s"
-
-#, python-format
msgid "Removing pid file %(pid_file)s with wrong pid %(pid)d"
msgstr ""
"Supression du fichier PID %(pid_file)s, comportant un PID incorrect %(pid)d"
@@ -715,54 +234,9 @@ msgid "Returning 497 because of blacklisting: %s"
msgstr "Renvoi de 497 en raison du placement sur liste noire : %s"
#, python-format
-msgid ""
-"Returning 498 for %(meth)s to %(acc)s/%(cont)s/%(obj)s . Ratelimit (Max "
-"Sleep) %(e)s"
-msgstr ""
-"Renvoi de 498 pour %(meth)s jusqu'à %(acc)s/%(cont)s/%(obj)s . Ratelimit "
-"(Max Sleep) %(e)s"
-
-msgid "Ring change detected. Aborting current reconstruction pass."
-msgstr ""
-"Changement d'anneau détecté. Abandon de la session de reconstruction en "
-"cours."
-
-msgid "Ring change detected. Aborting current replication pass."
-msgstr ""
-"Changement d'anneau détecté. Abandon de la session de réplication en cours."
-
-#, python-format
msgid "Running %s once"
msgstr "Exécution unique de %s"
-msgid "Running object reconstructor in script mode."
-msgstr "Exécution du reconstructeur d'objet en mode script."
-
-msgid "Running object replicator in script mode."
-msgstr "Exécution du réplicateur d'objet en mode script."
-
-#, python-format
-msgid ""
-"Since %(time)s: %(sync)s synced [%(delete)s deletes, %(put)s puts], %(skip)s "
-"skipped, %(fail)s failed"
-msgstr ""
-"Depuis %(time)s : %(sync)s synchronisé(s) [%(delete)s suppression(s), "
-"%(put)s insertion(s)], %(skip)s ignoré(s), %(fail)s échec(s)"
-
-#, python-format
-msgid ""
-"Since %(time)s: Account audits: %(passed)s passed audit,%(failed)s failed "
-"audit"
-msgstr ""
-"Depuis %(time)s : audits de compte : %(passed)s succès, %(failed)s échec(s)"
-
-#, python-format
-msgid ""
-"Since %(time)s: Container audits: %(pass)s passed audit, %(fail)s failed "
-"audit"
-msgstr ""
-"Depuis %(time)s : audits de conteneur : %(pass)s succès, %(fail)s échec(s)"
-
#, python-format
msgid "Skipping %s as it is not mounted"
msgstr "%s est ignoré car il n'est pas monté"
@@ -771,55 +245,9 @@ msgstr "%s est ignoré car il n'est pas monté"
msgid "Starting %s"
msgstr "Démarrage %s"
-msgid "Starting object reconstruction pass."
-msgstr "Démarrage de la session de reconstruction d'objet."
-
-msgid "Starting object reconstructor in daemon mode."
-msgstr "Démarrage du reconstructeur d'objet en mode démon."
-
-msgid "Starting object replication pass."
-msgstr "Démarrage de la session de réplication d'objet."
-
-msgid "Starting object replicator in daemon mode."
-msgstr "Démarrage du réplicateur d'objet en mode démon."
-
-#, python-format
-msgid "Successful rsync of %(src)s at %(dst)s (%(time).03f)"
-msgstr "Succès de Rsync pour %(src)s dans %(dst)s (%(time).03f)"
-
msgid "The file type are forbidden to access!"
msgstr "Accès interdit au type de fichier"
-#, python-format
-msgid ""
-"The total %(key)s for the container (%(total)s) does not match the sum of "
-"%(key)s across policies (%(sum)s)"
-msgstr ""
-"Le total %(key)s du conteneur (%(total)s) ne correspond pas à la somme des "
-"clés %(key)s des différentes règles (%(sum)s)"
-
-#, python-format
-msgid "Trying to %(method)s %(path)s"
-msgstr "Tentative d'exécution de %(method)s %(path)s"
-
-#, python-format
-msgid "Trying to GET %(full_path)s"
-msgstr "Tentative de lecture de %(full_path)s"
-
-msgid "Trying to read during GET"
-msgstr "Tentative de lecture pendant une opération GET"
-
-msgid "Trying to send to client"
-msgstr "Tentative d'envoi au client"
-
-#, python-format
-msgid "Trying to sync suffixes with %s"
-msgstr "Tentative de synchronisation de suffixes à l'aide de %s"
-
-#, python-format
-msgid "Trying to write to %s"
-msgstr "Tentative d'écriture sur %s"
-
msgid "UNCAUGHT EXCEPTION"
msgstr "EXCEPTION NON INTERCEPTEE"
@@ -842,21 +270,6 @@ msgstr ""
msgid "Unable to read config from %s"
msgstr "Impossible de lire le fichier de configuration depuis %s"
-#, python-format
-msgid "Unauth %(sync_from)r => %(sync_to)r"
-msgstr "Non autorisé : %(sync_from)r => %(sync_to)r"
-
-msgid "Unhandled exception"
-msgstr "Exception non prise en charge"
-
-#, python-format
-msgid "Update report failed for %(container)s %(dbfile)s"
-msgstr "Echec du rapport de mise à jour pour %(container)s %(dbfile)s"
-
-#, python-format
-msgid "Update report sent for %(container)s %(dbfile)s"
-msgstr "Rapport de mise à jour envoyé pour %(container)s %(dbfile)s"
-
msgid ""
"WARNING: SSL should only be enabled for testing purposes. Use external SSL "
"termination for a production deployment."
diff --git a/swift/locale/it/LC_MESSAGES/swift.po b/swift/locale/it/LC_MESSAGES/swift.po
index f9b864783..5983d2eb1 100644
--- a/swift/locale/it/LC_MESSAGES/swift.po
+++ b/swift/locale/it/LC_MESSAGES/swift.po
@@ -8,7 +8,7 @@ msgid ""
msgstr ""
"Project-Id-Version: swift VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2020-10-07 00:36+0000\n"
+"POT-Creation-Date: 2022-05-27 18:57+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
@@ -28,41 +28,10 @@ msgstr ""
"l'utente è uscito"
#, python-format
-msgid " - %s"
-msgstr " - %s"
-
-#, python-format
-msgid " - parallel, %s"
-msgstr " - parallelo, %s"
-
-#, python-format
-msgid ""
-"%(checked)d suffixes checked - %(hashed).2f%% hashed, %(synced).2f%% synced"
-msgstr ""
-"%(checked)d suffissi controllati - %(hashed).2f%% con hash, %(synced).2f%% "
-"sincronizzati"
-
-#, python-format
-msgid "%(msg)s %(ip)s:%(port)s/%(device)s"
-msgstr "%(msg)s %(ip)s:%(port)s/%(device)s"
-
-#, python-format
-msgid ""
-"%(replicated)d/%(total)d (%(percentage).2f%%) partitions replicated in "
-"%(time).2fs (%(rate).2f/sec, %(remaining)s remaining)"
-msgstr ""
-"%(replicated)d/%(total)d (%(percentage).2f%%) partizioni replicate in "
-"%(time).2fs (%(rate).2f/sec, %(remaining)s rimanenti)"
-
-#, python-format
msgid "%(success)s successes, %(failure)s failures"
msgstr "%(success)s operazioni con esito positivo, %(failure)s errori"
#, python-format
-msgid "%(type)s returning 503 for %(statuses)s"
-msgstr "%(type)s restituisce 503 per %(statuses)s"
-
-#, python-format
msgid "%s already started..."
msgstr "%s già avviato..."
@@ -71,28 +40,9 @@ msgid "%s does not exist"
msgstr "%s non esiste"
#, python-format
-msgid "%s is not mounted"
-msgstr "%s non è montato"
-
-#, python-format
-msgid "%s responded as unmounted"
-msgstr "%s ha risposto come smontato"
-
-#, python-format
msgid "%s: Connection reset by peer"
msgstr "%s: Connessione reimpostata dal peer"
-msgid "Account"
-msgstr "Conto"
-
-#, python-format
-msgid "Account audit \"once\" mode completed: %.02fs"
-msgstr "Modalità \"once\" verifica account completata: %.02fs"
-
-#, python-format
-msgid "Account audit pass completed: %.02fs"
-msgstr "Trasmissione verifica account completata: %.02fs"
-
#, python-format
msgid ""
"Attempted to replicate %(count)d dbs in %(time).5f seconds (%(rate).5f/s)"
@@ -100,37 +50,6 @@ msgstr ""
"È stato eseguito un tentativo di replicare %(count)d dbs in %(time).5f "
"secondi (%(rate).5f/s)"
-#, python-format
-msgid "Bad rsync return code: %(ret)d <- %(args)s"
-msgstr "Codice di ritorno rsync errato: %(ret)d <- %(args)s"
-
-msgid "Begin account audit \"once\" mode"
-msgstr "Avvio modalità \"once\" verifica account"
-
-msgid "Begin account audit pass."
-msgstr "Avvio trasmissione verifica account."
-
-msgid "Begin container audit \"once\" mode"
-msgstr "Avvio modalità \"once\" verifica contenitore"
-
-msgid "Begin container audit pass."
-msgstr "Avvio trasmissione verifica contenitore."
-
-msgid "Begin container sync \"once\" mode"
-msgstr "Avvio della modalità \"once\" di sincronizzazione contenitore"
-
-msgid "Begin container update single threaded sweep"
-msgstr "Avvio pulizia a singolo thread aggiornamento contenitore"
-
-msgid "Begin container update sweep"
-msgstr "Avvio pulizia aggiornamento contenitore"
-
-msgid "Begin object update single threaded sweep"
-msgstr "Avvio pulizia a singolo thread aggiornamento oggetto"
-
-msgid "Begin object update sweep"
-msgstr "Avvio pulizia aggiornamento oggetto"
-
msgid "Beginning replication run"
msgstr "Avvio replica"
@@ -147,212 +66,27 @@ msgstr "Impossibile accedere al file %s."
msgid "Can not load profile data from %s."
msgstr "Impossibile caricare i dati del profilo da %s."
-#, python-format
-msgid "Client did not read from proxy within %ss"
-msgstr "Il client non ha eseguito la lettura dal proxy in %ss"
-
-msgid "Client disconnected without sending enough data"
-msgstr "Client disconnesso senza inviare dati sufficienti"
-
-msgid "Client disconnected without sending last chunk"
-msgstr "Client disconnesso senza inviare l'ultima porzione"
-
-#, python-format
-msgid ""
-"Client path %(client)s does not match path stored in object metadata %(meta)s"
-msgstr ""
-"Il percorso del client %(client)s non corrisponde al percorso memorizzato "
-"nei metadati dell'oggetto %(meta)s"
-
-msgid ""
-"Configuration option internal_client_conf_path not defined. Using default "
-"configuration, See internal-client.conf-sample for options"
-msgstr ""
-"Opzione di configurazione internal_client_conf_path non definita. Viene "
-"utilizzata la configurazione predefinita, vedere l'esempio internal-client."
-"conf-sample per le opzioni"
-
msgid "Connection refused"
msgstr "Connessione rifiutata"
msgid "Connection timeout"
msgstr "Timeout della connessione"
-msgid "Container"
-msgstr "Contenitore"
-
-#, python-format
-msgid "Container audit \"once\" mode completed: %.02fs"
-msgstr "Modalità \"once\" verifica contenitore completata: %.02fs"
-
-#, python-format
-msgid "Container audit pass completed: %.02fs"
-msgstr "Trasmissione verifica contenitore completata: %.02fs"
-
-#, python-format
-msgid "Container sync \"once\" mode completed: %.02fs"
-msgstr ""
-"Modalità \"once\" di sincronizzazione del contenitore completata: %.02fs"
-
-#, python-format
-msgid ""
-"Container update single threaded sweep completed: %(elapsed).02fs, "
-"%(success)s successes, %(fail)s failures, %(no_change)s with no changes"
-msgstr ""
-"Pulizia a singolo thread aggiornamento contenitore completata: "
-"%(elapsed).02fs, %(success)s operazioni con esito positivo, %(fail)s errori, "
-"%(no_change)s senza modifiche"
-
-#, python-format
-msgid "Container update sweep completed: %.02fs"
-msgstr "Pulizia aggiornamento contenitore completata: %.02fs"
-
-#, python-format
-msgid ""
-"Container update sweep of %(path)s completed: %(elapsed).02fs, %(success)s "
-"successes, %(fail)s failures, %(no_change)s with no changes"
-msgstr ""
-"Pulizia aggiornamento contenitore di %(path)s completata: %(elapsed).02fs, "
-"%(success)s operazioni con esito positivo, %(fail)s errori, %(no_change)s "
-"senza modifiche"
-
#, python-format
msgid "Data download error: %s"
msgstr "Errore di download dei dati: %s"
-#, python-format
-msgid "ERROR %(db_file)s: %(validate_sync_to_err)s"
-msgstr "ERRORE %(db_file)s: %(validate_sync_to_err)s"
-
-#, python-format
-msgid "ERROR %(status)d %(body)s From %(type)s Server"
-msgstr "ERRORE %(status)d %(body)s dal server %(type)s"
-
-#, python-format
-msgid "ERROR %(status)d %(body)s From Object Server re: %(path)s"
-msgstr "ERRORE %(status)d %(body)s Dal server degli oggetti re: %(path)s"
-
-#, python-format
-msgid "ERROR %(status)d Expect: 100-continue From Object Server"
-msgstr "ERRORE %(status)d Previsto: 100-continue dal server degli oggetti"
-
-#, python-format
-msgid ""
-"ERROR Account update failed with %(ip)s:%(port)s/%(device)s (will retry "
-"later): Response %(status)s %(reason)s"
-msgstr ""
-"ERRORE Aggiornamento dell'account non riuscito con %(ip)s:%(port)s/"
-"%(device)s (verrà eseguito un nuovo tentativo successivamente): Risposta "
-"%(status)s %(reason)s"
-
-#, python-format
-msgid "ERROR Client read timeout (%ss)"
-msgstr "ERRORE Timeout di lettura del client (%ss)"
-
-#, python-format
-msgid ""
-"ERROR Container update failed (saving for async update later): %(status)d "
-"response from %(ip)s:%(port)s/%(dev)s"
-msgstr ""
-"ERRORE Aggiornamento del contenitore non riuscito (salvataggio per "
-"l'aggiornamento asincrono successivamente): %(status)d risposta da %(ip)s:"
-"%(port)s/%(dev)s"
-
-#, python-format
-msgid "ERROR Could not get account info %s"
-msgstr "ERRORE Impossibile ottenere le informazioni sull'account %s"
-
-#, python-format
-msgid "ERROR Could not get container info %s"
-msgstr "ERRORE Impossibile ottenere le informazioni sul contenitore %s"
-
-#, python-format
-msgid "ERROR DiskFile %(data_file)s close failure: %(exc)s : %(stack)s"
-msgstr "ERRORE Errore di chiusura DiskFile %(data_file)s: %(exc)s : %(stack)s"
-
-msgid "ERROR Exception causing client disconnect"
-msgstr "ERRORE Eccezione che causa la disconnessione del client"
-
-#, python-format
-msgid "ERROR Exception transferring data to object servers %s"
-msgstr ""
-"ERRORE Eccezione durante il trasferimento di dati nel server degli oggetti %s"
-
msgid "ERROR Failed to get my own IPs?"
msgstr "ERRORE Impossibile ottenere i propri IP?"
-msgid "ERROR Insufficient Storage"
-msgstr "ERRORE Memoria insufficiente"
-
-#, python-format
-msgid "ERROR Object %(obj)s failed audit and was quarantined: %(err)s"
-msgstr ""
-"ERRORE L'oggetto %(obj)s non ha superato la verifica ed è stato inserito "
-"nella quarantena: %(err)s"
-
-#, python-format
-msgid "ERROR Pickle problem, quarantining %s"
-msgstr "ERRORE Problema relativo a pickle, inserimento di %s nella quarantena"
-
#, python-format
msgid "ERROR Remote drive not mounted %s"
msgstr "ERRORE Unità remota non montata %s"
-#, python-format
-msgid "ERROR Syncing %(db_file)s %(row)s"
-msgstr "ERRORE durante la sincronizzazione di %(db_file)s %(row)s"
-
-#, python-format
-msgid "ERROR Syncing %s"
-msgstr "ERRORE durante la sincronizzazione di %s"
-
-#, python-format
-msgid "ERROR Trying to audit %s"
-msgstr "ERRORE durante il tentativo di eseguire la verifica %s"
-
-msgid "ERROR Unhandled exception in request"
-msgstr "ERRORE Eccezione non gestita nella richiesta"
-
-#, python-format
-msgid "ERROR __call__ error with %(method)s %(path)s "
-msgstr "ERRORE errore __call__ con %(method)s %(path)s "
-
-#, python-format
-msgid ""
-"ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry "
-"later)"
-msgstr ""
-"ERRORE aggiornamento dell'account non riuscito con %(ip)s:%(port)s/"
-"%(device)s (verrà eseguito un nuovo tentativo successivamente)"
-
-#, python-format
-msgid ""
-"ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry "
-"later): "
-msgstr ""
-"ERRORE aggiornamento dell'account non riuscito con %(ip)s:%(port)s/"
-"%(device)s (verrà eseguito un nuovo tentativo successivamente): "
-
-#, python-format
-msgid "ERROR async pending file with unexpected name %s"
-msgstr "ERRORE file in sospeso asincrono con nome non previsto %s"
-
msgid "ERROR auditing"
msgstr "ERRORE durante la verifica"
#, python-format
-msgid "ERROR auditing: %s"
-msgstr "ERRORE durante la verifica: %s"
-
-#, python-format
-msgid ""
-"ERROR container update failed with %(ip)s:%(port)s/%(dev)s (saving for async "
-"update later)"
-msgstr ""
-"ERRORE aggiornamento del contenitore non riuscito con %(ip)s:%(port)s/"
-"%(dev)s (salvataggio per aggiornamento asincrono successivamente)"
-
-#, python-format
msgid "ERROR reading HTTP response from %s"
msgstr "ERRORE durante la lettura della risposta HTTP da %s"
@@ -371,38 +105,6 @@ msgstr "ERRORE durante la sincronizzazione di %(file)s con il nodo %(node)s"
msgid "ERROR trying to replicate"
msgstr "ERRORE durante il tentativo di eseguire la replica"
-#, python-format
-msgid "ERROR while trying to clean up %s"
-msgstr "ERRORE durante il tentativo di ripulire %s"
-
-#, python-format
-msgid "ERROR with %(type)s server %(ip)s:%(port)s/%(device)s re: %(info)s"
-msgstr ""
-"ERRORE relativo al server %(type)s %(ip)s:%(port)s/%(device)s re: %(info)s"
-
-#, python-format
-msgid "ERROR with loading suppressions from %s: "
-msgstr "ERRORE relativo al caricamento delle eliminazioni da %s: "
-
-#, python-format
-msgid "ERROR with remote server %(ip)s:%(port)s/%(device)s"
-msgstr "ERRORE relativo al server remoto %(ip)s:%(port)s/%(device)s"
-
-#, python-format
-msgid "ERROR: Failed to get paths to drive partitions: %s"
-msgstr "ERRORE: Impossibile ottenere i percorsi per gestire le partizioni: %s"
-
-#, python-format
-msgid "ERROR: Unable to access %(path)s: %(error)s"
-msgstr "ERRORE: Impossibile accedere a %(path)s: %(error)s"
-
-#, python-format
-msgid "ERROR: Unable to run auditing: %s"
-msgstr "ERRORE: Impossibile eseguire la verifica: %s"
-
-msgid "Error hashing suffix"
-msgstr "Errore durante l'hash del suffisso"
-
msgid "Error listing devices"
msgstr "Errore durante l'elenco dei dispositivi"
@@ -427,22 +129,6 @@ msgstr "Errore durante la lettura di swift.conf"
msgid "Error retrieving recon data"
msgstr "Errore durante il richiamo dei dati di riconoscimento"
-msgid "Error syncing handoff partition"
-msgstr "Errore durante la sincronizzazione della partizione di passaggio"
-
-msgid "Error syncing partition"
-msgstr "Errore durante la sincronizzazione della partizione"
-
-#, python-format
-msgid "Error syncing with node: %s"
-msgstr "Errore durante la sincronizzazione con il nodo: %s"
-
-#, python-format
-msgid "Error trying to rebuild %(path)s policy#%(policy)d frag#%(frag_index)s"
-msgstr ""
-"Errore nel tentativo di ricreare %(path)s policy#%(policy)d frag#"
-"%(frag_index)s"
-
msgid "Error: An error occurred"
msgstr "Errore: Si è verificato un errore"
@@ -454,14 +140,6 @@ msgid "Error: unable to locate %s"
msgstr "Errore: impossibile individuare %s"
#, python-format
-msgid "Exception with %(ip)s:%(port)s/%(device)s"
-msgstr "Eccezione relativa a %(ip)s:%(port)s/%(device)s"
-
-#, python-format
-msgid "Expect: 100-continue on %s"
-msgstr "Previsto: 100-continue su %s"
-
-#, python-format
msgid "Following CNAME chain for %(given_domain)s to %(found_domain)s"
msgstr ""
"Viene seguita la catena CNAME per %(given_domain)s verso %(found_domain)s"
@@ -469,13 +147,6 @@ msgstr ""
msgid "Found configs:"
msgstr "Configurazioni trovate:"
-msgid ""
-"Handoffs first mode still has handoffs remaining. Aborting current "
-"replication pass."
-msgstr ""
-"Nella prima modalità di passaggio ci sono ancora passaggi restanti. "
-"Interruzione del passaggio di replica corrente."
-
msgid "Host unreachable"
msgstr "Host non raggiungibile"
@@ -492,14 +163,6 @@ msgid "Invalid pending entry %(file)s: %(entry)s"
msgstr "Voce in sospeso non valida %(file)s: %(entry)s"
#, python-format
-msgid "Invalid response %(resp)s from %(full_path)s"
-msgstr "Risposta non valida %(resp)s da %(full_path)s"
-
-#, python-format
-msgid "Invalid response %(resp)s from %(ip)s"
-msgstr "Risposta non valida %(resp)s da %(ip)s"
-
-#, python-format
msgid ""
"Invalid scheme %r in X-Container-Sync-To, must be \"//\", \"http\", or "
"\"https\"."
@@ -508,13 +171,6 @@ msgstr ""
"oppure \"https\"."
#, python-format
-msgid "Killing long-running rsync: %s"
-msgstr "Chiusura rsync ad elaborazione prolungata: %s"
-
-msgid "Lockup detected.. killing live coros."
-msgstr "Blocco rilevato... chiusura dei coros attivi."
-
-#, python-format
msgid "Mapped %(given_domain)s to %(found_domain)s"
msgstr "%(given_domain)s associato a %(found_domain)s"
@@ -534,128 +190,17 @@ msgstr "Nessuna politica con indice %s"
msgid "No realm key for %r"
msgstr "Nessuna chiave dell'area di autenticazione per %r"
-#, python-format
-msgid "Node error limited %(ip)s:%(port)s (%(device)s)"
-msgstr "Errore del nodo limitato %(ip)s:%(port)s (%(device)s)"
-
-#, python-format
-msgid "Not enough object servers ack'ed (got %d)"
-msgstr "Server degli oggetti riconosciuti non sufficienti (got %d)"
-
-#, python-format
-msgid ""
-"Not found %(sync_from)r => %(sync_to)r - object "
-"%(obj_name)r"
-msgstr "%(sync_from)r => %(sync_to)r non trovato - oggetto %(obj_name)r"
-
-#, python-format
-msgid "Nothing reconstructed for %s seconds."
-msgstr "Nessun elemento ricostruito per %s secondi."
-
-#, python-format
-msgid "Nothing replicated for %s seconds."
-msgstr "Nessun elemento replicato per %s secondi."
-
-msgid "Object"
-msgstr "Oggetto"
-
-msgid "Object PUT"
-msgstr "PUT dell'oggetto"
-
-#, python-format
-msgid "Object PUT returning 202 for 409: %(req_timestamp)s <= %(timestamps)r"
-msgstr ""
-"Il PUT dell'oggetto ha restituito 202 per 409: %(req_timestamp)s <= "
-"%(timestamps)r"
-
-#, python-format
-msgid "Object PUT returning 412, %(statuses)r"
-msgstr "Il PUT dell'oggetto ha restituito 412, %(statuses)r"
-
-#, python-format
-msgid ""
-"Object audit (%(type)s) \"%(mode)s\" mode completed: %(elapsed).02fs. Total "
-"quarantined: %(quars)d, Total errors: %(errors)d, Total files/sec: "
-"%(frate).2f, Total bytes/sec: %(brate).2f, Auditing time: %(audit).2f, Rate: "
-"%(audit_rate).2f"
-msgstr ""
-"Modalità \"%(mode)s\" (%(type)s) verifica oggetto completata: "
-"%(elapsed).02fs. Totale in quarantena: %(quars)d, Totale errori: %(errors)d, "
-"Totale file/sec: %(frate).2f, Totale byte/sec: %(brate).2f, Tempo verifica: "
-"%(audit).2f, Velocità: %(audit_rate).2f"
-
-#, python-format
-msgid ""
-"Object audit (%(type)s). Since %(start_time)s: Locally: %(passes)d passed, "
-"%(quars)d quarantined, %(errors)d errors, files/sec: %(frate).2f, bytes/sec: "
-"%(brate).2f, Total time: %(total).2f, Auditing time: %(audit).2f, Rate: "
-"%(audit_rate).2f"
-msgstr ""
-"Verifica oggetto (%(type)s). A partire da %(start_time)s: In locale: "
-"%(passes)d passati, %(quars)d in quarantena, %(errors)d errori file/sec: "
-"%(frate).2f , byte/sec: %(brate).2f, Tempo totale: %(total).2f, Tempo "
-"verifica: %(audit).2f, Velocità: %(audit_rate).2f"
-
-#, python-format
-msgid "Object audit stats: %s"
-msgstr "Statistiche verifica oggetto: %s"
-
-#, python-format
-msgid "Object reconstruction complete (once). (%.02f minutes)"
-msgstr "Ricostruzione dell'oggetto completata (una volta). (%.02f minuti)"
-
-#, python-format
-msgid "Object reconstruction complete. (%.02f minutes)"
-msgstr "Ricostruzione dell'oggetto completata. (%.02f minuti)"
-
-#, python-format
-msgid "Object replication complete (once). (%.02f minutes)"
-msgstr "Replica dell'oggetto completata (una volta). (%.02f minuti)"
-
-#, python-format
-msgid "Object replication complete. (%.02f minutes)"
-msgstr "Replica dell'oggetto completata. (%.02f minuti)"
-
-#, python-format
-msgid "Object servers returned %s mismatched etags"
-msgstr "I server dell'oggetto hanno restituito %s etag senza corrispondenza"
-
-#, python-format
-msgid "Object update sweep completed: %.02fs"
-msgstr "Pulizia aggiornamento oggetto completata: %.02fs"
-
msgid "Params, queries, and fragments not allowed in X-Container-Sync-To"
msgstr "Parametri, query e frammenti non consentiti in X-Container-Sync-To"
-#, python-format
-msgid "Partition times: max %(max).4fs, min %(min).4fs, med %(med).4fs"
-msgstr "Tempi partizione: max %(max).4fs, min %(min).4fs, med %(med).4fs"
-
msgid "Path required in X-Container-Sync-To"
msgstr "Percorso richiesto in X-Container-Sync-To"
#, python-format
-msgid "Problem cleaning up %s"
-msgstr "Problema durante la ripulitura di %s"
-
-#, python-format
msgid "Profiling Error: %s"
msgstr "Errore di creazione dei profili: %s"
#, python-format
-msgid "Quarantined %(hsh_path)s to %(quar_path)s because it is not a directory"
-msgstr ""
-"%(hsh_path)s inserito in quarantena in %(quar_path)s perché non è una "
-"directory"
-
-#, python-format
-msgid ""
-"Quarantined %(object_path)s to %(quar_path)s because it is not a directory"
-msgstr ""
-"%(object_path)s inserito in quarantena in %(quar_path)s perché non è una "
-"directory"
-
-#, python-format
msgid "Quarantining DB %s"
msgstr "Inserimento in quarantena del DB %s"
@@ -670,14 +215,6 @@ msgid "Removed %(remove)d dbs"
msgstr "Rimossi %(remove)d dbs"
#, python-format
-msgid "Removing %s objects"
-msgstr "Rimozione di oggetti %s"
-
-#, python-format
-msgid "Removing partition: %s"
-msgstr "Rimozione della partizione: %s"
-
-#, python-format
msgid "Removing pid file %(pid_file)s with wrong pid %(pid)d"
msgstr "Rimozione del file pid %(pid_file)s con pid non valido %(pid)d"
@@ -697,58 +234,9 @@ msgid "Returning 497 because of blacklisting: %s"
msgstr "Viene restituito il codice 497 a causa della blacklist: %s"
#, python-format
-msgid ""
-"Returning 498 for %(meth)s to %(acc)s/%(cont)s/%(obj)s . Ratelimit (Max "
-"Sleep) %(e)s"
-msgstr ""
-"Viene restituito 498 per %(meth)s a %(acc)s/%(cont)s/%(obj)s . Ratelimit "
-"(numero massimo sospensioni) %(e)s"
-
-msgid "Ring change detected. Aborting current reconstruction pass."
-msgstr ""
-"Modifica ring rilevata. Interruzione della trasmissione della ricostruzione "
-"corrente."
-
-msgid "Ring change detected. Aborting current replication pass."
-msgstr ""
-"Modifica ring rilevata. Interruzione della trasmissione della replica "
-"corrente."
-
-#, python-format
msgid "Running %s once"
msgstr "Esecuzione di %s una volta"
-msgid "Running object reconstructor in script mode."
-msgstr ""
-"Esecuzione del programma di ricostruzione dell'oggetto in modalità script."
-
-msgid "Running object replicator in script mode."
-msgstr "Esecuzione del programma di replica dell'oggetto in modalità script."
-
-#, python-format
-msgid ""
-"Since %(time)s: %(sync)s synced [%(delete)s deletes, %(put)s puts], %(skip)s "
-"skipped, %(fail)s failed"
-msgstr ""
-"A partire da %(time)s: %(sync)s sincronizzati [%(delete)s eliminazioni, "
-"%(put)s inserimenti], %(skip)s ignorati, %(fail)s non riusciti"
-
-#, python-format
-msgid ""
-"Since %(time)s: Account audits: %(passed)s passed audit,%(failed)s failed "
-"audit"
-msgstr ""
-"A partire da %(time)s: Verifiche account: %(passed)s verifiche superate, "
-"%(failed)s verifiche non superate"
-
-#, python-format
-msgid ""
-"Since %(time)s: Container audits: %(pass)s passed audit, %(fail)s failed "
-"audit"
-msgstr ""
-"A partire da %(time)s: Verifiche contenitore: %(pass)s verifiche superate, "
-"%(fail)s verifiche non superate"
-
#, python-format
msgid "Skipping %s as it is not mounted"
msgstr "%s viene ignorato perché non è montato"
@@ -757,55 +245,9 @@ msgstr "%s viene ignorato perché non è montato"
msgid "Starting %s"
msgstr "Avvio di %s"
-msgid "Starting object reconstruction pass."
-msgstr "Avvio della trasmissione della ricostruzione dell'oggetto."
-
-msgid "Starting object reconstructor in daemon mode."
-msgstr "Avvio del programma di ricostruzione dell'oggetto in modalità daemon."
-
-msgid "Starting object replication pass."
-msgstr "Avvio della trasmissione della replica dell'oggetto."
-
-msgid "Starting object replicator in daemon mode."
-msgstr "Avvio del programma di replica dell'oggetto in modalità daemon."
-
-#, python-format
-msgid "Successful rsync of %(src)s at %(dst)s (%(time).03f)"
-msgstr "Rsync di %(src)s eseguito correttamente su %(dst)s (%(time).03f)"
-
msgid "The file type are forbidden to access!"
msgstr "Non è consentito l'accesso a questo tipo di file!"
-#, python-format
-msgid ""
-"The total %(key)s for the container (%(total)s) does not match the sum of "
-"%(key)s across policies (%(sum)s)"
-msgstr ""
-"Il numero totale di %(key)s per il contenitore (%(total)s) non corrisponde "
-"alla somma di %(key)s tra le politiche (%(sum)s)"
-
-#, python-format
-msgid "Trying to %(method)s %(path)s"
-msgstr "Tentativo di %(method)s %(path)s"
-
-#, python-format
-msgid "Trying to GET %(full_path)s"
-msgstr "Tentativo di eseguire GET %(full_path)s"
-
-msgid "Trying to read during GET"
-msgstr "Tentativo di lettura durante GET"
-
-msgid "Trying to send to client"
-msgstr "Tentativo di invio al client"
-
-#, python-format
-msgid "Trying to sync suffixes with %s"
-msgstr "Tentativo di sincronizzazione dei suffissi con %s"
-
-#, python-format
-msgid "Trying to write to %s"
-msgstr "Tentativo di scrittura in %s"
-
msgid "UNCAUGHT EXCEPTION"
msgstr "ECCEZIONE NON RILEVATA"
@@ -827,21 +269,6 @@ msgstr ""
msgid "Unable to read config from %s"
msgstr "Impossibile leggere la configurazione da %s"
-#, python-format
-msgid "Unauth %(sync_from)r => %(sync_to)r"
-msgstr "%(sync_from)r => %(sync_to)r non autorizzato"
-
-msgid "Unhandled exception"
-msgstr "Eccezione non gestita"
-
-#, python-format
-msgid "Update report failed for %(container)s %(dbfile)s"
-msgstr "Report di aggiornamento non riuscito per %(container)s %(dbfile)s"
-
-#, python-format
-msgid "Update report sent for %(container)s %(dbfile)s"
-msgstr "Report di aggiornamento inviato per %(container)s %(dbfile)s"
-
msgid ""
"WARNING: SSL should only be enabled for testing purposes. Use external SSL "
"termination for a production deployment."
diff --git a/swift/locale/ja/LC_MESSAGES/swift.po b/swift/locale/ja/LC_MESSAGES/swift.po
index 695972695..835a1cf78 100644
--- a/swift/locale/ja/LC_MESSAGES/swift.po
+++ b/swift/locale/ja/LC_MESSAGES/swift.po
@@ -10,7 +10,7 @@ msgid ""
msgstr ""
"Project-Id-Version: swift VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2020-10-07 00:36+0000\n"
+"POT-Creation-Date: 2022-05-27 18:57+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
@@ -30,33 +30,6 @@ msgstr ""
"ユーザー終了"
#, python-format
-msgid " - %s"
-msgstr " - %s"
-
-#, python-format
-msgid " - parallel, %s"
-msgstr " - パラレル、%s"
-
-#, python-format
-msgid ""
-"%(checked)d suffixes checked - %(hashed).2f%% hashed, %(synced).2f%% synced"
-msgstr ""
-"%(checked)d サフィックスが検査されました - ハッシュ済み %(hashed).2f%%、同期"
-"済み %(synced).2f%%"
-
-#, python-format
-msgid "%(msg)s %(ip)s:%(port)s/%(device)s"
-msgstr "%(msg)s %(ip)s:%(port)s/%(device)s"
-
-#, python-format
-msgid ""
-"%(replicated)d/%(total)d (%(percentage).2f%%) partitions replicated in "
-"%(time).2fs (%(rate).2f/sec, %(remaining)s remaining)"
-msgstr ""
-"%(replicated)d/%(total)d (%(percentage).2f%%) パーティションが%(time).2fs で"
-"複製されました (%(rate).2f/秒、残り %(remaining)s)"
-
-#, python-format
msgid "%(server)s #%(number)d not running (%(conf)s)"
msgstr "%(server)s #%(number)d は稼働していません (%(conf)s)"
@@ -77,10 +50,6 @@ msgid "%(success)s successes, %(failure)s failures"
msgstr "成功 %(success)s、失敗 %(failure)s"
#, python-format
-msgid "%(type)s returning 503 for %(statuses)s"
-msgstr "%(type)s が %(statuses)s について 503 を返しています"
-
-#, python-format
msgid "%(type)s: %(value)s"
msgstr "%(type)s: %(value)s"
@@ -93,28 +62,9 @@ msgid "%s does not exist"
msgstr "%s が存在しません"
#, python-format
-msgid "%s is not mounted"
-msgstr "%s がマウントされていません"
-
-#, python-format
-msgid "%s responded as unmounted"
-msgstr "%s はアンマウントとして応答しました"
-
-#, python-format
msgid "%s: Connection reset by peer"
msgstr "%s: 接続がピアによってリセットされました"
-msgid "Account"
-msgstr "アカウント"
-
-#, python-format
-msgid "Account audit \"once\" mode completed: %.02fs"
-msgstr "アカウント監査 \"once\" モードが完了しました: %.02fs"
-
-#, python-format
-msgid "Account audit pass completed: %.02fs"
-msgstr "アカウント監査の処理が完了しました: %.02fs"
-
#, python-format
msgid ""
"Attempted to replicate %(count)d dbs in %(time).5f seconds (%(rate).5f/s)"
@@ -125,44 +75,9 @@ msgid "Audit Failed for %(path)s: %(err)s"
msgstr "%(path)s の監査が失敗しました: %(err)s"
#, python-format
-msgid "Audit passed for %s"
-msgstr "%s の監査が合格しました。"
-
-#, python-format
msgid "Bad key for %(name)r: %(err)s"
msgstr "%(name)r のキーが不正です: %(err)s"
-#, python-format
-msgid "Bad rsync return code: %(ret)d <- %(args)s"
-msgstr "正しくない再同期戻りコード: %(ret)d <- %(args)s"
-
-msgid "Begin account audit \"once\" mode"
-msgstr "アカウント監査 \"once\" モードの開始"
-
-msgid "Begin account audit pass."
-msgstr "アカウント監査パスを開始します。"
-
-msgid "Begin container audit \"once\" mode"
-msgstr "コンテナー監査「once」モードの開始"
-
-msgid "Begin container audit pass."
-msgstr "コンテナー監査パスを開始します。"
-
-msgid "Begin container sync \"once\" mode"
-msgstr "コンテナー同期「once」モードの開始"
-
-msgid "Begin container update single threaded sweep"
-msgstr "コンテナー更新単一スレッド化スイープの開始"
-
-msgid "Begin container update sweep"
-msgstr "コンテナー更新スイープの開始"
-
-msgid "Begin object update single threaded sweep"
-msgstr "オブジェクト更新単一スレッド化スイープの開始"
-
-msgid "Begin object update sweep"
-msgstr "オブジェクト更新スイープの開始"
-
msgid "Beginning replication run"
msgstr "複製の実行を開始中"
@@ -177,72 +92,12 @@ msgstr "ファイル %s にアクセスできません。"
msgid "Can not load profile data from %s."
msgstr "プロファイルデータを %s からロードできません。"
-#, python-format
-msgid "Client did not read from proxy within %ss"
-msgstr "クライアントは %s 内のプロキシーからの読み取りを行いませんでした"
-
-msgid "Client disconnected without sending enough data"
-msgstr "十分なデータを送信せずにクライアントが切断されました"
-
-msgid "Client disconnected without sending last chunk"
-msgstr "最後のチャンクを送信せずにクライアントが切断されました"
-
-#, python-format
-msgid ""
-"Client path %(client)s does not match path stored in object metadata %(meta)s"
-msgstr ""
-"クライアントパス %(client)s はオブジェクトメタデータ %(meta)s に保管されたパ"
-"スに一致しません"
-
-msgid ""
-"Configuration option internal_client_conf_path not defined. Using default "
-"configuration, See internal-client.conf-sample for options"
-msgstr ""
-"設定オプション internal_client_conf_path が定義されていません。デフォルト設定"
-"を使用しています。オプションについては internal-client.conf-sample を参照して"
-"ください"
-
msgid "Connection refused"
msgstr "接続が拒否されました"
msgid "Connection timeout"
msgstr "接続がタイムアウトになりました"
-msgid "Container"
-msgstr "コンテナー"
-
-#, python-format
-msgid "Container audit \"once\" mode completed: %.02fs"
-msgstr "コンテナー監査「once」モードが完了しました: %.02fs"
-
-#, python-format
-msgid "Container audit pass completed: %.02fs"
-msgstr "コンテナー監査の処理が完了しました: %.02fs"
-
-#, python-format
-msgid "Container sync \"once\" mode completed: %.02fs"
-msgstr "コンテナー同期「once」モードが完了しました: %.02fs"
-
-#, python-format
-msgid ""
-"Container update single threaded sweep completed: %(elapsed).02fs, "
-"%(success)s successes, %(fail)s failures, %(no_change)s with no changes"
-msgstr ""
-"コンテナー更新単一スレッド化スイープが完了しました: %(elapsed).02fs、成功 "
-"%(success)s、失敗 %(fail)s、未変更 %(no_change)s"
-
-#, python-format
-msgid "Container update sweep completed: %.02fs"
-msgstr "コンテナー更新スイープが完了しました: %.02fs"
-
-#, python-format
-msgid ""
-"Container update sweep of %(path)s completed: %(elapsed).02fs, %(success)s "
-"successes, %(fail)s failures, %(no_change)s with no changes"
-msgstr ""
-"%(path)s のコンテナー更新スイープが完了しました: %(elapsed).02fs、成功 "
-"%(success)s、失敗 %(fail)s、未変更 %(no_change)s"
-
#, python-format
msgid "Could not load %(conf)r: %(error)s"
msgstr "%(conf)r をロードできませんでした: %(error)s"
@@ -254,89 +109,14 @@ msgstr "データダウンロードエラー: %s"
msgid "Did not get a keys dict"
msgstr "キーの辞書を取得できませんでした。"
-#, python-format
-msgid "ERROR %(db_file)s: %(validate_sync_to_err)s"
-msgstr "エラー %(db_file)s: %(validate_sync_to_err)s"
-
-#, python-format
-msgid "ERROR %(status)d %(body)s From %(type)s Server"
-msgstr "エラー %(status)d: %(type)s サーバーからの %(body)s"
-
-#, python-format
-msgid "ERROR %(status)d %(body)s From Object Server re: %(path)s"
-msgstr "エラー %(status)d: オブジェクトサーバーからの %(body)s、re: %(path)s"
-
-#, python-format
-msgid "ERROR %(status)d Expect: 100-continue From Object Server"
-msgstr "エラー %(status)d: 予期: オブジェクトサーバーからの 100-continue"
-
-#, python-format
-msgid ""
-"ERROR Account update failed with %(ip)s:%(port)s/%(device)s (will retry "
-"later): Response %(status)s %(reason)s"
-msgstr ""
-"エラー: アカウント更新が %(ip)s:%(port)s/%(device)s で失敗しました(後で再試行"
-"されます): 応答 %(status)s %(reason)s"
-
-#, python-format
-msgid "ERROR Client read timeout (%ss)"
-msgstr "エラー: クライアント読み取りがタイムアウトになりました (%ss)"
-
-#, python-format
-msgid ""
-"ERROR Container update failed (saving for async update later): %(status)d "
-"response from %(ip)s:%(port)s/%(dev)s"
-msgstr ""
-"エラー: コンテナー更新に失敗しました (後の非同期更新のために保存中): %(ip)s:"
-"%(port)s/%(dev)s からの %(status)d 応答"
-
-#, python-format
-msgid "ERROR Could not get account info %s"
-msgstr "ERROR アカウント情報 %s が取得できませんでした"
-
-#, python-format
-msgid "ERROR Could not get container info %s"
-msgstr "エラー: コンテナー情報 %s を取得できませんでした"
-
-#, python-format
-msgid "ERROR DiskFile %(data_file)s close failure: %(exc)s : %(stack)s"
-msgstr ""
-"エラー: DiskFile %(data_file)s を閉じることができません: %(exc)s : %(stack)s"
-
-msgid "ERROR Exception causing client disconnect"
-msgstr "エラー: 例外によりクライアントが切断されています"
-
-#, python-format
-msgid "ERROR Exception transferring data to object servers %s"
-msgstr "エラー: オブジェクトサーバー %s へのデータ転送で例外が発生しました"
-
msgid "ERROR Failed to get my own IPs?"
msgstr "エラー: 自分の IP の取得に失敗?"
-msgid "ERROR Insufficient Storage"
-msgstr "エラー: ストレージが不足しています"
-
-#, python-format
-msgid "ERROR Object %(obj)s failed audit and was quarantined: %(err)s"
-msgstr "エラー: オブジェクト %(obj)s は監査に失敗し、検疫されました: %(err)s"
-
-#, python-format
-msgid "ERROR Pickle problem, quarantining %s"
-msgstr "エラー: ピックルの問題、%s を検疫します"
-
#, python-format
msgid "ERROR Remote drive not mounted %s"
msgstr "エラー: リモートドライブに %s がマウントされていません"
#, python-format
-msgid "ERROR Syncing %(db_file)s %(row)s"
-msgstr "%(db_file)s %(row)s の同期エラー"
-
-#, python-format
-msgid "ERROR Syncing %s"
-msgstr "%s の同期エラー"
-
-#, python-format
msgid ""
"ERROR There are not enough handoff nodes to reach replica count for "
"partition %s"
@@ -344,52 +124,9 @@ msgstr ""
"エラー パーティション %s のレプリカ数に達するための handoff ノードが不足して"
"います。"
-#, python-format
-msgid "ERROR Trying to audit %s"
-msgstr "%s の監査を試行中にエラーが発生しました"
-
-msgid "ERROR Unhandled exception in request"
-msgstr "エラー: 要求で未処理例外が発生しました"
-
-#, python-format
-msgid "ERROR __call__ error with %(method)s %(path)s "
-msgstr "エラー: %(method)s %(path)s での __call__ エラー"
-
-#, python-format
-msgid ""
-"ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry "
-"later)"
-msgstr ""
-"エラー: アカウント更新が %(ip)s:%(port)s/%(device)s で失敗しました(後で再試行"
-"されます)"
-
-#, python-format
-msgid ""
-"ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry "
-"later): "
-msgstr ""
-"エラー: アカウント更新が %(ip)s:%(port)s/%(device)s で失敗しました(後で再試行"
-"されます): "
-
-#, python-format
-msgid "ERROR async pending file with unexpected name %s"
-msgstr "エラー: 予期しない名前 %s を持つファイルを非同期保留中"
-
msgid "ERROR auditing"
msgstr "監査エラー"
-#, python-format
-msgid "ERROR auditing: %s"
-msgstr "監査エラー: %s"
-
-#, python-format
-msgid ""
-"ERROR container update failed with %(ip)s:%(port)s/%(dev)s (saving for async "
-"update later)"
-msgstr ""
-"エラー: コンテナー更新が %(ip)s:%(port)s/%(dev)s で失敗しました (後の非同期更"
-"新のために保存中)"
-
msgid "ERROR get_keys() missing callback"
msgstr "エラー get_keys() コールバックがありません"
@@ -413,38 +150,6 @@ msgid "ERROR trying to replicate"
msgstr "複製の試行エラー"
#, python-format
-msgid "ERROR while trying to clean up %s"
-msgstr "%s のクリーンアップを試行中にエラーが発生しました"
-
-#, python-format
-msgid "ERROR with %(type)s server %(ip)s:%(port)s/%(device)s re: %(info)s"
-msgstr ""
-"%(type)s サーバー %(ip)s:%(port)s/%(device)s でのエラー、返された値: %(info)s"
-
-#, python-format
-msgid "ERROR with loading suppressions from %s: "
-msgstr "%s からの抑止のロードでエラーが発生しました: "
-
-#, python-format
-msgid "ERROR with remote server %(ip)s:%(port)s/%(device)s"
-msgstr "リモートサーバー %(ip)s:%(port)s/%(device)s でのエラー"
-
-#, python-format
-msgid "ERROR: Failed to get paths to drive partitions: %s"
-msgstr "エラー: ドライブパーティションに対するパスの取得に失敗しました: %s"
-
-#, python-format
-msgid "ERROR: Unable to access %(path)s: %(error)s"
-msgstr "エラー: %(path)s にアクセスできません: %(error)s"
-
-#, python-format
-msgid "ERROR: Unable to run auditing: %s"
-msgstr "エラー: 監査を実行できません: %s"
-
-msgid "Error hashing suffix"
-msgstr "サフィックスのハッシュエラー"
-
-#, python-format
msgid "Error in %(conf)r with mtime_check_interval: %(error)s"
msgstr "mtime_check_interval で %(conf)r にエラーがあります: %(error)s"
@@ -474,22 +179,6 @@ msgstr "再構成データの取得エラー"
msgid "Error sending UDP message to %(target)r: %(err)s"
msgstr "%(target)r への UDP メッセージ送信エラー: %(err)s"
-msgid "Error syncing handoff partition"
-msgstr "ハンドオフパーティションの同期エラー"
-
-msgid "Error syncing partition"
-msgstr "パーティションとの同期エラー"
-
-#, python-format
-msgid "Error syncing with node: %s"
-msgstr "ノードとの同期エラー: %s"
-
-#, python-format
-msgid "Error trying to rebuild %(path)s policy#%(policy)d frag#%(frag_index)s"
-msgstr ""
-"%(path)s の再構築を試行中にエラーが発生しました。ポリシー #%(policy)d フラグ"
-"メント #%(frag_index)s"
-
msgid "Error: An error occurred"
msgstr "エラー: エラーが発生しました"
@@ -501,27 +190,12 @@ msgid "Error: unable to locate %s"
msgstr "エラー: %s が見つかりません"
#, python-format
-msgid "Exception with %(ip)s:%(port)s/%(device)s"
-msgstr "%(ip)s:%(port)s/%(device)s で例外が発生しました"
-
-#, python-format
-msgid "Expect: 100-continue on %s"
-msgstr "予期: %s での 100-continue"
-
-#, python-format
msgid "Following CNAME chain for %(given_domain)s to %(found_domain)s"
msgstr "%(given_domain)s から %(found_domain)s へ CNAME チェーンをフォロー中"
msgid "Found configs:"
msgstr "構成が見つかりました:"
-msgid ""
-"Handoffs first mode still has handoffs remaining. Aborting current "
-"replication pass."
-msgstr ""
-"ハンドオフのファーストモードにハンドオフが残っています。現行複製パスを打ち切"
-"ります。"
-
msgid "Host unreachable"
msgstr "ホストが到達不能です"
@@ -538,14 +212,6 @@ msgid "Invalid pending entry %(file)s: %(entry)s"
msgstr "無効な保留中項目 %(file)s: %(entry)s"
#, python-format
-msgid "Invalid response %(resp)s from %(full_path)s"
-msgstr "%(full_path)s からの応答 %(resp)s が無効です"
-
-#, python-format
-msgid "Invalid response %(resp)s from %(ip)s"
-msgstr "%(ip)s からの応答 %(resp)s が無効です"
-
-#, python-format
msgid ""
"Invalid scheme %r in X-Container-Sync-To, must be \"//\", \"http\", or "
"\"https\"."
@@ -557,13 +223,6 @@ msgid "Invalid swift_bytes"
msgstr "無効な swift_bytes"
#, python-format
-msgid "Killing long-running rsync: %s"
-msgstr "長期実行の再同期を強制終了中: %s"
-
-msgid "Lockup detected.. killing live coros."
-msgstr "ロックが検出されました.. ライブ coros を強制終了中"
-
-#, python-format
msgid "Mapped %(given_domain)s to %(found_domain)s"
msgstr "%(given_domain)s が %(found_domain)s にマップされました"
@@ -591,129 +250,19 @@ msgstr "インデックス %s のポリシーはありません"
msgid "No realm key for %r"
msgstr "%r のレルムキーがありません"
-#, python-format
-msgid "Node error limited %(ip)s:%(port)s (%(device)s)"
-msgstr "ノードエラー制限 %(ip)s:%(port)s (%(device)s)"
-
-#, python-format
-msgid "Not enough object servers ack'ed (got %d)"
-msgstr "肯定応答を返したオブジェクト・サーバーが不十分です (%d 取得)"
-
-#, python-format
-msgid ""
-"Not found %(sync_from)r => %(sync_to)r - object "
-"%(obj_name)r"
-msgstr ""
-"不検出 %(sync_from)r => %(sync_to)r - オブジェクト "
-"%(obj_name)r"
-
-#, python-format
-msgid "Nothing reconstructed for %s seconds."
-msgstr "%s 秒間で何も再構成されませんでした。"
-
-#, python-format
-msgid "Nothing replicated for %s seconds."
-msgstr "%s 秒間で何も複製されませんでした。"
-
-msgid "Object"
-msgstr "オブジェクト"
-
-msgid "Object PUT"
-msgstr "オブジェクト PUT"
-
-#, python-format
-msgid "Object PUT returning 202 for 409: %(req_timestamp)s <= %(timestamps)r"
-msgstr ""
-"オブジェクト PUT が 409 に対して 202 を返しています: %(req_timestamp)s<= "
-"%(timestamps)r"
-
-#, python-format
-msgid "Object PUT returning 412, %(statuses)r"
-msgstr "オブジェクト PUT が 412 を返しています。%(statuses)r"
-
-#, python-format
-msgid ""
-"Object audit (%(type)s) \"%(mode)s\" mode completed: %(elapsed).02fs. Total "
-"quarantined: %(quars)d, Total errors: %(errors)d, Total files/sec: "
-"%(frate).2f, Total bytes/sec: %(brate).2f, Auditing time: %(audit).2f, Rate: "
-"%(audit_rate).2f"
-msgstr ""
-"オブジェクト監査 (%(type)s) 「%(mode)s」モード完了: %(elapsed).02fs。合計検疫"
-"済み: %(quars)d、合計エラー: %(errors)d、合計ファイル/秒: %(frate).2f、合計バ"
-"イト/秒: %(brate).2f、監査時間: %(audit).2f、率: %(audit_rate).2f"
-
-#, python-format
-msgid ""
-"Object audit (%(type)s). Since %(start_time)s: Locally: %(passes)d passed, "
-"%(quars)d quarantined, %(errors)d errors, files/sec: %(frate).2f, bytes/sec: "
-"%(brate).2f, Total time: %(total).2f, Auditing time: %(audit).2f, Rate: "
-"%(audit_rate).2f"
-msgstr ""
-"オブジェクト監査 (%(type)s)。%(start_time)s 以降: ローカル: 合格した監査 "
-"%(passes)d、検疫済み %(quars)d、エラー %(errors)d、ファイル/秒: %(frate).2f、"
-"バイト/秒: %(brate).2f、合計時間: %(total).2f、監査時間: %(audit).2f、率: "
-"%(audit_rate).2f"
-
-#, python-format
-msgid "Object audit stats: %s"
-msgstr "オブジェクト監査統計: %s"
-
-#, python-format
-msgid "Object reconstruction complete (once). (%.02f minutes)"
-msgstr "オブジェクト再構成が完了しました (1 回)。(%.02f 分)"
-
-#, python-format
-msgid "Object reconstruction complete. (%.02f minutes)"
-msgstr "オブジェクト再構成が完了しました。(%.02f 分)"
-
-#, python-format
-msgid "Object replication complete (once). (%.02f minutes)"
-msgstr "オブジェクト複製が完了しました (1 回)。(%.02f 分)"
-
-#, python-format
-msgid "Object replication complete. (%.02f minutes)"
-msgstr "オブジェクト複製が完了しました。(%.02f 分)"
-
-#, python-format
-msgid "Object servers returned %s mismatched etags"
-msgstr "オブジェクトサーバーが %s 個の不一致 etag を返しました"
-
-#, python-format
-msgid "Object update sweep completed: %.02fs"
-msgstr "オブジェクト更新スイープが完了しました: %.02fs"
-
msgid "Params, queries, and fragments not allowed in X-Container-Sync-To"
msgstr ""
"パラメーター、照会、およびフラグメントは X-Container-Sync-To で許可されていま"
"せん"
-#, python-format
-msgid "Partition times: max %(max).4fs, min %(min).4fs, med %(med).4fs"
-msgstr "パーティション時間: 最大 %(max).4fs、最小 %(min).4fs、中間 %(med).4fs"
-
msgid "Path required in X-Container-Sync-To"
msgstr "X-Container-Sync-To にパスが必要です"
#, python-format
-msgid "Problem cleaning up %s"
-msgstr "%s のクリーンアップ中に問題が発生しました"
-
-#, python-format
msgid "Profiling Error: %s"
msgstr "プロファイル作成エラー: %s"
#, python-format
-msgid "Quarantined %(hsh_path)s to %(quar_path)s because it is not a directory"
-msgstr ""
-"ディレクトリーではないため、%(hsh_path)s は %(quar_path)s へ検疫されました"
-
-#, python-format
-msgid ""
-"Quarantined %(object_path)s to %(quar_path)s because it is not a directory"
-msgstr ""
-"ディレクトリーではないため、%(object_path)s は %(quar_path)s へ検疫されました"
-
-#, python-format
msgid "Quarantining DB %s"
msgstr "DB %s の検疫中"
@@ -727,14 +276,6 @@ msgid "Removed %(remove)d dbs"
msgstr "%(remove)d 個の DB が削除されました"
#, python-format
-msgid "Removing %s objects"
-msgstr "%s オブジェクトの削除中"
-
-#, python-format
-msgid "Removing partition: %s"
-msgstr "パーティションの削除中: %s"
-
-#, python-format
msgid "Removing pid file %(pid_file)s with wrong pid %(pid)d"
msgstr "正しくない pid %(pid)d の pid ファイル %(pid_file)s を削除中"
@@ -754,57 +295,14 @@ msgid "Returning 497 because of blacklisting: %s"
msgstr "ブラックリスティングのため 497 を返しています: %s"
#, python-format
-msgid ""
-"Returning 498 for %(meth)s to %(acc)s/%(cont)s/%(obj)s . Ratelimit (Max "
-"Sleep) %(e)s"
-msgstr ""
-"%(acc)s/%(cont)s/%(obj)s に対する %(meth)s に関して 498 を返しています。"
-"Ratelimit (最大スリープ) %(e)s"
-
-msgid "Ring change detected. Aborting current reconstruction pass."
-msgstr "リング変更が検出されました。現行再構成パスを打ち切ります。"
-
-msgid "Ring change detected. Aborting current replication pass."
-msgstr "リング変更が検出されました。現行複製パスを打ち切ります。"
-
-#, python-format
msgid "Running %s once"
msgstr "%s を 1 回実行中"
-msgid "Running object reconstructor in script mode."
-msgstr "スクリプトモードでオブジェクトリコンストラクターを実行中です。"
-
-msgid "Running object replicator in script mode."
-msgstr "スクリプトモードでオブジェクトレプリケーターを実行中です。"
-
#, python-format
msgid "Signal %(server)s pid: %(pid)s signal: %(signal)s"
msgstr "%(server)s pid: %(pid)s へのシグナル: %(signal)s"
#, python-format
-msgid ""
-"Since %(time)s: %(sync)s synced [%(delete)s deletes, %(put)s puts], %(skip)s "
-"skipped, %(fail)s failed"
-msgstr ""
-"%(time)s 以降: 同期済み %(sync)s [削除 %(delete)s、書き込み %(put)s]、スキッ"
-"プ %(skip)s、失敗 %(fail)s"
-
-#, python-format
-msgid ""
-"Since %(time)s: Account audits: %(passed)s passed audit,%(failed)s failed "
-"audit"
-msgstr ""
-"%(time)s 以降: アカウント監査: 合格した監査 %(passed)s、不合格の監"
-"査%(failed)s"
-
-#, python-format
-msgid ""
-"Since %(time)s: Container audits: %(pass)s passed audit, %(fail)s failed "
-"audit"
-msgstr ""
-"%(time)s 以降: コンテナー監査: 合格した監査 %(pass)s、不合格の監査%(fail)s"
-
-#, python-format
msgid "Skipping %(datadir)s because %(err)s"
msgstr "%(err)s のため %(datadir)s をスキップします"
@@ -816,55 +314,9 @@ msgstr "マウントされていないため、 %s をスキップします"
msgid "Starting %s"
msgstr "%s を開始しています"
-msgid "Starting object reconstruction pass."
-msgstr "オブジェクト再構成パスを開始中です。"
-
-msgid "Starting object reconstructor in daemon mode."
-msgstr "オブジェクトリコンストラクターをデーモンモードで開始中です。"
-
-msgid "Starting object replication pass."
-msgstr "オブジェクト複製パスを開始中です。"
-
-msgid "Starting object replicator in daemon mode."
-msgstr "オブジェクトレプリケーターをデーモンモードで開始中です。"
-
-#, python-format
-msgid "Successful rsync of %(src)s at %(dst)s (%(time).03f)"
-msgstr "%(dst)s での %(src)s の再同期が成功しました (%(time).03f)"
-
msgid "The file type are forbidden to access!"
msgstr "このファイルタイプにはアクセスが禁止されています"
-#, python-format
-msgid ""
-"The total %(key)s for the container (%(total)s) does not match the sum of "
-"%(key)s across policies (%(sum)s)"
-msgstr ""
-"コンテナーの合計 %(key)s (%(total)s) がポリシー全体の合計 %(key)s(%(sum)s) に"
-"一致しません"
-
-#, python-format
-msgid "Trying to %(method)s %(path)s"
-msgstr "%(method)s %(path)s を試行中"
-
-#, python-format
-msgid "Trying to GET %(full_path)s"
-msgstr "GET %(full_path)s を試行中"
-
-msgid "Trying to read during GET"
-msgstr "GET 時に読み取りを試行中"
-
-msgid "Trying to send to client"
-msgstr "クライアントへの送信を試行中"
-
-#, python-format
-msgid "Trying to sync suffixes with %s"
-msgstr "%s でサフィックスの同期を試行中"
-
-#, python-format
-msgid "Trying to write to %s"
-msgstr "%s への書き込みを試行中"
-
msgid "UNCAUGHT EXCEPTION"
msgstr "キャッチされていない例外"
@@ -897,21 +349,6 @@ msgstr "ディレクトリー %(dir)s で fsync() を実行できません: %(er
msgid "Unable to read config from %s"
msgstr "構成を %s から読み取ることができません"
-#, python-format
-msgid "Unauth %(sync_from)r => %(sync_to)r"
-msgstr "非認証 %(sync_from)r => %(sync_to)r"
-
-msgid "Unhandled exception"
-msgstr "未処理例外"
-
-#, python-format
-msgid "Update report failed for %(container)s %(dbfile)s"
-msgstr "%(container)s %(dbfile)s に関する更新レポートが失敗しました"
-
-#, python-format
-msgid "Update report sent for %(container)s %(dbfile)s"
-msgstr "%(container)s %(dbfile)s に関する更新レポートが送信されました"
-
msgid ""
"WARNING: SSL should only be enabled for testing purposes. Use external SSL "
"termination for a production deployment."
diff --git a/swift/locale/ko_KR/LC_MESSAGES/swift.po b/swift/locale/ko_KR/LC_MESSAGES/swift.po
index 5e43e97ff..a1651b615 100644
--- a/swift/locale/ko_KR/LC_MESSAGES/swift.po
+++ b/swift/locale/ko_KR/LC_MESSAGES/swift.po
@@ -6,17 +6,17 @@
# Mario Cho <hephaex@gmail.com>, 2014
# Ying Chun Guo <daisy.ycguo@gmail.com>, 2015
# Andreas Jaeger <jaegerandi@gmail.com>, 2016. #zanata
-# SeongSoo Cho <ppiyakk2@printf.kr>, 2019. #zanata
+# Seongsoo Cho <ppiyakk2@printf.kr>, 2019. #zanata
msgid ""
msgstr ""
"Project-Id-Version: swift VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2020-10-07 00:36+0000\n"
+"POT-Creation-Date: 2022-05-27 18:57+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"PO-Revision-Date: 2019-10-14 12:04+0000\n"
-"Last-Translator: SeongSoo Cho <ppiyakk2@printf.kr>\n"
+"Last-Translator: Seongsoo Cho <ppiyakk2@printf.kr>\n"
"Language: ko_KR\n"
"Plural-Forms: nplurals=1; plural=0;\n"
"Generated-By: Babel 2.0\n"
@@ -31,33 +31,6 @@ msgstr ""
"사용자 종료"
#, python-format
-msgid " - %s"
-msgstr " - %s"
-
-#, python-format
-msgid " - parallel, %s"
-msgstr " - 병렬, %s"
-
-#, python-format
-msgid ""
-"%(checked)d suffixes checked - %(hashed).2f%% hashed, %(synced).2f%% synced"
-msgstr ""
-"%(checked)d개 접미부를 검사함 - %(hashed).2f%%개 해시됨, %(synced).2f%%개 동"
-"기화됨"
-
-#, python-format
-msgid "%(msg)s %(ip)s:%(port)s/%(device)s"
-msgstr "%(msg)s %(ip)s:%(port)s/%(device)s"
-
-#, python-format
-msgid ""
-"%(replicated)d/%(total)d (%(percentage).2f%%) partitions replicated in "
-"%(time).2fs (%(rate).2f/sec, %(remaining)s remaining)"
-msgstr ""
-"%(replicated)d/%(total)d(%(percentage).2f%%)개 파티션이 %(time).2f초"
-"(%(rate).2f/초, %(remaining)s 남음) 안에 복제됨"
-
-#, python-format
msgid "%(server)s #%(number)d not running (%(conf)s)"
msgstr "%(server)s #%(number)d가 실행 중이 아님 (%(conf)s)"
@@ -78,10 +51,6 @@ msgid "%(success)s successes, %(failure)s failures"
msgstr "%(success)s개 성공, %(failure)s개 실패"
#, python-format
-msgid "%(type)s returning 503 for %(statuses)s"
-msgstr "%(type)s에서 %(statuses)s에 대해 503을 리턴함"
-
-#, python-format
msgid "%(type)s: %(value)s"
msgstr "%(type)s: %(value)s"
@@ -94,28 +63,9 @@ msgid "%s does not exist"
msgstr "%s이(가) 존재하지 않음"
#, python-format
-msgid "%s is not mounted"
-msgstr "%s이(가) 마운트되지 않음"
-
-#, python-format
-msgid "%s responded as unmounted"
-msgstr "%s이(가) 마운트 해제된 것으로 응답"
-
-#, python-format
msgid "%s: Connection reset by peer"
msgstr "%s: 피어에서 연결 재설정"
-msgid "Account"
-msgstr "계정"
-
-#, python-format
-msgid "Account audit \"once\" mode completed: %.02fs"
-msgstr "계정 감사 \"한 번\"모드가 완료: %.02fs"
-
-#, python-format
-msgid "Account audit pass completed: %.02fs"
-msgstr "정상으로 판정난 계정: %.02fs"
-
#, python-format
msgid ""
"Attempted to replicate %(count)d dbs in %(time).5f seconds (%(rate).5f/s)"
@@ -126,41 +76,6 @@ msgstr ""
msgid "Audit Failed for %(path)s: %(err)s"
msgstr "%(path)s 검사 실패: %(err)s"
-#, python-format
-msgid "Audit passed for %s"
-msgstr "%s 검사 완료"
-
-#, python-format
-msgid "Bad rsync return code: %(ret)d <- %(args)s"
-msgstr "잘못된 rsync 리턴 코드: %(ret)d <- %(args)s"
-
-msgid "Begin account audit \"once\" mode"
-msgstr "계정 감사 \"한 번\"모드로 시작"
-
-msgid "Begin account audit pass."
-msgstr "계정 검사 시작."
-
-msgid "Begin container audit \"once\" mode"
-msgstr "컨테이너 감사 \"일 회\" 모드 시작"
-
-msgid "Begin container audit pass."
-msgstr "컨테이너 감사 전달이 시작됩니다."
-
-msgid "Begin container sync \"once\" mode"
-msgstr "컨테이너 동기화 \"일 회\" 모드 시작"
-
-msgid "Begin container update single threaded sweep"
-msgstr "컨테이너 업데이트 단일 스레드 스윕 시작"
-
-msgid "Begin container update sweep"
-msgstr "컨테이너 업데이트 스윕 시작"
-
-msgid "Begin object update single threaded sweep"
-msgstr "오브젝트 업데이트 단일 스레드 스윕 시작"
-
-msgid "Begin object update sweep"
-msgstr "오브젝트 업데이트 스윕 시작"
-
msgid "Beginning replication run"
msgstr "복제 실행 시작"
@@ -175,30 +90,6 @@ msgstr "파일 %s에 액세스할 수 없습니다."
msgid "Can not load profile data from %s."
msgstr "%s에서 프로파일 데이터를 로드할 수 없습니다."
-#, python-format
-msgid "Client did not read from proxy within %ss"
-msgstr "클라이언트에서 %ss 내에 프록시를 읽을 수 없었음"
-
-msgid "Client disconnected without sending enough data"
-msgstr "데이터를 모두 전송하기 전에 클라이언트 연결이 끊어짐"
-
-msgid "Client disconnected without sending last chunk"
-msgstr "마지막 청크를 전송하기 전에 클라이언트 연결이 끊어짐"
-
-#, python-format
-msgid ""
-"Client path %(client)s does not match path stored in object metadata %(meta)s"
-msgstr ""
-"클라이언트 경로 %(client)s이(가) 오브젝트 메타데이터 %(meta)s에 저장된 경로"
-"와 일치하지 않음"
-
-msgid ""
-"Configuration option internal_client_conf_path not defined. Using default "
-"configuration, See internal-client.conf-sample for options"
-msgstr ""
-"구성 옵션 internal_client_conf_path가 정의되지 않았습니다. 기본 구성 사용 시 "
-"internal-client.conf-sample에서 옵션을 참조하십시오."
-
msgid "Connection refused"
msgstr "연결이 거부됨"
@@ -208,41 +99,6 @@ msgstr "Connection reset"
msgid "Connection timeout"
msgstr "연결 제한시간 초과"
-msgid "Container"
-msgstr "컨테이너"
-
-#, python-format
-msgid "Container audit \"once\" mode completed: %.02fs"
-msgstr "컨테이너 감사 \"일 회\" 모드 완료: %.02fs"
-
-#, python-format
-msgid "Container audit pass completed: %.02fs"
-msgstr "컨테이너 감사 전달 완료: %.02fs"
-
-#, python-format
-msgid "Container sync \"once\" mode completed: %.02fs"
-msgstr "컨테이너 동기화 \"일 회\" 모드 완료: %.02fs"
-
-#, python-format
-msgid ""
-"Container update single threaded sweep completed: %(elapsed).02fs, "
-"%(success)s successes, %(fail)s failures, %(no_change)s with no changes"
-msgstr ""
-"컨테이너 업데이트 단일 스레드 스윕 완료: %(elapsed).02fs, %(success)s개 성"
-"공, %(fail)s개 실패, %(no_change)s개 변경 없음"
-
-#, python-format
-msgid "Container update sweep completed: %.02fs"
-msgstr "컨테이너 업데이트 스윕 완료: %.02fs"
-
-#, python-format
-msgid ""
-"Container update sweep of %(path)s completed: %(elapsed).02fs, %(success)s "
-"successes, %(fail)s failures, %(no_change)s with no changes"
-msgstr ""
-"%(path)s의 컨테이너 업데이트 스윕 완료: %(elapsed).02fs, %(success)s개 성공, "
-"%(fail)s개 실패, %(no_change)s개 변경 없음"
-
#, python-format
msgid "Could not load %(conf)r: %(error)s"
msgstr "%(conf)r를 로드할 수 없음: %(error)s"
@@ -251,139 +107,24 @@ msgstr "%(conf)r를 로드할 수 없음: %(error)s"
msgid "Data download error: %s"
msgstr "데이터 다운로드 오류: %s"
-#, python-format
-msgid "ERROR %(db_file)s: %(validate_sync_to_err)s"
-msgstr "오류 %(db_file)s: %(validate_sync_to_err)s"
-
-#, python-format
-msgid "ERROR %(status)d %(body)s From %(type)s Server"
-msgstr "오류 %(status)d %(body)s, %(type)s 서버 발신"
-
-#, python-format
-msgid "ERROR %(status)d %(body)s From Object Server re: %(path)s"
-msgstr "오류 %(status)d %(body)s, 오브젝트 서버 발신, 회신: %(path)s"
-
-#, python-format
-msgid "ERROR %(status)d Expect: 100-continue From Object Server"
-msgstr "오류 %(status)d. 예상: 100-continue, 오브젝트 서버 발신"
-
-#, python-format
-msgid ""
-"ERROR Account update failed with %(ip)s:%(port)s/%(device)s (will retry "
-"later): Response %(status)s %(reason)s"
-msgstr ""
-"오류. %(ip)s:%(port)s/%(device)s(으)로 계정 업데이트 실패(나중에 다시 시도): "
-"응답 %(status)s %(reason)s"
-
-#, python-format
-msgid "ERROR Client read timeout (%ss)"
-msgstr "ERROR 클라이언트 읽기 시간 초과 (%ss)"
-
-#, python-format
-msgid ""
-"ERROR Container update failed (saving for async update later): %(status)d "
-"response from %(ip)s:%(port)s/%(dev)s"
-msgstr ""
-"오류. 컨테이너 업데이트 실패(이후 비동기 업데이트용으로 저장): %(status)d응"
-"답. 출처: %(ip)s:%(port)s/%(dev)s"
-
-#, python-format
-msgid "ERROR Could not get account info %s"
-msgstr "오류는 %s의 계정 정보를 얻을 수 없습니다"
-
-#, python-format
-msgid "ERROR Could not get container info %s"
-msgstr "오류. 컨테이너 정보 %s을(를) 가져올 수 없음"
-
-#, python-format
-msgid "ERROR DiskFile %(data_file)s close failure: %(exc)s : %(stack)s"
-msgstr "오류. 디스크 파일 %(data_file)s 닫기 실패: %(exc)s : %(stack)s"
-
-msgid "ERROR Exception causing client disconnect"
-msgstr "오류. 예외로 인해 클라이언트 연결이 끊어짐"
-
-#, python-format
-msgid "ERROR Exception transferring data to object servers %s"
-msgstr "ERROR 오브젝트 서버 %s에 데이터를 전송하는 중에 예외 발생"
-
msgid "ERROR Failed to get my own IPs?"
msgstr "오류. 자체 IP를 가져오는 중 오류 발생 여부"
-msgid "ERROR Insufficient Storage"
-msgstr "오류. 스토리지 공간이 충분하지 않음"
-
-#, python-format
-msgid "ERROR Object %(obj)s failed audit and was quarantined: %(err)s"
-msgstr "오류. 오브젝트 %(obj)s의 감사가 실패하여 격리됨: %(err)s"
-
-#, python-format
-msgid "ERROR Pickle problem, quarantining %s"
-msgstr "오류. 문제가 발생함, %s 격리 중"
-
#, python-format
msgid "ERROR Remote drive not mounted %s"
msgstr "오류. 원격 드라이브가 마운트되지 않음. %s"
#, python-format
-msgid "ERROR Syncing %(db_file)s %(row)s"
-msgstr "%(db_file)s %(row)s 동기화 오류"
-
-#, python-format
-msgid "ERROR Syncing %s"
-msgstr "%s 동기화 오류"
-
-#, python-format
msgid ""
"ERROR There are not enough handoff nodes to reach replica count for "
"partition %s"
msgstr ""
"오류 - 파티션 %s 의 복제 수 만큼 충분한 handoff 노드가 존재하지 않습니다"
-#, python-format
-msgid "ERROR Trying to audit %s"
-msgstr "%s 감사 중 오류 발생"
-
-msgid "ERROR Unhandled exception in request"
-msgstr "오류. 요청에 처리되지 않은 예외가 있음"
-
-#, python-format
-msgid "ERROR __call__ error with %(method)s %(path)s "
-msgstr "오류. %(method)s %(path)s에 __call__ 오류 발생"
-
-#, python-format
-msgid ""
-"ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry "
-"later)"
-msgstr ""
-"오류. %(ip)s:%(port)s/%(device)s(으)로 계정 업데이트 실패(나중에 다시 시도)"
-
-#, python-format
-msgid ""
-"ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry "
-"later): "
-msgstr ""
-"오류. %(ip)s:%(port)s/%(device)s(으)로 계정 업데이트 실패(나중에 다시 시도): "
-
-#, python-format
-msgid "ERROR async pending file with unexpected name %s"
-msgstr "오류. 비동기 보류 파일에 예상치 못한 이름 %s을(를) 사용함"
-
msgid "ERROR auditing"
msgstr "검사 오류"
#, python-format
-msgid "ERROR auditing: %s"
-msgstr "감사 오류: %s"
-
-#, python-format
-msgid ""
-"ERROR container update failed with %(ip)s:%(port)s/%(dev)s (saving for async "
-"update later)"
-msgstr ""
-"오류. %(ip)s:%(port)s/%(dev)s(으)로 컨테이너 업데이트 실패(이후 비동기 업데이"
-"트용으로 저장)"
-
-#, python-format
msgid "ERROR reading HTTP response from %s"
msgstr "%s에서 HTTP 응답을 읽는 중 오류 발생"
@@ -403,37 +144,6 @@ msgid "ERROR trying to replicate"
msgstr "복제 중 오류 발생"
#, python-format
-msgid "ERROR while trying to clean up %s"
-msgstr "%s 정리 중 오류 발생"
-
-#, python-format
-msgid "ERROR with %(type)s server %(ip)s:%(port)s/%(device)s re: %(info)s"
-msgstr "%(type)s 서버 %(ip)s:%(port)s/%(device)s 오류, 회신: %(info)s"
-
-#, python-format
-msgid "ERROR with loading suppressions from %s: "
-msgstr "%s에서 억제를 로드하는 중 오류 발생: "
-
-#, python-format
-msgid "ERROR with remote server %(ip)s:%(port)s/%(device)s"
-msgstr "원격 서버 %(ip)s:%(port)s/%(device)s에 오류 발생"
-
-#, python-format
-msgid "ERROR: Failed to get paths to drive partitions: %s"
-msgstr "오류: 드라이브 파티션에 대한 경로를 가져오지 못함: %s"
-
-#, python-format
-msgid "ERROR: Unable to access %(path)s: %(error)s"
-msgstr "오류: %(path)s에 액세스할 수 없음: %(error)s"
-
-#, python-format
-msgid "ERROR: Unable to run auditing: %s"
-msgstr "오류: 감사를 실행할 수 없음: %s"
-
-msgid "Error hashing suffix"
-msgstr "접미부를 해싱하는 중 오류 발생"
-
-#, python-format
msgid "Error in %(conf)r with mtime_check_interval: %(error)s"
msgstr "%(conf)r에서 mtime_check_interval 오류 발생: %(error)s"
@@ -463,22 +173,6 @@ msgstr "조정 데이터를 검색하는 중에 오류 발생"
msgid "Error sending UDP message to %(target)r: %(err)s"
msgstr "%(target)r에 UDP 메시지 전송 중 에러 발생: %(err)s"
-msgid "Error syncing handoff partition"
-msgstr "핸드오프 파티션 동기화 중 오류 발생"
-
-msgid "Error syncing partition"
-msgstr "파티션 동기 오류 "
-
-#, python-format
-msgid "Error syncing with node: %s"
-msgstr "노드 동기 오류: %s"
-
-#, python-format
-msgid "Error trying to rebuild %(path)s policy#%(policy)d frag#%(frag_index)s"
-msgstr ""
-"%(path)s policy#%(policy)d frag#%(frag_index)s을(를) 다시 빌드하려는 중 오류 "
-"발생"
-
msgid "Error: An error occurred"
msgstr "오류: 오류 발생"
@@ -490,27 +184,12 @@ msgid "Error: unable to locate %s"
msgstr "오류: %s을(를) 찾을 수 없음"
#, python-format
-msgid "Exception with %(ip)s:%(port)s/%(device)s"
-msgstr "%(ip)s:%(port)s/%(device)s 예외"
-
-#, python-format
-msgid "Expect: 100-continue on %s"
-msgstr "%s에서 100-continue 예상"
-
-#, python-format
msgid "Following CNAME chain for %(given_domain)s to %(found_domain)s"
msgstr "%(given_domain)s에서 %(found_domain)s(으)로의 다음 CNAME 체인"
msgid "Found configs:"
msgstr "구성 발견:"
-msgid ""
-"Handoffs first mode still has handoffs remaining. Aborting current "
-"replication pass."
-msgstr ""
-"핸드오프 첫 모드에 여전히 핸드오프가 남아 있습니다. 현재 복제 전달을 중단합니"
-"다."
-
msgid "Host unreachable"
msgstr "호스트 도달 불가능"
@@ -527,14 +206,6 @@ msgid "Invalid pending entry %(file)s: %(entry)s"
msgstr "올바르지 않은 보류 항목 %(file)s: %(entry)s"
#, python-format
-msgid "Invalid response %(resp)s from %(full_path)s"
-msgstr "%(full_path)s에서 올바르지 않은 응답 %(resp)s"
-
-#, python-format
-msgid "Invalid response %(resp)s from %(ip)s"
-msgstr "%(ip)s의 올바르지 않은 응답 %(resp)s"
-
-#, python-format
msgid ""
"Invalid scheme %r in X-Container-Sync-To, must be \"//\", \"http\", or "
"\"https\"."
@@ -546,13 +217,6 @@ msgid "Invalid swift_bytes"
msgstr "swift_bytes 가 유효하지 않습니다."
#, python-format
-msgid "Killing long-running rsync: %s"
-msgstr "장기 실행 중인 rsync 강제 종료: %s"
-
-msgid "Lockup detected.. killing live coros."
-msgstr "잠금 발견.. 활성 coros를 강제 종료합니다."
-
-#, python-format
msgid "Mapped %(given_domain)s to %(found_domain)s"
msgstr "%(given_domain)s을(를) %(found_domain)s(으)로 맵핑함"
@@ -582,111 +246,13 @@ msgstr "인덱스가 %s인 정책이 없음"
msgid "No realm key for %r"
msgstr "%r에 대한 영역 키가 없음"
-#, python-format
-msgid "Node error limited %(ip)s:%(port)s (%(device)s)"
-msgstr "노드 오류로 %(ip)s:%(port)s(%(device)s)이(가) 제한됨"
-
-#, python-format
-msgid "Not enough object servers ack'ed (got %d)"
-msgstr "승인된 오브젝트 서버가 부족함(%d을(를) 받음)"
-
-#, python-format
-msgid ""
-"Not found %(sync_from)r => %(sync_to)r - object "
-"%(obj_name)r"
-msgstr ""
-"찾을 수 없음 %(sync_from)r => %(sync_to)r - 오브젝"
-"트%(obj_name)r"
-
-#, python-format
-msgid "Nothing reconstructed for %s seconds."
-msgstr "%s초 동안 재구성된 것이 없습니다."
-
-#, python-format
-msgid "Nothing replicated for %s seconds."
-msgstr "%s초 동안 복제된 것이 없습니다."
-
-msgid "Object"
-msgstr "오브젝트"
-
-msgid "Object PUT"
-msgstr "Object PUT"
-
-#, python-format
-msgid "Object PUT returning 202 for 409: %(req_timestamp)s <= %(timestamps)r"
-msgstr ""
-"Object PUT에서 409에 대해 202를 리턴함: %(req_timestamp)s <= %(timestamps)r"
-
-#, python-format
-msgid "Object PUT returning 412, %(statuses)r"
-msgstr "Object PUT에서 412를 리턴함, %(statuses)r"
-
-#, python-format
-msgid ""
-"Object audit (%(type)s) \"%(mode)s\" mode completed: %(elapsed).02fs. Total "
-"quarantined: %(quars)d, Total errors: %(errors)d, Total files/sec: "
-"%(frate).2f, Total bytes/sec: %(brate).2f, Auditing time: %(audit).2f, Rate: "
-"%(audit_rate).2f"
-msgstr ""
-"오브젝트 감사(%(type)s) \"%(mode)s\" 모드 완료: %(elapsed).02fs. 총 격리 항"
-"목: %(quars)d, 총 오류 수: %(errors)d, 총 파일/초: %(frate).2f, 총 바이트/"
-"초: %(brate).2f, 감사 시간: %(audit).2f, 속도: %(audit_rate).2f"
-
-#, python-format
-msgid ""
-"Object audit (%(type)s). Since %(start_time)s: Locally: %(passes)d passed, "
-"%(quars)d quarantined, %(errors)d errors, files/sec: %(frate).2f, bytes/sec: "
-"%(brate).2f, Total time: %(total).2f, Auditing time: %(audit).2f, Rate: "
-"%(audit_rate).2f"
-msgstr ""
-"오브젝트 감사(%(type)s). %(start_time)s 이후: 로컬: %(passes)d개 통과, "
-"%(quars)d개 격리, %(errors)d개 오류, 파일/초: %(frate).2f, 바이트/초: "
-"%(brate).2f, 총 시간: %(total).2f, 감사 시간: %(audit).2f, 속도: "
-"%(audit_rate).2f"
-
-#, python-format
-msgid "Object audit stats: %s"
-msgstr "오브젝트 감사 통계: %s"
-
-#, python-format
-msgid "Object reconstruction complete (once). (%.02f minutes)"
-msgstr "오브젝트 재구성 완료(일 회). (%.02f분)"
-
-#, python-format
-msgid "Object reconstruction complete. (%.02f minutes)"
-msgstr "오브젝트 재구성 완료. (%.02f분)"
-
-#, python-format
-msgid "Object replication complete (once). (%.02f minutes)"
-msgstr "오브젝트 복제 완료(일 회). (%.02f분)"
-
-#, python-format
-msgid "Object replication complete. (%.02f minutes)"
-msgstr "오브젝트 복제 완료. (%.02f분)"
-
-#, python-format
-msgid "Object servers returned %s mismatched etags"
-msgstr "오브젝트 서버에서 %s개의 불일치 etag를 리턴함"
-
-#, python-format
-msgid "Object update sweep completed: %.02fs"
-msgstr "오브젝트 업데이트 스윕 완료: %.02fs"
-
msgid "Params, queries, and fragments not allowed in X-Container-Sync-To"
msgstr "X-Container-Sync-To에 매개변수, 조회, 단편이 허용되지 않음"
-#, python-format
-msgid "Partition times: max %(max).4fs, min %(min).4fs, med %(med).4fs"
-msgstr "파티션 시간: 최대 %(max).4f초, 최소 %(min).4f초, 중간 %(med).4f초"
-
msgid "Path required in X-Container-Sync-To"
msgstr "X-Container-Sync-To에 경로가 필요함"
#, python-format
-msgid "Problem cleaning up %s"
-msgstr "%s 정리 문제 발생"
-
-#, python-format
msgid "Profiling Error: %s"
msgstr "프로파일링 오류: %s"
@@ -695,15 +261,6 @@ msgid "Quarantined %(db_dir)s to %(quar_path)s due to %(reason)s"
msgstr "%(db_dir)s을 %(quar_path)s로 격리합니다. 이유: %(reason)s"
#, python-format
-msgid "Quarantined %(hsh_path)s to %(quar_path)s because it is not a directory"
-msgstr "디렉토리가 아니어서 %(hsh_path)s을(를) %(quar_path)s에 격리함"
-
-#, python-format
-msgid ""
-"Quarantined %(object_path)s to %(quar_path)s because it is not a directory"
-msgstr "디렉토리가 아니어서 %(object_path)s을(를) %(quar_path)s에 격리함"
-
-#, python-format
msgid "Quarantining DB %s"
msgstr "데이터베이스 %s 격리"
@@ -717,14 +274,6 @@ msgid "Removed %(remove)d dbs"
msgstr "%(remove)d 데이터베이스를 제거함"
#, python-format
-msgid "Removing %s objects"
-msgstr "%s 오브젝트 제거 중"
-
-#, python-format
-msgid "Removing partition: %s"
-msgstr "파티션 제거: %s"
-
-#, python-format
msgid "Removing pid file %(pid_file)s with wrong pid %(pid)d"
msgstr "잘못된 pid %(pid)d의 pid 파일 %(pid_file)s 제거"
@@ -744,54 +293,14 @@ msgid "Returning 497 because of blacklisting: %s"
msgstr "블랙리스트 지정으로 인해 497이 리턴됨: %s"
#, python-format
-msgid ""
-"Returning 498 for %(meth)s to %(acc)s/%(cont)s/%(obj)s . Ratelimit (Max "
-"Sleep) %(e)s"
-msgstr ""
-"%(acc)s/%(cont)s/%(obj)s(으)로 %(meth)s에 대한 498을 리턴합니다. 전송률 제한"
-"(최대 휴면) %(e)s"
-
-msgid "Ring change detected. Aborting current reconstruction pass."
-msgstr "링 변경이 발견되었습니다. 현재 재구성 전달을 중단합니다."
-
-msgid "Ring change detected. Aborting current replication pass."
-msgstr "링 변경이 발견되었습니다. 현재 복제 전달을 중단합니다."
-
-#, python-format
msgid "Running %s once"
msgstr "%s을(를) 한 번 실행"
-msgid "Running object reconstructor in script mode."
-msgstr "오브젝트 재구성자를 스크립트 모드로 실행 중입니다."
-
-msgid "Running object replicator in script mode."
-msgstr "오브젝트 복제자를 스크립트 모드로 실행 중입니다."
-
#, python-format
msgid "Signal %(server)s pid: %(pid)s signal: %(signal)s"
msgstr "Signal %(server)s pid: %(pid)s signal: %(signal)s"
#, python-format
-msgid ""
-"Since %(time)s: %(sync)s synced [%(delete)s deletes, %(put)s puts], %(skip)s "
-"skipped, %(fail)s failed"
-msgstr ""
-"%(time)s 이후: %(sync)s 동기화됨 [%(delete)s 삭제, %(put)s 배치], %(skip)s 건"
-"너뜀, %(fail)s 실패"
-
-#, python-format
-msgid ""
-"Since %(time)s: Account audits: %(passed)s passed audit,%(failed)s failed "
-"audit"
-msgstr "검사 경과 시간 %(time)s: 계정 검사A: %(passed)s 정상 ,%(failed)s 실패"
-
-#, python-format
-msgid ""
-"Since %(time)s: Container audits: %(pass)s passed audit, %(fail)s failed "
-"audit"
-msgstr "%(time)s 이후: 컨테이너 감사: %(pass)s 감사 전달, %(fail)s 감사 실패"
-
-#, python-format
msgid "Skipping %(datadir)s because %(err)s"
msgstr "%(datadir)s 을 건너 뜀: %(err)s"
@@ -803,55 +312,9 @@ msgstr "마운트되지 않는 %s를 건너 뛰기"
msgid "Starting %s"
msgstr "%s 시작 중"
-msgid "Starting object reconstruction pass."
-msgstr "오브젝트 재구성 전달을 시작합니다."
-
-msgid "Starting object reconstructor in daemon mode."
-msgstr "오브젝트 재구성자를 디먼 모드로 시작합니다."
-
-msgid "Starting object replication pass."
-msgstr "오브젝트 복제 전달을 시작합니다."
-
-msgid "Starting object replicator in daemon mode."
-msgstr "오브젝트 복제자를 디먼 모드로 시작합니다."
-
-#, python-format
-msgid "Successful rsync of %(src)s at %(dst)s (%(time).03f)"
-msgstr "%(dst)s(%(time).03f)에서 %(src)s의 rsync 성공"
-
msgid "The file type are forbidden to access!"
msgstr "이 파일 유형에 대한 액세스가 금지되었습니다!"
-#, python-format
-msgid ""
-"The total %(key)s for the container (%(total)s) does not match the sum of "
-"%(key)s across policies (%(sum)s)"
-msgstr ""
-"컨테이너의 총 %(key)s가 (%(total)s) 과 %(key)s의 총합 (%(sum)s)가 일치하지 "
-"않습니다."
-
-#, python-format
-msgid "Trying to %(method)s %(path)s"
-msgstr "%(method)s %(path)s 시도 중"
-
-#, python-format
-msgid "Trying to GET %(full_path)s"
-msgstr "GET %(full_path)s 시도 중"
-
-msgid "Trying to read during GET"
-msgstr "가져오기 중 읽기를 시도함"
-
-msgid "Trying to send to client"
-msgstr "클라이언트로 전송 시도 중"
-
-#, python-format
-msgid "Trying to sync suffixes with %s"
-msgstr "%s과(와) 접미사를 동기화하려고 시도"
-
-#, python-format
-msgid "Trying to write to %s"
-msgstr "%s에 쓰기 시도 중"
-
msgid "UNCAUGHT EXCEPTION"
msgstr "미발견 예외"
@@ -884,21 +347,6 @@ msgstr "디렉토리에서 fsync() 를 수행할 수 없음 %(dir)s: %(err)s"
msgid "Unable to read config from %s"
msgstr "%s에서 구성을 읽을 수 없음"
-#, python-format
-msgid "Unauth %(sync_from)r => %(sync_to)r"
-msgstr "권한 부여 해제 %(sync_from)r => %(sync_to)r"
-
-msgid "Unhandled exception"
-msgstr "처리되지 않은 예외"
-
-#, python-format
-msgid "Update report failed for %(container)s %(dbfile)s"
-msgstr "%(container)s %(dbfile)s의 업데이트 보고서 실패"
-
-#, python-format
-msgid "Update report sent for %(container)s %(dbfile)s"
-msgstr "%(container)s %(dbfile)s의 업데이트 보고서를 발송함"
-
msgid ""
"WARNING: SSL should only be enabled for testing purposes. Use external SSL "
"termination for a production deployment."
diff --git a/swift/locale/pt_BR/LC_MESSAGES/swift.po b/swift/locale/pt_BR/LC_MESSAGES/swift.po
index b840e288f..65cd909d4 100644
--- a/swift/locale/pt_BR/LC_MESSAGES/swift.po
+++ b/swift/locale/pt_BR/LC_MESSAGES/swift.po
@@ -12,7 +12,7 @@ msgid ""
msgstr ""
"Project-Id-Version: swift VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2020-10-07 00:36+0000\n"
+"POT-Creation-Date: 2022-05-27 18:57+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
@@ -32,41 +32,10 @@ msgstr ""
"encerramento do usuário"
#, python-format
-msgid " - %s"
-msgstr " - %s"
-
-#, python-format
-msgid " - parallel, %s"
-msgstr " - paralelo, %s"
-
-#, python-format
-msgid ""
-"%(checked)d suffixes checked - %(hashed).2f%% hashed, %(synced).2f%% synced"
-msgstr ""
-"%(checked)d sufixos verificados – %(hashed).2f%% de hash, %(synced).2f%% "
-"sincronizados"
-
-#, python-format
-msgid "%(msg)s %(ip)s:%(port)s/%(device)s"
-msgstr "%(msg)s %(ip)s:%(port)s/%(device)s"
-
-#, python-format
-msgid ""
-"%(replicated)d/%(total)d (%(percentage).2f%%) partitions replicated in "
-"%(time).2fs (%(rate).2f/sec, %(remaining)s remaining)"
-msgstr ""
-"%(replicated)d/%(total)d (%(percentage).2f%%) partições replicadas em "
-"%(time).2fs (%(rate).2f/seg, %(remaining)s restantes)"
-
-#, python-format
msgid "%(success)s successes, %(failure)s failures"
msgstr "%(success)s sucessos, %(failure)s falhas"
#, python-format
-msgid "%(type)s returning 503 for %(statuses)s"
-msgstr "%(type)s retornando 503 para %(statuses)s"
-
-#, python-format
msgid "%s already started..."
msgstr "%s já iniciado..."
@@ -75,65 +44,15 @@ msgid "%s does not exist"
msgstr "%s não existe"
#, python-format
-msgid "%s is not mounted"
-msgstr "%s não está montado"
-
-#, python-format
-msgid "%s responded as unmounted"
-msgstr "%s respondeu como não montado"
-
-#, python-format
msgid "%s: Connection reset by peer"
msgstr "%s: Reconfiguração da conexão por peer"
-msgid "Account"
-msgstr "Conta"
-
-#, python-format
-msgid "Account audit \"once\" mode completed: %.02fs"
-msgstr "Auditoria de conta em modo \"único\" finalizado: %.02fs"
-
-#, python-format
-msgid "Account audit pass completed: %.02fs"
-msgstr "Passo de auditoria de conta finalizado: %.02fs"
-
#, python-format
msgid ""
"Attempted to replicate %(count)d dbs in %(time).5f seconds (%(rate).5f/s)"
msgstr ""
"Tentativa de replicação do %(count)d dbs em%(time).5f segundos (%(rate).5f/s)"
-#, python-format
-msgid "Bad rsync return code: %(ret)d <- %(args)s"
-msgstr "Código de retorno de ressincronização inválido: %(ret)d <-%(args)s"
-
-msgid "Begin account audit \"once\" mode"
-msgstr "Iniciar auditoria de conta em modo \"único\""
-
-msgid "Begin account audit pass."
-msgstr "Iniciando passo de auditoria de conta."
-
-msgid "Begin container audit \"once\" mode"
-msgstr "Inicie o modo \"único\" da auditoria do contêiner"
-
-msgid "Begin container audit pass."
-msgstr "Inicie a aprovação da auditoria do contêiner."
-
-msgid "Begin container sync \"once\" mode"
-msgstr "Inicie o modo \"único\" de sincronização do contêiner"
-
-msgid "Begin container update single threaded sweep"
-msgstr "Inicie a varredura de encadeamento único da atualização do contêiner"
-
-msgid "Begin container update sweep"
-msgstr "Inicie a varredura de atualização do contêiner"
-
-msgid "Begin object update single threaded sweep"
-msgstr "Inicie a varredura de encadeamento único da atualização do objeto"
-
-msgid "Begin object update sweep"
-msgstr "Inicie a varredura da atualização do objeto"
-
msgid "Beginning replication run"
msgstr "Começando execução de replicação"
@@ -148,205 +67,27 @@ msgstr "Não é possível acessar o arquivo %s."
msgid "Can not load profile data from %s."
msgstr "Não é possível carregar dados do perfil a partir de %s."
-#, python-format
-msgid "Client did not read from proxy within %ss"
-msgstr "O cliente não leu no proxy dentro de %ss"
-
-msgid "Client disconnected without sending enough data"
-msgstr "Cliente desconecatdo sem ter enviado dados suficientes"
-
-msgid "Client disconnected without sending last chunk"
-msgstr "Cliente desconectado sem ter enviado o último chunk"
-
-#, python-format
-msgid ""
-"Client path %(client)s does not match path stored in object metadata %(meta)s"
-msgstr ""
-"Caminho do cliente %(client)s não corresponde ao caminho armazenado nos "
-"metadados do objeto %(meta)s"
-
-msgid ""
-"Configuration option internal_client_conf_path not defined. Using default "
-"configuration, See internal-client.conf-sample for options"
-msgstr ""
-"Opção de configuração internal_client_conf_path não definida. Usando a "
-"configuração padrão. Consulte internal-client.conf-sample para obter opções"
-
msgid "Connection refused"
msgstr "Conexão recusada"
msgid "Connection timeout"
msgstr "Tempo limite de conexão"
-msgid "Container"
-msgstr "Contêiner"
-
-#, python-format
-msgid "Container audit \"once\" mode completed: %.02fs"
-msgstr "Modo \"único\" da auditoria do contêiner concluído: %.02fs"
-
-#, python-format
-msgid "Container audit pass completed: %.02fs"
-msgstr "Aprovação da auditoria do contêiner concluída: %.02fs"
-
-#, python-format
-msgid "Container sync \"once\" mode completed: %.02fs"
-msgstr "Modo \"único\" de sincronização do contêiner concluído: %.02fs"
-
-#, python-format
-msgid ""
-"Container update single threaded sweep completed: %(elapsed).02fs, "
-"%(success)s successes, %(fail)s failures, %(no_change)s with no changes"
-msgstr ""
-"Varredura de encadeamento único da atualização do contêiner concluída: "
-"%(elapsed).02fs, %(success)s com êxito, %(fail)s com falha, %(no_change)s "
-"sem alterações"
-
-#, python-format
-msgid "Container update sweep completed: %.02fs"
-msgstr "Varredura da atualização do contêiner concluída: %.02fs"
-
-#, python-format
-msgid ""
-"Container update sweep of %(path)s completed: %(elapsed).02fs, %(success)s "
-"successes, %(fail)s failures, %(no_change)s with no changes"
-msgstr ""
-"Varredura da atualização do contêiner de %(path)s concluída: "
-"%(elapsed).02fs, %(success)s com êxito, %(fail)s com falha, %(no_change)s "
-"sem alterações"
-
#, python-format
msgid "Data download error: %s"
msgstr "Erro ao fazer download de dados: %s"
-#, python-format
-msgid "ERROR %(db_file)s: %(validate_sync_to_err)s"
-msgstr "ERRO %(db_file)s: %(validate_sync_to_err)s"
-
-#, python-format
-msgid "ERROR %(status)d %(body)s From %(type)s Server"
-msgstr "ERRO %(status)d %(body)s Do Servidor %(type)s"
-
-#, python-format
-msgid "ERROR %(status)d %(body)s From Object Server re: %(path)s"
-msgstr "ERRO %(status)d %(body)s No Servidor de Objetos re: %(path)s"
-
-#, python-format
-msgid "ERROR %(status)d Expect: 100-continue From Object Server"
-msgstr "ERRO %(status)d Expectativa: 100-continuar Do Servidor de Objeto"
-
-#, python-format
-msgid ""
-"ERROR Account update failed with %(ip)s:%(port)s/%(device)s (will retry "
-"later): Response %(status)s %(reason)s"
-msgstr ""
-"ERRO A atualização da conta falhou com %(ip)s:%(port)s/%(device)s (tente "
-"novamente mais tarde): Resposta %(status)s %(reason)s"
-
-#, python-format
-msgid "ERROR Client read timeout (%ss)"
-msgstr "ERRO Tempo limite de leitura do cliente (%ss)"
-
-#, python-format
-msgid ""
-"ERROR Container update failed (saving for async update later): %(status)d "
-"response from %(ip)s:%(port)s/%(dev)s"
-msgstr ""
-"ERRO A atualização do contêiner falhou (salvando para atualização assíncrona "
-"posterior): %(status)d resposta do %(ip)s:%(port)s/%(dev)s"
-
-#, python-format
-msgid "ERROR Could not get account info %s"
-msgstr "ERRO Não foi possível recuperar as informações da conta %s"
-
-#, python-format
-msgid "ERROR Could not get container info %s"
-msgstr "ERRO Não foi possível obter informações do contêiner %s"
-
-#, python-format
-msgid "ERROR DiskFile %(data_file)s close failure: %(exc)s : %(stack)s"
-msgstr "ERROR DiskFile %(data_file)s falha ao fechar: %(exc)s : %(stack)s"
-
-msgid "ERROR Exception causing client disconnect"
-msgstr "ERRO Exceção causando clientes a desconectar"
-
-#, python-format
-msgid "ERROR Exception transferring data to object servers %s"
-msgstr "ERRO Exceção ao transferir dados para os servidores de objeto %s"
-
msgid "ERROR Failed to get my own IPs?"
msgstr "ERRO Falha ao pegar meu próprio IPs?"
-msgid "ERROR Insufficient Storage"
-msgstr "ERRO Capacidade insuficiente"
-
-#, python-format
-msgid "ERROR Object %(obj)s failed audit and was quarantined: %(err)s"
-msgstr "ERRO O objeto %(obj)s falhou ao auditar e ficou em quarentena: %(err)s"
-
-#, python-format
-msgid "ERROR Pickle problem, quarantining %s"
-msgstr "ERRO Problema de seleção, em quarentena %s"
-
#, python-format
msgid "ERROR Remote drive not mounted %s"
msgstr "ERRO Drive remoto não montado %s"
-#, python-format
-msgid "ERROR Syncing %(db_file)s %(row)s"
-msgstr "ERRO Sincronizando %(db_file)s %(row)s"
-
-#, python-format
-msgid "ERROR Syncing %s"
-msgstr "ERRO Sincronizando %s"
-
-#, python-format
-msgid "ERROR Trying to audit %s"
-msgstr "ERRO Tentando auditar %s"
-
-msgid "ERROR Unhandled exception in request"
-msgstr "ERRO Exceção não manipulada na solicitação"
-
-#, python-format
-msgid "ERROR __call__ error with %(method)s %(path)s "
-msgstr "ERROR __call__ erro com %(method)s %(path)s"
-
-#, python-format
-msgid ""
-"ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry "
-"later)"
-msgstr ""
-"ERRO A atualização da conta falhou com %(ip)s:%(port)s/%(device)s (tente "
-"novamente mais tarde)"
-
-#, python-format
-msgid ""
-"ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry "
-"later): "
-msgstr ""
-"ERRO A atualização da conta falhou com %(ip)s:%(port)s/%(device)s (tente "
-"novamente mais tarde): "
-
-#, python-format
-msgid "ERROR async pending file with unexpected name %s"
-msgstr "ERRO arquivo pendente assíncrono com nome inesperado %s"
-
msgid "ERROR auditing"
msgstr "Erro auditando"
#, python-format
-msgid "ERROR auditing: %s"
-msgstr "ERRO auditoria: %s"
-
-#, python-format
-msgid ""
-"ERROR container update failed with %(ip)s:%(port)s/%(dev)s (saving for async "
-"update later)"
-msgstr ""
-"ERRO A atualização de contêiner falhou com %(ip)s:%(port)s/%(dev)s (salvando "
-"para atualização assíncrona posterior)"
-
-#, python-format
msgid "ERROR reading HTTP response from %s"
msgstr "ERRO lendo resposta HTTP de %s"
@@ -365,37 +106,6 @@ msgstr "ERRO sincronizando %(file)s com nodo %(node)s"
msgid "ERROR trying to replicate"
msgstr "ERRO tentando replicar"
-#, python-format
-msgid "ERROR while trying to clean up %s"
-msgstr "ERRO enquanto tentaava limpar %s"
-
-#, python-format
-msgid "ERROR with %(type)s server %(ip)s:%(port)s/%(device)s re: %(info)s"
-msgstr "ERRO com %(type)s do servidor %(ip)s:%(port)s/%(device)s re: %(info)s"
-
-#, python-format
-msgid "ERROR with loading suppressions from %s: "
-msgstr "ERRO com as supressões de carregamento a partir de %s: "
-
-#, python-format
-msgid "ERROR with remote server %(ip)s:%(port)s/%(device)s"
-msgstr "ERRO com o servidor remoto %(ip)s:%(port)s/%(device)s"
-
-#, python-format
-msgid "ERROR: Failed to get paths to drive partitions: %s"
-msgstr "ERRO: Falha ao obter caminhos para partições de unidade: %s"
-
-#, python-format
-msgid "ERROR: Unable to access %(path)s: %(error)s"
-msgstr "ERRO: Não é possível acessar %(path)s: %(error)s"
-
-#, python-format
-msgid "ERROR: Unable to run auditing: %s"
-msgstr "ERRO: Não é possível executar a auditoria: %s"
-
-msgid "Error hashing suffix"
-msgstr "Erro ao efetuar hash do sufixo"
-
msgid "Error listing devices"
msgstr "Erro ao listar dispositivos"
@@ -418,21 +128,6 @@ msgstr "Erro ao ler swift.conf"
msgid "Error retrieving recon data"
msgstr "Erro ao recuperar dados de reconhecimento"
-msgid "Error syncing handoff partition"
-msgstr "Erro ao sincronizar a partição de handoff"
-
-msgid "Error syncing partition"
-msgstr "Erro ao sincronizar partição"
-
-#, python-format
-msgid "Error syncing with node: %s"
-msgstr "Erro ao sincronizar com o nó: %s"
-
-#, python-format
-msgid "Error trying to rebuild %(path)s policy#%(policy)d frag#%(frag_index)s"
-msgstr ""
-"Erro ao tentar reconstruir %(path)s policy#%(policy)d frag#%(frag_index)s"
-
msgid "Error: An error occurred"
msgstr "Erro: Ocorreu um erro"
@@ -444,27 +139,12 @@ msgid "Error: unable to locate %s"
msgstr "Erro: não é possível localizar %s"
#, python-format
-msgid "Exception with %(ip)s:%(port)s/%(device)s"
-msgstr "Exceção com %(ip)s:%(port)s/%(device)s"
-
-#, python-format
-msgid "Expect: 100-continue on %s"
-msgstr "Expectativa: 100-continuar em %s"
-
-#, python-format
msgid "Following CNAME chain for %(given_domain)s to %(found_domain)s"
msgstr "Cadeia CNAME a seguir para %(given_domain)s para%(found_domain)s"
msgid "Found configs:"
msgstr "Localizados arquivos de configuração:"
-msgid ""
-"Handoffs first mode still has handoffs remaining. Aborting current "
-"replication pass."
-msgstr ""
-"O primeiro modo de handoffs ainda possui handoffs. Interrompendo a aprovação "
-"da replicação atual."
-
msgid "Host unreachable"
msgstr "Destino inalcançável"
@@ -481,14 +161,6 @@ msgid "Invalid pending entry %(file)s: %(entry)s"
msgstr "Entrada pendente inválida %(file)s: %(entry)s"
#, python-format
-msgid "Invalid response %(resp)s from %(full_path)s"
-msgstr "Resposta inválida %(resp)s a partir de %(full_path)s"
-
-#, python-format
-msgid "Invalid response %(resp)s from %(ip)s"
-msgstr "Resposta inválida %(resp)s a partir de %(ip)s"
-
-#, python-format
msgid ""
"Invalid scheme %r in X-Container-Sync-To, must be \"//\", \"http\", or "
"\"https\"."
@@ -497,13 +169,6 @@ msgstr ""
"\"https\"."
#, python-format
-msgid "Killing long-running rsync: %s"
-msgstr "Eliminando a ressincronização de longa execução: %s"
-
-msgid "Lockup detected.. killing live coros."
-msgstr "Bloqueio detectado... eliminando núcleos em tempo real."
-
-#, python-format
msgid "Mapped %(given_domain)s to %(found_domain)s"
msgstr "%(given_domain)s mapeado para %(found_domain)s"
@@ -523,132 +188,18 @@ msgstr "Nenhuma política com índice %s"
msgid "No realm key for %r"
msgstr "Nenhuma chave do domínio para %r"
-#, python-format
-msgid "Node error limited %(ip)s:%(port)s (%(device)s)"
-msgstr "Erro de nó limitado %(ip)s:%(port)s (%(device)s)"
-
-#, python-format
-msgid "Not enough object servers ack'ed (got %d)"
-msgstr ""
-"Insuficiente número de servidores de objeto confirmaram (%d confirmados)"
-
-#, python-format
-msgid ""
-"Not found %(sync_from)r => %(sync_to)r - object "
-"%(obj_name)r"
-msgstr ""
-"Não localizado %(sync_from)r => %(sync_to)r – objeto "
-"%(obj_name)r"
-
-#, python-format
-msgid "Nothing reconstructed for %s seconds."
-msgstr "Nada foi reconstruído durante %s segundos."
-
-#, python-format
-msgid "Nothing replicated for %s seconds."
-msgstr "Nada foi replicado para %s segundos."
-
-msgid "Object"
-msgstr "Objeto"
-
-msgid "Object PUT"
-msgstr "PUT de objeto"
-
-#, python-format
-msgid "Object PUT returning 202 for 409: %(req_timestamp)s <= %(timestamps)r"
-msgstr ""
-"Objeto PUT retornando 202 para a versão 409: %(req_timestamp)s < = "
-"%(timestamps)r"
-
-#, python-format
-msgid "Object PUT returning 412, %(statuses)r"
-msgstr "PUT de objeto retornando 412, %(statuses)r"
-
-#, python-format
-msgid ""
-"Object audit (%(type)s) \"%(mode)s\" mode completed: %(elapsed).02fs. Total "
-"quarantined: %(quars)d, Total errors: %(errors)d, Total files/sec: "
-"%(frate).2f, Total bytes/sec: %(brate).2f, Auditing time: %(audit).2f, Rate: "
-"%(audit_rate).2f"
-msgstr ""
-"Modo \"%(mode)s\" da auditoria de objeto (%(type)s) concluído: "
-"%(elapsed).02fs. Total em quarentena: %(quars)d, Total de erros: %(errors)d, "
-"Total de arquivos/seg: %(frate).2f, Total de bytes/seg: %(brate).2f, Tempo "
-"de auditoria: %(audit).2f, Taxa: %(audit_rate).2f"
-
-#, python-format
-msgid ""
-"Object audit (%(type)s). Since %(start_time)s: Locally: %(passes)d passed, "
-"%(quars)d quarantined, %(errors)d errors, files/sec: %(frate).2f, bytes/sec: "
-"%(brate).2f, Total time: %(total).2f, Auditing time: %(audit).2f, Rate: "
-"%(audit_rate).2f"
-msgstr ""
-"Auditoria de objeto (%(type)s). Desde %(start_time)s: Localmente: %(passes)d "
-"aprovado, %(quars)d em quarentena, %(errors)d erros, arquivos/s: "
-"%(frate).2f, bytes/seg: %(brate).2f, Tempo total: %(total).2f, Tempo de "
-"auditoria: %(audit).2f, Taxa: %(audit_rate).2f"
-
-#, python-format
-msgid "Object audit stats: %s"
-msgstr "Estatísticas de auditoria do objeto: %s"
-
-#, python-format
-msgid "Object reconstruction complete (once). (%.02f minutes)"
-msgstr "Reconstrução do objeto concluída (única). (%.02f minutos)"
-
-#, python-format
-msgid "Object reconstruction complete. (%.02f minutes)"
-msgstr "Reconstrução do objeto concluída. (%.02f minutos)"
-
-#, python-format
-msgid "Object replication complete (once). (%.02f minutes)"
-msgstr "Replicação completa do objeto (única). (%.02f minutos)"
-
-#, python-format
-msgid "Object replication complete. (%.02f minutes)"
-msgstr "Replicação completa do objeto. (%.02f minutos)"
-
-#, python-format
-msgid "Object servers returned %s mismatched etags"
-msgstr "Servidores de objeto retornaram %s etags incompatíveis"
-
-#, python-format
-msgid "Object update sweep completed: %.02fs"
-msgstr "Varredura da atualização de objeto concluída: %.02fs"
-
msgid "Params, queries, and fragments not allowed in X-Container-Sync-To"
msgstr ""
"Parâmetros, consultas e fragmentos não permitidos em X-Container-Sync-To"
-#, python-format
-msgid "Partition times: max %(max).4fs, min %(min).4fs, med %(med).4fs"
-msgstr ""
-"Tempos de partição: máximo %(max).4fs, mínimo %(min).4fs, médio %(med).4fs"
-
msgid "Path required in X-Container-Sync-To"
msgstr "Caminho necessário em X-Container-Sync-To"
#, python-format
-msgid "Problem cleaning up %s"
-msgstr "Problema ao limpar %s"
-
-#, python-format
msgid "Profiling Error: %s"
msgstr "Erro da Criação de Perfil: %s"
#, python-format
-msgid "Quarantined %(hsh_path)s to %(quar_path)s because it is not a directory"
-msgstr ""
-"Em quarentena %(hsh_path)s para %(quar_path)s porque ele não é um diretório"
-
-#, python-format
-msgid ""
-"Quarantined %(object_path)s to %(quar_path)s because it is not a directory"
-msgstr ""
-"Em quarentena %(object_path)s para %(quar_path)s porque ele não é um "
-"diretório"
-
-#, python-format
msgid "Quarantining DB %s"
msgstr "Quarentenando BD %s"
@@ -663,14 +214,6 @@ msgid "Removed %(remove)d dbs"
msgstr "Dbs %(remove)d removido"
#, python-format
-msgid "Removing %s objects"
-msgstr "Removendo %s objetos"
-
-#, python-format
-msgid "Removing partition: %s"
-msgstr "Removendo partição: %s"
-
-#, python-format
msgid "Removing pid file %(pid_file)s with wrong pid %(pid)d"
msgstr "Removendo arquivo pid %(pid_file)s com pid errado %(pid)d"
@@ -690,55 +233,9 @@ msgid "Returning 497 because of blacklisting: %s"
msgstr "Retornando 497 por causa da listagem negra: %s"
#, python-format
-msgid ""
-"Returning 498 for %(meth)s to %(acc)s/%(cont)s/%(obj)s . Ratelimit (Max "
-"Sleep) %(e)s"
-msgstr ""
-"Retornando 498 para %(meth)s para %(acc)s/%(cont)s/%(obj)s. Limite de taxa "
-"(Suspensão Máxima) %(e)s"
-
-msgid "Ring change detected. Aborting current reconstruction pass."
-msgstr ""
-"Mudança no anel detectada. Interrompendo a aprovação da recosntrução atual."
-
-msgid "Ring change detected. Aborting current replication pass."
-msgstr ""
-"Alteração do anel detectada. Interrompendo a aprovação da replicação atual."
-
-#, python-format
msgid "Running %s once"
msgstr "Executando %s uma vez,"
-msgid "Running object reconstructor in script mode."
-msgstr "Executando o reconstrutor do objeto no modo de script."
-
-msgid "Running object replicator in script mode."
-msgstr "Executando replicador do objeto no modo de script."
-
-#, python-format
-msgid ""
-"Since %(time)s: %(sync)s synced [%(delete)s deletes, %(put)s puts], %(skip)s "
-"skipped, %(fail)s failed"
-msgstr ""
-"Desde %(time)s: %(sync)s sincronizados [%(delete)s exclui, %(put)s coloca], "
-"%(skip)s ignorados, %(fail)s com falha"
-
-#, python-format
-msgid ""
-"Since %(time)s: Account audits: %(passed)s passed audit,%(failed)s failed "
-"audit"
-msgstr ""
-"Desde %(time)s: Auditoria de contas: %(passed)s auditorias passaram,"
-"%(failed)s auditorias falharam"
-
-#, python-format
-msgid ""
-"Since %(time)s: Container audits: %(pass)s passed audit, %(fail)s failed "
-"audit"
-msgstr ""
-"Desde %(time)s: As auditorias do contêiner: %(pass)s de auditoria aprovada, "
-"%(fail)s com falha auditoria"
-
#, python-format
msgid "Skipping %s as it is not mounted"
msgstr "Pulando %s porque não está montado"
@@ -747,55 +244,9 @@ msgstr "Pulando %s porque não está montado"
msgid "Starting %s"
msgstr "Iniciando %s"
-msgid "Starting object reconstruction pass."
-msgstr "Iniciando a aprovação da reconstrução de objeto."
-
-msgid "Starting object reconstructor in daemon mode."
-msgstr "Iniciando o reconstrutor do objeto no modo daemon."
-
-msgid "Starting object replication pass."
-msgstr "Iniciando a aprovação da replicação de objeto."
-
-msgid "Starting object replicator in daemon mode."
-msgstr "Iniciando o replicador do objeto no modo daemon."
-
-#, python-format
-msgid "Successful rsync of %(src)s at %(dst)s (%(time).03f)"
-msgstr "Ressincronização bem-sucedida de %(src)s em %(dst)s (%(time).03f)"
-
msgid "The file type are forbidden to access!"
msgstr "O tipo de arquivo é de acesso proibido!"
-#, python-format
-msgid ""
-"The total %(key)s for the container (%(total)s) does not match the sum of "
-"%(key)s across policies (%(sum)s)"
-msgstr ""
-"O total %(key)s para o container (%(total)s) não confere com a soma %(key)s "
-"pelas politicas (%(sum)s)"
-
-#, python-format
-msgid "Trying to %(method)s %(path)s"
-msgstr "Tentando %(method)s %(path)s"
-
-#, python-format
-msgid "Trying to GET %(full_path)s"
-msgstr "Tentando GET %(full_path)s"
-
-msgid "Trying to read during GET"
-msgstr "Tentando ler durante GET"
-
-msgid "Trying to send to client"
-msgstr "Tentando enviar para o cliente"
-
-#, python-format
-msgid "Trying to sync suffixes with %s"
-msgstr "Tentando sincronizar sufixos com %s"
-
-#, python-format
-msgid "Trying to write to %s"
-msgstr "Tentando escrever para %s"
-
msgid "UNCAUGHT EXCEPTION"
msgstr "EXCEÇÃO NÃO CAPTURADA"
@@ -817,21 +268,6 @@ msgstr ""
msgid "Unable to read config from %s"
msgstr "Não é possível ler a configuração a partir de %s"
-#, python-format
-msgid "Unauth %(sync_from)r => %(sync_to)r"
-msgstr "Não autorizado %(sync_from)r => %(sync_to)r"
-
-msgid "Unhandled exception"
-msgstr "Exceção não-tratada"
-
-#, python-format
-msgid "Update report failed for %(container)s %(dbfile)s"
-msgstr "Atualize o relatório com falha para %(container)s %(dbfile)s"
-
-#, python-format
-msgid "Update report sent for %(container)s %(dbfile)s"
-msgstr "Atualize o relatório enviado para %(container)s %(dbfile)s"
-
msgid ""
"WARNING: SSL should only be enabled for testing purposes. Use external SSL "
"termination for a production deployment."
diff --git a/swift/locale/ru/LC_MESSAGES/swift.po b/swift/locale/ru/LC_MESSAGES/swift.po
index 0bc24b112..6cbbb779e 100644
--- a/swift/locale/ru/LC_MESSAGES/swift.po
+++ b/swift/locale/ru/LC_MESSAGES/swift.po
@@ -8,7 +8,7 @@ msgid ""
msgstr ""
"Project-Id-Version: swift VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2020-10-07 00:36+0000\n"
+"POT-Creation-Date: 2022-05-27 18:57+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
@@ -30,41 +30,10 @@ msgstr ""
"Завершение работы пользователя"
#, python-format
-msgid " - %s"
-msgstr " - %s"
-
-#, python-format
-msgid " - parallel, %s"
-msgstr " - параллельно, %s"
-
-#, python-format
-msgid ""
-"%(checked)d suffixes checked - %(hashed).2f%% hashed, %(synced).2f%% synced"
-msgstr ""
-"Проверено суффиксов: %(checked)d - хэшировано: %(hashed).2f%%, "
-"синхронизировано: %(synced).2f%%"
-
-#, python-format
-msgid "%(msg)s %(ip)s:%(port)s/%(device)s"
-msgstr "%(msg)s %(ip)s:%(port)s/%(device)s"
-
-#, python-format
-msgid ""
-"%(replicated)d/%(total)d (%(percentage).2f%%) partitions replicated in "
-"%(time).2fs (%(rate).2f/sec, %(remaining)s remaining)"
-msgstr ""
-"Реплицировано разделов: %(replicated)d/%(total)d (%(percentage).2f%%) за "
-"время %(time).2f с (%(rate).2f/с, осталось: %(remaining)s)"
-
-#, python-format
msgid "%(success)s successes, %(failure)s failures"
msgstr "%(success)s успешно, %(failure)s с ошибками"
#, python-format
-msgid "%(type)s returning 503 for %(statuses)s"
-msgstr "%(type)s возвратил 503 для %(statuses)s"
-
-#, python-format
msgid "%s already started..."
msgstr "%s уже запущен..."
@@ -73,65 +42,15 @@ msgid "%s does not exist"
msgstr "%s не существует"
#, python-format
-msgid "%s is not mounted"
-msgstr "%s не смонтирован"
-
-#, python-format
-msgid "%s responded as unmounted"
-msgstr "%s ответил как размонтированный"
-
-#, python-format
msgid "%s: Connection reset by peer"
msgstr "%s: соединение сброшено на другой стороне"
-msgid "Account"
-msgstr "Учетная запись"
-
-#, python-format
-msgid "Account audit \"once\" mode completed: %.02fs"
-msgstr "Проверка учетной записи в \"однократном\" режиме завершена: %.02fs"
-
-#, python-format
-msgid "Account audit pass completed: %.02fs"
-msgstr "Проход контроля учетной записи выполнен: %.02fs"
-
#, python-format
msgid ""
"Attempted to replicate %(count)d dbs in %(time).5f seconds (%(rate).5f/s)"
msgstr ""
"Попытка репликации %(count)d баз данных за %(time).5f секунд (%(rate).5f/s)"
-#, python-format
-msgid "Bad rsync return code: %(ret)d <- %(args)s"
-msgstr "Неправильный код возврата rsync: %(ret)d <- %(args)s"
-
-msgid "Begin account audit \"once\" mode"
-msgstr "Начать проверку учетной записи в \"однократном\" режиме"
-
-msgid "Begin account audit pass."
-msgstr "Начать проход проверки учетной записи."
-
-msgid "Begin container audit \"once\" mode"
-msgstr "Начать проверку контейнера в \"однократном\" режиме"
-
-msgid "Begin container audit pass."
-msgstr "Начать проход проверки контейнера."
-
-msgid "Begin container sync \"once\" mode"
-msgstr "Начать синхронизацию контейнера в \"однократном\" режиме"
-
-msgid "Begin container update single threaded sweep"
-msgstr "Начать однонитевую сплошную проверку обновлений контейнера"
-
-msgid "Begin container update sweep"
-msgstr "Начать сплошную проверку обновлений контейнера"
-
-msgid "Begin object update single threaded sweep"
-msgstr "Начать однонитевую сплошную проверку обновлений объекта"
-
-msgid "Begin object update sweep"
-msgstr "Начать сплошную проверку обновлений объекта"
-
msgid "Beginning replication run"
msgstr "Запуск репликации"
@@ -146,211 +65,27 @@ msgstr "Отсутствует доступ к файлу %s."
msgid "Can not load profile data from %s."
msgstr "Не удается загрузить данные профайла из %s."
-#, python-format
-msgid "Client did not read from proxy within %ss"
-msgstr "Клиент не прочитал данные из proxy в %ss"
-
-msgid "Client disconnected without sending enough data"
-msgstr "Клиент отключен без отправки данных"
-
-msgid "Client disconnected without sending last chunk"
-msgstr "Клиент отключился, не отправив последний фрагмент данных"
-
-#, python-format
-msgid ""
-"Client path %(client)s does not match path stored in object metadata %(meta)s"
-msgstr ""
-"Путь клиента %(client)s не соответствует пути в метаданных объекта %(meta)s"
-
-msgid ""
-"Configuration option internal_client_conf_path not defined. Using default "
-"configuration, See internal-client.conf-sample for options"
-msgstr ""
-"Опция internal_client_conf_path конфигурации не определена. Используется "
-"конфигурация по умолчанию. Используйте intenal-client.conf-sample для "
-"информации об опциях"
-
msgid "Connection refused"
msgstr "Соединение отклонено"
msgid "Connection timeout"
msgstr "Тайм-аут соединения"
-msgid "Container"
-msgstr "контейнер"
-
-#, python-format
-msgid "Container audit \"once\" mode completed: %.02fs"
-msgstr "Проверка контейнера в \"однократном\" режиме завершена: %.02fs"
-
-#, python-format
-msgid "Container audit pass completed: %.02fs"
-msgstr "Проход проверки контейнера завершен: %.02fs"
-
-#, python-format
-msgid "Container sync \"once\" mode completed: %.02fs"
-msgstr "Синхронизация контейнера в \"однократном\" режиме завершена: %.02fs"
-
-#, python-format
-msgid ""
-"Container update single threaded sweep completed: %(elapsed).02fs, "
-"%(success)s successes, %(fail)s failures, %(no_change)s with no changes"
-msgstr ""
-"Сплошная однонитевая проверка обновлений контейнера завершена: "
-"%(elapsed).02fs, успешно: %(success)s, сбоев: %(fail)s, без изменений: "
-"%(no_change)s"
-
-#, python-format
-msgid "Container update sweep completed: %.02fs"
-msgstr "Сплошная проверка обновлений контейнера завершена: %.02fs"
-
-#, python-format
-msgid ""
-"Container update sweep of %(path)s completed: %(elapsed).02fs, %(success)s "
-"successes, %(fail)s failures, %(no_change)s with no changes"
-msgstr ""
-"Сплошная проверка обновлений контейнера в %(path)s завершена: "
-"%(elapsed).02fs, успешно: %(success)s, сбоев: %(fail)s, без изменений: "
-"%(no_change)s"
-
#, python-format
msgid "Data download error: %s"
msgstr "Ошибка загрузки данных: %s"
-#, python-format
-msgid "ERROR %(db_file)s: %(validate_sync_to_err)s"
-msgstr "Ошибка %(db_file)s: %(validate_sync_to_err)s"
-
-#, python-format
-msgid "ERROR %(status)d %(body)s From %(type)s Server"
-msgstr "Ошибка %(status)d %(body)s из сервера %(type)s"
-
-#, python-format
-msgid "ERROR %(status)d %(body)s From Object Server re: %(path)s"
-msgstr "Ошибка %(status)d %(body)s, ответ от сервера объекта: %(path)s"
-
-#, python-format
-msgid "ERROR %(status)d Expect: 100-continue From Object Server"
-msgstr "Ошибка %(status)d. Ожидаемое значение от сервера объекта: 100-continue"
-
-#, python-format
-msgid ""
-"ERROR Account update failed with %(ip)s:%(port)s/%(device)s (will retry "
-"later): Response %(status)s %(reason)s"
-msgstr ""
-"Ошибка: обновление учетной записи не выполнено для %(ip)s:%(port)s/"
-"%(device)s (операция будет повторена позднее): Ответ: %(status)s %(reason)s"
-
-#, python-format
-msgid "ERROR Client read timeout (%ss)"
-msgstr "Ошибка: тайм-аут чтения клиента (%ss)"
-
-#, python-format
-msgid ""
-"ERROR Container update failed (saving for async update later): %(status)d "
-"response from %(ip)s:%(port)s/%(dev)s"
-msgstr ""
-"Ошибка. Обновление контейнера не выполнено (сохранение асинхронных "
-"обновлений будет выполнено позднее): %(status)d ответ от %(ip)s:%(port)s/"
-"%(dev)s"
-
-#, python-format
-msgid "ERROR Could not get account info %s"
-msgstr "Ошибка: не удалось получить сведения об учетной записи %s"
-
-#, python-format
-msgid "ERROR Could not get container info %s"
-msgstr "Ошибка: не удалось получить информацию о контейнере %s"
-
-#, python-format
-msgid "ERROR DiskFile %(data_file)s close failure: %(exc)s : %(stack)s"
-msgstr "Ошибка: ошибка закрытия DiskFile %(data_file)s: %(exc)s : %(stack)s"
-
-msgid "ERROR Exception causing client disconnect"
-msgstr "Ошибка. Исключительная ситуация при отключении клиента"
-
-#, python-format
-msgid "ERROR Exception transferring data to object servers %s"
-msgstr ""
-"ОШИБКА. Исключительная ситуация при передаче данных на серверы объектов %s"
-
msgid "ERROR Failed to get my own IPs?"
msgstr "Ошибка: не удалось получить собственные IP-адреса?"
-msgid "ERROR Insufficient Storage"
-msgstr "Ошибка - недостаточно памяти"
-
-#, python-format
-msgid "ERROR Object %(obj)s failed audit and was quarantined: %(err)s"
-msgstr ""
-"Ошибка: контроль объекта %(obj)s не выполнен, объект помещен в карантин: "
-"%(err)s"
-
-#, python-format
-msgid "ERROR Pickle problem, quarantining %s"
-msgstr "Ошибка Pickle, %s помещается в карантин"
-
#, python-format
msgid "ERROR Remote drive not mounted %s"
msgstr "Ошибка: удаленный накопитель не смонтирован %s"
-#, python-format
-msgid "ERROR Syncing %(db_file)s %(row)s"
-msgstr "Ошибка синхронизации %(db_file)s %(row)s"
-
-#, python-format
-msgid "ERROR Syncing %s"
-msgstr "Ошибка синхронизации %s"
-
-#, python-format
-msgid "ERROR Trying to audit %s"
-msgstr "Ошибка при попытке контроля %s"
-
-msgid "ERROR Unhandled exception in request"
-msgstr "Ошибка. Необрабатываемая исключительная ситуация в запросе"
-
-#, python-format
-msgid "ERROR __call__ error with %(method)s %(path)s "
-msgstr "Ошибка: ошибка __call__ в %(method)s %(path)s "
-
-#, python-format
-msgid ""
-"ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry "
-"later)"
-msgstr ""
-"Ошибка: обновление учетной записи не выполнено для %(ip)s:%(port)s/"
-"%(device)s (операция будет повторена позднее)"
-
-#, python-format
-msgid ""
-"ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry "
-"later): "
-msgstr ""
-"Ошибка: обновление учетной записи не выполнено для %(ip)s:%(port)s/"
-"%(device)s (операция будет повторена позднее): "
-
-#, python-format
-msgid "ERROR async pending file with unexpected name %s"
-msgstr ""
-"Ошибка выполнения асинхронной передачи ожидающего файла с непредвиденным "
-"именем %s"
-
msgid "ERROR auditing"
msgstr "ОШИБКА контроля"
#, python-format
-msgid "ERROR auditing: %s"
-msgstr "Ошибка контроля: %s"
-
-#, python-format
-msgid ""
-"ERROR container update failed with %(ip)s:%(port)s/%(dev)s (saving for async "
-"update later)"
-msgstr ""
-"Ошибка. Обновление контейнера не выполнена с %(ip)s:%(port)s/%(dev)s "
-"(сохранение асинхронного обновления будет выполнено позднее)"
-
-#, python-format
msgid "ERROR reading HTTP response from %s"
msgstr "Ошибка чтения ответа HTTP из %s"
@@ -369,38 +104,6 @@ msgstr "Ошибка синхронизации %(file)s с узлом %(node)s"
msgid "ERROR trying to replicate"
msgstr "Ошибка при попытке репликации"
-#, python-format
-msgid "ERROR while trying to clean up %s"
-msgstr "Ошибка при попытке очистки %s"
-
-#, python-format
-msgid "ERROR with %(type)s server %(ip)s:%(port)s/%(device)s re: %(info)s"
-msgstr ""
-"Ошибка с сервером %(type)s %(ip)s:%(port)s/%(device)s, возврат: %(info)s"
-
-#, python-format
-msgid "ERROR with loading suppressions from %s: "
-msgstr "Ошибка при загрузки скрытых объектов из %s: "
-
-#, python-format
-msgid "ERROR with remote server %(ip)s:%(port)s/%(device)s"
-msgstr "Ошибка с удаленным сервером %(ip)s:%(port)s/%(device)s"
-
-#, python-format
-msgid "ERROR: Failed to get paths to drive partitions: %s"
-msgstr "Ошибка: не удалось получить пути к разделам накопителей: %s"
-
-#, python-format
-msgid "ERROR: Unable to access %(path)s: %(error)s"
-msgstr "Ошибка: не удалось получить доступ к %(path)s: %(error)s"
-
-#, python-format
-msgid "ERROR: Unable to run auditing: %s"
-msgstr "Ошибка: не удалось запустить процесс контроля: %s"
-
-msgid "Error hashing suffix"
-msgstr "Ошибка хэширования суффикса"
-
msgid "Error listing devices"
msgstr "Ошибка при выводе списка устройств"
@@ -423,22 +126,6 @@ msgstr "Ошибка чтения swift.conf"
msgid "Error retrieving recon data"
msgstr "Ошибка при получении данных recon"
-msgid "Error syncing handoff partition"
-msgstr "Ошибка при синхронизации раздела передачи управления"
-
-msgid "Error syncing partition"
-msgstr "Ошибка синхронизации раздела"
-
-#, python-format
-msgid "Error syncing with node: %s"
-msgstr "Ошибка синхронизации с узлом %s"
-
-#, python-format
-msgid "Error trying to rebuild %(path)s policy#%(policy)d frag#%(frag_index)s"
-msgstr ""
-"Ошибка при попытке перекомпоновки стратегии %(path)s: номер#%(policy)d "
-"фрагмент#%(frag_index)s"
-
msgid "Error: An error occurred"
msgstr "Ошибка: произошла ошибка"
@@ -450,27 +137,12 @@ msgid "Error: unable to locate %s"
msgstr "Ошибка: не удалось найти %s"
#, python-format
-msgid "Exception with %(ip)s:%(port)s/%(device)s"
-msgstr "Исключительная ситуация в %(ip)s:%(port)s/%(device)s"
-
-#, python-format
-msgid "Expect: 100-continue on %s"
-msgstr "Ожидаемое значение: 100-continue в %s"
-
-#, python-format
msgid "Following CNAME chain for %(given_domain)s to %(found_domain)s"
msgstr "Следующая цепочка CNAME для %(given_domain)s в %(found_domain)s"
msgid "Found configs:"
msgstr "Обнаружены конфигурации:"
-msgid ""
-"Handoffs first mode still has handoffs remaining. Aborting current "
-"replication pass."
-msgstr ""
-"В режиме передачи управления не все операции завершены. Принудительное "
-"завершение текущего прохода репликации."
-
msgid "Host unreachable"
msgstr "Хост недоступен"
@@ -487,14 +159,6 @@ msgid "Invalid pending entry %(file)s: %(entry)s"
msgstr "Недопустимая ожидающая запись %(file)s: %(entry)s"
#, python-format
-msgid "Invalid response %(resp)s from %(full_path)s"
-msgstr "Недопустимый ответ %(resp)s от %(full_path)s"
-
-#, python-format
-msgid "Invalid response %(resp)s from %(ip)s"
-msgstr "Недопустимый ответ %(resp)s от %(ip)s"
-
-#, python-format
msgid ""
"Invalid scheme %r in X-Container-Sync-To, must be \"//\", \"http\", or "
"\"https\"."
@@ -503,13 +167,6 @@ msgstr ""
"\"http\" или \"https\"."
#, python-format
-msgid "Killing long-running rsync: %s"
-msgstr "Принудительное завершение долго выполняющегося rsync: %s"
-
-msgid "Lockup detected.. killing live coros."
-msgstr "Обнаружена блокировка.. принудительное завершение работающих модулей."
-
-#, python-format
msgid "Mapped %(given_domain)s to %(found_domain)s"
msgstr "Преобразовано %(given_domain)s в %(found_domain)s"
@@ -529,131 +186,17 @@ msgstr "Не найдено стратегии с индексом %s"
msgid "No realm key for %r"
msgstr "Отсутствует ключ области для %r"
-#, python-format
-msgid "Node error limited %(ip)s:%(port)s (%(device)s)"
-msgstr "Ограниченная ошибка узла %(ip)s:%(port)s (%(device)s)"
-
-#, python-format
-msgid "Not enough object servers ack'ed (got %d)"
-msgstr "Недостаточное число подтверждений с серверов объектов (получено %d)"
-
-#, python-format
-msgid ""
-"Not found %(sync_from)r => %(sync_to)r - object "
-"%(obj_name)r"
-msgstr ""
-"Не найдено: %(sync_from)r => %(sync_to)r - объект "
-"%(obj_name)r"
-
-#, python-format
-msgid "Nothing reconstructed for %s seconds."
-msgstr "Ничего не реконструировано за %s с."
-
-#, python-format
-msgid "Nothing replicated for %s seconds."
-msgstr "Ничего не реплицировано за %s с."
-
-msgid "Object"
-msgstr "Объект"
-
-msgid "Object PUT"
-msgstr "Функция PUT объекта"
-
-#, python-format
-msgid "Object PUT returning 202 for 409: %(req_timestamp)s <= %(timestamps)r"
-msgstr ""
-"Функция PUT объекта возвратила 202 для 409: %(req_timestamp)s <= "
-"%(timestamps)r"
-
-#, python-format
-msgid "Object PUT returning 412, %(statuses)r"
-msgstr "Функция PUT объекта возвратила 412, %(statuses)r"
-
-#, python-format
-msgid ""
-"Object audit (%(type)s) \"%(mode)s\" mode completed: %(elapsed).02fs. Total "
-"quarantined: %(quars)d, Total errors: %(errors)d, Total files/sec: "
-"%(frate).2f, Total bytes/sec: %(brate).2f, Auditing time: %(audit).2f, Rate: "
-"%(audit_rate).2f"
-msgstr ""
-"Контроль объекта (%(type)s) в режиме \"%(mode)s\" завершен: %(elapsed).02fs. "
-"Всего в карантине: %(quars)d, всего ошибок: %(errors)d, всего файлов/с: "
-"%(frate).2f, всего байт/с: %(brate).2f, время контроля: %(audit).2f, "
-"скорость: %(audit_rate).2f"
-
-#, python-format
-msgid ""
-"Object audit (%(type)s). Since %(start_time)s: Locally: %(passes)d passed, "
-"%(quars)d quarantined, %(errors)d errors, files/sec: %(frate).2f, bytes/sec: "
-"%(brate).2f, Total time: %(total).2f, Auditing time: %(audit).2f, Rate: "
-"%(audit_rate).2f"
-msgstr ""
-"Проверка объекта (%(type)s). После %(start_time)s: локально: успешно - "
-"%(passes)d, в карантине - %(quars)d, файлов с ошибками %(errors)d в секунду: "
-"%(frate).2f , байт/с: %(brate).2f, общее время: %(total).2f, время контроля: "
-"%(audit).2f, скорость: %(audit_rate).2f"
-
-#, python-format
-msgid "Object audit stats: %s"
-msgstr "Состояние контроля объекта: %s"
-
-#, python-format
-msgid "Object reconstruction complete (once). (%.02f minutes)"
-msgstr "Реконструкция объекта выполнена (однократно). (%.02f мин.)"
-
-#, python-format
-msgid "Object reconstruction complete. (%.02f minutes)"
-msgstr "Реконструкция объекта выполнена. (%.02f мин.)"
-
-#, python-format
-msgid "Object replication complete (once). (%.02f minutes)"
-msgstr "Репликация объекта выполнена (однократно). (%.02f мин.)"
-
-#, python-format
-msgid "Object replication complete. (%.02f minutes)"
-msgstr "Репликация объекта выполнена. (%.02f мин.)"
-
-#, python-format
-msgid "Object servers returned %s mismatched etags"
-msgstr "Серверы объектов вернули несоответствующие etag: %s"
-
-#, python-format
-msgid "Object update sweep completed: %.02fs"
-msgstr "Сплошная проверка обновлений объекта завершена: %.02fs"
-
msgid "Params, queries, and fragments not allowed in X-Container-Sync-To"
msgstr "В X-Container-Sync-To не разрешены параметры, запросы и фрагменты"
-#, python-format
-msgid "Partition times: max %(max).4fs, min %(min).4fs, med %(med).4fs"
-msgstr ""
-"Время раздела: максимум: %(max).4fs, минимум: %(min).4fs, среднее: %(med).4fs"
-
msgid "Path required in X-Container-Sync-To"
msgstr "Требуется путь в X-Container-Sync-To"
#, python-format
-msgid "Problem cleaning up %s"
-msgstr "Неполадка при очистке %s"
-
-#, python-format
msgid "Profiling Error: %s"
msgstr "Ошибка профилирования: %s"
#, python-format
-msgid "Quarantined %(hsh_path)s to %(quar_path)s because it is not a directory"
-msgstr ""
-"%(hsh_path)s помещен в карантин в %(quar_path)s, так как не является "
-"каталогом"
-
-#, python-format
-msgid ""
-"Quarantined %(object_path)s to %(quar_path)s because it is not a directory"
-msgstr ""
-"%(object_path)s помещен в карантин в %(quar_path)s, так как не является "
-"каталогом"
-
-#, python-format
msgid "Quarantining DB %s"
msgstr "БД %s помещена в карантин"
@@ -668,14 +211,6 @@ msgid "Removed %(remove)d dbs"
msgstr "Удалено баз данных: %(remove)d"
#, python-format
-msgid "Removing %s objects"
-msgstr "Удаление объектов %s"
-
-#, python-format
-msgid "Removing partition: %s"
-msgstr "Удаление раздела: %s"
-
-#, python-format
msgid "Removing pid file %(pid_file)s with wrong pid %(pid)d"
msgstr "Удаление файла pid %(pid_file)s с ошибочным pid %(pid)d"
@@ -695,57 +230,9 @@ msgid "Returning 497 because of blacklisting: %s"
msgstr "Возвращено 497 из-за черного списка: %s"
#, python-format
-msgid ""
-"Returning 498 for %(meth)s to %(acc)s/%(cont)s/%(obj)s . Ratelimit (Max "
-"Sleep) %(e)s"
-msgstr ""
-"Возвращено 498 для %(meth)s в %(acc)s/%(cont)s/%(obj)s . Ratelimit "
-"(максимальная задержка): %(e)s"
-
-msgid "Ring change detected. Aborting current reconstruction pass."
-msgstr ""
-"Обнаружено изменение кольца. Принудительное завершение текущего прохода "
-"реконструкции."
-
-msgid "Ring change detected. Aborting current replication pass."
-msgstr ""
-"Обнаружено кольцевое изменение. Принудительное завершение текущего прохода "
-"репликации."
-
-#, python-format
msgid "Running %s once"
msgstr "Однократное выполнение %s"
-msgid "Running object reconstructor in script mode."
-msgstr "Запуск утилиты реконструкции объектов в режиме скрипта."
-
-msgid "Running object replicator in script mode."
-msgstr "Запуск утилиты репликации объектов в режиме сценариев."
-
-#, python-format
-msgid ""
-"Since %(time)s: %(sync)s synced [%(delete)s deletes, %(put)s puts], %(skip)s "
-"skipped, %(fail)s failed"
-msgstr ""
-"За %(time)s операций синхронизировано %(sync)s [удалено: %(delete)s, "
-"добавлено: %(put)s], пропущено: %(skip)s, ошибки: %(fail)s"
-
-#, python-format
-msgid ""
-"Since %(time)s: Account audits: %(passed)s passed audit,%(failed)s failed "
-"audit"
-msgstr ""
-"Выполнено проверок учетной записи: %(time)s, из них успешно: %(passed)s, с "
-"ошибками: %(failed)s "
-
-#, python-format
-msgid ""
-"Since %(time)s: Container audits: %(pass)s passed audit, %(fail)s failed "
-"audit"
-msgstr ""
-"Выполнено проверок контейнера: %(time)s, из них успешно: %(pass)s, с "
-"ошибками: %(fail)s "
-
#, python-format
msgid "Skipping %s as it is not mounted"
msgstr "%s будет пропущен, так как он не смонтирован"
@@ -754,55 +241,9 @@ msgstr "%s будет пропущен, так как он не смонтиро
msgid "Starting %s"
msgstr "Запуск %s"
-msgid "Starting object reconstruction pass."
-msgstr "Запуск прохода реконструкции объектов."
-
-msgid "Starting object reconstructor in daemon mode."
-msgstr "Запуск утилиты реконструкции объектов в режиме демона."
-
-msgid "Starting object replication pass."
-msgstr "Запуск прохода репликации объектов."
-
-msgid "Starting object replicator in daemon mode."
-msgstr "Запуск утилиты репликации объектов в режиме демона."
-
-#, python-format
-msgid "Successful rsync of %(src)s at %(dst)s (%(time).03f)"
-msgstr "Успешное выполнение rsync для %(src)s на %(dst)s (%(time).03f)"
-
msgid "The file type are forbidden to access!"
msgstr "Запрещен доступ к этому типу файла!"
-#, python-format
-msgid ""
-"The total %(key)s for the container (%(total)s) does not match the sum of "
-"%(key)s across policies (%(sum)s)"
-msgstr ""
-"Общее число %(key)s для контейнера (%(total)s) не соответствует сумме "
-"%(key)s в стратегиях (%(sum)s)"
-
-#, python-format
-msgid "Trying to %(method)s %(path)s"
-msgstr "Попытка выполнения метода %(method)s %(path)s"
-
-#, python-format
-msgid "Trying to GET %(full_path)s"
-msgstr "Попытка GET-запроса %(full_path)s"
-
-msgid "Trying to read during GET"
-msgstr "Попытка чтения во время операции GET"
-
-msgid "Trying to send to client"
-msgstr "Попытка отправки клиенту"
-
-#, python-format
-msgid "Trying to sync suffixes with %s"
-msgstr "Попытка синхронизации суффиксов с %s"
-
-#, python-format
-msgid "Trying to write to %s"
-msgstr "Попытка записи в %s"
-
msgid "UNCAUGHT EXCEPTION"
msgstr "Необрабатываемая исключительная ситуация"
@@ -823,21 +264,6 @@ msgstr ""
msgid "Unable to read config from %s"
msgstr "Не удалось прочитать конфигурацию из %s"
-#, python-format
-msgid "Unauth %(sync_from)r => %(sync_to)r"
-msgstr "Синхронизация %(sync_from)r => %(sync_to)r без прав доступа"
-
-msgid "Unhandled exception"
-msgstr "Необработанная исключительная ситуация"
-
-#, python-format
-msgid "Update report failed for %(container)s %(dbfile)s"
-msgstr "Отчет об обновлении для %(container)s %(dbfile)s не выполнен"
-
-#, python-format
-msgid "Update report sent for %(container)s %(dbfile)s"
-msgstr "Отчет об обновлении отправлен для %(container)s %(dbfile)s"
-
msgid ""
"WARNING: SSL should only be enabled for testing purposes. Use external SSL "
"termination for a production deployment."
diff --git a/swift/locale/tr_TR/LC_MESSAGES/swift.po b/swift/locale/tr_TR/LC_MESSAGES/swift.po
index 625408b77..23648e2e8 100644
--- a/swift/locale/tr_TR/LC_MESSAGES/swift.po
+++ b/swift/locale/tr_TR/LC_MESSAGES/swift.po
@@ -9,7 +9,7 @@ msgid ""
msgstr ""
"Project-Id-Version: swift VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2020-10-07 00:36+0000\n"
+"POT-Creation-Date: 2022-05-27 18:57+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
@@ -29,41 +29,10 @@ msgstr ""
"kullanıcı çıktı"
#, python-format
-msgid " - %s"
-msgstr " - %s"
-
-#, python-format
-msgid " - parallel, %s"
-msgstr " - paralel, %s"
-
-#, python-format
-msgid ""
-"%(checked)d suffixes checked - %(hashed).2f%% hashed, %(synced).2f%% synced"
-msgstr ""
-"%(checked)d sonek kontrol edildi - %(hashed).2f%% özetlenen, %(synced).2f%% "
-"eşzamanlanan"
-
-#, python-format
-msgid "%(msg)s %(ip)s:%(port)s/%(device)s"
-msgstr "%(msg)s %(ip)s:%(port)s/%(device)s"
-
-#, python-format
-msgid ""
-"%(replicated)d/%(total)d (%(percentage).2f%%) partitions replicated in "
-"%(time).2fs (%(rate).2f/sec, %(remaining)s remaining)"
-msgstr ""
-"%(replicated)d/%(total)d (%(percentage).2f%%) bölüm %(time).2fs (%(rate).2f/"
-"sn, %(remaining)s kalan) içinde çoğaltıldı"
-
-#, python-format
msgid "%(success)s successes, %(failure)s failures"
msgstr "%(success)s başarı, %(failure)s başarısızlık"
#, python-format
-msgid "%(type)s returning 503 for %(statuses)s"
-msgstr "%(type)s %(statuses)s için 503 döndürüyor"
-
-#, python-format
msgid "%s already started..."
msgstr "%s zaten başlatıldı..."
@@ -72,64 +41,14 @@ msgid "%s does not exist"
msgstr "%s mevcut değil"
#, python-format
-msgid "%s is not mounted"
-msgstr "%s bağlı değil"
-
-#, python-format
-msgid "%s responded as unmounted"
-msgstr "%s bağlı değil olarak yanıt verdi"
-
-#, python-format
msgid "%s: Connection reset by peer"
msgstr "%s: Bağlantı eş tarafından sıfırlandı"
-msgid "Account"
-msgstr "Hesap"
-
-#, python-format
-msgid "Account audit \"once\" mode completed: %.02fs"
-msgstr "Hesap denetimi \"bir kere\" kipi tamamlandı: %.02fs"
-
-#, python-format
-msgid "Account audit pass completed: %.02fs"
-msgstr "Hesap denetimi geçişi tamamlandı: %.02fs"
-
#, python-format
msgid ""
"Attempted to replicate %(count)d dbs in %(time).5f seconds (%(rate).5f/s)"
msgstr "%(count)d db %(time).5f saniyede çoğaltılmaya çalışıldı (%(rate).5f/s)"
-#, python-format
-msgid "Bad rsync return code: %(ret)d <- %(args)s"
-msgstr "Kötü rsync dönüş kodu: %(ret)d <- %(args)s"
-
-msgid "Begin account audit \"once\" mode"
-msgstr "Hesap denetimi \"bir kere\" kipini başlat"
-
-msgid "Begin account audit pass."
-msgstr "Hesap denetimi başlatma geçildi."
-
-msgid "Begin container audit \"once\" mode"
-msgstr "Kap denetimine \"bir kere\" kipinde başla"
-
-msgid "Begin container audit pass."
-msgstr "Kap denetimi geçişini başlat."
-
-msgid "Begin container sync \"once\" mode"
-msgstr "Kap eşzamanlamayı \"bir kere\" kipinde başlat"
-
-msgid "Begin container update single threaded sweep"
-msgstr "Kap güncelleme tek iş iplikli süpürmeye başla"
-
-msgid "Begin container update sweep"
-msgstr "Kap güncelleme süpürmesine başla"
-
-msgid "Begin object update single threaded sweep"
-msgstr "Nesne güncelleme tek iş iplikli süpürmeye başla"
-
-msgid "Begin object update sweep"
-msgstr "Nesne güncelleme süpürmesine başla"
-
msgid "Beginning replication run"
msgstr "Çoğaltmanın çalıştırılmasına başlanıyor"
@@ -144,198 +63,27 @@ msgstr "%s dosyasına erişilemiyor."
msgid "Can not load profile data from %s."
msgstr "%s'den profil verisi yüklenemiyor."
-#, python-format
-msgid "Client did not read from proxy within %ss"
-msgstr "İstemci %ss içinde vekilden okumadı"
-
-msgid "Client disconnected without sending enough data"
-msgstr "İstemci yeterli veri göndermeden bağlantıyı kesti"
-
-#, python-format
-msgid ""
-"Client path %(client)s does not match path stored in object metadata %(meta)s"
-msgstr ""
-"İstemci yolu %(client)s nesne metadata'sında kayıtlı yol ile eşleşmiyor "
-"%(meta)s"
-
-msgid ""
-"Configuration option internal_client_conf_path not defined. Using default "
-"configuration, See internal-client.conf-sample for options"
-msgstr ""
-"Yapılandırma seçeneği internal_client_conf_path belirtilmemiş. Varsayılan "
-"yapılandırma kullanılıyor, seçenekleri çin internal-client.conf-sample'a "
-"bakın"
-
msgid "Connection refused"
msgstr "Bağlantı reddedildi"
msgid "Connection timeout"
msgstr "Bağlantı zaman aşımına uğradı"
-msgid "Container"
-msgstr "Kap"
-
-#, python-format
-msgid "Container audit \"once\" mode completed: %.02fs"
-msgstr "Kap denetimi \"bir kere\" kipinde tamamlandı: %.02fs"
-
-#, python-format
-msgid "Container audit pass completed: %.02fs"
-msgstr "Kap denetim geçişi tamamlandı: %.02fs"
-
-#, python-format
-msgid "Container sync \"once\" mode completed: %.02fs"
-msgstr "Kap eşzamanlama \"bir kere\" kipinde tamamlandı: %.02fs"
-
-#, python-format
-msgid ""
-"Container update single threaded sweep completed: %(elapsed).02fs, "
-"%(success)s successes, %(fail)s failures, %(no_change)s with no changes"
-msgstr ""
-"Kap güncelleme tek iş iplikli süpürme tamamlandı: %(elapsed).02fs, "
-"%(success)s başarılı, %(fail)s başarısız, %(no_change)s değişiklik yok"
-
-#, python-format
-msgid "Container update sweep completed: %.02fs"
-msgstr "Kap güncelleme süpürme tamamlandı: %.02fs"
-
-#, python-format
-msgid ""
-"Container update sweep of %(path)s completed: %(elapsed).02fs, %(success)s "
-"successes, %(fail)s failures, %(no_change)s with no changes"
-msgstr ""
-"%(path)s in kap güncelleme süpürmesi tamamlandı: %(elapsed).02fs, "
-"%(success)s başarılı, %(fail)s başarısız, %(no_change)s değişiklik yok"
-
#, python-format
msgid "Data download error: %s"
msgstr "Veri indirme hatası: %s"
-#, python-format
-msgid "ERROR %(db_file)s: %(validate_sync_to_err)s"
-msgstr "HATA %(db_file)s: %(validate_sync_to_err)s"
-
-#, python-format
-msgid "ERROR %(status)d %(body)s From %(type)s Server"
-msgstr "HATA %(status)d %(body)s %(type)s Sunucudan"
-
-#, python-format
-msgid "ERROR %(status)d %(body)s From Object Server re: %(path)s"
-msgstr "HATA %(status)d %(body)s Nesne Sunucu re'den: %(path)s"
-
-#, python-format
-msgid "ERROR %(status)d Expect: 100-continue From Object Server"
-msgstr "HATA %(status)d Beklenen: 100-Nesne Sunucusundan devam et"
-
-#, python-format
-msgid ""
-"ERROR Account update failed with %(ip)s:%(port)s/%(device)s (will retry "
-"later): Response %(status)s %(reason)s"
-msgstr ""
-"HATA %(ip)s:%(port)s/%(device)s ile hesap güncelleme başarısız (sonra tekrar "
-"denenecek): Yanıt %(status)s %(reason)s"
-
-#, python-format
-msgid "ERROR Client read timeout (%ss)"
-msgstr "HATA İstemci okuma zaman aşımına uğradı (%ss)"
-
-#, python-format
-msgid ""
-"ERROR Container update failed (saving for async update later): %(status)d "
-"response from %(ip)s:%(port)s/%(dev)s"
-msgstr ""
-"HATA Kap güncelleme başarısız (daha sonraki async güncellemesi için "
-"kaydediliyor): %(ip)s:%(port)s/%(dev)s den %(status)d yanıtı"
-
-#, python-format
-msgid "ERROR Could not get account info %s"
-msgstr "HATA hesap bilgisi %s alınamadı"
-
-#, python-format
-msgid "ERROR Could not get container info %s"
-msgstr "HATA %s kap bilgisi alınamadı"
-
-#, python-format
-msgid "ERROR DiskFile %(data_file)s close failure: %(exc)s : %(stack)s"
-msgstr "HATA %(data_file)s disk dosyası kapatma başarısız: %(exc)s : %(stack)s"
-
-msgid "ERROR Exception causing client disconnect"
-msgstr "HATA İstisna istemci bağlantısının kesilmesine neden oluyor"
-
msgid "ERROR Failed to get my own IPs?"
msgstr "Kendi IP'lerimi alırken HATA?"
-msgid "ERROR Insufficient Storage"
-msgstr "HATA Yetersiz Depolama"
-
-#, python-format
-msgid "ERROR Object %(obj)s failed audit and was quarantined: %(err)s"
-msgstr ""
-"HATA Nesne %(obj)s denetimde başarısız oldu ve karantinaya alındı: %(err)s"
-
-#, python-format
-msgid "ERROR Pickle problem, quarantining %s"
-msgstr "HATA Picke problemi, %s karantinaya alınıyor"
-
#, python-format
msgid "ERROR Remote drive not mounted %s"
msgstr "HATA Uzak sürücü bağlı değil %s"
-#, python-format
-msgid "ERROR Syncing %(db_file)s %(row)s"
-msgstr "HATA %(db_file)s %(row)s eşzamanlamada"
-
-#, python-format
-msgid "ERROR Syncing %s"
-msgstr "HATA %s Eşzamanlama"
-
-#, python-format
-msgid "ERROR Trying to audit %s"
-msgstr "HATA %s denetimi denemesinde"
-
-msgid "ERROR Unhandled exception in request"
-msgstr "HATA İstekte ele alınmayan istisna var"
-
-#, python-format
-msgid "ERROR __call__ error with %(method)s %(path)s "
-msgstr "ERROR __call__ hatası %(method)s %(path)s "
-
-#, python-format
-msgid ""
-"ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry "
-"later)"
-msgstr ""
-"HATA %(ip)s:%(port)s/%(device)s ile hesap güncelleme başarısız (sonra "
-"yeniden denenecek)"
-
-#, python-format
-msgid ""
-"ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry "
-"later): "
-msgstr ""
-"HATA hesap güncelleme başarısız %(ip)s:%(port)s/%(device)s (sonra tekrar "
-"denenecek):"
-
-#, python-format
-msgid "ERROR async pending file with unexpected name %s"
-msgstr "HATA beklenmeyen isimli async bekleyen dosya %s"
-
msgid "ERROR auditing"
msgstr "denetlemede HATA"
#, python-format
-msgid "ERROR auditing: %s"
-msgstr "HATA denetim: %s"
-
-#, python-format
-msgid ""
-"ERROR container update failed with %(ip)s:%(port)s/%(dev)s (saving for async "
-"update later)"
-msgstr ""
-"HATA kap güncelleme %(ip)s:%(port)s/%(dev)s ile başarısız oldu (sonraki "
-"async güncellemesi için kaydediliyor)"
-
-#, python-format
msgid "ERROR reading HTTP response from %s"
msgstr "%s'den HTTP yanıtı okumada HATA"
@@ -354,37 +102,6 @@ msgstr "%(node)s düğümlü %(file)s eş zamanlamada HATA"
msgid "ERROR trying to replicate"
msgstr "Çoğaltmaya çalışmada HATA"
-#, python-format
-msgid "ERROR while trying to clean up %s"
-msgstr "%s temizlenmeye çalışırken HATA"
-
-#, python-format
-msgid "ERROR with %(type)s server %(ip)s:%(port)s/%(device)s re: %(info)s"
-msgstr "HATA %(type)s sunucusu %(ip)s:%(port)s/%(device)s re: %(info)s"
-
-#, python-format
-msgid "ERROR with loading suppressions from %s: "
-msgstr "HATA %s den baskılamaların yüklenmesinde: "
-
-#, python-format
-msgid "ERROR with remote server %(ip)s:%(port)s/%(device)s"
-msgstr "HATA uzuk sunucuda %(ip)s:%(port)s/%(device)s"
-
-#, python-format
-msgid "ERROR: Failed to get paths to drive partitions: %s"
-msgstr "HATA: Sürücü bölümlerine olan yollar alınamadı: %s"
-
-#, python-format
-msgid "ERROR: Unable to access %(path)s: %(error)s"
-msgstr "HATA: %(path)s e erişilemiyor: %(error)s"
-
-#, python-format
-msgid "ERROR: Unable to run auditing: %s"
-msgstr "HATA: Denetim çalıştırılamıyor: %s"
-
-msgid "Error hashing suffix"
-msgstr "Sonek özetini çıkarmada hata"
-
msgid "Error listing devices"
msgstr "Aygıtları listelemede hata"
@@ -407,21 +124,6 @@ msgstr "swift.conf okunurken hata"
msgid "Error retrieving recon data"
msgstr "Recon verisini almada hata"
-msgid "Error syncing handoff partition"
-msgstr "Devir bölümünü eş zamanlamada hata"
-
-msgid "Error syncing partition"
-msgstr "Bölüm eşzamanlamada hata"
-
-#, python-format
-msgid "Error syncing with node: %s"
-msgstr "Düğüm ile eş zamanlamada hata: %s"
-
-#, python-format
-msgid "Error trying to rebuild %(path)s policy#%(policy)d frag#%(frag_index)s"
-msgstr ""
-"Yeniden inşa denenirken hata %(path)s policy#%(policy)d frag#%(frag_index)s"
-
msgid "Error: An error occurred"
msgstr "Hata: Bir hata oluştu"
@@ -433,14 +135,6 @@ msgid "Error: unable to locate %s"
msgstr "Hata: %s bulunamıyor"
#, python-format
-msgid "Exception with %(ip)s:%(port)s/%(device)s"
-msgstr "%(ip)s:%(port)s/%(device)s ile istisna"
-
-#, python-format
-msgid "Expect: 100-continue on %s"
-msgstr "Beklenen: 100-%s üzerinden devam et"
-
-#, python-format
msgid "Following CNAME chain for %(given_domain)s to %(found_domain)s"
msgstr "%(given_domain)s den %(found_domain)s e CNAME zinciri takip ediliyor"
@@ -463,14 +157,6 @@ msgid "Invalid pending entry %(file)s: %(entry)s"
msgstr "Geçersiz bekleyen girdi %(file)s: %(entry)s"
#, python-format
-msgid "Invalid response %(resp)s from %(full_path)s"
-msgstr "%(full_path)s den geçersiz yanıt %(resp)s"
-
-#, python-format
-msgid "Invalid response %(resp)s from %(ip)s"
-msgstr "%(ip)s den geçersiz yanıt %(resp)s"
-
-#, python-format
msgid ""
"Invalid scheme %r in X-Container-Sync-To, must be \"//\", \"http\", or "
"\"https\"."
@@ -479,13 +165,6 @@ msgstr ""
"olmalı."
#, python-format
-msgid "Killing long-running rsync: %s"
-msgstr "Uzun süre çalışan rsync öldürülüyor: %s"
-
-msgid "Lockup detected.. killing live coros."
-msgstr "Kilitleme algılandı.. canlı co-rutinler öldürülüyor."
-
-#, python-format
msgid "Mapped %(given_domain)s to %(found_domain)s"
msgstr "%(given_domain)s %(found_domain)s eşleştirildi"
@@ -505,113 +184,17 @@ msgstr "%s indisine sahip ilke yok"
msgid "No realm key for %r"
msgstr "%r için realm anahtarı yok"
-#, python-format
-msgid "Node error limited %(ip)s:%(port)s (%(device)s)"
-msgstr "Düğüm hatası sınırlandı %(ip)s:%(port)s (%(device)s)"
-
-#, python-format
-msgid "Not enough object servers ack'ed (got %d)"
-msgstr "Yeterince nesne sunucu ack'lenmedi (%d alındı)"
-
-#, python-format
-msgid ""
-"Not found %(sync_from)r => %(sync_to)r - object "
-"%(obj_name)r"
-msgstr ""
-"Bulunamadı %(sync_from)r => %(sync_to)r - nesne %(obj_name)r"
-
-#, python-format
-msgid "Nothing reconstructed for %s seconds."
-msgstr "%s saniye boyunca hiçbir şey yeniden oluşturulmadı."
-
-#, python-format
-msgid "Nothing replicated for %s seconds."
-msgstr "%s saniyedir hiçbir şey çoğaltılmadı."
-
-msgid "Object"
-msgstr "Nesne"
-
-msgid "Object PUT"
-msgstr "Nesne PUT"
-
-#, python-format
-msgid "Object PUT returning 202 for 409: %(req_timestamp)s <= %(timestamps)r"
-msgstr "Nesne PUT 409 için 202 döndürüyor: %(req_timestamp)s <= %(timestamps)r"
-
-#, python-format
-msgid "Object PUT returning 412, %(statuses)r"
-msgstr "Nesne PUT 412 döndürüyor, %(statuses)r"
-
-#, python-format
-msgid ""
-"Object audit (%(type)s) \"%(mode)s\" mode completed: %(elapsed).02fs. Total "
-"quarantined: %(quars)d, Total errors: %(errors)d, Total files/sec: "
-"%(frate).2f, Total bytes/sec: %(brate).2f, Auditing time: %(audit).2f, Rate: "
-"%(audit_rate).2f"
-msgstr ""
-"Nesne denetimi (%(type)s) \"%(mode)s\" kipinde tamamlandı: %(elapsed).02fs. "
-"Toplam karantina: %(quars)d, Toplam hata: %(errors)d, Toplam dosya/sn: "
-"%(frate).2f, Toplam bayt/sn: %(brate).2f, Denetleme zamanı: %(audit).2f, "
-"Oran: %(audit_rate).2f"
-
-#, python-format
-msgid "Object audit stats: %s"
-msgstr "Nesne denetim istatistikleri: %s"
-
-#, python-format
-msgid "Object reconstruction complete (once). (%.02f minutes)"
-msgstr "Nesne yeniden oluşturma tamamlandı (bir kere). (%.02f dakika)"
-
-#, python-format
-msgid "Object reconstruction complete. (%.02f minutes)"
-msgstr "Nesne yeniden oluşturma tamamlandı. (%.02f dakika)"
-
-#, python-format
-msgid "Object replication complete (once). (%.02f minutes)"
-msgstr "Nesne çoğaltma tamamlandı (bir kere). (%.02f dakika)"
-
-#, python-format
-msgid "Object replication complete. (%.02f minutes)"
-msgstr "Nesne çoğaltma tamamlandı. (%.02f dakika)"
-
-#, python-format
-msgid "Object servers returned %s mismatched etags"
-msgstr "Nesne sunucuları %s eşleşmeyen etag döndürdü"
-
-#, python-format
-msgid "Object update sweep completed: %.02fs"
-msgstr "Nesne güncelleme süpürmesi tamamlandı: %.02fs"
-
msgid "Params, queries, and fragments not allowed in X-Container-Sync-To"
msgstr "X-Container-Sync-To'da parametre, sorgular, ve parçalara izin verilmez"
-#, python-format
-msgid "Partition times: max %(max).4fs, min %(min).4fs, med %(med).4fs"
-msgstr ""
-"Bölüm zamanları: azami %(max).4fs, asgari %(min).4fs, ortalama %(med).4fs"
-
msgid "Path required in X-Container-Sync-To"
msgstr "X-Container-Sync-To'de yol gerekli"
#, python-format
-msgid "Problem cleaning up %s"
-msgstr "%s temizliğinde problem"
-
-#, python-format
msgid "Profiling Error: %s"
msgstr "Profilleme Hatası: %s"
#, python-format
-msgid "Quarantined %(hsh_path)s to %(quar_path)s because it is not a directory"
-msgstr "%(hsh_path)s %(quar_path)s karantinasına alındı çünkü bir dizin değil"
-
-#, python-format
-msgid ""
-"Quarantined %(object_path)s to %(quar_path)s because it is not a directory"
-msgstr ""
-"Bir dizin olmadığından %(object_path)s %(quar_path)s e karantinaya alındı"
-
-#, python-format
msgid "Quarantining DB %s"
msgstr "DB %s karantinaya alınıyor"
@@ -625,14 +208,6 @@ msgid "Removed %(remove)d dbs"
msgstr "%(remove)d db silindi"
#, python-format
-msgid "Removing %s objects"
-msgstr "%s nesne kaldırılıyor"
-
-#, python-format
-msgid "Removing partition: %s"
-msgstr "Bölüm kaldırılıyor: %s"
-
-#, python-format
msgid "Removing pid file %s with invalid pid"
msgstr "Geçersiz pid'e sahip pid dosyası %s siliniyor"
@@ -648,54 +223,9 @@ msgid "Returning 497 because of blacklisting: %s"
msgstr "Kara listeleme yüzünden 497 döndürülüyor: %s"
#, python-format
-msgid ""
-"Returning 498 for %(meth)s to %(acc)s/%(cont)s/%(obj)s . Ratelimit (Max "
-"Sleep) %(e)s"
-msgstr ""
-"%(acc)s/%(cont)s/%(obj)s ye %(meth)s için 498 döndürülüyor. Oran sınırı "
-"(Azami uyku) %(e)s"
-
-msgid "Ring change detected. Aborting current reconstruction pass."
-msgstr ""
-"Zincir değişikliği algılandı. Mevcut yeniden oluşturma geçişi iptal ediliyor."
-
-msgid "Ring change detected. Aborting current replication pass."
-msgstr "Zincir değişimi algılandı. Mevcut çoğaltma geçişi iptal ediliyor."
-
-#, python-format
msgid "Running %s once"
msgstr "%s bir kere çalıştırılıyor"
-msgid "Running object reconstructor in script mode."
-msgstr "Nesne yeniden oluşturma betik kipinde çalıştırılıyor."
-
-msgid "Running object replicator in script mode."
-msgstr "Nesne çoğaltıcı betik kipinde çalıştırılıyor."
-
-#, python-format
-msgid ""
-"Since %(time)s: %(sync)s synced [%(delete)s deletes, %(put)s puts], %(skip)s "
-"skipped, %(fail)s failed"
-msgstr ""
-"%(time)s den beri: %(sync)s eşzamanlandı [%(delete)s silme, %(put)s koyma], "
-"%(skip)s atlama, %(fail)s başarısız"
-
-#, python-format
-msgid ""
-"Since %(time)s: Account audits: %(passed)s passed audit,%(failed)s failed "
-"audit"
-msgstr ""
-"%(time)s den beri: Hesap denetimleri: %(passed)s denetimi geçti, %(failed)s "
-"denetimi geçemedi"
-
-#, python-format
-msgid ""
-"Since %(time)s: Container audits: %(pass)s passed audit, %(fail)s failed "
-"audit"
-msgstr ""
-"%(time)s den beri: Kap denetimleri: %(pass)s denetimi geçti, %(fail)s "
-"denetimde başarısız"
-
#, python-format
msgid "Skipping %s as it is not mounted"
msgstr "Bağlı olmadığından %s atlanıyor"
@@ -704,55 +234,9 @@ msgstr "Bağlı olmadığından %s atlanıyor"
msgid "Starting %s"
msgstr "%s başlatılıyor"
-msgid "Starting object reconstruction pass."
-msgstr "Nesne yeniden oluşturma geçişi başlatılıyor."
-
-msgid "Starting object reconstructor in daemon mode."
-msgstr "Nesne yeniden oluşturma artalan işlemi kipinde başlatılıyor."
-
-msgid "Starting object replication pass."
-msgstr "Nesne çoğaltma geçişi başlatılıyor."
-
-msgid "Starting object replicator in daemon mode."
-msgstr "Nesne çoğaltıcı artalan işlemi kipinde başlatılıyor."
-
-#, python-format
-msgid "Successful rsync of %(src)s at %(dst)s (%(time).03f)"
-msgstr "%(dst)s (%(time).03f) de %(src)s başarılı rsync'i"
-
msgid "The file type are forbidden to access!"
msgstr "Dosya türüne erişim yasaklanmış!"
-#, python-format
-msgid ""
-"The total %(key)s for the container (%(total)s) does not match the sum of "
-"%(key)s across policies (%(sum)s)"
-msgstr ""
-"(%(total)s) kabı için %(key)s toplamı ilkeler arasındaki %(key)s toplamıyla "
-"eşleşmiyor (%(sum)s)"
-
-#, python-format
-msgid "Trying to %(method)s %(path)s"
-msgstr "%(method)s %(path)s deneniyor"
-
-#, python-format
-msgid "Trying to GET %(full_path)s"
-msgstr "%(full_path)s GET deneniyor"
-
-msgid "Trying to read during GET"
-msgstr "GET sırasında okuma deneniyor"
-
-msgid "Trying to send to client"
-msgstr "İstemciye gönderilmeye çalışılıyor"
-
-#, python-format
-msgid "Trying to sync suffixes with %s"
-msgstr "%s e sahip son ekler eşzamanlanmaya çalışılıyor"
-
-#, python-format
-msgid "Trying to write to %s"
-msgstr "%s'e yazmaya çalışılıyor"
-
msgid "UNCAUGHT EXCEPTION"
msgstr "YAKALANMAYAN İSTİSNA"
@@ -773,21 +257,6 @@ msgstr ""
msgid "Unable to read config from %s"
msgstr "%s'den yapılandırma okunamıyor"
-#, python-format
-msgid "Unauth %(sync_from)r => %(sync_to)r"
-msgstr "%(sync_from)r => %(sync_to)r yetki al"
-
-msgid "Unhandled exception"
-msgstr "Yakalanmamış istisna"
-
-#, python-format
-msgid "Update report failed for %(container)s %(dbfile)s"
-msgstr "%(container)s %(dbfile)s için güncelleme raporu başarısız"
-
-#, python-format
-msgid "Update report sent for %(container)s %(dbfile)s"
-msgstr "%(container)s %(dbfile)s için güncelleme raporu gönderildi"
-
msgid ""
"WARNING: SSL should only be enabled for testing purposes. Use external SSL "
"termination for a production deployment."
diff --git a/swift/locale/zh_CN/LC_MESSAGES/swift.po b/swift/locale/zh_CN/LC_MESSAGES/swift.po
index 4a5c606c2..5f45d81af 100644
--- a/swift/locale/zh_CN/LC_MESSAGES/swift.po
+++ b/swift/locale/zh_CN/LC_MESSAGES/swift.po
@@ -9,7 +9,7 @@ msgid ""
msgstr ""
"Project-Id-Version: swift VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2020-10-07 00:36+0000\n"
+"POT-Creation-Date: 2022-05-27 18:57+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
@@ -29,39 +29,10 @@ msgstr ""
"用户退出"
#, python-format
-msgid " - %s"
-msgstr "- %s"
-
-#, python-format
-msgid " - parallel, %s"
-msgstr "-平行,%s"
-
-#, python-format
-msgid ""
-"%(checked)d suffixes checked - %(hashed).2f%% hashed, %(synced).2f%% synced"
-msgstr "%(checked)d后缀已被检查 %(hashed).2f%% hashed, %(synced).2f%% synced"
-
-#, python-format
-msgid "%(msg)s %(ip)s:%(port)s/%(device)s"
-msgstr "%(msg)s %(ip)s:%(port)s/%(device)s"
-
-#, python-format
-msgid ""
-"%(replicated)d/%(total)d (%(percentage).2f%%) partitions replicated in "
-"%(time).2fs (%(rate).2f/sec, %(remaining)s remaining)"
-msgstr ""
-"%(replicated)d/%(total)d (%(percentage).2f%%) 分区被复制 持续时间为 \"\n"
-"\"%(time).2fs (%(rate).2f/sec, %(remaining)s remaining)"
-
-#, python-format
msgid "%(success)s successes, %(failure)s failures"
msgstr "%(success)s成功,%(failure)s失败"
#, python-format
-msgid "%(type)s returning 503 for %(statuses)s"
-msgstr "%(type)s 返回 503 在 %(statuses)s"
-
-#, python-format
msgid "%s already started..."
msgstr "%s已启动..."
@@ -70,64 +41,14 @@ msgid "%s does not exist"
msgstr "%s不存在"
#, python-format
-msgid "%s is not mounted"
-msgstr "%s未挂载"
-
-#, python-format
-msgid "%s responded as unmounted"
-msgstr "%s 响应为未安装"
-
-#, python-format
msgid "%s: Connection reset by peer"
msgstr "%s:已由同级重置连接"
-msgid "Account"
-msgstr "账号"
-
-#, python-format
-msgid "Account audit \"once\" mode completed: %.02fs"
-msgstr "账号审计\"once\"模式完成: %.02fs"
-
-#, python-format
-msgid "Account audit pass completed: %.02fs"
-msgstr "账号审计完成:%.02fs"
-
#, python-format
msgid ""
"Attempted to replicate %(count)d dbs in %(time).5f seconds (%(rate).5f/s)"
msgstr "%(time).5f seconds (%(rate).5f/s)尝试复制%(count)d dbs"
-#, python-format
-msgid "Bad rsync return code: %(ret)d <- %(args)s"
-msgstr "Bad rsync返还代码:%(ret)d <- %(args)s"
-
-msgid "Begin account audit \"once\" mode"
-msgstr "开始账号审计\"once\"模式"
-
-msgid "Begin account audit pass."
-msgstr "开始账号审计通过"
-
-msgid "Begin container audit \"once\" mode"
-msgstr "开始容器审计\"once\" 模式"
-
-msgid "Begin container audit pass."
-msgstr "开始通过容器审计"
-
-msgid "Begin container sync \"once\" mode"
-msgstr "开始容器同步\"once\"模式"
-
-msgid "Begin container update single threaded sweep"
-msgstr "开始容器更新单线程扫除"
-
-msgid "Begin container update sweep"
-msgstr "开始容器更新扫除"
-
-msgid "Begin object update single threaded sweep"
-msgstr "开始对象更新单线程扫除"
-
-msgid "Begin object update sweep"
-msgstr "开始对象更新扫除"
-
msgid "Beginning replication run"
msgstr "开始运行复制"
@@ -142,195 +63,27 @@ msgstr "无法访问文件%s"
msgid "Can not load profile data from %s."
msgstr "无法从%s下载分析数据"
-#, python-format
-msgid "Client did not read from proxy within %ss"
-msgstr "客户尚未从代理处读取%ss"
-
-msgid "Client disconnected without sending enough data"
-msgstr "客户中断 尚未发送足够"
-
-msgid "Client disconnected without sending last chunk"
-msgstr "客户机已断开连接而未发送最后一个数据块"
-
-#, python-format
-msgid ""
-"Client path %(client)s does not match path stored in object metadata %(meta)s"
-msgstr "客户路径%(client)s与对象元数据中存储的路径%(meta)s不符"
-
-msgid ""
-"Configuration option internal_client_conf_path not defined. Using default "
-"configuration, See internal-client.conf-sample for options"
-msgstr ""
-"未定义配置选项 internal_client_conf_path。正在使用缺省配置。请参阅 internal-"
-"client.conf-sample 以了解各个选项"
-
msgid "Connection refused"
msgstr "连接被拒绝"
msgid "Connection timeout"
msgstr "连接超时"
-msgid "Container"
-msgstr "容器"
-
-#, python-format
-msgid "Container audit \"once\" mode completed: %.02fs"
-msgstr "容器审计\"once\"模式完成:%.02fs"
-
-#, python-format
-msgid "Container audit pass completed: %.02fs"
-msgstr "容器审计通过完成: %.02fs"
-
-#, python-format
-msgid "Container sync \"once\" mode completed: %.02fs"
-msgstr "容器同步\"once\"模式完成:%.02fs"
-
-#, python-format
-msgid ""
-"Container update single threaded sweep completed: %(elapsed).02fs, "
-"%(success)s successes, %(fail)s failures, %(no_change)s with no changes"
-msgstr ""
-"容器更新单线程扫除完成:%(elapsed).02fs, %(success)s 成功, %(fail)s 失败, "
-"%(no_change)s 无更改"
-
-#, python-format
-msgid "Container update sweep completed: %.02fs"
-msgstr "容器更新扫除完成:%.02fs"
-
-#, python-format
-msgid ""
-"Container update sweep of %(path)s completed: %(elapsed).02fs, %(success)s "
-"successes, %(fail)s failures, %(no_change)s with no changes"
-msgstr ""
-"通过路径%(path)s容器更新扫除完成:%(elapsed).02fs, %(success)s 成功, "
-"%(fail)s 失败, %(no_change)s 无更改"
-
#, python-format
msgid "Data download error: %s"
msgstr "数据下载错误:%s"
-#, python-format
-msgid "ERROR %(db_file)s: %(validate_sync_to_err)s"
-msgstr "错误 %(db_file)s: %(validate_sync_to_err)s"
-
-#, python-format
-msgid "ERROR %(status)d %(body)s From %(type)s Server"
-msgstr "错误 %(status)d %(body)s 来自 %(type)s 服务器"
-
-#, python-format
-msgid "ERROR %(status)d %(body)s From Object Server re: %(path)s"
-msgstr "错误 %(status)d %(body)s 来自 对象服务器 re: %(path)s"
-
-#, python-format
-msgid "ERROR %(status)d Expect: 100-continue From Object Server"
-msgstr "发生 %(status)d 错误,需要 100 - 从对象服务器继续"
-
-#, python-format
-msgid ""
-"ERROR Account update failed with %(ip)s:%(port)s/%(device)s (will retry "
-"later): Response %(status)s %(reason)s"
-msgstr ""
-"出现错误 账号更新失败: %(ip)s:%(port)s/%(device)s (稍后尝试): 回应 "
-"%(status)s %(reason)s"
-
-#, python-format
-msgid "ERROR Client read timeout (%ss)"
-msgstr "错误 客户读取超时(%ss)"
-
-#, python-format
-msgid ""
-"ERROR Container update failed (saving for async update later): %(status)d "
-"response from %(ip)s:%(port)s/%(dev)s"
-msgstr ""
-"错误 容器更新失败(正在保存 稍后同步更新):%(status)d回应来自%(ip)s:%(port)s/"
-"%(dev)s"
-
-#, python-format
-msgid "ERROR Could not get account info %s"
-msgstr "错误:无法获取账号信息%s"
-
-#, python-format
-msgid "ERROR Could not get container info %s"
-msgstr "错误:无法获取容器%s信息"
-
-#, python-format
-msgid "ERROR DiskFile %(data_file)s close failure: %(exc)s : %(stack)s"
-msgstr "磁盘文件错误%(data_file)s关闭失败: %(exc)s : %(stack)s"
-
-msgid "ERROR Exception causing client disconnect"
-msgstr "错误 异常导致客户端中断连接"
-
-#, python-format
-msgid "ERROR Exception transferring data to object servers %s"
-msgstr "错误:向对象服务器 %s 传输数据时发生异常"
-
msgid "ERROR Failed to get my own IPs?"
msgstr "错误 无法获得我方IPs?"
-msgid "ERROR Insufficient Storage"
-msgstr "错误 存储空间不足"
-
-#, python-format
-msgid "ERROR Object %(obj)s failed audit and was quarantined: %(err)s"
-msgstr "错误 对象%(obj)s审计失败并被隔离:%(err)s"
-
-#, python-format
-msgid "ERROR Pickle problem, quarantining %s"
-msgstr "错误 Pickle问题 隔离%s"
-
#, python-format
msgid "ERROR Remote drive not mounted %s"
msgstr "错误 远程驱动器无法挂载 %s"
-#, python-format
-msgid "ERROR Syncing %(db_file)s %(row)s"
-msgstr "同步错误 %(db_file)s %(row)s"
-
-#, python-format
-msgid "ERROR Syncing %s"
-msgstr "同步时发生错误%s"
-
-#, python-format
-msgid "ERROR Trying to audit %s"
-msgstr "错误 尝试开始审计%s"
-
-msgid "ERROR Unhandled exception in request"
-msgstr "错误 未处理的异常发出请求"
-
-#, python-format
-msgid "ERROR __call__ error with %(method)s %(path)s "
-msgstr "%(method)s %(path)s出现错误__call__ error"
-
-#, python-format
-msgid ""
-"ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry "
-"later)"
-msgstr "错误 账号更新失败 %(ip)s:%(port)s/%(device)s (稍后尝试)"
-
-#, python-format
-msgid ""
-"ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry "
-"later): "
-msgstr "错误 账号更新失败%(ip)s:%(port)s/%(device)s (稍后尝试):"
-
-#, python-format
-msgid "ERROR async pending file with unexpected name %s"
-msgstr "执行同步等待文件 文件名不可知%s"
-
msgid "ERROR auditing"
msgstr "错误 审计"
#, python-format
-msgid "ERROR auditing: %s"
-msgstr "审计错误:%s"
-
-#, python-format
-msgid ""
-"ERROR container update failed with %(ip)s:%(port)s/%(dev)s (saving for async "
-"update later)"
-msgstr "错误 容器更新失败%(ip)s:%(port)s/%(dev)s(正在保存 稍后同步更新)"
-
-#, python-format
msgid "ERROR reading HTTP response from %s"
msgstr "读取HTTP错误 响应来源%s"
@@ -349,37 +102,6 @@ msgstr "错误 同步 %(file)s 和 节点%(node)s"
msgid "ERROR trying to replicate"
msgstr "尝试复制时发生错误"
-#, python-format
-msgid "ERROR while trying to clean up %s"
-msgstr "清理时出现错误%s"
-
-#, python-format
-msgid "ERROR with %(type)s server %(ip)s:%(port)s/%(device)s re: %(info)s"
-msgstr "%(type)s服务器发生错误 %(ip)s:%(port)s/%(device)s re: %(info)s"
-
-#, python-format
-msgid "ERROR with loading suppressions from %s: "
-msgstr "执行下载压缩时发生错误%s"
-
-#, python-format
-msgid "ERROR with remote server %(ip)s:%(port)s/%(device)s"
-msgstr "远程服务器发生错误 %(ip)s:%(port)s/%(device)s"
-
-#, python-format
-msgid "ERROR: Failed to get paths to drive partitions: %s"
-msgstr "%s未挂载"
-
-#, python-format
-msgid "ERROR: Unable to access %(path)s: %(error)s"
-msgstr "出错,无法访问 %(path)s:%(error)s"
-
-#, python-format
-msgid "ERROR: Unable to run auditing: %s"
-msgstr "错误:无法执行审计:%s"
-
-msgid "Error hashing suffix"
-msgstr "执行Hashing后缀时发生错误"
-
msgid "Error listing devices"
msgstr "设备列表时出现错误"
@@ -402,20 +124,6 @@ msgstr "读取swift.conf时出现错误"
msgid "Error retrieving recon data"
msgstr "检索recon data时出现错误"
-msgid "Error syncing handoff partition"
-msgstr "执行同步切换分区时发生错误"
-
-msgid "Error syncing partition"
-msgstr "执行同步分区时发生错误"
-
-#, python-format
-msgid "Error syncing with node: %s"
-msgstr "执行同步时节点%s发生错误"
-
-#, python-format
-msgid "Error trying to rebuild %(path)s policy#%(policy)d frag#%(frag_index)s"
-msgstr "尝试重建 %(path)s 策略时出错:#%(policy)d frag#%(frag_index)s"
-
msgid "Error: An error occurred"
msgstr "错误:一个错误发生了"
@@ -427,25 +135,12 @@ msgid "Error: unable to locate %s"
msgstr "错误:无法查询到 %s"
#, python-format
-msgid "Exception with %(ip)s:%(port)s/%(device)s"
-msgstr "%(ip)s:%(port)s/%(device)s出现异常"
-
-#, python-format
-msgid "Expect: 100-continue on %s"
-msgstr "已知:100-continue on %s"
-
-#, python-format
msgid "Following CNAME chain for %(given_domain)s to %(found_domain)s"
msgstr "跟随CNAME链从%(given_domain)s到%(found_domain)s"
msgid "Found configs:"
msgstr "找到配置"
-msgid ""
-"Handoffs first mode still has handoffs remaining. Aborting current "
-"replication pass."
-msgstr "Handoffs 优先方式仍有 handoffs。正在中止当前复制过程。"
-
msgid "Host unreachable"
msgstr "无法连接到主机"
@@ -462,14 +157,6 @@ msgid "Invalid pending entry %(file)s: %(entry)s"
msgstr "不可用的等待输入%(file)s: %(entry)s"
#, python-format
-msgid "Invalid response %(resp)s from %(full_path)s"
-msgstr "从 %(full_path)s 返回了无效响应 %(resp)s"
-
-#, python-format
-msgid "Invalid response %(resp)s from %(ip)s"
-msgstr "无效的回应%(resp)s来自%(ip)s"
-
-#, python-format
msgid ""
"Invalid scheme %r in X-Container-Sync-To, must be \"//\", \"http\", or "
"\"https\"."
@@ -477,13 +164,6 @@ msgstr ""
"在X-Container-Sync-To中%r是无效的方案,须为\"//\", \"http\", or \"https\"。"
#, python-format
-msgid "Killing long-running rsync: %s"
-msgstr "终止long-running同步: %s"
-
-msgid "Lockup detected.. killing live coros."
-msgstr "检测到lockup。终止正在执行的coros"
-
-#, python-format
msgid "Mapped %(given_domain)s to %(found_domain)s"
msgstr "集合%(given_domain)s到%(found_domain)s"
@@ -503,123 +183,17 @@ msgstr "没有具备索引 %s 的策略"
msgid "No realm key for %r"
msgstr "%r权限key不存在"
-#, python-format
-msgid "Node error limited %(ip)s:%(port)s (%(device)s)"
-msgstr "节点错误极限 %(ip)s:%(port)s (%(device)s)"
-
-#, python-format
-msgid "Not enough object servers ack'ed (got %d)"
-msgstr "没有足够的对象服务器应答(收到 %d)"
-
-#, python-format
-msgid ""
-"Not found %(sync_from)r => %(sync_to)r - object "
-"%(obj_name)r"
-msgstr "未找到: %(sync_from)r => %(sync_to)r - object %(obj_name)r"
-
-#, python-format
-msgid "Nothing reconstructed for %s seconds."
-msgstr "过去 %s 秒未重构任何对象。"
-
-#, python-format
-msgid "Nothing replicated for %s seconds."
-msgstr "%s秒无复制"
-
-msgid "Object"
-msgstr "对象"
-
-msgid "Object PUT"
-msgstr "对象上传"
-
-#, python-format
-msgid "Object PUT returning 202 for 409: %(req_timestamp)s <= %(timestamps)r"
-msgstr ""
-"对象 PUT 正在返回 202(对于 409):%(req_timestamp)s 小于或等于 "
-"%(timestamps)r"
-
-#, python-format
-msgid "Object PUT returning 412, %(statuses)r"
-msgstr "对象PUT返还 412,%(statuses)r "
-
-#, python-format
-msgid ""
-"Object audit (%(type)s) \"%(mode)s\" mode completed: %(elapsed).02fs. Total "
-"quarantined: %(quars)d, Total errors: %(errors)d, Total files/sec: "
-"%(frate).2f, Total bytes/sec: %(brate).2f, Auditing time: %(audit).2f, Rate: "
-"%(audit_rate).2f"
-msgstr ""
-"对象审计 (%(type)s) \\\"%(mode)s\\\"模式完成: %(elapsed).02fs 隔离总数: "
-"%(quars)d, 错误总数: %(errors)d, 文件/秒总和:%(frate).2f, bytes/sec总和: "
-"%(brate).2f, 审计时间: %(audit).2f, 速率: %(audit_rate).2f"
-
-#, python-format
-msgid ""
-"Object audit (%(type)s). Since %(start_time)s: Locally: %(passes)d passed, "
-"%(quars)d quarantined, %(errors)d errors, files/sec: %(frate).2f, bytes/sec: "
-"%(brate).2f, Total time: %(total).2f, Auditing time: %(audit).2f, Rate: "
-"%(audit_rate).2f"
-msgstr ""
-"对象审计 (%(type)s). 自 %(start_time)s 开始: 本地:%(passes)d 通"
-"过,%(quars)d 隔离,%(errors)d 错误,文件/秒:%(frate).2f,字节/秒:"
-"%(brate).2f,总时间:%(total).2f,审计时间:%(audit).2f,速率:"
-"%(audit_rate).2f"
-
-#, python-format
-msgid "Object audit stats: %s"
-msgstr "对象审计统计:%s"
-
-#, python-format
-msgid "Object reconstruction complete (once). (%.02f minutes)"
-msgstr "对象重构完成(一次)。(%.02f 分钟)"
-
-#, python-format
-msgid "Object reconstruction complete. (%.02f minutes)"
-msgstr "对象重构完成。(%.02f 分钟)"
-
-#, python-format
-msgid "Object replication complete (once). (%.02f minutes)"
-msgstr "对象复制完成(一次)。(%.02f minutes)"
-
-#, python-format
-msgid "Object replication complete. (%.02f minutes)"
-msgstr "对象复制完成。(%.02f minutes)"
-
-#, python-format
-msgid "Object servers returned %s mismatched etags"
-msgstr "对象服务器返还%s不匹配etags"
-
-#, python-format
-msgid "Object update sweep completed: %.02fs"
-msgstr "对象更新扫除完成:%.02fs"
-
msgid "Params, queries, and fragments not allowed in X-Container-Sync-To"
msgstr "在X-Container-Sync-To中,变量,查询和碎片不被允许"
-#, python-format
-msgid "Partition times: max %(max).4fs, min %(min).4fs, med %(med).4fs"
-msgstr "分区时间: max %(max).4fs, min %(min).4fs, med %(med).4fs"
-
msgid "Path required in X-Container-Sync-To"
msgstr "在X-Container-Sync-To中路径是必须的"
#, python-format
-msgid "Problem cleaning up %s"
-msgstr "问题清除%s"
-
-#, python-format
msgid "Profiling Error: %s"
msgstr "分析代码时出现错误:%s"
#, python-format
-msgid "Quarantined %(hsh_path)s to %(quar_path)s because it is not a directory"
-msgstr "隔离%(hsh_path)s和%(quar_path)s因为非目录"
-
-#, python-format
-msgid ""
-"Quarantined %(object_path)s to %(quar_path)s because it is not a directory"
-msgstr "隔离%(object_path)s和%(quar_path)s因为非目录"
-
-#, python-format
msgid "Quarantining DB %s"
msgstr "隔离DB%s"
@@ -632,14 +206,6 @@ msgid "Removed %(remove)d dbs"
msgstr "删除%(remove)d dbs"
#, python-format
-msgid "Removing %s objects"
-msgstr "正在移除 %s 个对象"
-
-#, python-format
-msgid "Removing partition: %s"
-msgstr "移除分区:%s"
-
-#, python-format
msgid "Removing pid file %(pid_file)s with wrong pid %(pid)d"
msgstr "移除 pid 文件 %(pid_file)s 失败,pid %(pid)d 不正确"
@@ -659,49 +225,9 @@ msgid "Returning 497 because of blacklisting: %s"
msgstr "返回497因为黑名单:%s"
#, python-format
-msgid ""
-"Returning 498 for %(meth)s to %(acc)s/%(cont)s/%(obj)s . Ratelimit (Max "
-"Sleep) %(e)s"
-msgstr ""
-"返还498从%(meth)s到%(acc)s/%(cont)s/%(obj)s,流量控制(Max \"\n"
-"\"Sleep) %(e)s"
-
-msgid "Ring change detected. Aborting current reconstruction pass."
-msgstr "检测到环更改。正在中止当前重构过程。"
-
-msgid "Ring change detected. Aborting current replication pass."
-msgstr "Ring改变被检测到。退出现有的复制通过"
-
-#, python-format
msgid "Running %s once"
msgstr "运行%s一次"
-msgid "Running object reconstructor in script mode."
-msgstr "正以脚本方式运行对象重构程序。"
-
-msgid "Running object replicator in script mode."
-msgstr "在加密模式下执行对象复制"
-
-#, python-format
-msgid ""
-"Since %(time)s: %(sync)s synced [%(delete)s deletes, %(put)s puts], %(skip)s "
-"skipped, %(fail)s failed"
-msgstr ""
-"自%(time)s起:%(sync)s完成同步 [%(delete)s 删除, %(put)s 上传], \"\n"
-"\"%(skip)s 跳过, %(fail)s 失败"
-
-#, python-format
-msgid ""
-"Since %(time)s: Account audits: %(passed)s passed audit,%(failed)s failed "
-"audit"
-msgstr "自%(time)s开始:账号审计:%(passed)s 通过审计,%(failed)s 失败"
-
-#, python-format
-msgid ""
-"Since %(time)s: Container audits: %(pass)s passed audit, %(fail)s failed "
-"audit"
-msgstr "自%(time)s起:容器审计:%(pass)s通过审计, %(fail)s失败"
-
#, python-format
msgid "Skipping %s as it is not mounted"
msgstr "挂载失败 跳过%s"
@@ -710,53 +236,9 @@ msgstr "挂载失败 跳过%s"
msgid "Starting %s"
msgstr "启动%s"
-msgid "Starting object reconstruction pass."
-msgstr "正在启动对象重构过程。"
-
-msgid "Starting object reconstructor in daemon mode."
-msgstr "正以守护程序方式启动对象重构程序。"
-
-msgid "Starting object replication pass."
-msgstr "开始通过对象复制"
-
-msgid "Starting object replicator in daemon mode."
-msgstr "在守护模式下开始对象复制"
-
-#, python-format
-msgid "Successful rsync of %(src)s at %(dst)s (%(time).03f)"
-msgstr "成功的rsync %(src)s at %(dst)s (%(time).03f)"
-
msgid "The file type are forbidden to access!"
msgstr "该文件类型被禁止访问!"
-#, python-format
-msgid ""
-"The total %(key)s for the container (%(total)s) does not match the sum of "
-"%(key)s across policies (%(sum)s)"
-msgstr "容器(%(total)s)内%(key)s总数不符合协议%(key)s总数(%(sum)s)"
-
-#, python-format
-msgid "Trying to %(method)s %(path)s"
-msgstr "尝试执行%(method)s %(path)s"
-
-#, python-format
-msgid "Trying to GET %(full_path)s"
-msgstr "正尝试获取 %(full_path)s"
-
-msgid "Trying to read during GET"
-msgstr "执行GET时尝试读取"
-
-msgid "Trying to send to client"
-msgstr "尝试发送到客户端"
-
-#, python-format
-msgid "Trying to sync suffixes with %s"
-msgstr "正尝试使后缀与 %s 同步"
-
-#, python-format
-msgid "Trying to write to %s"
-msgstr "尝试执行书写%s"
-
msgid "UNCAUGHT EXCEPTION"
msgstr "未捕获的异常"
@@ -776,21 +258,6 @@ msgstr "无法查询到fallocate, posix_fallocate。保存为no-op"
msgid "Unable to read config from %s"
msgstr "无法从%s读取设置"
-#, python-format
-msgid "Unauth %(sync_from)r => %(sync_to)r"
-msgstr "未授权%(sync_from)r => %(sync_to)r"
-
-msgid "Unhandled exception"
-msgstr "未处理的异常"
-
-#, python-format
-msgid "Update report failed for %(container)s %(dbfile)s"
-msgstr "%(container)s %(dbfile)s更新报告失败"
-
-#, python-format
-msgid "Update report sent for %(container)s %(dbfile)s"
-msgstr "更新报告发至%(container)s %(dbfile)s"
-
msgid ""
"WARNING: SSL should only be enabled for testing purposes. Use external SSL "
"termination for a production deployment."
diff --git a/swift/locale/zh_TW/LC_MESSAGES/swift.po b/swift/locale/zh_TW/LC_MESSAGES/swift.po
index 610c1759b..9563427a8 100644
--- a/swift/locale/zh_TW/LC_MESSAGES/swift.po
+++ b/swift/locale/zh_TW/LC_MESSAGES/swift.po
@@ -8,7 +8,7 @@ msgid ""
msgstr ""
"Project-Id-Version: swift VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2020-10-07 00:36+0000\n"
+"POT-Creation-Date: 2022-05-27 18:57+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
@@ -28,40 +28,10 @@ msgstr ""
"使用者退出"
#, python-format
-msgid " - %s"
-msgstr " - %s"
-
-#, python-format
-msgid " - parallel, %s"
-msgstr " - 平行,%s"
-
-#, python-format
-msgid ""
-"%(checked)d suffixes checked - %(hashed).2f%% hashed, %(synced).2f%% synced"
-msgstr ""
-"已檢查 %(checked)d 個字尾 - %(hashed).2f%% 個已雜湊,%(synced).2f%% 個已同步"
-
-#, python-format
-msgid "%(msg)s %(ip)s:%(port)s/%(device)s"
-msgstr "%(msg)s %(ip)s:%(port)s/%(device)s"
-
-#, python-format
-msgid ""
-"%(replicated)d/%(total)d (%(percentage).2f%%) partitions replicated in "
-"%(time).2fs (%(rate).2f/sec, %(remaining)s remaining)"
-msgstr ""
-"已抄寫 %(replicated)d/%(total)d (%(percentage).2f%%) 個分割區(在 "
-"%(time).2fs 內,%(rate).2f/秒,剩餘 %(remaining)s)"
-
-#, python-format
msgid "%(success)s successes, %(failure)s failures"
msgstr "%(success)s 個成功,%(failure)s 個失敗"
#, python-format
-msgid "%(type)s returning 503 for %(statuses)s"
-msgstr "%(type)s 針對 %(statuses)s 正在傳回 503"
-
-#, python-format
msgid "%s already started..."
msgstr "%s 已啟動..."
@@ -70,64 +40,14 @@ msgid "%s does not exist"
msgstr "%s 不存在"
#, python-format
-msgid "%s is not mounted"
-msgstr "未裝載 %s"
-
-#, python-format
-msgid "%s responded as unmounted"
-msgstr "%s 已回應為未裝載"
-
-#, python-format
msgid "%s: Connection reset by peer"
msgstr "%s:已由對等項目重設連線"
-msgid "Account"
-msgstr "帳戶"
-
-#, python-format
-msgid "Account audit \"once\" mode completed: %.02fs"
-msgstr "帳戶審核「一次性」模式已完成:%.02fs"
-
-#, python-format
-msgid "Account audit pass completed: %.02fs"
-msgstr "帳戶審核通過已完成:%.02fs"
-
#, python-format
msgid ""
"Attempted to replicate %(count)d dbs in %(time).5f seconds (%(rate).5f/s)"
msgstr "已嘗試在 %(time).5f 秒內抄寫 %(count)d 個資料庫 (%(rate).5f/s)"
-#, python-format
-msgid "Bad rsync return code: %(ret)d <- %(args)s"
-msgstr "不當的遠端同步回覆碼:%(ret)d <- %(args)s"
-
-msgid "Begin account audit \"once\" mode"
-msgstr "開始帳戶審核「一次性」模式"
-
-msgid "Begin account audit pass."
-msgstr "開始帳戶審核通過。"
-
-msgid "Begin container audit \"once\" mode"
-msgstr "開始儲存器審核「一次性」模式"
-
-msgid "Begin container audit pass."
-msgstr "開始儲存器審核通過。"
-
-msgid "Begin container sync \"once\" mode"
-msgstr "開始儲存器同步「一次性」模式"
-
-msgid "Begin container update single threaded sweep"
-msgstr "開始儲存器更新單一執行緒清理"
-
-msgid "Begin container update sweep"
-msgstr "開始儲存器更新清理"
-
-msgid "Begin object update single threaded sweep"
-msgstr "開始物件更新單一執行緒清理"
-
-msgid "Begin object update sweep"
-msgstr "開始物件更新清理"
-
msgid "Beginning replication run"
msgstr "正在開始抄寫執行"
@@ -142,196 +62,27 @@ msgstr "無法存取檔案 %s。"
msgid "Can not load profile data from %s."
msgstr "無法從 %s 中載入設定檔資料。"
-#, python-format
-msgid "Client did not read from proxy within %ss"
-msgstr "用戶端未在 %s 秒內從 Proxy 中讀取"
-
-msgid "Client disconnected without sending enough data"
-msgstr "用戶端已中斷連線,未傳送足夠的資料"
-
-msgid "Client disconnected without sending last chunk"
-msgstr "用戶端已中斷連線,未傳送最後一個片段"
-
-#, python-format
-msgid ""
-"Client path %(client)s does not match path stored in object metadata %(meta)s"
-msgstr "用戶端路徑 %(client)s 不符合物件 meta 資料%(meta)s 中儲存的路徑"
-
-msgid ""
-"Configuration option internal_client_conf_path not defined. Using default "
-"configuration, See internal-client.conf-sample for options"
-msgstr ""
-"為定義配置選項 internal_client_conf_path。將使用預設配置,請參閱 internal-"
-"client.conf-sample 以取得選項"
-
msgid "Connection refused"
msgstr "連線遭拒"
msgid "Connection timeout"
msgstr "連線逾時"
-msgid "Container"
-msgstr "容器"
-
-#, python-format
-msgid "Container audit \"once\" mode completed: %.02fs"
-msgstr "儲存器審核「一次性」模式已完成:%.02fs"
-
-#, python-format
-msgid "Container audit pass completed: %.02fs"
-msgstr "儲存器審核通過已完成:%.02fs"
-
-#, python-format
-msgid "Container sync \"once\" mode completed: %.02fs"
-msgstr "儲存器同步「一次性」模式已完成:%.02fs"
-
-#, python-format
-msgid ""
-"Container update single threaded sweep completed: %(elapsed).02fs, "
-"%(success)s successes, %(fail)s failures, %(no_change)s with no changes"
-msgstr ""
-"儲存器更新單一執行緒清理已完成:%(elapsed).02fs,%(success)s 個成"
-"功,%(fail)s 個失敗,%(no_change)s 個無變更"
-
-#, python-format
-msgid "Container update sweep completed: %.02fs"
-msgstr "儲存器更新清理已完成:%.02fs"
-
-#, python-format
-msgid ""
-"Container update sweep of %(path)s completed: %(elapsed).02fs, %(success)s "
-"successes, %(fail)s failures, %(no_change)s with no changes"
-msgstr ""
-"%(path)s 的儲存器更新清理已完成:%(elapsed).02fs,%(success)s 個成"
-"功,%(fail)s 個失敗,%(no_change)s 個無變更"
-
#, python-format
msgid "Data download error: %s"
msgstr "資料下載錯誤:%s"
-#, python-format
-msgid "ERROR %(db_file)s: %(validate_sync_to_err)s"
-msgstr "錯誤:%(db_file)s:%(validate_sync_to_err)s"
-
-#, python-format
-msgid "ERROR %(status)d %(body)s From %(type)s Server"
-msgstr "錯誤:%(status)d %(body)s 來自 %(type)s 伺服器"
-
-#, python-format
-msgid "ERROR %(status)d %(body)s From Object Server re: %(path)s"
-msgstr "錯誤:%(status)d %(body)s 來自物件伺服器 re:%(path)s"
-
-#, python-format
-msgid "ERROR %(status)d Expect: 100-continue From Object Server"
-msgstr "錯誤:%(status)d 預期:100 繼續自物件伺服器"
-
-#, python-format
-msgid ""
-"ERROR Account update failed with %(ip)s:%(port)s/%(device)s (will retry "
-"later): Response %(status)s %(reason)s"
-msgstr ""
-"錯誤:%(ip)s:%(port)s/%(device)s 的帳戶更新失敗(將稍後重試):回應 "
-"%(status)s %(reason)s"
-
-#, python-format
-msgid "ERROR Client read timeout (%ss)"
-msgstr "錯誤:用戶端讀取逾時 (%ss)"
-
-#, python-format
-msgid ""
-"ERROR Container update failed (saving for async update later): %(status)d "
-"response from %(ip)s:%(port)s/%(dev)s"
-msgstr ""
-"錯誤:儲存器更新失敗(儲存以稍後進行非同步更新):%(status)d回應(來自 "
-"%(ip)s:%(port)s/%(dev)s)"
-
-#, python-format
-msgid "ERROR Could not get account info %s"
-msgstr "錯誤:無法取得帳戶資訊 %s"
-
-#, python-format
-msgid "ERROR Could not get container info %s"
-msgstr "錯誤:無法取得儲存器資訊 %s"
-
-#, python-format
-msgid "ERROR DiskFile %(data_file)s close failure: %(exc)s : %(stack)s"
-msgstr "錯誤:磁碟檔 %(data_file)s 關閉失敗:%(exc)s:%(stack)s"
-
-msgid "ERROR Exception causing client disconnect"
-msgstr "錯誤:異常狀況造成用戶端中斷連線"
-
-#, python-format
-msgid "ERROR Exception transferring data to object servers %s"
-msgstr "錯誤:將資料轉送至物件伺服器 %s 時發生異常狀況"
-
msgid "ERROR Failed to get my own IPs?"
msgstr "錯誤:無法取得我自己的 IP?"
-msgid "ERROR Insufficient Storage"
-msgstr "錯誤:儲存體不足"
-
-#, python-format
-msgid "ERROR Object %(obj)s failed audit and was quarantined: %(err)s"
-msgstr "錯誤:物件 %(obj)s 審核失敗,已隔離:%(err)s"
-
-#, python-format
-msgid "ERROR Pickle problem, quarantining %s"
-msgstr "錯誤:挑選問題,正在隔離 %s"
-
#, python-format
msgid "ERROR Remote drive not mounted %s"
msgstr "錯誤:未裝載遠端磁碟機 %s"
-#, python-format
-msgid "ERROR Syncing %(db_file)s %(row)s"
-msgstr "同步 %(db_file)s %(row)s 時發生錯誤"
-
-#, python-format
-msgid "ERROR Syncing %s"
-msgstr "同步 %s 時發生錯誤"
-
-#, python-format
-msgid "ERROR Trying to audit %s"
-msgstr "嘗試審核 %s 時發生錯誤"
-
-msgid "ERROR Unhandled exception in request"
-msgstr "錯誤:要求中有無法處理的異常狀況"
-
-#, python-format
-msgid "ERROR __call__ error with %(method)s %(path)s "
-msgstr "錯誤:%(method)s %(path)s 發生呼叫錯誤"
-
-#, python-format
-msgid ""
-"ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry "
-"later)"
-msgstr "錯誤:%(ip)s:%(port)s/%(device)s 的帳戶更新失敗(將稍後重試)"
-
-#, python-format
-msgid ""
-"ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry "
-"later): "
-msgstr "錯誤:%(ip)s:%(port)s/%(device)s 的帳戶更新失敗(將稍後重試):"
-
-#, python-format
-msgid "ERROR async pending file with unexpected name %s"
-msgstr "錯誤:具有非預期名稱 %s 的非同步擱置檔案"
-
msgid "ERROR auditing"
msgstr "審核時發生錯誤"
#, python-format
-msgid "ERROR auditing: %s"
-msgstr "審核時發生錯誤:%s"
-
-#, python-format
-msgid ""
-"ERROR container update failed with %(ip)s:%(port)s/%(dev)s (saving for async "
-"update later)"
-msgstr ""
-"錯誤:%(ip)s:%(port)s/%(dev)s 的儲存器更新失敗(儲存以稍後進行非同步更新)"
-
-#, python-format
msgid "ERROR reading HTTP response from %s"
msgstr "從 %s 讀取 HTTP 回應時發生錯誤"
@@ -350,37 +101,6 @@ msgstr "同步 %(file)s 與節點 %(node)s 時發生錯誤"
msgid "ERROR trying to replicate"
msgstr "嘗試抄寫時發生錯誤"
-#, python-format
-msgid "ERROR while trying to clean up %s"
-msgstr "嘗試清除 %s 時發生錯誤"
-
-#, python-format
-msgid "ERROR with %(type)s server %(ip)s:%(port)s/%(device)s re: %(info)s"
-msgstr "%(type)s 伺服器發生錯誤:%(ip)s:%(port)s/%(device)s re: %(info)s"
-
-#, python-format
-msgid "ERROR with loading suppressions from %s: "
-msgstr "從 %s 載入抑制時發生錯誤:"
-
-#, python-format
-msgid "ERROR with remote server %(ip)s:%(port)s/%(device)s"
-msgstr "遠端伺服器發生錯誤:%(ip)s:%(port)s/%(device)s"
-
-#, python-format
-msgid "ERROR: Failed to get paths to drive partitions: %s"
-msgstr "錯誤:無法取得磁碟機分割區的路徑:%s"
-
-#, python-format
-msgid "ERROR: Unable to access %(path)s: %(error)s"
-msgstr "錯誤:無法存取 %(path)s:%(error)s"
-
-#, python-format
-msgid "ERROR: Unable to run auditing: %s"
-msgstr "錯誤:無法執行審核:%s"
-
-msgid "Error hashing suffix"
-msgstr "混合字尾時發生錯誤"
-
msgid "Error listing devices"
msgstr "列出裝置時發生錯誤"
@@ -403,20 +123,6 @@ msgstr "讀取 swift.conf 時發生錯誤"
msgid "Error retrieving recon data"
msgstr "擷取 recon 資料時發生錯誤"
-msgid "Error syncing handoff partition"
-msgstr "同步遞交分割區時發生錯誤"
-
-msgid "Error syncing partition"
-msgstr "同步分割區時發生錯誤"
-
-#, python-format
-msgid "Error syncing with node: %s"
-msgstr "與節點同步時發生錯誤:%s"
-
-#, python-format
-msgid "Error trying to rebuild %(path)s policy#%(policy)d frag#%(frag_index)s"
-msgstr "嘗試重建 %(path)s 原則 #%(policy)d 分段 #%(frag_index)s 時發生錯誤"
-
msgid "Error: An error occurred"
msgstr "錯誤:發生錯誤"
@@ -428,25 +134,12 @@ msgid "Error: unable to locate %s"
msgstr "錯誤:找不到 %s"
#, python-format
-msgid "Exception with %(ip)s:%(port)s/%(device)s"
-msgstr "%(ip)s:%(port)s/%(device)s 發生異常狀況"
-
-#, python-format
-msgid "Expect: 100-continue on %s"
-msgstr "預期 100 - 在 %s 上繼續"
-
-#, python-format
msgid "Following CNAME chain for %(given_domain)s to %(found_domain)s"
msgstr "遵循 %(given_domain)s 到 %(found_domain)s 的 CNAME 鏈"
msgid "Found configs:"
msgstr "找到配置:"
-msgid ""
-"Handoffs first mode still has handoffs remaining. Aborting current "
-"replication pass."
-msgstr "「遞交作業最先」模式仍有剩餘的遞交作業。正在中斷現行抄寫傳遞。"
-
msgid "Host unreachable"
msgstr "無法抵達主機"
@@ -463,14 +156,6 @@ msgid "Invalid pending entry %(file)s: %(entry)s"
msgstr "無效的擱置項目 %(file)s:%(entry)s"
#, python-format
-msgid "Invalid response %(resp)s from %(full_path)s"
-msgstr "來自 %(full_path)s 的回應 %(resp)s 無效"
-
-#, python-format
-msgid "Invalid response %(resp)s from %(ip)s"
-msgstr "來自 %(ip)s 的回應 %(resp)s 無效"
-
-#, python-format
msgid ""
"Invalid scheme %r in X-Container-Sync-To, must be \"//\", \"http\", or "
"\"https\"."
@@ -478,13 +163,6 @@ msgstr ""
"X-Container-Sync-To 中的架構 %r 無效,必須是 \"//\"、\"http\" 或\"https\"。"
#, python-format
-msgid "Killing long-running rsync: %s"
-msgstr "正在結束長時間執行的遠端同步:%s"
-
-msgid "Lockup detected.. killing live coros."
-msgstr "偵測到鎖定。正在結束即時 coro。"
-
-#, python-format
msgid "Mapped %(given_domain)s to %(found_domain)s"
msgstr "已將 %(given_domain)s 對映至 %(found_domain)s"
@@ -504,122 +182,17 @@ msgstr "沒有具有索引 %s 的原則"
msgid "No realm key for %r"
msgstr "沒有 %r 的範圍金鑰"
-#, python-format
-msgid "Node error limited %(ip)s:%(port)s (%(device)s)"
-msgstr "節點錯誤限制 %(ip)s:%(port)s (%(device)s)"
-
-#, python-format
-msgid "Not enough object servers ack'ed (got %d)"
-msgstr "未確認足夠的物件伺服器(已取得 %d)"
-
-#, python-format
-msgid ""
-"Not found %(sync_from)r => %(sync_to)r - object "
-"%(obj_name)r"
-msgstr ""
-"找不到 %(sync_from)r => %(sync_to)r - 物件%(obj_name)r"
-
-#, python-format
-msgid "Nothing reconstructed for %s seconds."
-msgstr "%s 秒未重新建構任何內容。"
-
-#, python-format
-msgid "Nothing replicated for %s seconds."
-msgstr "未抄寫任何項目達 %s 秒。"
-
-msgid "Object"
-msgstr "物件"
-
-msgid "Object PUT"
-msgstr "物件 PUT"
-
-#, python-format
-msgid "Object PUT returning 202 for 409: %(req_timestamp)s <= %(timestamps)r"
-msgstr "物件 PUT 針對 409 正在傳回 202:%(req_timestamp)s <= %(timestamps)r"
-
-#, python-format
-msgid "Object PUT returning 412, %(statuses)r"
-msgstr "物件 PUT 正在傳回 412,%(statuses)r"
-
-#, python-format
-msgid ""
-"Object audit (%(type)s) \"%(mode)s\" mode completed: %(elapsed).02fs. Total "
-"quarantined: %(quars)d, Total errors: %(errors)d, Total files/sec: "
-"%(frate).2f, Total bytes/sec: %(brate).2f, Auditing time: %(audit).2f, Rate: "
-"%(audit_rate).2f"
-msgstr ""
-"物件審核 (%(type)s) \"%(mode)s\" 模式已完成:%(elapsed).02fs。已隔離總計:"
-"%(quars)d,錯誤總計:%(errors)d,檔案/秒總計:%(frate).2f,位元組/秒總計:"
-"%(brate).2f,審核時間:%(audit).2f,速率:%(audit_rate).2f"
-
-#, python-format
-msgid ""
-"Object audit (%(type)s). Since %(start_time)s: Locally: %(passes)d passed, "
-"%(quars)d quarantined, %(errors)d errors, files/sec: %(frate).2f, bytes/sec: "
-"%(brate).2f, Total time: %(total).2f, Auditing time: %(audit).2f, Rate: "
-"%(audit_rate).2f"
-msgstr ""
-"物件審核 (%(type)s)。自 %(start_time)s 以來:本端:%(passes)d 個已通"
-"過,%(quars)d 個已隔離,%(errors)d 個錯誤,檔案/秒:%(frate).2f,位元組數/"
-"秒:%(brate).2f,時間總計:%(total).2f,審核時間:%(audit).2f,速率:"
-"%(audit_rate).2f"
-
-#, python-format
-msgid "Object audit stats: %s"
-msgstr "物件審核統計資料:%s"
-
-#, python-format
-msgid "Object reconstruction complete (once). (%.02f minutes)"
-msgstr "物件重新建構完成(一次性)。(%.02f 分鐘)"
-
-#, python-format
-msgid "Object reconstruction complete. (%.02f minutes)"
-msgstr "物件重新建構完成。(%.02f 分鐘)"
-
-#, python-format
-msgid "Object replication complete (once). (%.02f minutes)"
-msgstr "物件抄寫完成(一次性)。(%.02f 分鐘)"
-
-#, python-format
-msgid "Object replication complete. (%.02f minutes)"
-msgstr "物件抄寫完成。(%.02f 分鐘)"
-
-#, python-format
-msgid "Object servers returned %s mismatched etags"
-msgstr "物件伺服器已傳回 %s 個不符 etag"
-
-#, python-format
-msgid "Object update sweep completed: %.02fs"
-msgstr "物件更新清理已完成:%.02fs"
-
msgid "Params, queries, and fragments not allowed in X-Container-Sync-To"
msgstr "X-Container-Sync-To 中不容許參數、查詢及片段"
-#, python-format
-msgid "Partition times: max %(max).4fs, min %(min).4fs, med %(med).4fs"
-msgstr "分割區時間:最大 %(max).4fs,最小 %(min).4fs,中間 %(med).4fs"
-
msgid "Path required in X-Container-Sync-To"
msgstr "X-Container-Sync-To 中需要路徑"
#, python-format
-msgid "Problem cleaning up %s"
-msgstr "清除 %s 時發生問題"
-
-#, python-format
msgid "Profiling Error: %s"
msgstr "側寫錯誤:%s"
#, python-format
-msgid "Quarantined %(hsh_path)s to %(quar_path)s because it is not a directory"
-msgstr "已將 %(hsh_path)s 隔離至 %(quar_path)s,原因是它不是目錄"
-
-#, python-format
-msgid ""
-"Quarantined %(object_path)s to %(quar_path)s because it is not a directory"
-msgstr "已將 %(object_path)s 隔離至 %(quar_path)s,原因是它不是目錄"
-
-#, python-format
msgid "Quarantining DB %s"
msgstr "正在隔離資料庫 %s"
@@ -632,14 +205,6 @@ msgid "Removed %(remove)d dbs"
msgstr "已移除 %(remove)d 個資料庫"
#, python-format
-msgid "Removing %s objects"
-msgstr "正在移除 %s 物件"
-
-#, python-format
-msgid "Removing partition: %s"
-msgstr "正在移除分割區:%s"
-
-#, python-format
msgid "Removing pid file %(pid_file)s with wrong pid %(pid)d"
msgstr "正在移除具有錯誤 PID %(pid)d 的 PID 檔 %(pid_file)s"
@@ -659,51 +224,9 @@ msgid "Returning 497 because of blacklisting: %s"
msgstr "由於黑名單,正在傳回 497:%s"
#, python-format
-msgid ""
-"Returning 498 for %(meth)s to %(acc)s/%(cont)s/%(obj)s . Ratelimit (Max "
-"Sleep) %(e)s"
-msgstr ""
-"正在將 %(meth)s 的 498 傳回至 %(acc)s/%(cont)s/%(obj)s。Ratelimit(休眠上"
-"限)%(e)s"
-
-msgid "Ring change detected. Aborting current reconstruction pass."
-msgstr "偵測到環變更。正在中斷現行重新建構傳遞。"
-
-msgid "Ring change detected. Aborting current replication pass."
-msgstr "偵測到環變更。正在中斷現行抄寫傳遞。"
-
-#, python-format
msgid "Running %s once"
msgstr "正在執行 %s 一次"
-msgid "Running object reconstructor in script mode."
-msgstr "正在 Script 模式下執行物件重新建構器。"
-
-msgid "Running object replicator in script mode."
-msgstr "正在 Script 模式下執行物件抄寫器"
-
-#, python-format
-msgid ""
-"Since %(time)s: %(sync)s synced [%(delete)s deletes, %(put)s puts], %(skip)s "
-"skipped, %(fail)s failed"
-msgstr ""
-"自 %(time)s 以來:已同步 %(sync)s 個 [已刪除 [%(delete)s 個,已放置 %(put)s "
-"個],已跳過 %(skip)s 個,%(fail)s 個失敗"
-
-#, python-format
-msgid ""
-"Since %(time)s: Account audits: %(passed)s passed audit,%(failed)s failed "
-"audit"
-msgstr ""
-"自 %(time)s 以來:帳戶審核:%(passed)s 個已通過審核,%(failed)s 個失敗審核"
-
-#, python-format
-msgid ""
-"Since %(time)s: Container audits: %(pass)s passed audit, %(fail)s failed "
-"audit"
-msgstr ""
-"自 %(time)s 以來:儲存器審核:%(pass)s 個已通過審核,%(fail)s 個失敗審核"
-
#, python-format
msgid "Skipping %s as it is not mounted"
msgstr "正在跳過 %s,原因是它未裝載"
@@ -712,54 +235,9 @@ msgstr "正在跳過 %s,原因是它未裝載"
msgid "Starting %s"
msgstr "正在啟動 %s"
-msgid "Starting object reconstruction pass."
-msgstr "正在啟動物件重新建構傳遞。"
-
-msgid "Starting object reconstructor in daemon mode."
-msgstr "正在常駐程式模式下啟動物件重新建構器。"
-
-msgid "Starting object replication pass."
-msgstr "正在啟動物件抄寫傳遞。"
-
-msgid "Starting object replicator in daemon mode."
-msgstr "正在常駐程式模式下啟動物件抄寫器。"
-
-#, python-format
-msgid "Successful rsync of %(src)s at %(dst)s (%(time).03f)"
-msgstr "已順利遠端同步 %(dst)s 中的 %(src)s (%(time).03f)"
-
msgid "The file type are forbidden to access!"
msgstr "此檔案類型禁止存取!"
-#, python-format
-msgid ""
-"The total %(key)s for the container (%(total)s) does not match the sum of "
-"%(key)s across policies (%(sum)s)"
-msgstr ""
-"儲存器的 %(key)s 總計 (%(total)s) 不符合原則中的 %(key)s 總和 (%(sum)s) "
-
-#, python-format
-msgid "Trying to %(method)s %(path)s"
-msgstr "正在嘗試 %(method)s %(path)s"
-
-#, python-format
-msgid "Trying to GET %(full_path)s"
-msgstr "正在嘗試對 %(full_path)s 執行 GET 動作"
-
-msgid "Trying to read during GET"
-msgstr "正在嘗試於 GET 期間讀取"
-
-msgid "Trying to send to client"
-msgstr "正在嘗試傳送至用戶端"
-
-#, python-format
-msgid "Trying to sync suffixes with %s"
-msgstr "正在嘗試與 %s 同步字尾"
-
-#, python-format
-msgid "Trying to write to %s"
-msgstr "正在嘗試寫入至 %s"
-
msgid "UNCAUGHT EXCEPTION"
msgstr "未捕捉的異常狀況"
@@ -779,21 +257,6 @@ msgstr "在 libc 中找不到 fallocate、posix_fallocate。保留為 no-op。"
msgid "Unable to read config from %s"
msgstr "無法從 %s 讀取配置"
-#, python-format
-msgid "Unauth %(sync_from)r => %(sync_to)r"
-msgstr "未鑑別 %(sync_from)r => %(sync_to)r"
-
-msgid "Unhandled exception"
-msgstr "無法處理的異常狀況"
-
-#, python-format
-msgid "Update report failed for %(container)s %(dbfile)s"
-msgstr "%(container)s %(dbfile)s 的更新報告失敗"
-
-#, python-format
-msgid "Update report sent for %(container)s %(dbfile)s"
-msgstr "已傳送 %(container)s %(dbfile)s 的更新報告"
-
msgid ""
"WARNING: SSL should only be enabled for testing purposes. Use external SSL "
"termination for a production deployment."
diff --git a/swift/obj/diskfile.py b/swift/obj/diskfile.py
index 588b087a0..0f3d7ef76 100644
--- a/swift/obj/diskfile.py
+++ b/swift/obj/diskfile.py
@@ -582,7 +582,7 @@ def object_audit_location_generator(devices, datadir, mount_check=True,
try:
suffixes = listdir(part_path)
except OSError as e:
- if e.errno != errno.ENOTDIR:
+ if e.errno not in (errno.ENOTDIR, errno.ENODATA):
raise
continue
for asuffix in suffixes:
@@ -590,7 +590,7 @@ def object_audit_location_generator(devices, datadir, mount_check=True,
try:
hashes = listdir(suff_path)
except OSError as e:
- if e.errno != errno.ENOTDIR:
+ if e.errno not in (errno.ENOTDIR, errno.ENODATA):
raise
continue
for hsh in hashes:
diff --git a/swift/obj/reconstructor.py b/swift/obj/reconstructor.py
index 5294231d3..c3f35615a 100644
--- a/swift/obj/reconstructor.py
+++ b/swift/obj/reconstructor.py
@@ -169,7 +169,7 @@ class ObjectReconstructor(Daemon):
self.devices_dir = conf.get('devices', '/srv/node')
self.mount_check = config_true_value(conf.get('mount_check', 'true'))
self.swift_dir = conf.get('swift_dir', '/etc/swift')
- self.bind_ip = conf.get('bind_ip', '0.0.0.0')
+ self.ring_ip = conf.get('ring_ip', conf.get('bind_ip', '0.0.0.0'))
self.servers_per_port = int(conf.get('servers_per_port', '0') or 0)
self.port = None if self.servers_per_port else \
int(conf.get('bind_port', 6200))
@@ -1275,7 +1275,7 @@ class ObjectReconstructor(Daemon):
return jobs
def get_policy2devices(self):
- ips = whataremyips(self.bind_ip)
+ ips = whataremyips(self.ring_ip)
policy2devices = {}
for policy in self.policies:
self.load_object_ring(policy)
diff --git a/swift/obj/replicator.py b/swift/obj/replicator.py
index 6e84c05dc..45d8a3f85 100644
--- a/swift/obj/replicator.py
+++ b/swift/obj/replicator.py
@@ -134,7 +134,7 @@ class ObjectReplicator(Daemon):
self.devices_dir = conf.get('devices', '/srv/node')
self.mount_check = config_true_value(conf.get('mount_check', 'true'))
self.swift_dir = conf.get('swift_dir', '/etc/swift')
- self.bind_ip = conf.get('bind_ip', '0.0.0.0')
+ self.ring_ip = conf.get('ring_ip', conf.get('bind_ip', '0.0.0.0'))
self.servers_per_port = int(conf.get('servers_per_port', '0') or 0)
self.port = None if self.servers_per_port else \
int(conf.get('bind_port', 6200))
@@ -182,6 +182,8 @@ class ObjectReplicator(Daemon):
self.default_headers = {
'Content-Length': '0',
'user-agent': 'object-replicator %s' % os.getpid()}
+ self.log_rsync_transfers = config_true_value(
+ conf.get('log_rsync_transfers', True))
self.rsync_error_log_line_length = \
int(conf.get('rsync_error_log_line_length', 0))
self.handoffs_first = config_true_value(conf.get('handoffs_first',
@@ -211,17 +213,6 @@ class ObjectReplicator(Daemon):
self.replicator_workers,
os.getpid()))
- def _get_my_replication_ips(self):
- my_replication_ips = set()
- ips = whataremyips()
- for policy in self.policies:
- self.load_object_ring(policy)
- for local_dev in [dev for dev in policy.object_ring.devs
- if dev and dev['replication_ip'] in ips and
- dev['replication_port'] == self.port]:
- my_replication_ips.add(local_dev['replication_ip'])
- return list(my_replication_ips)
-
def _child_process_reaper(self):
"""
Consume processes from self._child_process_reaper_queue and wait() for
@@ -316,7 +307,7 @@ class ObjectReplicator(Daemon):
This is the device names, e.g. "sdq" or "d1234" or something, not
the full ring entries.
"""
- ips = whataremyips(self.bind_ip)
+ ips = whataremyips(self.ring_ip)
local_devices = set()
for policy in self.policies:
self.load_object_ring(policy)
@@ -413,8 +404,10 @@ class ObjectReplicator(Daemon):
continue
if result.startswith('cd+'):
continue
+ if result.startswith('<') and not self.log_rsync_transfers:
+ continue
if not ret_val:
- self.logger.info(result)
+ self.logger.debug(result)
else:
self.logger.error(result)
if ret_val:
@@ -426,7 +419,8 @@ class ObjectReplicator(Daemon):
log_method = self.logger.info if results else self.logger.debug
log_method(
"Successful rsync of %(src)s to %(dst)s (%(time).03f)",
- {'src': args[-2], 'dst': args[-1], 'time': total_time})
+ {'src': args[-2][:-3] + '...', 'dst': args[-1],
+ 'time': total_time})
return ret_val
def rsync(self, node, job, suffixes):
@@ -614,8 +608,9 @@ class ObjectReplicator(Daemon):
try:
tpool.execute(shutil.rmtree, path)
except OSError as e:
- if e.errno not in (errno.ENOENT, errno.ENOTEMPTY):
- # If there was a race to create or delete, don't worry
+ if e.errno not in (errno.ENOENT, errno.ENOTEMPTY, errno.ENODATA):
+ # Don't worry if there was a race to create or delete,
+ # or some disk corruption that happened after the sync
raise
def delete_handoff_objs(self, job, delete_objs):
@@ -907,7 +902,7 @@ class ObjectReplicator(Daemon):
policies will be returned
"""
jobs = []
- ips = whataremyips(self.bind_ip)
+ ips = whataremyips(self.ring_ip)
for policy in self.policies:
# Skip replication if next_part_power is set. In this case
# every object is hard-linked twice, but the replicator can't
@@ -946,7 +941,6 @@ class ObjectReplicator(Daemon):
self.last_replication_count = 0
self.replication_cycle = (self.replication_cycle + 1) % 10
self.partition_times = []
- self.my_replication_ips = self._get_my_replication_ips()
self.all_devs_info = set()
self.handoffs_remaining = 0
diff --git a/swift/obj/ssync_receiver.py b/swift/obj/ssync_receiver.py
index 54f70e862..b1e2ab072 100644
--- a/swift/obj/ssync_receiver.py
+++ b/swift/obj/ssync_receiver.py
@@ -116,17 +116,17 @@ class Receiver(object):
The general process inside an SSYNC request is:
- 1. Initialize the request: Basic request validation, mount check,
- acquire semaphore lock, etc..
+ 1. Initialize the request: Basic request validation, mount check,
+ acquire semaphore lock, etc..
- 2. Missing check: Sender sends the hashes and timestamps of
- the object information it can send, receiver sends back
- the hashes it wants (doesn't have or has an older
- timestamp).
+ 2. Missing check: Sender sends the hashes and timestamps of
+ the object information it can send, receiver sends back
+ the hashes it wants (doesn't have or has an older
+ timestamp).
- 3. Updates: Sender sends the object information requested.
+ 3. Updates: Sender sends the object information requested.
- 4. Close down: Release semaphore lock, etc.
+ 4. Close down: Release semaphore lock, etc.
"""
def __init__(self, app, request):
@@ -346,29 +346,29 @@ class Receiver(object):
The process is generally:
- 1. Sender sends `:MISSING_CHECK: START` and begins
- sending `hash timestamp` lines.
+ 1. Sender sends ``:MISSING_CHECK: START`` and begins
+ sending `hash timestamp` lines.
- 2. Receiver gets `:MISSING_CHECK: START` and begins
- reading the `hash timestamp` lines, collecting the
- hashes of those it desires.
+ 2. Receiver gets ``:MISSING_CHECK: START`` and begins
+ reading the `hash timestamp` lines, collecting the
+ hashes of those it desires.
- 3. Sender sends `:MISSING_CHECK: END`.
+ 3. Sender sends ``:MISSING_CHECK: END``.
- 4. Receiver gets `:MISSING_CHECK: END`, responds with
- `:MISSING_CHECK: START`, followed by the list of
- <wanted_hash> specifiers it collected as being wanted
- (one per line), `:MISSING_CHECK: END`, and flushes any
- buffers.
+ 4. Receiver gets ``:MISSING_CHECK: END``, responds with
+ ``:MISSING_CHECK: START``, followed by the list of
+ <wanted_hash> specifiers it collected as being wanted
+ (one per line), ``:MISSING_CHECK: END``, and flushes any
+ buffers.
- Each <wanted_hash> specifier has the form <hash>[ <parts>] where
- <parts> is a string containing characters 'd' and/or 'm'
- indicating that only data or meta part of object respectively is
- required to be sync'd.
+ Each <wanted_hash> specifier has the form <hash>[ <parts>] where
+ <parts> is a string containing characters 'd' and/or 'm'
+ indicating that only data or meta part of object respectively is
+ required to be sync'd.
- 5. Sender gets `:MISSING_CHECK: START` and reads the list
- of hashes desired by the receiver until reading
- `:MISSING_CHECK: END`.
+ 5. Sender gets ``:MISSING_CHECK: START`` and reads the list
+ of hashes desired by the receiver until reading
+ ``:MISSING_CHECK: END``.
The collection and then response is so the sender doesn't
have to read while it writes to ensure network buffers don't
@@ -413,18 +413,18 @@ class Receiver(object):
The process is generally:
- 1. Sender sends `:UPDATES: START` and begins sending the
- PUT and DELETE subrequests.
+ 1. Sender sends ``:UPDATES: START`` and begins sending the
+ PUT and DELETE subrequests.
- 2. Receiver gets `:UPDATES: START` and begins routing the
- subrequests to the object server.
+ 2. Receiver gets ``:UPDATES: START`` and begins routing the
+ subrequests to the object server.
- 3. Sender sends `:UPDATES: END`.
+ 3. Sender sends ``:UPDATES: END``.
- 4. Receiver gets `:UPDATES: END` and sends `:UPDATES:
- START` and `:UPDATES: END` (assuming no errors).
+ 4. Receiver gets ``:UPDATES: END`` and sends ``:UPDATES:
+ START`` and ``:UPDATES: END`` (assuming no errors).
- 5. Sender gets `:UPDATES: START` and `:UPDATES: END`.
+ 5. Sender gets ``:UPDATES: START`` and ``:UPDATES: END``.
If too many subrequests fail, as configured by
replication_failure_threshold and replication_failure_ratio,
diff --git a/swift/obj/updater.py b/swift/obj/updater.py
index 2ee7c35fa..1e327b891 100644
--- a/swift/obj/updater.py
+++ b/swift/obj/updater.py
@@ -86,15 +86,15 @@ class BucketizedUpdateSkippingLimiter(object):
The iterator increments stats as follows:
- * The `deferrals` stat is incremented for each update that is
- rate-limited. Note that a individual update is rate-limited at most
- once.
- * The `skips` stat is incremented for each rate-limited update that is
- not eventually yielded. This includes updates that are evicted from the
- deferral queue and all updates that remain in the deferral queue when
- ``drain_until`` time is reached and the iterator terminates.
- * The `drains` stat is incremented for each rate-limited update that is
- eventually yielded.
+ * The `deferrals` stat is incremented for each update that is
+ rate-limited. Note that a individual update is rate-limited at most
+ once.
+ * The `skips` stat is incremented for each rate-limited update that is
+ not eventually yielded. This includes updates that are evicted from the
+ deferral queue and all updates that remain in the deferral queue when
+ ``drain_until`` time is reached and the iterator terminates.
+ * The `drains` stat is incremented for each rate-limited update that is
+ eventually yielded.
Consequently, when this iterator terminates, the sum of `skips` and
`drains` is equal to the number of `deferrals`.
@@ -219,12 +219,12 @@ class SweepStats(object):
"""
Stats bucket for an update sweep
- A measure of the rate at which updates are being rate-limited is:
+ A measure of the rate at which updates are being rate-limited is::
deferrals / (deferrals + successes + failures - drains)
A measure of the rate at which updates are not being sent during a sweep
- is:
+ is::
skips / (skips + successes + failures)
"""
diff --git a/swift/proxy/controllers/info.py b/swift/proxy/controllers/info.py
index 05bc1a998..2435849c7 100644
--- a/swift/proxy/controllers/info.py
+++ b/swift/proxy/controllers/info.py
@@ -16,7 +16,8 @@
import json
from time import time
-from swift.common.utils import public, get_hmac, streq_const_time
+from swift.common.utils import public, streq_const_time
+from swift.common.digest import get_hmac
from swift.common.registry import get_swift_info
from swift.proxy.controllers.base import Controller, delay_denial
from swift.common.swob import HTTPOk, HTTPForbidden, HTTPUnauthorized
diff --git a/test/debug_logger.py b/test/debug_logger.py
index e1fc84e69..21b3cb7a6 100644
--- a/test/debug_logger.py
+++ b/test/debug_logger.py
@@ -134,6 +134,9 @@ class FakeLogger(logging.Logger, CaptureLog):
return [call[0][0] for call in self.log_dict['increment']]
def get_increment_counts(self):
+ # note: this method reports the sum of stats sent via the increment
+ # method only; consider using get_stats_counts instead to get the sum
+ # of stats sent via both the increment and update_stats methods
counts = {}
for metric in self.get_increments():
if metric not in counts:
@@ -144,6 +147,16 @@ class FakeLogger(logging.Logger, CaptureLog):
def get_update_stats(self):
return [call[0] for call in self.log_dict['update_stats']]
+ def get_stats_counts(self):
+ # return dict key->count for stats, aggregating calls to both the
+ # increment and update methods
+ counts = self.get_increment_counts()
+ for metric, step in self.get_update_stats():
+ if metric not in counts:
+ counts[metric] = 0
+ counts[metric] += step
+ return counts
+
def setFormatter(self, obj):
self.formatter = obj
diff --git a/test/functional/s3api/test_object.py b/test/functional/s3api/test_object.py
index 5c0d84753..bbca44068 100644
--- a/test/functional/s3api/test_object.py
+++ b/test/functional/s3api/test_object.py
@@ -789,15 +789,17 @@ class TestS3ApiObject(S3ApiBase):
elem = fromstring(body, 'ListBucketResult')
last_modified = elem.find('./Contents/LastModified').text
listing_datetime = S3Timestamp.from_s3xmlformat(last_modified)
- headers = \
- {'If-Unmodified-Since': formatdate(listing_datetime)}
+ # Make sure there's no fractions of a second
+ self.assertEqual(int(listing_datetime), float(listing_datetime))
+ header_datetime = formatdate(int(listing_datetime))
+
+ headers = {'If-Unmodified-Since': header_datetime}
status, headers, body = \
self.conn.make_request('GET', self.bucket, obj, headers=headers)
self.assertEqual(status, 200)
self.assertCommonResponseHeaders(headers)
- headers = \
- {'If-Modified-Since': formatdate(listing_datetime)}
+ headers = {'If-Modified-Since': header_datetime}
status, headers, body = \
self.conn.make_request('GET', self.bucket, obj, headers=headers)
self.assertEqual(status, 304)
diff --git a/test/functional/test_object_versioning.py b/test/functional/test_object_versioning.py
index a6ac9027d..32eb092c8 100644
--- a/test/functional/test_object_versioning.py
+++ b/test/functional/test_object_versioning.py
@@ -2578,7 +2578,7 @@ class TestHistoryLocationWithVersioning(TestVersionsLocationWithVersioning):
class TestVersioningAccountTempurl(TestObjectVersioningBase):
env = TestTempurlEnv
- digest_name = 'sha1'
+ digest_name = 'sha256'
def setUp(self):
self.env.versions_header_key = 'X-Versions-Enabled'
@@ -2689,7 +2689,7 @@ class TestVersioningAccountTempurl(TestObjectVersioningBase):
class TestVersioningContainerTempurl(TestObjectVersioningBase):
env = TestContainerTempurlEnv
- digest_name = 'sha1'
+ digest_name = 'sha256'
def setUp(self):
self.env.versions_header_key = 'X-Versions-Enabled'
diff --git a/test/functional/test_symlink.py b/test/functional/test_symlink.py
index 372d2b3a8..eee646576 100755
--- a/test/functional/test_symlink.py
+++ b/test/functional/test_symlink.py
@@ -2268,7 +2268,7 @@ class TestSymlinkComparison(TestSymlinkTargetObjectComparison):
class TestSymlinkAccountTempurl(Base):
env = TestTempurlEnv
- digest_name = 'sha1'
+ digest_name = 'sha256'
def setUp(self):
super(TestSymlinkAccountTempurl, self).setUp()
@@ -2364,7 +2364,7 @@ class TestSymlinkAccountTempurl(Base):
class TestSymlinkContainerTempurl(Base):
env = TestContainerTempurlEnv
- digest_name = 'sha1'
+ digest_name = 'sha256'
def setUp(self):
super(TestSymlinkContainerTempurl, self).setUp()
diff --git a/test/functional/test_tempurl.py b/test/functional/test_tempurl.py
index 54659bc31..6f442e479 100644
--- a/test/functional/test_tempurl.py
+++ b/test/functional/test_tempurl.py
@@ -90,7 +90,7 @@ class TestTempurlEnv(TestTempurlBaseEnv):
class TestTempurl(Base):
env = TestTempurlEnv
- digest_name = 'sha1'
+ digest_name = 'sha256'
def setUp(self):
super(TestTempurl, self).setUp()
@@ -102,6 +102,8 @@ class TestTempurl(Base):
"Expected tempurl_enabled to be True/False, got %r" %
(self.env.tempurl_enabled,))
+ # N.B. The default to 'sha1' in case the info has nothing is for
+ # extremely old clusters, which presumably use SHA1.
if self.digest_name not in cluster_info['tempurl'].get(
'allowed_digests', ['sha1']):
raise SkipTest("tempurl does not support %s signatures" %
@@ -461,7 +463,7 @@ class TestContainerTempurlEnv(BaseEnv):
class TestContainerTempurl(Base):
env = TestContainerTempurlEnv
- digest_name = 'sha1'
+ digest_name = 'sha256'
def setUp(self):
super(TestContainerTempurl, self).setUp()
@@ -736,7 +738,7 @@ class TestSloTempurlEnv(TestTempurlBaseEnv):
class TestSloTempurl(Base):
env = TestSloTempurlEnv
- digest_name = 'sha1'
+ digest_name = 'sha256'
def setUp(self):
super(TestSloTempurl, self).setUp()
@@ -838,7 +840,7 @@ class TestTempurlAlgorithms(Base):
else:
raise ValueError('Unrecognized encoding: %r' % encoding)
- def _do_test(self, digest, encoding, expect_failure=False):
+ def _do_test(self, digest, encoding):
expires = int(time()) + 86400
sig = self.get_sig(expires, digest, encoding)
@@ -850,24 +852,14 @@ class TestTempurlAlgorithms(Base):
parms = {'temp_url_sig': sig, 'temp_url_expires': str(expires)}
- if expect_failure:
- with self.assertRaises(ResponseError):
- self.env.obj.read(parms=parms, cfg={'no_auth_token': True})
- self.assert_status([401])
-
- # ditto for HEADs
- with self.assertRaises(ResponseError):
- self.env.obj.info(parms=parms, cfg={'no_auth_token': True})
- self.assert_status([401])
- else:
- contents = self.env.obj.read(
- parms=parms,
- cfg={'no_auth_token': True})
- self.assertEqual(contents, b"obj contents")
+ contents = self.env.obj.read(
+ parms=parms,
+ cfg={'no_auth_token': True})
+ self.assertEqual(contents, b"obj contents")
- # GET tempurls also allow HEAD requests
- self.assertTrue(self.env.obj.info(
- parms=parms, cfg={'no_auth_token': True}))
+ # GET tempurls also allow HEAD requests
+ self.assertTrue(self.env.obj.info(
+ parms=parms, cfg={'no_auth_token': True}))
@requires_digest('sha1')
def test_sha1(self):
@@ -887,8 +879,7 @@ class TestTempurlAlgorithms(Base):
@requires_digest('sha512')
def test_sha512(self):
- # 128 chars seems awfully long for a signature -- let's require base64
- self._do_test('sha512', 'hex', expect_failure=True)
+ self._do_test('sha512', 'hex')
self._do_test('sha512', 'base64')
self._do_test('sha512', 'base64-no-padding')
self._do_test('sha512', 'url-safe-base64')
diff --git a/test/probe/test_sharder.py b/test/probe/test_sharder.py
index 69e923b43..68cace6ff 100644
--- a/test/probe/test_sharder.py
+++ b/test/probe/test_sharder.py
@@ -72,24 +72,18 @@ class BaseTestContainerSharding(ReplProbeTest):
def _maybe_skip_test(self):
try:
- cont_configs = [
+ self.cont_configs = [
utils.readconf(p, 'container-sharder')
for p in self.configs['container-sharder'].values()]
except ValueError:
raise SkipTest('No [container-sharder] section found in '
'container-server configs')
- skip_reasons = []
- auto_shard = all(config_true_value(c.get('auto_shard', False))
- for c in cont_configs)
- if not auto_shard:
- skip_reasons.append(
- 'auto_shard must be true in all container_sharder configs')
-
self.max_shard_size = max(
int(c.get('shard_container_threshold', '1000000'))
- for c in cont_configs)
+ for c in self.cont_configs)
+ skip_reasons = []
if not (MIN_SHARD_CONTAINER_THRESHOLD <= self.max_shard_size
<= MAX_SHARD_CONTAINER_THRESHOLD):
skip_reasons.append(
@@ -98,7 +92,7 @@ class BaseTestContainerSharding(ReplProbeTest):
MAX_SHARD_CONTAINER_THRESHOLD))
def skip_check(reason_list, option, required):
- values = {int(c.get(option, required)) for c in cont_configs}
+ values = {int(c.get(option, required)) for c in self.cont_configs}
if values != {required}:
reason_list.append('%s must be %s' % (option, required))
@@ -133,6 +127,7 @@ class BaseTestContainerSharding(ReplProbeTest):
self.sharders = Manager(['container-sharder'])
self.internal_client = self.make_internal_client()
self.memcache = MemcacheRing(['127.0.0.1:11211'])
+ self.container_replicators = Manager(['container-replicator'])
def init_brain(self, container_name):
self.container_to_shard = container_name
@@ -371,10 +366,18 @@ class BaseTestContainerSharding(ReplProbeTest):
else:
self.fail('No shard sysmeta found in %s' % headers)
- def assert_container_state(self, node, expected_state, num_shard_ranges):
+ def assert_container_state(self, node, expected_state, num_shard_ranges,
+ account=None, container=None, part=None,
+ override_deleted=False):
+ account = account or self.account
+ container = container or self.container_to_shard
+ part = part or self.brain.part
+ headers = {'X-Backend-Record-Type': 'shard'}
+ if override_deleted:
+ headers['x-backend-override-deleted'] = True
headers, shard_ranges = direct_client.direct_get_container(
- node, self.brain.part, self.account, self.container_to_shard,
- headers={'X-Backend-Record-Type': 'shard'})
+ node, part, account, container,
+ headers=headers)
self.assertEqual(num_shard_ranges, len(shard_ranges))
self.assertIn('X-Backend-Sharding-State', headers)
self.assertEqual(
@@ -383,7 +386,7 @@ class BaseTestContainerSharding(ReplProbeTest):
def assert_subprocess_success(self, cmd_args):
try:
- subprocess.check_output(cmd_args, stderr=subprocess.STDOUT)
+ return subprocess.check_output(cmd_args, stderr=subprocess.STDOUT)
except Exception as exc:
# why not 'except CalledProcessError'? because in my py3.6 tests
# the CalledProcessError wasn't caught by that! despite type(exc)
@@ -402,12 +405,17 @@ class BaseTestContainerSharding(ReplProbeTest):
shard_range.account, shard_range.container)
return part, [n['id'] + 1 for n in nodes]
- def run_sharders(self, shard_ranges):
+ def run_sharders(self, shard_ranges, exclude_partitions=None):
"""Run the sharder on partitions for given shard ranges."""
if not isinstance(shard_ranges, (list, tuple, set)):
shard_ranges = (shard_ranges,)
- partitions = ','.join(str(self.get_part_and_node_numbers(sr)[0])
- for sr in shard_ranges)
+ exclude_partitions = exclude_partitions or []
+ shard_parts = []
+ for sr in shard_ranges:
+ sr_part = self.get_part_and_node_numbers(sr)[0]
+ if sr_part not in exclude_partitions:
+ shard_parts.append(str(sr_part))
+ partitions = ','.join(shard_parts)
self.sharders.once(additional_args='--partitions=%s' % partitions)
def run_sharder_sequentially(self, shard_range=None):
@@ -425,7 +433,18 @@ class BaseTestContainerSharding(ReplProbeTest):
conf_index, custom_conf, **kwargs)
-class TestContainerShardingNonUTF8(BaseTestContainerSharding):
+class BaseAutoContainerSharding(BaseTestContainerSharding):
+
+ def _maybe_skip_test(self):
+ super(BaseAutoContainerSharding, self)._maybe_skip_test()
+ auto_shard = all(config_true_value(c.get('auto_shard', False))
+ for c in self.cont_configs)
+ if not auto_shard:
+ raise SkipTest('auto_shard must be true '
+ 'in all container_sharder configs')
+
+
+class TestContainerShardingNonUTF8(BaseAutoContainerSharding):
def test_sharding_listing(self):
# verify parameterised listing of a container during sharding
all_obj_names = self._make_object_names(4 * self.max_shard_size)
@@ -661,7 +680,7 @@ class TestContainerShardingUTF8(TestContainerShardingNonUTF8):
self.container_name = self.container_name.decode('utf8')
-class TestContainerShardingObjectVersioning(BaseTestContainerSharding):
+class TestContainerShardingObjectVersioning(BaseAutoContainerSharding):
def _maybe_skip_test(self):
super(TestContainerShardingObjectVersioning, self)._maybe_skip_test()
try:
@@ -870,7 +889,7 @@ class TestContainerShardingObjectVersioning(BaseTestContainerSharding):
self.assert_container_post_ok('sharded')
-class TestContainerSharding(BaseTestContainerSharding):
+class TestContainerSharding(BaseAutoContainerSharding):
def _test_sharded_listing(self, run_replicators=False):
obj_names = self._make_object_names(self.max_shard_size)
self.put_objects(obj_names)
@@ -1322,6 +1341,81 @@ class TestContainerSharding(BaseTestContainerSharding):
def test_sharded_listing_with_replicators(self):
self._test_sharded_listing(run_replicators=True)
+ def test_listing_under_populated_replica(self):
+ # the leader node and one other primary have all the objects and will
+ # cleave to 4 shard ranges, but the third primary only has 1 object in
+ # the final shard range
+ obj_names = self._make_object_names(2 * self.max_shard_size)
+ self.brain.servers.stop(number=self.brain.node_numbers[2])
+ self.put_objects(obj_names)
+ self.brain.servers.start(number=self.brain.node_numbers[2])
+ subset_obj_names = [obj_names[-1]]
+ self.put_objects(subset_obj_names)
+ self.brain.servers.stop(number=self.brain.node_numbers[2])
+
+ # sanity check: the first 2 primaries will list all objects
+ self.assert_container_listing(obj_names, req_hdrs={'x-newest': 'true'})
+
+ # Run sharder on the fully populated nodes, starting with the leader
+ client.post_container(self.url, self.admin_token, self.container_name,
+ headers={'X-Container-Sharding': 'on'})
+ self.sharders.once(number=self.brain.node_numbers[0],
+ additional_args='--partitions=%s' % self.brain.part)
+ self.sharders.once(number=self.brain.node_numbers[1],
+ additional_args='--partitions=%s' % self.brain.part)
+
+ # Verify that the first 2 primary nodes have cleaved the first batch of
+ # 2 shard ranges
+ broker = self.get_broker(self.brain.part, self.brain.nodes[0])
+ self.assertEqual('sharding', broker.get_db_state())
+ shard_ranges = [dict(sr) for sr in broker.get_shard_ranges()]
+ self.assertLengthEqual(shard_ranges, 4)
+ self.assertEqual([ShardRange.CLEAVED, ShardRange.CLEAVED,
+ ShardRange.CREATED, ShardRange.CREATED],
+ [sr['state'] for sr in shard_ranges])
+ self.assertEqual(
+ {False},
+ set([ctx.done() for ctx, _ in CleavingContext.load_all(broker)]))
+
+ # listing is complete (from the fully populated primaries at least);
+ # the root serves the listing parts for the last 2 shard ranges which
+ # are not yet cleaved
+ self.assert_container_listing(obj_names, req_hdrs={'x-newest': 'true'})
+
+ # Run the sharder on the under-populated node to get it fully
+ # cleaved.
+ self.brain.servers.start(number=self.brain.node_numbers[2])
+ Manager(['container-replicator']).once(
+ number=self.brain.node_numbers[2])
+ self.sharders.once(number=self.brain.node_numbers[2],
+ additional_args='--partitions=%s' % self.brain.part)
+
+ broker = self.get_broker(self.brain.part, self.brain.nodes[2])
+ self.assertEqual('sharded', broker.get_db_state())
+ shard_ranges = [dict(sr) for sr in broker.get_shard_ranges()]
+ self.assertLengthEqual(shard_ranges, 4)
+ self.assertEqual([ShardRange.ACTIVE, ShardRange.ACTIVE,
+ ShardRange.ACTIVE, ShardRange.ACTIVE],
+ [sr['state'] for sr in shard_ranges])
+ self.assertEqual(
+ {True, False},
+ set([ctx.done() for ctx, _ in CleavingContext.load_all(broker)]))
+
+ # Get a consistent view of shard range states then check listing
+ Manager(['container-replicator']).once(
+ number=self.brain.node_numbers[2])
+ # oops, the listing is incomplete because the last 2 listing parts are
+ # now served by the under-populated shard ranges.
+ self.assert_container_listing(
+ obj_names[:self.max_shard_size] + subset_obj_names,
+ req_hdrs={'x-newest': 'true'})
+
+ # but once another replica has completed cleaving the listing is
+ # complete again
+ self.sharders.once(number=self.brain.node_numbers[1],
+ additional_args='--partitions=%s' % self.brain.part)
+ self.assert_container_listing(obj_names, req_hdrs={'x-newest': 'true'})
+
def test_async_pendings(self):
obj_names = self._make_object_names(self.max_shard_size * 2)
@@ -3048,7 +3142,8 @@ class TestManagedContainerSharding(BaseTestContainerSharding):
# path with most cleaving progress, and so shrink shard ranges 0.*.
db_file = self.get_db_file(self.brain.part, self.brain.nodes[0])
self.assert_subprocess_success(
- ['swift-manage-shard-ranges', db_file, 'repair', '--yes'])
+ ['swift-manage-shard-ranges', db_file, 'repair', '--yes',
+ '--min-shard-age', '0'])
# make sure all root replicas now sync their shard ranges
self.replicators.once()
@@ -3303,7 +3398,8 @@ class TestManagedContainerSharding(BaseTestContainerSharding):
# container
db_file = self.get_db_file(self.brain.part, self.brain.nodes[0])
self.assert_subprocess_success(
- ['swift-manage-shard-ranges', db_file, 'repair', '--yes'])
+ ['swift-manage-shard-ranges', db_file, 'repair', '--yes',
+ '--min-shard-age', '0'])
self.replicators.once()
self.sharders_once()
self.sharders_once()
@@ -3330,3 +3426,359 @@ class TestManagedContainerSharding(BaseTestContainerSharding):
self.assertEqual(1, len(sharded_shard_ranges), root_shard_ranges)
self.assert_container_listing(expected_obj_names)
+
+ def test_manage_shard_ranges_repair_root_shrinking_gaps(self):
+ # provoke shrinking/shrunk gaps by prematurely repairing a transient
+ # overlap in root container; repair the gap.
+ # note: be careful not to add a container listing to this test which
+ # would get shard ranges into memcache
+ obj_names = self._make_object_names(4)
+ self.put_objects(obj_names)
+
+ client.post_container(self.url, self.admin_token, self.container_name,
+ headers={'X-Container-Sharding': 'on'})
+
+ # run replicators first time to get sync points set
+ self.container_replicators.once(
+ additional_args='--partitions=%s' % self.brain.part)
+
+ # shard root
+ root_0_db_file = self.get_db_file(self.brain.part, self.brain.nodes[0])
+ self.assert_subprocess_success([
+ 'swift-manage-shard-ranges',
+ root_0_db_file,
+ 'find_and_replace', '2', '--enable'])
+ self.container_replicators.once(
+ additional_args='--partitions=%s' % self.brain.part)
+ for node in self.brain.nodes:
+ self.assert_container_state(node, 'unsharded', 2)
+ self.sharders_once(additional_args='--partitions=%s' % self.brain.part)
+ # get shards to update state from parent...
+ self.sharders_once()
+ for node in self.brain.nodes:
+ self.assert_container_state(node, 'sharded', 2)
+
+ # sanity check, all is well
+ msg = self.assert_subprocess_success([
+ 'swift-manage-shard-ranges', root_0_db_file, 'repair', '--gaps',
+ '--dry-run'])
+ self.assertIn(b'No repairs necessary.', msg)
+
+ # shard first shard into 2 sub-shards while root node 0 is disabled
+ self.stop_container_servers(node_numbers=slice(0, 1))
+ shard_ranges = self.get_container_shard_ranges()
+ shard_brokers = [self.get_shard_broker(shard_ranges[0], node_index=i)
+ for i in range(3)]
+ self.assert_subprocess_success([
+ 'swift-manage-shard-ranges',
+ shard_brokers[0].db_file,
+ 'find_and_replace', '1', '--enable'])
+ shard_part, shard_nodes = self.brain.ring.get_nodes(
+ shard_ranges[0].account, shard_ranges[0].container)
+ self.container_replicators.once(
+ additional_args='--partitions=%s' % shard_part)
+ # TODO: get this assertion working (node filtering wonky??)
+ # for node in [n for n in shard_nodes if n != self.brain.nodes[0]]:
+ # self.assert_container_state(
+ # node, 'unsharded', 2, account=shard_ranges[0].account,
+ # container=shard_ranges[0].container, part=shard_part)
+ self.sharders_once(additional_args='--partitions=%s' % shard_part)
+ # get shards to update state from parent...
+ self.sharders_once()
+ # TODO: get this assertion working (node filtering wonky??)
+ # for node in [n for n in shard_nodes if n != self.brain.nodes[0]]:
+ # self.assert_container_state(
+ # node, 'sharded', 2, account=shard_ranges[0].account,
+ # container=shard_ranges[0].container, part=shard_part)
+
+ # put an object into the second of the 2 sub-shards so that the shard
+ # will update the root next time the sharder is run; do this before
+ # restarting root node 0 so that the object update is definitely
+ # redirected to a sub-shard by root node 1 or 2.
+ new_obj_name = obj_names[0] + 'a'
+ self.put_objects([new_obj_name])
+
+ # restart root node 0
+ self.brain.servers.start(number=self.brain.node_numbers[0])
+ # node 0 DB doesn't know about the sub-shards
+ root_brokers = [self.get_broker(self.brain.part, node)
+ for node in self.brain.nodes]
+ broker = root_brokers[0]
+ self.assertEqual(
+ [(ShardRange.ACTIVE, False, ShardRange.MIN, obj_names[1]),
+ (ShardRange.ACTIVE, False, obj_names[1], ShardRange.MAX)],
+ [(sr.state, sr.deleted, sr.lower, sr.upper)
+ for sr in broker.get_shard_ranges(include_deleted=True)])
+
+ for broker in root_brokers[1:]:
+ self.assertEqual(
+ [(ShardRange.ACTIVE, False, ShardRange.MIN, obj_names[0]),
+ (ShardRange.ACTIVE, False, obj_names[0], obj_names[1]),
+ (ShardRange.SHARDED, True, ShardRange.MIN, obj_names[1]),
+ (ShardRange.ACTIVE, False, obj_names[1], ShardRange.MAX)],
+ [(sr.state, sr.deleted, sr.lower, sr.upper)
+ for sr in broker.get_shard_ranges(include_deleted=True)])
+
+ sub_shard = root_brokers[1].get_shard_ranges()[1]
+ self.assertEqual(obj_names[0], sub_shard.lower)
+ self.assertEqual(obj_names[1], sub_shard.upper)
+ sub_shard_part, nodes = self.get_part_and_node_numbers(sub_shard)
+ # we want the sub-shard to update root node 0 but not the sharded
+ # shard, but there is a small chance the two will be in same partition
+ # TODO: how can we work around this?
+ self.assertNotEqual(sub_shard_part, shard_part,
+ 'You were unlucky, try again')
+ self.sharders_once(additional_args='--partitions=%s' % sub_shard_part)
+
+ # now root node 0 has the original shards plus one of the sub-shards
+ # but all are active :(
+ self.assertEqual(
+ [(ShardRange.ACTIVE, False, ShardRange.MIN, obj_names[1]),
+ # note: overlap!
+ (ShardRange.ACTIVE, False, obj_names[0], obj_names[1]),
+ (ShardRange.ACTIVE, False, obj_names[1], ShardRange.MAX)],
+ [(sr.state, sr.deleted, sr.lower, sr.upper)
+ for sr in root_brokers[0].get_shard_ranges(include_deleted=True)])
+
+ # we are allowed to fix the overlap...
+ msg = self.assert_subprocess_success(
+ ['swift-manage-shard-ranges', root_0_db_file, 'repair', '--yes',
+ '--min-shard-age', '0'])
+ self.assertIn(
+ b'Repairs necessary to remove overlapping shard ranges.', msg)
+
+ self.assertEqual(
+ [(ShardRange.ACTIVE, False, ShardRange.MIN, obj_names[1]),
+ (ShardRange.SHRINKING, False, obj_names[0], obj_names[1]),
+ (ShardRange.ACTIVE, False, obj_names[1], ShardRange.MAX)],
+ [(sr.state, sr.deleted, sr.lower, sr.upper)
+ for sr in root_brokers[0].get_shard_ranges(include_deleted=True)])
+
+ self.sharders_once()
+ self.sharders_once()
+ self.container_replicators.once()
+
+ # boo :'( ... we made gap
+ for broker in root_brokers:
+ self.assertEqual(
+ [(ShardRange.ACTIVE, False, ShardRange.MIN, obj_names[0]),
+ (ShardRange.SHARDED, True, ShardRange.MIN, obj_names[1]),
+ (ShardRange.SHRUNK, True, obj_names[0], obj_names[1]),
+ (ShardRange.ACTIVE, False, obj_names[1], ShardRange.MAX)],
+ [(sr.state, sr.deleted, sr.lower, sr.upper)
+ for sr in broker.get_shard_ranges(include_deleted=True)])
+
+ msg = self.assert_subprocess_success([
+ 'swift-manage-shard-ranges', root_0_db_file, 'repair', '--gaps',
+ '--yes'])
+ self.assertIn(b'Repairs necessary to fill gaps.', msg)
+
+ self.sharders_once()
+ self.sharders_once()
+ self.container_replicators.once()
+
+ # yay! we fixed the gap (without creating an overlap)
+ for broker in root_brokers:
+ self.assertEqual(
+ [(ShardRange.ACTIVE, False, ShardRange.MIN, obj_names[0]),
+ (ShardRange.SHARDED, True, ShardRange.MIN, obj_names[1]),
+ (ShardRange.SHRUNK, True, obj_names[0], obj_names[1]),
+ (ShardRange.ACTIVE, False, obj_names[0], ShardRange.MAX)],
+ [(sr.state, sr.deleted, sr.lower, sr.upper)
+ for sr in broker.get_shard_ranges(include_deleted=True)])
+
+ msg = self.assert_subprocess_success([
+ 'swift-manage-shard-ranges', root_0_db_file, 'repair',
+ '--dry-run', '--min-shard-age', '0'])
+ self.assertIn(b'No repairs necessary.', msg)
+ msg = self.assert_subprocess_success([
+ 'swift-manage-shard-ranges', root_0_db_file, 'repair', '--gaps',
+ '--dry-run'])
+ self.assertIn(b'No repairs necessary.', msg)
+
+ self.assert_container_listing(
+ [obj_names[0], new_obj_name] + obj_names[1:])
+
+ def test_manage_shard_ranges_unsharded_deleted_root(self):
+ # verify that a deleted DB will still be sharded
+
+ # choose a node that will not be sharded initially
+ sharded_nodes = []
+ unsharded_node = None
+ for node in self.brain.nodes:
+ if self.brain.node_numbers[node['index']] \
+ in self.brain.handoff_numbers:
+ unsharded_node = node
+ else:
+ sharded_nodes.append(node)
+
+ # put some objects - not enough to trigger auto-sharding
+ obj_names = self._make_object_names(MIN_SHARD_CONTAINER_THRESHOLD - 1)
+ self.put_objects(obj_names)
+
+ # run replicators first time to get sync points set and commit updates
+ self.replicators.once()
+
+ # setup sharding...
+ self.assert_subprocess_success([
+ 'swift-manage-shard-ranges',
+ self.get_db_file(self.brain.part, sharded_nodes[0]),
+ 'find_and_replace', '2', '--enable', '--minimum-shard-size', '1'])
+
+ # Run container-replicator to replicate shard ranges
+ self.container_replicators.once()
+ self.assert_container_state(sharded_nodes[0], 'unsharded', 2)
+ self.assert_container_state(sharded_nodes[1], 'unsharded', 2)
+ self.assert_container_state(unsharded_node, 'unsharded', 2)
+
+ # Run container-sharder to shard the 2 primary replicas that did
+ # receive the object PUTs
+ for num in self.brain.primary_numbers:
+ self.sharders_once(
+ number=num,
+ additional_args='--partitions=%s' % self.brain.part)
+
+ # delete the objects - the proxy's will have cached container info with
+ # out-of-date db_state=unsharded, so updates go to the root DBs
+ self.delete_objects(obj_names)
+ # deal with DELETE's being misplaced in root db's...
+ for num in self.brain.primary_numbers:
+ self.sharders_once(
+ number=num,
+ additional_args='--partitions=%s' % self.brain.part)
+
+ self.assert_container_state(sharded_nodes[0], 'sharded', 2)
+ self.assert_container_state(sharded_nodes[1], 'sharded', 2)
+ shard_ranges = self.assert_container_state(
+ unsharded_node, 'unsharded', 2)
+
+ # get root stats updated - but avoid sharding the remaining root DB
+ self.run_sharders(shard_ranges, exclude_partitions=[self.brain.part])
+ self.assert_container_listing([])
+
+ # delete the empty container
+ client.delete_container(self.url, self.admin_token,
+ self.container_name)
+
+ # sanity check - unsharded DB is deleted
+ broker = self.get_broker(self.brain.part, unsharded_node,
+ self.account, self.container_name)
+ self.assertEqual(UNSHARDED, broker.get_db_state())
+ self.assertTrue(broker.is_deleted())
+ self.assertEqual(0, broker.get_info()['object_count'])
+ self.assertEqual(0, broker.get_shard_usage()['object_count'])
+
+ # now shard the final DB
+ for num in self.brain.handoff_numbers:
+ self.sharders_once(
+ number=num,
+ additional_args='--partitions=%s' % self.brain.part)
+
+ # all DBs should now be sharded and still deleted
+ for node in self.brain.nodes:
+ with annotate_failure(
+ 'node %s in %s'
+ % (node['index'], [n['index'] for n in self.brain.nodes])):
+ self.assert_container_state(node, 'sharded', 2,
+ override_deleted=True)
+ broker = self.get_broker(self.brain.part, node,
+ self.account, self.container_name)
+ self.assertEqual(SHARDED, broker.get_db_state())
+ self.assertEqual(0, broker.get_info()['object_count'])
+ self.assertEqual(0,
+ broker.get_shard_usage()['object_count'])
+ self.assertTrue(broker.is_deleted())
+
+ def test_manage_shard_ranges_unsharded_deleted_root_gets_undeleted(self):
+ # verify that an apparently deleted DB (no object rows in root db) will
+ # still be sharded and also become undeleted when objects are
+ # discovered in the shards
+
+ # choose a node that will not be sharded initially
+ sharded_nodes = []
+ unsharded_node = None
+ for node in self.brain.nodes:
+ if self.brain.node_numbers[node['index']] \
+ in self.brain.handoff_numbers:
+ unsharded_node = node
+ else:
+ sharded_nodes.append(node)
+
+ # put some objects, but only to 2 replicas - not enough to trigger
+ # auto-sharding
+ self.brain.stop_handoff_half()
+
+ obj_names = self._make_object_names(MIN_SHARD_CONTAINER_THRESHOLD - 1)
+ self.put_objects(obj_names)
+ # run replicators first time to get sync points set and commit puts
+ self.replicators.once()
+
+ # setup sharding...
+ self.assert_subprocess_success([
+ 'swift-manage-shard-ranges',
+ self.get_db_file(self.brain.part, sharded_nodes[0]),
+ 'find_and_replace', '2', '--enable', '--minimum-shard-size', '1'])
+
+ # Run container-replicator to replicate shard ranges - object rows will
+ # not be sync'd now there are shard ranges
+ for num in self.brain.primary_numbers:
+ self.container_replicators.once(number=num)
+ self.assert_container_state(sharded_nodes[0], 'unsharded', 2)
+ self.assert_container_state(sharded_nodes[1], 'unsharded', 2)
+
+ # revive the stopped node
+ self.brain.start_handoff_half()
+ self.assert_container_state(unsharded_node, 'unsharded', 0)
+
+ # delete the empty replica
+ direct_client.direct_delete_container(
+ unsharded_node, self.brain.part, self.account,
+ self.container_name)
+
+ # Run container-sharder to shard the 2 primary replicas that did
+ # receive the object PUTs
+ for num in self.brain.primary_numbers:
+ self.sharders_once(
+ number=num,
+ additional_args='--partitions=%s' % self.brain.part)
+
+ self.assert_container_state(sharded_nodes[0], 'sharded', 2)
+ self.assert_container_state(sharded_nodes[1], 'sharded', 2)
+ # the sharder syncs shard ranges ...
+ self.assert_container_state(unsharded_node, 'unsharded', 2,
+ override_deleted=True)
+
+ # sanity check - unsharded DB is empty and deleted
+ broker = self.get_broker(self.brain.part, unsharded_node,
+ self.account, self.container_name)
+ self.assertEqual(UNSHARDED, broker.get_db_state())
+ self.assertEqual(0, broker.get_info()['object_count'])
+ # the shard ranges do have object count but are in CREATED state so
+ # not reported in shard usage...
+ self.assertEqual(0, broker.get_shard_usage()['object_count'])
+ self.assertTrue(broker.is_deleted())
+
+ # now shard the final DB
+ for num in self.brain.handoff_numbers:
+ self.sharders_once(
+ number=num,
+ additional_args='--partitions=%s' % self.brain.part)
+ shard_ranges = self.assert_container_state(
+ unsharded_node, 'sharded', 2, override_deleted=True)
+
+ # and get roots updated and sync'd
+ self.container_replicators.once()
+ self.run_sharders(shard_ranges, exclude_partitions=[self.brain.part])
+
+ # all DBs should now be sharded and NOT deleted
+ for node in self.brain.nodes:
+ with annotate_failure(
+ 'node %s in %s'
+ % (node['index'], [n['index'] for n in self.brain.nodes])):
+ broker = self.get_broker(self.brain.part, node,
+ self.account, self.container_name)
+ self.assertEqual(SHARDED, broker.get_db_state())
+ self.assertEqual(3, broker.get_info()['object_count'])
+ self.assertEqual(3,
+ broker.get_shard_usage()['object_count'])
+ self.assertFalse(broker.is_deleted())
diff --git a/test/s3api/__init__.py b/test/s3api/__init__.py
index 9c722721b..ebfc0fe64 100644
--- a/test/s3api/__init__.py
+++ b/test/s3api/__init__.py
@@ -23,7 +23,7 @@ import boto3
from botocore.exceptions import ClientError
from six.moves import urllib
-from swift.common.utils import config_true_value
+from swift.common.utils import config_true_value, readconf
from test import get_config
@@ -40,6 +40,30 @@ class ConfigError(Exception):
'''Error test conf misconfigurations'''
+def load_aws_config(conf_file):
+ """
+ Read user credentials from an AWS CLI style credentials file and translate
+ to a swift test config. Currently only supports a single user.
+
+ :param conf_file: path to AWS credentials file
+ """
+ conf = readconf(conf_file, 'default')
+ global _CONFIG
+ _CONFIG = {
+ 'endpoint': 'https://s3.amazonaws.com',
+ 'region': 'us-east-1',
+ 'access_key1': conf.get('aws_access_key_id'),
+ 'secret_key1': conf.get('aws_secret_access_key'),
+ 'session_token1': conf.get('aws_session_token')
+ }
+
+
+aws_config_file = os.environ.get('SWIFT_TEST_AWS_CONFIG_FILE')
+if aws_config_file:
+ load_aws_config(aws_config_file)
+ print('Loaded test config from %s' % aws_config_file)
+
+
def get_opt_or_error(option):
global _CONFIG
if _CONFIG is None:
@@ -94,6 +118,7 @@ def get_s3_client(user=1, signature_version='s3v4', addressing_style='path'):
region = get_opt('region', 'us-east-1')
access_key = get_opt_or_error('access_key%d' % user)
secret_key = get_opt_or_error('secret_key%d' % user)
+ session_token = get_opt('session_token%d' % user)
ca_cert = get_opt('ca_cert')
if ca_cert is not None:
@@ -115,6 +140,7 @@ def get_s3_client(user=1, signature_version='s3v4', addressing_style='path'):
}),
aws_access_key_id=access_key,
aws_secret_access_key=secret_key,
+ aws_session_token=session_token
)
diff --git a/test/s3api/test_mpu.py b/test/s3api/test_mpu.py
new file mode 100644
index 000000000..e522ba915
--- /dev/null
+++ b/test/s3api/test_mpu.py
@@ -0,0 +1,100 @@
+# Copyright (c) 2021 Nvidia
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from test.s3api import BaseS3TestCase
+
+
+class TestMultiPartUploads(BaseS3TestCase):
+
+ maxDiff = None
+
+ def setUp(self):
+ self.client = self.get_s3_client(1)
+ self.bucket_name = self.create_name('test-mpu')
+ resp = self.client.create_bucket(Bucket=self.bucket_name)
+ self.assertEqual(200, resp['ResponseMetadata']['HTTPStatusCode'])
+
+ def tearDown(self):
+ self.clear_bucket(self.client, self.bucket_name)
+ super(TestMultiPartUploads, self).tearDown()
+
+ def test_basic_upload(self):
+ key_name = self.create_name('key')
+ create_mpu_resp = self.client.create_multipart_upload(
+ Bucket=self.bucket_name, Key=key_name)
+ self.assertEqual(200, create_mpu_resp[
+ 'ResponseMetadata']['HTTPStatusCode'])
+ upload_id = create_mpu_resp['UploadId']
+ parts = []
+ for i in range(1, 3):
+ body = ('%d' % i) * 5 * (2 ** 20)
+ part_resp = self.client.upload_part(
+ Body=body, Bucket=self.bucket_name, Key=key_name,
+ PartNumber=i, UploadId=upload_id)
+ self.assertEqual(200, part_resp[
+ 'ResponseMetadata']['HTTPStatusCode'])
+ parts.append({
+ 'ETag': part_resp['ETag'],
+ 'PartNumber': i,
+ })
+ list_parts_resp = self.client.list_parts(
+ Bucket=self.bucket_name, Key=key_name,
+ UploadId=upload_id,
+ )
+ self.assertEqual(200, list_parts_resp[
+ 'ResponseMetadata']['HTTPStatusCode'])
+ self.assertEqual(parts, [{k: p[k] for k in ('ETag', 'PartNumber')}
+ for p in list_parts_resp['Parts']])
+ complete_mpu_resp = self.client.complete_multipart_upload(
+ Bucket=self.bucket_name, Key=key_name,
+ MultipartUpload={
+ 'Parts': parts,
+ },
+ UploadId=upload_id,
+ )
+ self.assertEqual(200, complete_mpu_resp[
+ 'ResponseMetadata']['HTTPStatusCode'])
+
+ def test_create_list_abort_multipart_uploads(self):
+ key_name = self.create_name('key')
+ create_mpu_resp = self.client.create_multipart_upload(
+ Bucket=self.bucket_name, Key=key_name)
+ self.assertEqual(200, create_mpu_resp[
+ 'ResponseMetadata']['HTTPStatusCode'])
+ upload_id = create_mpu_resp['UploadId']
+
+ # our upload is in progress
+ list_mpu_resp = self.client.list_multipart_uploads(
+ Bucket=self.bucket_name)
+ self.assertEqual(200, list_mpu_resp[
+ 'ResponseMetadata']['HTTPStatusCode'])
+ found_uploads = list_mpu_resp.get('Uploads', [])
+ self.assertEqual(1, len(found_uploads), found_uploads)
+ self.assertEqual(upload_id, found_uploads[0]['UploadId'])
+
+ abort_resp = self.client.abort_multipart_upload(
+ Bucket=self.bucket_name,
+ Key=key_name,
+ UploadId=upload_id,
+ )
+ self.assertEqual(204, abort_resp[
+ 'ResponseMetadata']['HTTPStatusCode'])
+
+ # no more inprogress uploads
+ list_mpu_resp = self.client.list_multipart_uploads(
+ Bucket=self.bucket_name)
+ self.assertEqual(200, list_mpu_resp[
+ 'ResponseMetadata']['HTTPStatusCode'])
+ self.assertEqual([], list_mpu_resp.get('Uploads', []))
diff --git a/test/unit/__init__.py b/test/unit/__init__.py
index 266763e0a..e57b57453 100644
--- a/test/unit/__init__.py
+++ b/test/unit/__init__.py
@@ -279,6 +279,7 @@ class FakeRing(Ring):
'zone': x % 3,
'region': x % 2,
'id': x,
+ 'weight': 1,
}
self.add_node(dev)
diff --git a/test/unit/cli/test_ipv6_output.stub b/test/unit/cli/test_ipv6_output.stub
index 423b8fae1..30be34861 100644
--- a/test/unit/cli/test_ipv6_output.stub
+++ b/test/unit/cli/test_ipv6_output.stub
@@ -3,8 +3,8 @@ __RINGFILE__, build version 4, id __BUILDER_ID__
The minimum number of hours before a partition can be reassigned is 1 (0:00:00 remaining)
The overload factor is 0.00% (0.000000)
Ring file __RINGFILE__.ring.gz not found, probably it hasn't been written yet
-Devices: id region zone ip address:port replication ip:port name weight partitions balance flags meta
- 0 0 0 [2001:db8:85a3::8a2e:370:7334]:6200 [2001:db8:85a3::8a2e:370:7334]:6200 sda1 100.00 0 -100.00 some meta data
- 1 1 1 127.0.0.1:66201 127.0.0.1:66201 sda2 100.00 0 -100.00
- 2 2 2 [2001:db8:85a3::8a2e:370:7336]:6202 127.0.10.127:7070 sdc3 100.00 0 -100.00
- 3 3 3 [2001:db8:85a3::8a2e:370:7337]:6203 [7001:db8:85a3::8a2e:370:7337]:11664 sdd4 100.00 0 -100.00
+Devices: id region zone ip address:port replication ip:port name weight partitions balance flags meta
+ 0 0 0 [2001:db8:85a3::8a2e:370:7334]:6200 [2001:db8:85a3::8a2e:370:7334]:6200 sda1 100.00 0 -100.00 some meta data
+ 1 1 1 127.0.0.1:66201 127.0.0.1:66201 sda2 100.00 0 -100.00
+ 2 2 2 [2001:db8:85a3::8a2e:370:7336]:6202 127.0.10.127:7070 sdc3 10000.00 0 -100.00
+ 3 3 3 [2001:db8:85a3::8a2e:370:7337]:6203 [7001:db8:85a3::8a2e:370:7337]:11664 sdd4 100.00 0 -100.00
diff --git a/test/unit/cli/test_manage_shard_ranges.py b/test/unit/cli/test_manage_shard_ranges.py
index 240c4184a..469b8da59 100644
--- a/test/unit/cli/test_manage_shard_ranges.py
+++ b/test/unit/cli/test_manage_shard_ranges.py
@@ -62,7 +62,8 @@ class TestManageShardRanges(unittest.TestCase):
]
self.overlap_shard_data_1 = [
- {'index': 0, 'lower': '', 'upper': 'obj10', 'object_count': 1},
+ {'index': 0, 'lower': '', 'upper': 'obj10',
+ 'object_count': 1},
{'index': 1, 'lower': 'obj10', 'upper': 'obj20',
'object_count': 1},
{'index': 2, 'lower': 'obj20', 'upper': 'obj30',
@@ -79,7 +80,8 @@ class TestManageShardRanges(unittest.TestCase):
'object_count': 1},
{'index': 8, 'lower': 'obj78', 'upper': 'obj88',
'object_count': 1},
- {'index': 9, 'lower': 'obj88', 'upper': '', 'object_count': 1},
+ {'index': 9, 'lower': 'obj88', 'upper': '',
+ 'object_count': 1},
]
self.overlap_shard_data_2 = [
@@ -1074,22 +1076,22 @@ class TestManageShardRanges(unittest.TestCase):
'Donor shard range(s) with total of 2018 rows:',
" '.shards_a",
" objects: 10, tombstones: 999, lower: 'obj29'",
- " state: active, upper: 'obj39'",
+ " state: active, deleted: 0 upper: 'obj39'",
" '.shards_a",
" objects: 10, tombstones: 999, lower: 'obj39'",
- " state: active, upper: 'obj49'",
+ " state: active, deleted: 0 upper: 'obj49'",
'can be compacted into acceptor shard range:',
" '.shards_a",
" objects: 100001, tombstones: 999, lower: 'obj49'",
- " state: active, upper: 'obj59'",
+ " state: active, deleted: 0 upper: 'obj59'",
'Donor shard range(s) with total of 1009 rows:',
" '.shards_a",
" objects: 10, tombstones: 999, lower: 'obj69'",
- " state: active, upper: 'obj79'",
+ " state: active, deleted: 0 upper: 'obj79'",
'can be compacted into acceptor shard range:',
" '.shards_a",
" objects: 100001, tombstones: 999, lower: 'obj79'",
- " state: active, upper: 'obj89'",
+ " state: active, deleted: 0 upper: 'obj89'",
'Total of 2 shard sequences identified for compaction.',
'Once applied to the broker these changes will result in '
'shard range compaction the next time the sharder runs.',
@@ -1634,7 +1636,7 @@ class TestManageShardRanges(unittest.TestCase):
updated_ranges = broker.get_shard_ranges()
self.assert_shard_ranges_equal([], updated_ranges)
- def test_repair_gaps_one_incomplete_sequence(self):
+ def test_repair_one_incomplete_sequence(self):
broker = self._make_broker()
broker.set_sharding_sysmeta('Quoted-Root', 'a/c')
with mock_timestamp_now(next(self.ts_iter)):
@@ -1656,7 +1658,7 @@ class TestManageShardRanges(unittest.TestCase):
updated_ranges = broker.get_shard_ranges()
self.assert_shard_ranges_equal(shard_ranges, updated_ranges)
- def test_repair_gaps_overlapping_incomplete_sequences(self):
+ def test_repair_overlapping_incomplete_sequences(self):
broker = self._make_broker()
broker.set_sharding_sysmeta('Quoted-Root', 'a/c')
with mock_timestamp_now(next(self.ts_iter)):
@@ -1685,6 +1687,374 @@ class TestManageShardRanges(unittest.TestCase):
key=ShardRange.sort_key)
self.assert_shard_ranges_equal(expected, updated_ranges)
+ def test_repair_gaps(self):
+ def do_test(missing_index, expander_index, missing_state=None):
+ broker = self._make_broker()
+ broker.set_sharding_sysmeta('Quoted-Root', 'a/c')
+ for shard in self.shard_data:
+ shard['state'] = ShardRange.ACTIVE
+ with mock_timestamp_now(next(self.ts_iter)):
+ all_shard_ranges = make_shard_ranges(
+ broker, self.shard_data, '.shards_')
+ shard_ranges = list(all_shard_ranges)
+ if missing_state is None:
+ missing_range = shard_ranges.pop(missing_index)
+ exp_gap_contents = []
+ else:
+ missing_range = shard_ranges[missing_index]
+ missing_range.state = missing_state
+ exp_gap_contents = [
+ " '%s'" % missing_range.name, mock.ANY, mock.ANY]
+ broker.merge_shard_ranges(shard_ranges)
+ self.assertTrue(broker.is_root_container())
+ out = StringIO()
+ err = StringIO()
+ with mock_timestamp_now(next(self.ts_iter)) as ts_now, \
+ mock.patch('sys.stdout', out), \
+ mock.patch('sys.stderr', err):
+ ret = main([broker.db_file, 'repair', '--gaps', '--yes'])
+ self.assertEqual(0, ret)
+ err_lines = err.getvalue().split('\n')
+ self.assert_starts_with(err_lines[0], 'Loaded db broker for ')
+ out_lines = out.getvalue().split('\n')
+ expander = all_shard_ranges[expander_index]
+ if missing_index < expander_index:
+ expander.lower = missing_range.lower
+ else:
+ expander.upper = missing_range.upper
+ expander.state_timestamp = expander.timestamp
+ expander.meta_timestamp = expander.timestamp
+ expander.timestamp = ts_now
+ self.assertEqual(
+ ['Found 1 gaps:',
+ ' gap: %r - %r' % (missing_range.lower, missing_range.upper),
+ ' apparent gap contents:']
+ + exp_gap_contents +
+ [' gap can be fixed by expanding neighbor range:',
+ " '%s'" % expander.name] +
+ [mock.ANY] * 2 +
+ ['',
+ 'Repairs necessary to fill gaps.',
+ 'The following expanded shard range(s) will be applied to '
+ 'the DB:',
+ " '%s'" % expander.name] +
+ [mock.ANY] * 2 +
+ ['',
+ 'It is recommended that no other concurrent changes are made '
+ 'to the ',
+ 'shard ranges while fixing gaps. If necessary, abort '
+ 'this change ',
+ 'and stop any auto-sharding processes before repeating '
+ 'this command.',
+ '',
+ 'Run container-replicator to replicate the changes to '
+ 'other nodes.',
+ 'Run container-sharder on all nodes to fill gaps.',
+ ''],
+ out_lines)
+ updated_ranges = broker.get_shard_ranges()
+ self.assert_shard_ranges_equal(shard_ranges, updated_ranges)
+ os.remove(broker.db_file)
+
+ for i in range(len(self.shard_data) - 1):
+ do_test(i, i + 1)
+
+ do_test(len(self.shard_data) - 1, len(self.shard_data) - 2)
+
+ for i in range(len(self.shard_data) - 1):
+ do_test(i, i + 1, ShardRange.SHRINKING)
+
+ do_test(len(self.shard_data) - 1, len(self.shard_data) - 2,
+ ShardRange.SHRINKING)
+
+ def test_repair_gaps_multiple_missing(self):
+ def do_test(broker, max_expanding):
+ broker.set_sharding_sysmeta('Quoted-Root', 'a/c')
+ states = [
+ ShardRange.ACTIVE,
+ ShardRange.SHRINKING,
+ ShardRange.SHRUNK,
+ ShardRange.ACTIVE,
+ ShardRange.SHRUNK,
+ ShardRange.SHRINKING,
+ ShardRange.ACTIVE,
+ ShardRange.SHRINKING,
+ ShardRange.SHRUNK,
+ ShardRange.SHARDED,
+ ]
+ for i, shard in enumerate(self.shard_data):
+ shard['state'] = states[i]
+ if states[i] in (ShardRange.SHRUNK, ShardRange.SHARDED):
+ shard['deleted'] = 1
+ with mock_timestamp_now(next(self.ts_iter)):
+ shard_ranges = make_shard_ranges(
+ broker, self.shard_data, '.shards_')
+ broker.merge_shard_ranges(shard_ranges)
+ self.assertTrue(broker.is_root_container())
+ orig_shard_ranges = broker.get_shard_ranges(include_deleted=True)
+ out = StringIO()
+ err = StringIO()
+ args = [broker.db_file, 'repair', '--gaps', '--yes']
+ if max_expanding is not None:
+ args.extend(['--max-expanding', str(max_expanding)])
+ with mock_timestamp_now(next(self.ts_iter)) as ts_now, \
+ mock.patch('sys.stdout', out), \
+ mock.patch('sys.stderr', err):
+ ret = main(args)
+ self.assertEqual(0, ret)
+ err_lines = err.getvalue().split('\n')
+ self.assert_starts_with(err_lines[0], 'Loaded db broker for ')
+ out_lines = out.getvalue().split('\n')
+ os.remove(broker.db_file)
+ return orig_shard_ranges, out_lines, ts_now
+
+ # max-expanding 1
+ broker = self._make_broker()
+ orig_shard_ranges, out_lines, ts_now = do_test(broker, 1)
+ orig_shard_ranges[3].timestamp = ts_now
+ orig_shard_ranges[3].lower = orig_shard_ranges[1].lower
+ self.assertEqual(
+ ['Found 3 gaps:',
+ ' gap: %r - %r' % (orig_shard_ranges[1].lower,
+ orig_shard_ranges[2].upper),
+ ' apparent gap contents:']
+ + [mock.ANY] * 6 +
+ [' gap can be fixed by expanding neighbor range:',
+ " '%s'" % orig_shard_ranges[3].name] +
+ [mock.ANY] * 2 +
+ [' gap: %r - %r' % (orig_shard_ranges[4].lower,
+ orig_shard_ranges[5].upper),
+ ' apparent gap contents:'] +
+ [mock.ANY] * 6 +
+ [' gap can be fixed by expanding neighbor range:',
+ " '%s'" % orig_shard_ranges[6].name] +
+ [mock.ANY] * 2 +
+ [' gap: %r - %r' % (orig_shard_ranges[7].lower,
+ orig_shard_ranges[9].upper),
+ ' apparent gap contents:'] +
+ [mock.ANY] * 9 +
+ [' gap can be fixed by expanding neighbor range:',
+ " '%s'" % orig_shard_ranges[6].name] +
+ [mock.ANY] * 2 +
+ ['',
+ 'Repairs necessary to fill gaps.',
+ 'The following expanded shard range(s) will be applied to the '
+ 'DB:',
+ " '%s'" % orig_shard_ranges[3].name] +
+ [mock.ANY] * 6 +
+ ['',
+ 'Run container-replicator to replicate the changes to '
+ 'other nodes.',
+ 'Run container-sharder on all nodes to fill gaps.',
+ ''],
+ out_lines)
+ updated_ranges = broker.get_shard_ranges(include_deleted=True)
+ self.assert_shard_ranges_equal(
+ sorted(orig_shard_ranges, key=lambda s: s.name),
+ sorted(updated_ranges, key=lambda s: s.name))
+
+ # max-expanding 2
+ broker = self._make_broker()
+ orig_shard_ranges, out_lines, ts_now = do_test(broker, 2)
+ orig_shard_ranges[3].timestamp = ts_now
+ orig_shard_ranges[3].lower = orig_shard_ranges[1].lower
+ orig_shard_ranges[6].timestamp = ts_now
+ orig_shard_ranges[6].lower = orig_shard_ranges[4].lower
+ self.assertEqual(
+ ['Found 3 gaps:',
+ ' gap: %r - %r' % (orig_shard_ranges[1].lower,
+ orig_shard_ranges[2].upper),
+ ' apparent gap contents:'] +
+ [mock.ANY] * 6 +
+ [' gap can be fixed by expanding neighbor range:',
+ " '%s'" % orig_shard_ranges[3].name] +
+ [mock.ANY] * 2 +
+ [' gap: %r - %r' % (orig_shard_ranges[4].lower,
+ orig_shard_ranges[5].upper),
+ ' apparent gap contents:'] +
+ [mock.ANY] * 6 +
+ [' gap can be fixed by expanding neighbor range:',
+ " '%s'" % orig_shard_ranges[6].name] +
+ [mock.ANY] * 2 +
+ [' gap: %r - %r' % (orig_shard_ranges[7].lower,
+ orig_shard_ranges[9].upper),
+ ' apparent gap contents:'] +
+ [mock.ANY] * 9 +
+ [' gap can be fixed by expanding neighbor range:',
+ " '%s'" % orig_shard_ranges[6].name] +
+ [mock.ANY] * 2 +
+ ['',
+ 'Repairs necessary to fill gaps.',
+ 'The following expanded shard range(s) will be applied to the '
+ 'DB:',
+ " '%s'" % orig_shard_ranges[3].name] +
+ [mock.ANY] * 2 +
+ [" '%s'" % orig_shard_ranges[6].name] +
+ [mock.ANY] * 6 +
+ ['',
+ 'Run container-replicator to replicate the changes to '
+ 'other nodes.',
+ 'Run container-sharder on all nodes to fill gaps.',
+ ''],
+ out_lines)
+ updated_ranges = broker.get_shard_ranges(include_deleted=True)
+ self.assert_shard_ranges_equal(
+ sorted(orig_shard_ranges, key=lambda s: s.name),
+ sorted(updated_ranges, key=lambda s: s.name))
+
+ # max-expanding unlimited
+ broker = self._make_broker()
+ orig_shard_ranges, out_lines, ts_now = do_test(broker, None)
+ orig_shard_ranges[3].timestamp = ts_now
+ orig_shard_ranges[3].lower = orig_shard_ranges[1].lower
+ orig_shard_ranges[6].timestamp = ts_now
+ orig_shard_ranges[6].lower = orig_shard_ranges[4].lower
+ orig_shard_ranges[6].upper = orig_shard_ranges[9].upper
+ self.assertEqual(
+ ['Found 3 gaps:',
+ ' gap: %r - %r' % (orig_shard_ranges[1].lower,
+ orig_shard_ranges[2].upper),
+ ' apparent gap contents:'] +
+ [mock.ANY] * 6 +
+ [' gap can be fixed by expanding neighbor range:',
+ " '%s'" % orig_shard_ranges[3].name] +
+ [mock.ANY] * 2 +
+ [' gap: %r - %r' % (orig_shard_ranges[4].lower,
+ orig_shard_ranges[5].upper),
+ ' apparent gap contents:'] +
+ [mock.ANY] * 6 +
+ [' gap can be fixed by expanding neighbor range:',
+ " '%s'" % orig_shard_ranges[6].name] +
+ [mock.ANY] * 2 +
+ [' gap: %r - %r' % (orig_shard_ranges[7].lower,
+ orig_shard_ranges[9].upper),
+ ' apparent gap contents:'] +
+ [mock.ANY] * 9 +
+ [' gap can be fixed by expanding neighbor range:',
+ " '%s'" % orig_shard_ranges[6].name] +
+ [mock.ANY] * 2 +
+ ['',
+ 'Repairs necessary to fill gaps.',
+ 'The following expanded shard range(s) will be applied to the '
+ 'DB:',
+ " '%s'" % orig_shard_ranges[3].name] +
+ [mock.ANY] * 2 +
+ [" '%s'" % orig_shard_ranges[6].name] +
+ [mock.ANY] * 6 +
+ ['',
+ 'Run container-replicator to replicate the changes to '
+ 'other nodes.',
+ 'Run container-sharder on all nodes to fill gaps.',
+ ''],
+ out_lines)
+ updated_ranges = broker.get_shard_ranges(include_deleted=True)
+ self.assert_shard_ranges_equal(
+ sorted(orig_shard_ranges, key=lambda s: s.name),
+ sorted(updated_ranges, key=lambda s: s.name))
+
+ def test_repair_gaps_complete_sequence(self):
+ broker = self._make_broker()
+ broker.set_sharding_sysmeta('Quoted-Root', 'a/c')
+ for shard in self.shard_data:
+ shard['state'] = ShardRange.ACTIVE
+ with mock_timestamp_now(next(self.ts_iter)):
+ shard_ranges = make_shard_ranges(
+ broker, self.shard_data, '.shards_')
+ broker.merge_shard_ranges(shard_ranges)
+ self.assertTrue(broker.is_root_container())
+ out = StringIO()
+ err = StringIO()
+ with mock_timestamp_now(next(self.ts_iter)), \
+ mock.patch('sys.stdout', out), \
+ mock.patch('sys.stderr', err):
+ ret = main([broker.db_file, 'repair', '--gaps', '--yes'])
+ self.assertEqual(0, ret)
+ err_lines = err.getvalue().split('\n')
+ self.assert_starts_with(err_lines[0], 'Loaded db broker for ')
+ out_lines = out.getvalue().split('\n')
+ self.assertEqual(
+ ['Found one complete sequence of %d shard ranges with no gaps.'
+ % len(self.shard_data),
+ 'No repairs necessary.'], out_lines[:2])
+ updated_ranges = broker.get_shard_ranges()
+ self.assert_shard_ranges_equal(shard_ranges, updated_ranges)
+
+ def test_repair_gaps_with_overlap(self):
+ # verify that overlaps don't look like gaps
+ broker = self._make_broker()
+ broker.set_sharding_sysmeta('Quoted-Root', 'a/c')
+ for shard in self.shard_data:
+ shard['state'] = ShardRange.ACTIVE
+ with mock_timestamp_now(next(self.ts_iter)):
+ shard_ranges = make_shard_ranges(
+ broker, self.shard_data, '.shards_')
+ # create a gap
+ shard_ranges[3].state = ShardRange.SHRINKING
+ # create an overlap
+ shard_ranges[5].lower = 'obj45'
+ self.assertLess(shard_ranges[5].lower, shard_ranges[4].upper)
+ broker.merge_shard_ranges(shard_ranges)
+ orig_shard_ranges = broker.get_shard_ranges()
+ self.assertTrue(broker.is_root_container())
+ out = StringIO()
+ err = StringIO()
+ with mock_timestamp_now(next(self.ts_iter)) as ts_now, \
+ mock.patch('sys.stdout', out), \
+ mock.patch('sys.stderr', err):
+ ret = main([broker.db_file, 'repair', '--gaps', '--yes'])
+ self.assertEqual(0, ret)
+ err_lines = err.getvalue().split('\n')
+ self.assert_starts_with(err_lines[0], 'Loaded db broker for ')
+ out_lines = out.getvalue().split('\n')
+ self.assertEqual(
+ ['Found 1 gaps:',
+ ' gap: %r - %r' % (shard_ranges[3].lower,
+ shard_ranges[3].upper),
+ ' apparent gap contents:'] +
+ [mock.ANY] * 3 +
+ [' gap can be fixed by expanding neighbor range:',
+ " '%s'" % shard_ranges[4].name] +
+ [mock.ANY] * 2 +
+ ['',
+ 'Repairs necessary to fill gaps.',
+ 'The following expanded shard range(s) will be applied to the '
+ 'DB:',
+ " '%s'" % shard_ranges[4].name] +
+ [mock.ANY] * 6 +
+ ['',
+ 'Run container-replicator to replicate the changes to '
+ 'other nodes.',
+ 'Run container-sharder on all nodes to fill gaps.',
+ ''],
+ out_lines)
+ orig_shard_ranges[4].lower = shard_ranges[3].lower
+ orig_shard_ranges[4].timestamp = ts_now
+ updated_ranges = broker.get_shard_ranges()
+ self.assert_shard_ranges_equal(orig_shard_ranges, updated_ranges)
+
+ def test_repair_gaps_not_root(self):
+ broker = self._make_broker()
+ shard_ranges = make_shard_ranges(broker, self.shard_data, '.shards_')
+ broker.merge_shard_ranges(shard_ranges)
+ # make broker appear to not be a root container
+ out = StringIO()
+ err = StringIO()
+ broker.set_sharding_sysmeta('Quoted-Root', 'not_a/c')
+ self.assertFalse(broker.is_root_container())
+ with mock.patch('sys.stdout', out), mock.patch('sys.stderr', err):
+ ret = main([broker.db_file, 'repair', '--gaps'])
+ self.assertEqual(1, ret)
+ err_lines = err.getvalue().split('\n')
+ self.assert_starts_with(err_lines[0], 'Loaded db broker for ')
+ out_lines = out.getvalue().split('\n')
+ self.assertEqual(
+ ['WARNING: Shard containers cannot be repaired.',
+ 'This command should be used on a root container.'],
+ out_lines[:2]
+ )
+ updated_ranges = broker.get_shard_ranges()
+ self.assert_shard_ranges_equal(shard_ranges, updated_ranges)
+
def test_repair_not_needed(self):
broker = self._make_broker()
broker.set_sharding_sysmeta('Quoted-Root', 'a/c')
@@ -1767,7 +2137,9 @@ class TestManageShardRanges(unittest.TestCase):
mock_timestamp_now(ts_now), \
mock.patch('swift.cli.manage_shard_ranges.input',
return_value=user_input):
- ret = main([broker.db_file, 'repair'] + options)
+ ret = main(
+ [broker.db_file, 'repair', '--min-shard-age', '0'] +
+ options)
self.assertEqual(exit_code, ret)
err_lines = err.getvalue().split('\n')
self.assert_starts_with(err_lines[0], 'Loaded db broker for ')
@@ -1815,6 +2187,172 @@ class TestManageShardRanges(unittest.TestCase):
key=ShardRange.sort_key)
self.assert_shard_ranges_equal(expected, updated_ranges)
+ def test_repair_younger_overlapping_donor_shards(self):
+ # test shard range repair on the normal acceptor ranges and young
+ # overlapping shard ranges which are younger than '--min-shard-age',
+ # expect them not to be repaired.
+ broker = self._make_broker()
+ broker.set_sharding_sysmeta('Quoted-Root', 'a/c')
+ ts_now = next(self.ts_iter)
+ with mock_timestamp_now(Timestamp(float(ts_now) - 61)):
+ acceptor_ranges = make_shard_ranges(
+ broker, self.shard_data, '.shards_')
+ with mock_timestamp_now(ts_now):
+ overlap_donor_ranges = make_shard_ranges(
+ broker, self.overlap_shard_data_2, '.shards_')
+ broker.merge_shard_ranges(acceptor_ranges + overlap_donor_ranges)
+ self.assertTrue(broker.is_root_container())
+ out = StringIO()
+ err = StringIO()
+ with mock.patch('sys.stdout', out), mock.patch('sys.stderr', err):
+ ret = main(
+ [broker.db_file, 'repair', '--min-shard-age', '60', '-y'])
+ self.assertEqual(0, ret)
+ err_lines = err.getvalue().split('\n')
+ self.assert_starts_with(err_lines[0], 'Loaded db broker for ')
+ out_lines = out.getvalue().split('\n')
+ self.assertEqual(
+ ['2 overlapping donor shards ignored due to minimum age limit'],
+ out_lines[:1])
+ updated_ranges = broker.get_shard_ranges()
+ expected = sorted(
+ acceptor_ranges + overlap_donor_ranges,
+ key=ShardRange.sort_key)
+ self.assert_shard_ranges_equal(expected, updated_ranges)
+
+ def test_repair_younger_acceptor_with_overlapping_donor_shards(self):
+ # test shard range repair on the overlapping normal donor ranges and
+ # young acceptor shard ranges who are younger than '--min-shard-age',
+ # expect no overlapping ranges to be repaired.
+ broker = self._make_broker()
+ broker.set_sharding_sysmeta('Quoted-Root', 'a/c')
+ ts_now = next(self.ts_iter)
+ with mock_timestamp_now(Timestamp(float(ts_now) + 3601)):
+ acceptor_ranges = make_shard_ranges(
+ broker, self.shard_data, '.shards_')
+ with mock_timestamp_now(ts_now):
+ overlap_donor_ranges = make_shard_ranges(
+ broker, self.overlap_shard_data_2, '.shards_')
+ broker.merge_shard_ranges(acceptor_ranges + overlap_donor_ranges)
+ self.assertTrue(broker.is_root_container())
+ out = StringIO()
+ err = StringIO()
+ with mock.patch('sys.stdout', out), \
+ mock.patch('sys.stderr', err), \
+ mock_timestamp_now(Timestamp(float(ts_now) + 3601 + 59)):
+ ret = main(
+ [broker.db_file, 'repair', '--min-shard-age', '60', '-y'])
+ self.assertEqual(0, ret)
+ err_lines = err.getvalue().split('\n')
+ self.assert_starts_with(err_lines[0], 'Loaded db broker for ')
+ out_lines = out.getvalue().split('\n')
+ self.assertEqual(
+ ['2 donor shards ignored due to existence of overlapping young'
+ ' acceptors'], out_lines[:1])
+ updated_ranges = broker.get_shard_ranges()
+ expected = sorted(
+ acceptor_ranges + overlap_donor_ranges,
+ key=ShardRange.sort_key)
+ self.assert_shard_ranges_equal(expected, updated_ranges)
+
+ def test_repair_older_overlapping_donor_and_acceptor_shards(self):
+ # test shard range repair on the overlapping donor and acceptor shard
+ # ranges which all are older than '--min-shard-age', expect them to be
+ # repaired.
+ broker = self._make_broker()
+ broker.set_sharding_sysmeta('Quoted-Root', 'a/c')
+ ts_now = next(self.ts_iter)
+ with mock_timestamp_now(ts_now):
+ acceptor_ranges = make_shard_ranges(
+ broker, self.shard_data, '.shards_')
+ with mock_timestamp_now(Timestamp(float(ts_now) + 1800)):
+ overlap_donor_ranges = make_shard_ranges(
+ broker, self.overlap_shard_data_2, '.shards_')
+ broker.merge_shard_ranges(acceptor_ranges + overlap_donor_ranges)
+ self.assertTrue(broker.is_root_container())
+ out = StringIO()
+ err = StringIO()
+ ts_1hr_after = Timestamp(float(ts_now) + 3601)
+ with mock.patch('sys.stdout', out), \
+ mock.patch('sys.stderr', err), \
+ mock_timestamp_now(ts_1hr_after):
+ ret = main(
+ [broker.db_file, 'repair', '--min-shard-age', '60', '-y'])
+ self.assertEqual(0, ret)
+ err_lines = err.getvalue().split('\n')
+ self.assert_starts_with(err_lines[0], 'Loaded db broker for ')
+ out_lines = out.getvalue().split('\n')
+ self.assertEqual(
+ ['Repairs necessary to remove overlapping shard ranges.'],
+ out_lines[:1])
+ updated_ranges = broker.get_shard_ranges()
+ for sr in overlap_donor_ranges:
+ sr.update_state(ShardRange.SHRINKING, ts_1hr_after)
+ sr.epoch = ts_1hr_after
+ expected = sorted(
+ acceptor_ranges + overlap_donor_ranges,
+ key=ShardRange.sort_key)
+ self.assert_shard_ranges_equal(expected, updated_ranges)
+
+ def test_repair_overlapping_donor_and_acceptor_shards_default(self):
+ # test shard range repair on the overlapping donor and acceptor shard
+ # ranges wth default '--min-shard-age' value.
+ broker = self._make_broker()
+ broker.set_sharding_sysmeta('Quoted-Root', 'a/c')
+ ts_now = next(self.ts_iter)
+ with mock_timestamp_now(ts_now):
+ acceptor_ranges = make_shard_ranges(
+ broker, self.shard_data, '.shards_')
+ with mock_timestamp_now(Timestamp(int(ts_now) + 1)):
+ overlap_donor_ranges = make_shard_ranges(
+ broker, self.overlap_shard_data_2, '.shards_')
+ broker.merge_shard_ranges(acceptor_ranges + overlap_donor_ranges)
+ self.assertTrue(broker.is_root_container())
+ out = StringIO()
+ err = StringIO()
+ ts_repair = Timestamp(int(ts_now) + 4 * 3600 - 1)
+ with mock.patch('sys.stdout', out), \
+ mock.patch('sys.stderr', err), \
+ mock_timestamp_now(ts_repair):
+ # default min-shard-age prevents repair...
+ ret = main([broker.db_file, 'repair', '-y'])
+ self.assertEqual(0, ret)
+ err_lines = err.getvalue().split('\n')
+ self.assert_starts_with(err_lines[0], 'Loaded db broker for ')
+ out_lines = out.getvalue().split('\n')
+ self.assertEqual(
+ ['2 overlapping donor shards ignored due to minimum age limit'],
+ out_lines[:1])
+ updated_ranges = broker.get_shard_ranges()
+ expected = sorted(
+ acceptor_ranges + overlap_donor_ranges,
+ key=ShardRange.sort_key)
+ self.assert_shard_ranges_equal(expected, updated_ranges)
+
+ out = StringIO()
+ err = StringIO()
+ ts_repair = Timestamp(int(ts_now) + 4 * 3600 + 2)
+ with mock.patch('sys.stdout', out), \
+ mock.patch('sys.stderr', err), \
+ mock_timestamp_now(ts_repair):
+ # default min-shard-age allows repair now...
+ ret = main([broker.db_file, 'repair', '-y'])
+ self.assertEqual(0, ret)
+ err_lines = err.getvalue().split('\n')
+ self.assert_starts_with(err_lines[0], 'Loaded db broker for ')
+ out_lines = out.getvalue().split('\n')
+ self.assertEqual(
+ ['Repairs necessary to remove overlapping shard ranges.'],
+ out_lines[:1])
+ updated_ranges = broker.get_shard_ranges()
+ for sr in overlap_donor_ranges:
+ sr.update_state(ShardRange.SHRINKING, ts_repair)
+ sr.epoch = ts_repair
+ expected = sorted(
+ acceptor_ranges + overlap_donor_ranges,
+ key=ShardRange.sort_key)
+ self.assert_shard_ranges_equal(expected, updated_ranges)
+
def test_repair_two_complete_sequences_one_incomplete(self):
broker = self._make_broker()
broker.set_sharding_sysmeta('Quoted-Root', 'a/c')
@@ -1835,7 +2373,8 @@ class TestManageShardRanges(unittest.TestCase):
ts_now = next(self.ts_iter)
with mock.patch('sys.stdout', out), mock.patch('sys.stderr', err), \
mock_timestamp_now(ts_now):
- ret = main([broker.db_file, 'repair', '--yes'])
+ ret = main([broker.db_file, 'repair', '--yes',
+ '--min-shard-age', '0'])
self.assertEqual(0, ret)
err_lines = err.getvalue().split('\n')
self.assert_starts_with(err_lines[0], 'Loaded db broker for ')
@@ -1978,9 +2517,9 @@ class TestManageShardRanges(unittest.TestCase):
self.assertEqual(2, cm.exception.code)
err_lines = err.getvalue().split('\n')
runner = os.path.basename(sys.argv[0])
- self.assertEqual(
+ self.assertIn(
'usage: %s path_to_file repair [-h] [--yes | --dry-run]' % runner,
err_lines[0])
self.assertIn(
"argument --yes/-y: not allowed with argument --dry-run/-n",
- err_lines[1])
+ err_lines[-2], err_lines)
diff --git a/test/unit/cli/test_ringbuilder.py b/test/unit/cli/test_ringbuilder.py
index 6327c980e..11a2d40ae 100644
--- a/test/unit/cli/test_ringbuilder.py
+++ b/test/unit/cli/test_ringbuilder.py
@@ -2077,7 +2077,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin):
'port': 66201,
'device': 'sda2',
})
- ring.add_dev({'weight': 100.0,
+ ring.add_dev({'weight': 10000.0,
'region': 2,
'zone': 2,
'ip': '2001:db8:85a3::8a2e:370:7336',
diff --git a/test/unit/common/middleware/helpers.py b/test/unit/common/middleware/helpers.py
index e184c004f..caa8122f8 100644
--- a/test/unit/common/middleware/helpers.py
+++ b/test/unit/common/middleware/helpers.py
@@ -96,6 +96,10 @@ class FakeSwift(object):
self.auto_create_account_prefix = '.'
self.backend_user_agent = "fake_swift"
self._pipeline_final_app = self
+ # some tests want to opt in to mimicking the
+ # X-Backend-Ignore-Range-If-Metadata-Present header behavior,
+ # but default to old-swift behavior
+ self.can_ignore_range = False
def _find_response(self, method, path):
path = normalize_path(path)
@@ -110,6 +114,9 @@ class FakeSwift(object):
return resp
def __call__(self, env, start_response):
+ if self.can_ignore_range:
+ # we might pop off the Range header
+ env = dict(env)
method = env['REQUEST_METHOD']
if method not in self.ALLOWED_METHODS:
raise HTTPNotImplemented()
@@ -151,6 +158,12 @@ class FakeSwift(object):
raise KeyError("Didn't find %r in allowed responses" % (
(method, path),))
+ ignore_range_meta = req.headers.get(
+ 'x-backend-ignore-range-if-metadata-present')
+ if self.can_ignore_range and ignore_range_meta and set(
+ ignore_range_meta.split(',')).intersection(headers.keys()):
+ req.headers.pop('range', None)
+
req_body = None # generally, we don't care and let eventlet discard()
if (cont and not obj and method == 'UPDATE') or (
obj and method == 'PUT'):
@@ -189,7 +202,8 @@ class FakeSwift(object):
# simulate object GET/HEAD
elif method in ('GET', 'HEAD') and obj:
- req.headers['X-Backend-Storage-Policy-Index'] = '2'
+ req.headers['X-Backend-Storage-Policy-Index'] = headers.get(
+ 'x-backend-storage-policy-index', '2')
# note: tests may assume this copy of req_headers is case insensitive
# so we deliberately use a HeaderKeyDict
diff --git a/test/unit/common/middleware/s3api/test_acl_utils.py b/test/unit/common/middleware/s3api/test_acl_utils.py
index 1ff03c49e..9a8ee1a74 100644
--- a/test/unit/common/middleware/s3api/test_acl_utils.py
+++ b/test/unit/common/middleware/s3api/test_acl_utils.py
@@ -16,6 +16,7 @@
import unittest
from swift.common.swob import Request
+from swift.common.middleware.s3api import s3response
from swift.common.middleware.s3api.acl_utils import handle_acl_header
from test.unit.common.middleware.s3api import S3ApiTestCase
@@ -26,23 +27,47 @@ class TestS3ApiAclUtils(S3ApiTestCase):
def setUp(self):
super(TestS3ApiAclUtils, self).setUp()
- def test_handle_acl_header(self):
- def check_generated_acl_header(acl, targets):
- req = Request.blank('/bucket',
- headers={'X-Amz-Acl': acl})
+ def check_generated_acl_header(self, acl, expected):
+ req = Request.blank('/bucket',
+ headers={'X-Amz-Acl': acl})
+ try:
handle_acl_header(req)
- for target in targets:
+ except s3response.ErrorResponse as e:
+ if isinstance(e, expected):
+ self.assertEqual(expected._status, e._status)
+ else:
+ raise
+ else:
+ for target in expected:
self.assertTrue(target[0] in req.headers)
self.assertEqual(req.headers[target[0]], target[1])
- check_generated_acl_header('public-read',
- [('X-Container-Read', '.r:*,.rlistings')])
- check_generated_acl_header('public-read-write',
- [('X-Container-Read', '.r:*,.rlistings'),
- ('X-Container-Write', '.r:*')])
- check_generated_acl_header('private',
- [('X-Container-Read', '.'),
- ('X-Container-Write', '.')])
+ def test_canned_acl_header(self):
+ # https://docs.aws.amazon.com/AmazonS3/latest/userguide/acl-overview.html#canned-acl
+ self.check_generated_acl_header(
+ 'private',
+ [('X-Container-Read', '.'), ('X-Container-Write', '.')])
+ self.check_generated_acl_header(
+ 'public-read', [('X-Container-Read', '.r:*,.rlistings')])
+ self.check_generated_acl_header(
+ 'public-read-write', [('X-Container-Read', '.r:*,.rlistings'),
+ ('X-Container-Write', '.r:*')])
+ self.check_generated_acl_header(
+ 'aws-exec-read', s3response.InvalidArgument)
+ self.check_generated_acl_header(
+ 'authenticated-read', s3response.S3NotImplemented)
+ self.check_generated_acl_header(
+ 'bucket-owner-read', [('X-Container-Read', '.'),
+ ('X-Container-Write', '.')])
+ self.check_generated_acl_header(
+ 'bucket-owner-full-control', [('X-Container-Read', '.'),
+ ('X-Container-Write', '.')])
+ self.check_generated_acl_header(
+ 'log-delivery-write', s3response.S3NotImplemented)
+
+ # the 400 response is the catch all
+ self.check_generated_acl_header(
+ 'some-non-sense', s3response.InvalidArgument)
if __name__ == '__main__':
diff --git a/test/unit/common/middleware/s3api/test_multi_upload.py b/test/unit/common/middleware/s3api/test_multi_upload.py
index 4eff8015a..ed883d0bb 100644
--- a/test/unit/common/middleware/s3api/test_multi_upload.py
+++ b/test/unit/common/middleware/s3api/test_multi_upload.py
@@ -1050,6 +1050,8 @@ class TestS3ApiMultiUpload(S3ApiTestCase):
# Delete the in-progress-upload marker
('DELETE', '/v1/AUTH_test/bucket+segments/object/X')
])
+ self.assertEqual(req.environ['swift.backend_path'],
+ '/v1/AUTH_test/bucket+segments/object/X')
_, _, headers = self.swift.calls_with_headers[-2]
self.assertEqual(headers.get('X-Object-Meta-Foo'), 'bar')
@@ -1166,6 +1168,8 @@ class TestS3ApiMultiUpload(S3ApiTestCase):
('HEAD', '/v1/AUTH_test/bucket/object'),
# So no PUT necessary
])
+ self.assertEqual(req.environ['swift.backend_path'],
+ '/v1/AUTH_test/bucket+segments/object/X')
def test_object_multipart_upload_retry_complete_etag_mismatch(self):
content_md5 = base64.b64encode(md5(
@@ -1206,6 +1210,8 @@ class TestS3ApiMultiUpload(S3ApiTestCase):
# Retry deleting the marker for the sake of completeness
('DELETE', '/v1/AUTH_test/bucket+segments/object/X')
])
+ self.assertEqual(req.environ['swift.backend_path'],
+ '/v1/AUTH_test/bucket+segments/object/X')
_, _, headers = self.swift.calls_with_headers[-2]
self.assertEqual(headers.get('X-Object-Meta-Foo'), 'bar')
@@ -1249,6 +1255,38 @@ class TestS3ApiMultiUpload(S3ApiTestCase):
# But the object does, and with the same upload ID
('HEAD', '/v1/AUTH_test/bucket/object'),
])
+ self.assertEqual(req.environ['swift.backend_path'],
+ '/v1/AUTH_test/bucket+segments/object/X')
+
+ def test_object_multipart_upload_retry_complete_nothing_there(self):
+ content_md5 = base64.b64encode(md5(
+ XML.encode('ascii'), usedforsecurity=False).digest())
+ self.swift.register('HEAD', '/v1/AUTH_test/bucket+segments/object/X',
+ swob.HTTPNotFound, {}, None)
+ self.swift.register('HEAD', '/v1/AUTH_test/bucket/object',
+ swob.HTTPNotFound, {}, None)
+ req = Request.blank('/bucket/object?uploadId=X',
+ environ={'REQUEST_METHOD': 'POST'},
+ headers={'Authorization': 'AWS test:tester:hmac',
+ 'Date': self.get_date_header(),
+ 'Content-MD5': content_md5, },
+ body=XML)
+ status, headers, body = self.call_s3api(req)
+ elem = fromstring(body, 'Error')
+ self.assertEqual(elem.find('Code').text, 'NoSuchUpload')
+ self.assertEqual(status.split()[0], '404')
+
+ self.assertEqual(self.swift.calls, [
+ # Bucket exists
+ ('HEAD', '/v1/AUTH_test'),
+ ('HEAD', '/v1/AUTH_test/bucket'),
+ # Upload marker does not exist
+ ('HEAD', '/v1/AUTH_test/bucket+segments/object/X'),
+ # Neither does the object
+ ('HEAD', '/v1/AUTH_test/bucket/object'),
+ ])
+ self.assertEqual(req.environ['swift.backend_path'],
+ '/v1/AUTH_test/bucket+segments/object/X')
def test_object_multipart_upload_invalid_md5(self):
bad_md5 = base64.b64encode(md5(
diff --git a/test/unit/common/middleware/s3api/test_obj.py b/test/unit/common/middleware/s3api/test_obj.py
index 870bf7acf..38bc8eec4 100644
--- a/test/unit/common/middleware/s3api/test_obj.py
+++ b/test/unit/common/middleware/s3api/test_obj.py
@@ -835,6 +835,30 @@ class TestS3ApiObj(S3ApiTestCase):
return_value=timestamp):
return self.call_s3api(req)
+ def test_simple_object_copy(self):
+ self.swift.register('HEAD', '/v1/AUTH_test/some/source',
+ swob.HTTPOk, {
+ 'x-backend-storage-policy-index': '1',
+ }, None)
+ req = Request.blank(
+ '/bucket/object', method='PUT',
+ headers={
+ 'Authorization': 'AWS test:tester:hmac',
+ 'X-Amz-Copy-Source': '/some/source',
+ 'Date': self.get_date_header(),
+ },
+ )
+ timestamp = time.time()
+ with patch('swift.common.middleware.s3api.utils.time.time',
+ return_value=timestamp):
+ status, headers, body = self.call_s3api(req)
+ self.assertEqual(status.split()[0], '200')
+ head_call, put_call = self.swift.calls_with_headers
+ self.assertEqual(
+ head_call.headers['x-backend-storage-policy-index'], '1')
+ self.assertEqual(put_call.headers['x-copy-from'], '/some/source')
+ self.assertNotIn('x-backend-storage-policy-index', put_call.headers)
+
@s3acl
def test_object_PUT_copy(self):
def do_test(src_path):
diff --git a/test/unit/common/middleware/s3api/test_s3_acl.py b/test/unit/common/middleware/s3api/test_s3_acl.py
index 554f09ec5..3c4eeae2f 100644
--- a/test/unit/common/middleware/s3api/test_s3_acl.py
+++ b/test/unit/common/middleware/s3api/test_s3_acl.py
@@ -379,6 +379,17 @@ class TestS3ApiS3Acl(S3ApiTestCase):
status, headers, body = self.call_s3api(req)
self.assertEqual(status.split()[0], '200')
+ def test_grant_all_users_with_uppercase_type(self):
+ req = Request.blank('/bucket/object?acl',
+ environ={'REQUEST_METHOD': 'PUT'},
+ headers={'Authorization': 'AWS test:tester:hmac',
+ 'Date': self.get_date_header(),
+ 'x-amz-grant-read':
+ 'URI="http://acs.amazonaws.com/groups/'
+ 'global/AllUsers"'})
+ status, headers, body = self.call_s3api(req)
+ self.assertEqual(status.split()[0], '200')
+
def test_grant_invalid_uri(self):
req = Request.blank('/bucket/object?acl',
environ={'REQUEST_METHOD': 'PUT'},
diff --git a/test/unit/common/middleware/s3api/test_s3request.py b/test/unit/common/middleware/s3api/test_s3request.py
index 51eec0848..78689af33 100644
--- a/test/unit/common/middleware/s3api/test_s3request.py
+++ b/test/unit/common/middleware/s3api/test_s3request.py
@@ -31,7 +31,7 @@ from test.unit.common.middleware.s3api.test_s3api import S3ApiTestCase
from swift.common.middleware.s3api.s3request import S3Request, \
S3AclRequest, SigV4Request, SIGV4_X_AMZ_DATE_FORMAT, HashingInput
from swift.common.middleware.s3api.s3response import InvalidArgument, \
- NoSuchBucket, InternalError, \
+ NoSuchBucket, InternalError, ServiceUnavailable, \
AccessDenied, SignatureDoesNotMatch, RequestTimeTooSkewed, BadDigest
from swift.common.utils import md5
@@ -332,7 +332,8 @@ class TestRequest(S3ApiTestCase):
self.assertEqual(204, info['status']) # sanity
self.assertEqual(10, mock_info.call_count)
- expected_errors = [(404, NoSuchBucket), (0, InternalError)]
+ expected_errors = [(404, NoSuchBucket), (0, InternalError),
+ (503, ServiceUnavailable)]
for status, expected_error in expected_errors:
with patch('swift.common.middleware.s3api.s3request.'
'get_container_info',
diff --git a/test/unit/common/middleware/test_formpost.py b/test/unit/common/middleware/test_formpost.py
index a8b4a0be3..d751062e8 100644
--- a/test/unit/common/middleware/test_formpost.py
+++ b/test/unit/common/middleware/test_formpost.py
@@ -13,18 +13,26 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+import base64
import hmac
+import hashlib
import unittest
-from hashlib import sha1
from time import time
import six
+if six.PY3:
+ from unittest import mock
+else:
+ import mock
from io import BytesIO
from swift.common.swob import Request, Response, wsgi_quote
from swift.common.middleware import tempauth, formpost
+from swift.common.middleware.tempurl import DEFAULT_ALLOWED_DIGESTS
from swift.common.utils import split_path
+from swift.common import registry, digest as digest_utils
from swift.proxy.controllers.base import get_cache_key
+from test.debug_logger import debug_logger
def hmac_msg(path, redirect, max_file_size, max_file_count, expires):
@@ -127,6 +135,7 @@ class TestFormPost(unittest.TestCase):
self.app = FakeApp()
self.auth = tempauth.filter_factory({})(self.app)
self.formpost = formpost.filter_factory({})(self.auth)
+ self.logger = self.formpost.logger = debug_logger()
def _make_request(self, path, tempurl_keys=(), **kwargs):
req = Request.blank(path, **kwargs)
@@ -163,11 +172,23 @@ class TestFormPost(unittest.TestCase):
'meta': meta}
def _make_sig_env_body(self, path, redirect, max_file_size, max_file_count,
- expires, key, user_agent=True):
- sig = hmac.new(
+ expires, key, user_agent=True, algorithm='sha512',
+ prefix=True):
+ alg_name = algorithm
+ if six.PY2:
+ algorithm = getattr(hashlib, algorithm)
+ mac = hmac.new(
key,
hmac_msg(path, redirect, max_file_size, max_file_count, expires),
- sha1).hexdigest()
+ algorithm)
+ if prefix:
+ if six.PY2:
+ sig = alg_name + ':' + base64.b64encode(mac.digest())
+ else:
+ sig = alg_name + ':' + base64.b64encode(
+ mac.digest()).decode('ascii')
+ else:
+ sig = mac.hexdigest()
body = [
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR',
'Content-Disposition: form-data; name="redirect"',
@@ -297,7 +318,7 @@ class TestFormPost(unittest.TestCase):
sig = hmac.new(
key,
hmac_msg(path, redirect, max_file_size, max_file_count, expires),
- sha1).hexdigest()
+ hashlib.sha512).hexdigest()
wsgi_input = '\r\n'.join([
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR',
'Content-Disposition: form-data; name="redirect"',
@@ -415,7 +436,7 @@ class TestFormPost(unittest.TestCase):
sig = hmac.new(
key,
hmac_msg(path, redirect, max_file_size, max_file_count, expires),
- sha1).hexdigest()
+ hashlib.sha512).hexdigest()
wsgi_input = '\r\n'.join([
'-----------------------------168072824752491622650073',
'Content-Disposition: form-data; name="redirect"',
@@ -532,7 +553,7 @@ class TestFormPost(unittest.TestCase):
sig = hmac.new(
key,
hmac_msg(path, redirect, max_file_size, max_file_count, expires),
- sha1).hexdigest()
+ hashlib.sha512).hexdigest()
wsgi_input = '\r\n'.join([
'------WebKitFormBoundaryq3CFxUjfsDMu8XsA',
'Content-Disposition: form-data; name="redirect"',
@@ -652,7 +673,7 @@ class TestFormPost(unittest.TestCase):
sig = hmac.new(
key,
hmac_msg(path, redirect, max_file_size, max_file_count, expires),
- sha1).hexdigest()
+ hashlib.sha512).hexdigest()
wsgi_input = '\r\n'.join([
'-----------------------------7db20d93017c',
'Content-Disposition: form-data; name="redirect"',
@@ -770,7 +791,7 @@ class TestFormPost(unittest.TestCase):
sig = hmac.new(
key,
hmac_msg(path, redirect, max_file_size, max_file_count, expires),
- sha1).hexdigest()
+ hashlib.sha512).hexdigest()
wsgi_input = '\r\n'.join([
'--------------------------dea19ac8502ca805',
'Content-Disposition: form-data; name="redirect"',
@@ -1459,6 +1480,83 @@ class TestFormPost(unittest.TestCase):
self.assertEqual(self.app.requests[0].body, b'Test File\nOne\n')
self.assertEqual(self.app.requests[1].body, b'Test\nFile\nTwo\n')
+ def test_prefixed_and_not_prefixed_sigs_good(self):
+ def do_test(digest, prefixed):
+ key = b'abc'
+ sig, env, body = self._make_sig_env_body(
+ '/v1/AUTH_test/container', '', 1024, 10,
+ int(time() + 86400), key, algorithm=digest, prefix=prefixed)
+ env['wsgi.input'] = BytesIO(b'\r\n'.join(body))
+ env['swift.infocache'][get_cache_key('AUTH_test')] = (
+ self._fake_cache_env('AUTH_test', [key]))
+ env['swift.infocache'][get_cache_key(
+ 'AUTH_test', 'container')] = {'meta': {}}
+ self.auth.app = app = FakeApp(iter([('201 Created', {}, b''),
+ ('201 Created', {}, b'')]))
+ status = [None]
+ headers = [None]
+ exc_info = [None]
+
+ def start_response(s, h, e=None):
+ status[0] = s
+ headers[0] = h
+ exc_info[0] = e
+
+ body = b''.join(self.formpost(env, start_response))
+ status = status[0]
+ headers = headers[0]
+ exc_info = exc_info[0]
+ self.assertEqual(status, '201 Created')
+ location = None
+ for h, v in headers:
+ if h.lower() == 'location':
+ location = v
+ self.assertIsNone(location)
+ self.assertIsNone(exc_info)
+ self.assertTrue(b'201 Created' in body)
+ self.assertEqual(len(app.requests), 2)
+ self.assertEqual(app.requests[0].body, b'Test File\nOne\n')
+ self.assertEqual(app.requests[1].body, b'Test\nFile\nTwo\n')
+
+ for digest in ('sha1', 'sha256', 'sha512'):
+ do_test(digest, True)
+ do_test(digest, False)
+
+ # NB: one increment per *upload*, not client request
+ self.assertEqual(self.logger.get_increment_counts(), {
+ 'formpost.digests.sha1': 4,
+ 'formpost.digests.sha256': 4,
+ 'formpost.digests.sha512': 4,
+ })
+
+ def test_prefixed_and_not_prefixed_sigs_unsupported(self):
+ def do_test(digest, prefixed):
+ key = b'abc'
+ sig, env, body = self._make_sig_env_body(
+ '/v1/AUTH_test/container', '', 1024, 10,
+ int(time() + 86400), key, algorithm=digest, prefix=prefixed)
+ env['wsgi.input'] = BytesIO(b'\r\n'.join(body))
+ env['swift.infocache'][get_cache_key('AUTH_test')] = (
+ self._fake_cache_env('AUTH_test', [key]))
+ env['swift.infocache'][get_cache_key(
+ 'AUTH_test', 'container')] = {'meta': {}}
+ self.app = FakeApp(iter([('201 Created', {}, b''),
+ ('201 Created', {}, b'')]))
+ self.auth = tempauth.filter_factory({})(self.app)
+ self.formpost = formpost.filter_factory({})(self.auth)
+ status = [None]
+
+ def start_response(s, h, e=None):
+ status[0] = s
+
+ body = b''.join(self.formpost(env, start_response))
+ status = status[0]
+ self.assertEqual(status, '401 Unauthorized')
+
+ for digest in ('md5', 'sha224'):
+ do_test(digest, True)
+ do_test(digest, False)
+
def test_no_redirect_expired(self):
key = b'abc'
sig, env, body = self._make_sig_env_body(
@@ -1559,6 +1657,64 @@ class TestFormPost(unittest.TestCase):
self.assertIsNone(exc_info)
self.assertTrue(b'FormPost: invalid starting boundary' in body)
+ def test_redirect_allowed_deprecated_and_unsupported_digests(self):
+ logger = debug_logger()
+
+ def do_test(digest):
+ logger.clear()
+ key = b'abc'
+ sig, env, body = self._make_sig_env_body(
+ '/v1/AUTH_test/container', 'http://redirect', 1024, 10,
+ int(time() + 86400), key, algorithm=digest)
+ env['wsgi.input'] = BytesIO(b'\r\n'.join(body))
+ env['swift.infocache'][get_cache_key('AUTH_test')] = (
+ self._fake_cache_env('AUTH_test', [key]))
+ env['swift.infocache'][get_cache_key(
+ 'AUTH_test', 'container')] = {'meta': {}}
+ self.app = FakeApp(iter([('201 Created', {}, b''),
+ ('201 Created', {}, b'')]))
+ self.auth = tempauth.filter_factory({})(self.app)
+ with mock.patch('swift.common.middleware.formpost.get_logger',
+ return_value=logger):
+ self.formpost = formpost.filter_factory(
+ {
+ 'allowed_digests': DEFAULT_ALLOWED_DIGESTS})(self.auth)
+ status = [None]
+ headers = [None]
+ exc_info = [None]
+
+ def start_response(s, h, e=None):
+ status[0] = s
+ headers[0] = h
+ exc_info[0] = e
+
+ body = b''.join(self.formpost(env, start_response))
+ return body, status[0], headers[0], exc_info[0]
+
+ for algorithm in ('sha1', 'sha256', 'sha512'):
+ body, status, headers, exc_info = do_test(algorithm)
+ self.assertEqual(status, '303 See Other')
+ location = None
+ for h, v in headers:
+ if h.lower() == 'location':
+ location = v
+ self.assertEqual(location, 'http://redirect?status=201&message=')
+ self.assertIsNone(exc_info)
+ self.assertTrue(location.encode('utf-8') in body)
+ self.assertEqual(len(self.app.requests), 2)
+ self.assertEqual(self.app.requests[0].body, b'Test File\nOne\n')
+ self.assertEqual(self.app.requests[1].body, b'Test\nFile\nTwo\n')
+ if algorithm in digest_utils.DEPRECATED_DIGESTS:
+ self.assertIn(
+ 'The following digest algorithms are configured but '
+ 'deprecated: %s. Support will be removed in a '
+ 'future release.' % algorithm,
+ logger.get_lines_for_level('warning'))
+
+ # unsupported
+ _body, status, _headers, _exc_info = do_test("md5")
+ self.assertEqual(status, '401 Unauthorized')
+
def test_no_v1(self):
key = b'abc'
sig, env, body = self._make_sig_env_body(
@@ -2099,5 +2255,58 @@ class TestFormPost(unittest.TestCase):
self.assertFalse("Content-Encoding" in self.app.requests[2].headers)
+class TestSwiftInfo(unittest.TestCase):
+ def setUp(self):
+ registry._swift_info = {}
+ registry._swift_admin_info = {}
+
+ def test_registered_defaults(self):
+ formpost.filter_factory({})
+ swift_info = registry.get_swift_info()
+ self.assertIn('formpost', swift_info)
+ info = swift_info['formpost']
+ self.assertIn('allowed_digests', info)
+ self.assertIn('deprecated_digests', info)
+ self.assertEqual(info['allowed_digests'], ['sha1', 'sha256', 'sha512'])
+ self.assertEqual(info['deprecated_digests'], ['sha1'])
+
+ def test_non_default_methods(self):
+ logger = debug_logger()
+ with mock.patch('swift.common.middleware.formpost.get_logger',
+ return_value=logger):
+ formpost.filter_factory({
+ 'allowed_digests': 'sha1 sha512 md5 not-a-valid-digest',
+ })
+ swift_info = registry.get_swift_info()
+ self.assertIn('formpost', swift_info)
+ info = swift_info['formpost']
+ self.assertIn('allowed_digests', info)
+ self.assertIn('deprecated_digests', info)
+ self.assertEqual(info['allowed_digests'], ['sha1', 'sha512'])
+ self.assertEqual(info['deprecated_digests'], ['sha1'])
+ warning_lines = logger.get_lines_for_level('warning')
+ self.assertIn(
+ 'The following digest algorithms are configured '
+ 'but not supported:',
+ warning_lines[0])
+ self.assertIn('not-a-valid-digest', warning_lines[0])
+ self.assertIn('md5', warning_lines[0])
+
+ def test_no_deprecated_digests(self):
+ formpost.filter_factory({'allowed_digests': 'sha256 sha512'})
+ swift_info = registry.get_swift_info()
+ self.assertIn('formpost', swift_info)
+ info = swift_info['formpost']
+ self.assertIn('allowed_digests', info)
+ self.assertNotIn('deprecated_digests', info)
+ self.assertEqual(info['allowed_digests'], ['sha256', 'sha512'])
+
+ def test_bad_config(self):
+ with self.assertRaises(ValueError):
+ formpost.filter_factory({
+ 'allowed_digests': 'md4',
+ })
+
+
if __name__ == '__main__':
unittest.main()
diff --git a/test/unit/common/middleware/test_proxy_logging.py b/test/unit/common/middleware/test_proxy_logging.py
index 2fe7bfa43..7ab0e8875 100644
--- a/test/unit/common/middleware/test_proxy_logging.py
+++ b/test/unit/common/middleware/test_proxy_logging.py
@@ -423,9 +423,10 @@ class TestProxyLogging(unittest.TestCase):
'template which can be edited in config: '
'{protocol} {path} {method} '
'{path.anonymized} {container.anonymized} '
- '{request_time} {start_time.datetime} {end_time} {ttfb}')})
+ '{request_time} {start_time.datetime} {end_time} {ttfb} '
+ '{domain}')})
app.access_logger = debug_logger()
- req = Request.blank('/', environ={'REQUEST_METHOD': 'GET'})
+ req = Request.blank('/', headers={'Host': 'example.com'})
with mock.patch('time.time',
mock.MagicMock(
side_effect=[10000000.0, 10000000.5, 10000001.0])):
@@ -443,6 +444,7 @@ class TestProxyLogging(unittest.TestCase):
self.assertEqual(log_parts[13], '26/Apr/1970/17/46/40')
self.assertEqual(log_parts[14], '10000001.000000000')
self.assertEqual(log_parts[15], '0.5')
+ self.assertEqual(log_parts[16], 'example.com')
self.assertEqual(resp_body, b'FAKE APP')
def test_log_msg_template_s3api(self):
diff --git a/test/unit/common/middleware/test_slo.py b/test/unit/common/middleware/test_slo.py
index b7b1365e8..87c8bce78 100644
--- a/test/unit/common/middleware/test_slo.py
+++ b/test/unit/common/middleware/test_slo.py
@@ -1327,6 +1327,7 @@ class TestSloDeleteManifest(SloTestCase):
self.assertEqual(resp_data['Errors'], [])
def test_handle_multipart_delete_segment_404(self):
+ self.app.can_ignore_range = True
req = Request.blank(
'/v1/AUTH_test/deltest/man?multipart-manifest=delete',
environ={'REQUEST_METHOD': 'DELETE',
@@ -1345,6 +1346,7 @@ class TestSloDeleteManifest(SloTestCase):
self.assertEqual(resp_data['Number Not Found'], 1)
def test_handle_multipart_delete_whole(self):
+ self.app.can_ignore_range = True
req = Request.blank(
'/v1/AUTH_test/deltest/man-all-there?multipart-manifest=delete',
environ={'REQUEST_METHOD': 'DELETE'})
@@ -1356,7 +1358,40 @@ class TestSloDeleteManifest(SloTestCase):
('DELETE', '/v1/AUTH_test/deltest/c_3'),
('DELETE', ('/v1/AUTH_test/deltest/man-all-there'))]))
+ def test_handle_multipart_delete_whole_old_swift(self):
+ # behave like pre-2.24.0 swift; initial GET will return just one byte
+ self.app.can_ignore_range = False
+
+ req = Request.blank(
+ '/v1/AUTH_test/deltest/man-all-there?multipart-manifest=delete',
+ environ={'REQUEST_METHOD': 'DELETE'})
+ self.call_slo(req)
+ self.assertEqual(self.app.calls_with_headers[:2], [
+ ('GET',
+ '/v1/AUTH_test/deltest/man-all-there?multipart-manifest=get',
+ {'Host': 'localhost:80',
+ 'User-Agent': 'Mozzarella Foxfire MultipartDELETE',
+ 'Range': 'bytes=-1',
+ 'X-Backend-Ignore-Range-If-Metadata-Present':
+ 'X-Static-Large-Object',
+ 'X-Backend-Storage-Policy-Index': '2',
+ 'Content-Length': '0'}),
+ ('GET',
+ '/v1/AUTH_test/deltest/man-all-there?multipart-manifest=get',
+ {'Host': 'localhost:80',
+ 'User-Agent': 'Mozzarella Foxfire MultipartDELETE',
+ 'X-Backend-Storage-Policy-Index': '2',
+ 'Content-Length': '0'}),
+ ])
+ self.assertEqual(set(self.app.calls), set([
+ ('GET',
+ '/v1/AUTH_test/deltest/man-all-there?multipart-manifest=get'),
+ ('DELETE', '/v1/AUTH_test/deltest/b_2'),
+ ('DELETE', '/v1/AUTH_test/deltest/c_3'),
+ ('DELETE', ('/v1/AUTH_test/deltest/man-all-there'))]))
+
def test_handle_multipart_delete_non_ascii(self):
+ self.app.can_ignore_range = True
unicode_acct = u'AUTH_test-un\u00efcode'
wsgi_acct = bytes_to_wsgi(unicode_acct.encode('utf-8'))
req = Request.blank(
@@ -1384,6 +1419,7 @@ class TestSloDeleteManifest(SloTestCase):
('DELETE', ('/v1/%s/deltest/man-all-there' % wsgi_acct))]))
def test_handle_multipart_delete_nested(self):
+ self.app.can_ignore_range = True
req = Request.blank(
'/v1/AUTH_test/deltest/manifest-with-submanifest?' +
'multipart-manifest=delete',
@@ -1403,6 +1439,7 @@ class TestSloDeleteManifest(SloTestCase):
('DELETE', '/v1/AUTH_test/deltest/manifest-with-submanifest')})
def test_handle_multipart_delete_nested_too_many_segments(self):
+ self.app.can_ignore_range = True
req = Request.blank(
'/v1/AUTH_test/deltest/manifest-with-too-many-segs?' +
'multipart-manifest=delete',
@@ -1417,6 +1454,7 @@ class TestSloDeleteManifest(SloTestCase):
'Too many buffered slo segments to delete.')
def test_handle_multipart_delete_nested_404(self):
+ self.app.can_ignore_range = True
req = Request.blank(
'/v1/AUTH_test/deltest/manifest-missing-submanifest' +
'?multipart-manifest=delete',
@@ -1440,6 +1478,7 @@ class TestSloDeleteManifest(SloTestCase):
self.assertEqual(resp_data['Errors'], [])
def test_handle_multipart_delete_nested_401(self):
+ self.app.can_ignore_range = True
self.app.register(
'GET', '/v1/AUTH_test/deltest/submanifest',
swob.HTTPUnauthorized, {}, None)
@@ -1457,6 +1496,7 @@ class TestSloDeleteManifest(SloTestCase):
[['/deltest/submanifest', '401 Unauthorized']])
def test_handle_multipart_delete_nested_500(self):
+ self.app.can_ignore_range = True
self.app.register(
'GET', '/v1/AUTH_test/deltest/submanifest',
swob.HTTPServerError, {}, None)
@@ -1475,6 +1515,7 @@ class TestSloDeleteManifest(SloTestCase):
'Unable to load SLO manifest or segment.']])
def test_handle_multipart_delete_not_a_manifest(self):
+ self.app.can_ignore_range = True
req = Request.blank(
'/v1/AUTH_test/deltest/a_1?multipart-manifest=delete',
environ={'REQUEST_METHOD': 'DELETE',
@@ -1490,8 +1531,10 @@ class TestSloDeleteManifest(SloTestCase):
self.assertEqual(resp_data['Number Not Found'], 0)
self.assertEqual(resp_data['Errors'],
[['/deltest/a_1', 'Not an SLO manifest']])
+ self.assertFalse(self.app.unread_requests)
def test_handle_multipart_delete_bad_json(self):
+ self.app.can_ignore_range = True
req = Request.blank(
'/v1/AUTH_test/deltest/manifest-badjson?multipart-manifest=delete',
environ={'REQUEST_METHOD': 'DELETE',
@@ -1510,6 +1553,7 @@ class TestSloDeleteManifest(SloTestCase):
'Unable to load SLO manifest']])
def test_handle_multipart_delete_401(self):
+ self.app.can_ignore_range = True
req = Request.blank(
'/v1/AUTH_test/deltest/manifest-with-unauth-segment' +
'?multipart-manifest=delete',
@@ -1533,6 +1577,7 @@ class TestSloDeleteManifest(SloTestCase):
[['/deltest-unauth/q_17', '401 Unauthorized']])
def test_handle_multipart_delete_client_content_type(self):
+ self.app.can_ignore_range = True
req = Request.blank(
'/v1/AUTH_test/deltest/man-all-there?multipart-manifest=delete',
environ={'REQUEST_METHOD': 'DELETE', 'CONTENT_TYPE': 'foo/bar'},
@@ -1564,6 +1609,7 @@ class TestSloDeleteManifest(SloTestCase):
'/v1/AUTH_test/deltest/man_404?multipart-manifest=get')])
def test_handle_async_delete_turned_off(self):
+ self.app.can_ignore_range = True
self.slo.allow_async_delete = False
req = Request.blank(
'/v1/AUTH_test/deltest/man-all-there?'
@@ -1584,6 +1630,7 @@ class TestSloDeleteManifest(SloTestCase):
('DELETE', '/v1/AUTH_test/deltest/man-all-there')]))
def test_handle_async_delete_whole(self):
+ self.app.can_ignore_range = True
self.slo.allow_async_delete = True
now = Timestamp(time.time())
exp_obj_cont = get_expirer_container(
@@ -1636,6 +1683,7 @@ class TestSloDeleteManifest(SloTestCase):
])
def test_handle_async_delete_non_ascii(self):
+ self.app.can_ignore_range = True
self.slo.allow_async_delete = True
unicode_acct = u'AUTH_test-un\u00efcode'
wsgi_acct = bytes_to_wsgi(unicode_acct.encode('utf-8'))
@@ -1710,6 +1758,7 @@ class TestSloDeleteManifest(SloTestCase):
])
def test_handle_async_delete_non_ascii_same_container(self):
+ self.app.can_ignore_range = True
self.slo.allow_async_delete = True
unicode_acct = u'AUTH_test-un\u00efcode'
wsgi_acct = bytes_to_wsgi(unicode_acct.encode('utf-8'))
@@ -1780,6 +1829,7 @@ class TestSloDeleteManifest(SloTestCase):
])
def test_handle_async_delete_nested(self):
+ self.app.can_ignore_range = True
self.slo.allow_async_delete = True
req = Request.blank(
'/v1/AUTH_test/deltest/manifest-with-submanifest' +
@@ -1793,6 +1843,7 @@ class TestSloDeleteManifest(SloTestCase):
'manifest-with-submanifest?multipart-manifest=get')])
def test_handle_async_delete_too_many_containers(self):
+ self.app.can_ignore_range = True
self.slo.allow_async_delete = True
self.app.register(
'GET', '/v1/AUTH_test/deltest/man',
diff --git a/test/unit/common/middleware/test_staticweb.py b/test/unit/common/middleware/test_staticweb.py
index b72b34bf2..02547f4d4 100644
--- a/test/unit/common/middleware/test_staticweb.py
+++ b/test/unit/common/middleware/test_staticweb.py
@@ -61,6 +61,7 @@ meta_map = {
'web-error': 'error.html'}},
'c13': {'meta': {'web-listings': 'f',
'web-listings-css': 'listing.css'}},
+ 'c14': {'meta': {'web-listings': 't'}},
}
@@ -271,6 +272,8 @@ class FakeApp(object):
elif env['PATH_INFO'] == '/v1/a/c12/200error.html':
return Response(status='200 Ok', body='error file')(env,
start_response)
+ elif env['PATH_INFO'] == '/v1/a/c14':
+ return self.listing(env, start_response)
else:
raise Exception('Unknown path %r' % env['PATH_INFO'])
@@ -344,6 +347,13 @@ class FakeApp(object):
"last_modified":"2011-03-24T04:27:52.709100"},
{"subdir":"\u2603/\u2603/"}]
'''.strip()
+ elif env['PATH_INFO'] == '/v1/a/c14' and env['QUERY_STRING'] == \
+ 'delimiter=/':
+ headers.update({'X-Container-Object-Count': '0',
+ 'X-Container-Bytes-Used': '0',
+ 'X-Container-Read': '.r:*',
+ 'Content-Type': 'application/json; charset=utf-8'})
+ body = '[]'
elif 'prefix=' in env['QUERY_STRING']:
return Response(status='204 No Content')(env, start_response)
else:
@@ -777,6 +787,16 @@ class TestStaticWeb(unittest.TestCase):
self.assertEqual(resp.status_int, 200)
self.assertIn(b'index file', resp.body)
+ def test_container13empty(self):
+ resp = Request.blank(
+ '/v1/a/c14/').get_response(self.test_staticweb)
+ self.assertEqual(resp.status_int, 200)
+ self.assertIn(b'Listing of /v1/a/c14/', resp.body)
+ self.assertIn(b'</style>', resp.body)
+ self.assertNotIn(b'<link', resp.body)
+ self.assertNotIn(b'listing.css', resp.body)
+ self.assertNotIn(b'<td', resp.body)
+
def test_container_404_has_css(self):
resp = Request.blank('/v1/a/c13/').get_response(
self.test_staticweb)
diff --git a/test/unit/common/middleware/test_tempurl.py b/test/unit/common/middleware/test_tempurl.py
index 115f0e525..ba970c2d5 100644
--- a/test/unit/common/middleware/test_tempurl.py
+++ b/test/unit/common/middleware/test_tempurl.py
@@ -78,6 +78,7 @@ class TestTempURL(unittest.TestCase):
self.app = FakeApp()
self.auth = tempauth.filter_factory({'reseller_prefix': ''})(self.app)
self.tempurl = tempurl.filter_factory({})(self.auth)
+ self.logger = self.tempurl.logger = debug_logger()
def _make_request(self, path, environ=None, keys=(), container_keys=None,
**kwargs):
@@ -157,6 +158,7 @@ class TestTempURL(unittest.TestCase):
path = '/v1/a/c/o'
key = b'abc'
hmac_body = ('%s\n%i\n%s' % (method, expires, path)).encode('utf-8')
+
sig = hmac.new(key, hmac_body, hashlib.sha1).hexdigest()
self.assert_valid_sig(expires, path, [key], sig)
@@ -171,6 +173,12 @@ class TestTempURL(unittest.TestCase):
key, hmac_body, hashlib.sha512).digest())
self.assert_valid_sig(expires, path, [key], b'sha512:' + sig)
+ self.assertEqual(self.logger.get_increment_counts(), {
+ 'tempurl.digests.sha1': 1,
+ 'tempurl.digests.sha256': 2,
+ 'tempurl.digests.sha512': 1
+ })
+
def test_get_valid_key2(self):
method = 'GET'
expires = int(time() + 86400)
@@ -178,8 +186,8 @@ class TestTempURL(unittest.TestCase):
key1 = b'abc123'
key2 = b'def456'
hmac_body = ('%s\n%i\n%s' % (method, expires, path)).encode('utf-8')
- sig1 = hmac.new(key1, hmac_body, hashlib.sha1).hexdigest()
- sig2 = hmac.new(key2, hmac_body, hashlib.sha1).hexdigest()
+ sig1 = hmac.new(key1, hmac_body, hashlib.sha256).hexdigest()
+ sig2 = hmac.new(key2, hmac_body, hashlib.sha256).hexdigest()
for sig in (sig1, sig2):
self.assert_valid_sig(expires, path, [key1, key2], sig)
@@ -201,8 +209,8 @@ class TestTempURL(unittest.TestCase):
key1 = b'me'
key2 = b'other'
hmac_body = ('%s\n%i\n%s' % (method, expires, path)).encode('utf-8')
- sig1 = hmac.new(key1, hmac_body, hashlib.sha1).hexdigest()
- sig2 = hmac.new(key2, hmac_body, hashlib.sha1).hexdigest()
+ sig1 = hmac.new(key1, hmac_body, hashlib.sha256).hexdigest()
+ sig2 = hmac.new(key2, hmac_body, hashlib.sha256).hexdigest()
account_keys = []
for sig in (sig1, sig2):
self.assert_valid_sig(expires, path, account_keys, sig, environ)
@@ -210,11 +218,13 @@ class TestTempURL(unittest.TestCase):
def test_signature_trim(self):
# Insert proxy logging into the pipeline
p_logging = proxy_logging.filter_factory({})(self.app)
- self.auth = tempauth.filter_factory({'reseller_prefix': ''})(
- p_logging)
- self.tempurl = tempurl.filter_factory({})(self.auth)
+ self.auth = tempauth.filter_factory({
+ 'reseller_prefix': ''})(p_logging)
+ self.tempurl = tempurl.filter_factory({
+ 'allowed_digests': 'sha1'})(self.auth)
- sig = 'valid_sigs_will_be_exactly_40_characters'
+ # valid sig should be exactly 40 hex chars
+ sig = 'deadbeefdeadbeefdeadbeefdeadbeefdeadbeef'
expires = int(time() + 1000)
p_logging.access_logger.logger = debug_logger('fake')
resp = self._make_request(
@@ -239,7 +249,7 @@ class TestTempURL(unittest.TestCase):
path = '/v1/a/c/o'
key = b'abc'
hmac_body = ('%s\n%i\n%s' % (method, expires, path)).encode('utf-8')
- sig = hmac.new(key, hmac_body, hashlib.sha1).hexdigest()
+ sig = hmac.new(key, hmac_body, hashlib.sha256).hexdigest()
req = self._make_request(path, keys=[key], environ={
'QUERY_STRING': 'temp_url_sig=%s&temp_url_expires=%s&'
'filename=bob%%20%%22killer%%22.txt' % (sig, expires)})
@@ -261,7 +271,7 @@ class TestTempURL(unittest.TestCase):
path = '/v1/a/c/o'
key = b'abc'
hmac_body = ('%s\n%i\n%s' % (method, expires, path)).encode('utf-8')
- sig = hmac.new(key, hmac_body, hashlib.sha1).hexdigest()
+ sig = hmac.new(key, hmac_body, hashlib.sha256).hexdigest()
req = self._make_request(path, keys=[key], environ={
'REQUEST_METHOD': 'HEAD',
'QUERY_STRING': 'temp_url_sig=%s&temp_url_expires=%s&'
@@ -279,7 +289,7 @@ class TestTempURL(unittest.TestCase):
path = '/v1/a/c/o'
key = b'abc'
hmac_body = ('%s\n%i\n%s' % (method, expires, path)).encode('utf-8')
- sig = hmac.new(key, hmac_body, hashlib.sha1).hexdigest()
+ sig = hmac.new(key, hmac_body, hashlib.sha256).hexdigest()
req = self._make_request(path, keys=[key], environ={
'REQUEST_METHOD': 'HEAD',
'QUERY_STRING': 'temp_url_sig=%s&temp_url_expires=%s'
@@ -290,7 +300,7 @@ class TestTempURL(unittest.TestCase):
get_method = 'GET'
get_hmac_body = ('%s\n%i\n%s' %
(get_method, expires, path)).encode('utf-8')
- get_sig = hmac.new(key, get_hmac_body, hashlib.sha1).hexdigest()
+ get_sig = hmac.new(key, get_hmac_body, hashlib.sha256).hexdigest()
get_req = self._make_request(path, keys=[key], environ={
'REQUEST_METHOD': 'GET',
'QUERY_STRING': 'temp_url_sig=%s&temp_url_expires=%s'
@@ -306,7 +316,7 @@ class TestTempURL(unittest.TestCase):
path = '/v1/a/c/o'
key = b'abc'
hmac_body = ('%s\n%i\n%s' % (method, expires, path)).encode('utf-8')
- sig = hmac.new(key, hmac_body, hashlib.sha1).hexdigest()
+ sig = hmac.new(key, hmac_body, hashlib.sha256).hexdigest()
req = self._make_request(path, keys=[key], environ={
'QUERY_STRING': 'temp_url_sig=%s&temp_url_expires=%s&'
'filename=bob%%20%%22killer%%22.txt&inline=' % (sig, expires)})
@@ -328,7 +338,7 @@ class TestTempURL(unittest.TestCase):
path = '/v1/a/c/o'
key = b'abc'
hmac_body = ('%s\n%i\n%s' % (method, expires, path)).encode('utf-8')
- sig = hmac.new(key, hmac_body, hashlib.sha1).hexdigest()
+ sig = hmac.new(key, hmac_body, hashlib.sha256).hexdigest()
req = self._make_request(path, keys=[key], environ={
'QUERY_STRING': 'temp_url_sig=%s&temp_url_expires=%s&'
'inline=' % (sig, expires)})
@@ -349,14 +359,14 @@ class TestTempURL(unittest.TestCase):
key = b'abc'
hmac_body = ('%s\n%i\n%s' %
(method, expires, sig_path)).encode('utf-8')
- sig = hmac.new(key, hmac_body, hashlib.sha1).hexdigest()
+ sig = hmac.new(key, hmac_body, hashlib.sha256).hexdigest()
self.assert_valid_sig(expires, query_path, [key], sig, prefix=prefix)
query_path = query_path[:-1] + 'p3/o'
key = b'abc'
hmac_body = ('%s\n%i\n%s' %
(method, expires, sig_path)).encode('utf-8')
- sig = hmac.new(key, hmac_body, hashlib.sha1).hexdigest()
+ sig = hmac.new(key, hmac_body, hashlib.sha256).hexdigest()
self.assert_valid_sig(expires, query_path, [key], sig, prefix=prefix)
def test_get_valid_with_prefix_empty(self):
@@ -367,7 +377,7 @@ class TestTempURL(unittest.TestCase):
key = b'abc'
hmac_body = ('%s\n%i\n%s' %
(method, expires, sig_path)).encode('utf-8')
- sig = hmac.new(key, hmac_body, hashlib.sha1).hexdigest()
+ sig = hmac.new(key, hmac_body, hashlib.sha256).hexdigest()
self.assert_valid_sig(expires, query_path, [key], sig, prefix='')
def test_obj_odd_chars(self):
@@ -376,7 +386,7 @@ class TestTempURL(unittest.TestCase):
path = '/v1/a/c/a\r\nb'
key = b'abc'
hmac_body = ('%s\n%i\n%s' % (method, expires, path)).encode('utf-8')
- sig = hmac.new(key, hmac_body, hashlib.sha1).hexdigest()
+ sig = hmac.new(key, hmac_body, hashlib.sha256).hexdigest()
req = self._make_request(quote(path), keys=[key], environ={
'QUERY_STRING': 'temp_url_sig=%s&temp_url_expires=%s' % (
sig, expires)})
@@ -396,7 +406,7 @@ class TestTempURL(unittest.TestCase):
path = '/v1/a/c/o'
key = b'abc'
hmac_body = ('%s\n%i\n%s' % (method, expires, path)).encode('utf-8')
- sig = hmac.new(key, hmac_body, hashlib.sha1).hexdigest()
+ sig = hmac.new(key, hmac_body, hashlib.sha256).hexdigest()
req = self._make_request(path, keys=[key], environ={
'QUERY_STRING': 'temp_url_sig=%s&temp_url_expires=%s' % (
sig, expires)})
@@ -416,7 +426,7 @@ class TestTempURL(unittest.TestCase):
path = '/v1/a/c/o/'
key = b'abc'
hmac_body = ('%s\n%i\n%s' % (method, expires, path)).encode('utf-8')
- sig = hmac.new(key, hmac_body, hashlib.sha1).hexdigest()
+ sig = hmac.new(key, hmac_body, hashlib.sha256).hexdigest()
req = self._make_request(path, keys=[key], environ={
'QUERY_STRING': 'temp_url_sig=%s&temp_url_expires=%s' % (
sig, expires)})
@@ -436,7 +446,7 @@ class TestTempURL(unittest.TestCase):
path = '/v1/a/c/o'
key = b'abc'
hmac_body = ('%s\n%i\n%s' % (method, expires, path)).encode('utf-8')
- sig = hmac.new(key, hmac_body, hashlib.sha1).hexdigest()
+ sig = hmac.new(key, hmac_body, hashlib.sha256).hexdigest()
req = self._make_request(path, keys=[key], environ={
'QUERY_STRING': 'temp_url_sig=%s&temp_url_expires=%s&'
'filename=/i/want/this/just/as/it/is/' % (sig, expires)})
@@ -457,7 +467,7 @@ class TestTempURL(unittest.TestCase):
path = '/v1/a/c/o'
key = b'abc'
hmac_body = ('%s\n%i\n%s' % (method, expires, path)).encode('utf-8')
- sig = hmac.new(key, hmac_body, hashlib.sha1).hexdigest()
+ sig = hmac.new(key, hmac_body, hashlib.sha256).hexdigest()
req = self._make_request(
path, keys=[key],
environ={'QUERY_STRING': 'temp_url_sig=%s&temp_url_expires=%s' % (
@@ -475,7 +485,7 @@ class TestTempURL(unittest.TestCase):
path = '/v1/a/c/o'
key = b'abc'
hmac_body = ('%s\n%i\n%s' % (method, expires, path)).encode('utf-8')
- sig = hmac.new(key, hmac_body, hashlib.sha1).hexdigest()
+ sig = hmac.new(key, hmac_body, hashlib.sha256).hexdigest()
req = self._make_request(
path, keys=[key],
environ={'REQUEST_METHOD': 'PUT',
@@ -492,7 +502,7 @@ class TestTempURL(unittest.TestCase):
path = '/v1/a/c/o'
key = b'abc'
hmac_body = ('%s\n%i\n%s' % (method, expires, path)).encode('utf-8')
- sig = hmac.new(key, hmac_body, hashlib.sha1).hexdigest()
+ sig = hmac.new(key, hmac_body, hashlib.sha256).hexdigest()
req = self._make_request(
path, keys=[key],
environ={'REQUEST_METHOD': 'PUT',
@@ -526,7 +536,7 @@ class TestTempURL(unittest.TestCase):
path = '/v1/a/c/o'
key = b'abc'
hmac_body = ('%s\n%i\n%s' % (method, expires, path)).encode('utf-8')
- sig = hmac.new(key, hmac_body, hashlib.sha1).hexdigest()
+ sig = hmac.new(key, hmac_body, hashlib.sha256).hexdigest()
req = self._make_request(
path, keys=[key],
environ={'REQUEST_METHOD': 'PUT',
@@ -545,7 +555,7 @@ class TestTempURL(unittest.TestCase):
path = '/v1/a/c/o'
key = b'abc'
hmac_body = ('%s\n%i\n%s' % (method, expires, path)).encode('utf-8')
- sig = hmac.new(key, hmac_body, hashlib.sha1).hexdigest()
+ sig = hmac.new(key, hmac_body, hashlib.sha256).hexdigest()
req = self._make_request(
path, keys=[key],
environ={'QUERY_STRING': 'temp_url_sig=%s&temp_url_expires=%s' % (
@@ -561,7 +571,7 @@ class TestTempURL(unittest.TestCase):
path = '/v1/a/c/o'
key = b'abc'
hmac_body = ('%s\n%i\n%s' % (method, expires, path)).encode('utf-8')
- hmac.new(key, hmac_body, hashlib.sha1).hexdigest()
+ hmac.new(key, hmac_body, hashlib.sha256).hexdigest()
req = self._make_request(
path, keys=[key],
environ={'QUERY_STRING': 'temp_url_expires=%s' % expires})
@@ -576,7 +586,7 @@ class TestTempURL(unittest.TestCase):
path = '/v1/a/c/o'
key = b'abc'
hmac_body = ('%s\n%i\n%s' % (method, expires, path)).encode('utf-8')
- sig = hmac.new(key, hmac_body, hashlib.sha1).hexdigest()
+ sig = hmac.new(key, hmac_body, hashlib.sha256).hexdigest()
req = self._make_request(
path, keys=[key],
environ={'QUERY_STRING': 'temp_url_sig=%s' % sig})
@@ -591,7 +601,7 @@ class TestTempURL(unittest.TestCase):
path = '/v1/a/c/'
key = b'abc'
hmac_body = ('%s\n%i\n%s' % (method, expires, path)).encode('utf-8')
- sig = hmac.new(key, hmac_body, hashlib.sha1).hexdigest()
+ sig = hmac.new(key, hmac_body, hashlib.sha256).hexdigest()
req = self._make_request(
path, keys=[key],
environ={'QUERY_STRING': 'temp_url_sig=%s&temp_url_expires=%s' % (
@@ -607,7 +617,7 @@ class TestTempURL(unittest.TestCase):
path = '/v1/a/c/o'
key = b'abc'
hmac_body = ('%s\n%i\n%s' % (method, expires, path)).encode('utf-8')
- sig = hmac.new(key, hmac_body, hashlib.sha1).hexdigest()
+ sig = hmac.new(key, hmac_body, hashlib.sha256).hexdigest()
req = self._make_request(
path, keys=[],
environ={'QUERY_STRING': 'temp_url_sig=%s&temp_url_expires=%s' % (
@@ -623,7 +633,7 @@ class TestTempURL(unittest.TestCase):
path = '/v1/a/c/o'
key = b'abc'
hmac_body = ('%s\n%i\n%s' % (method, expires, path)).encode('utf-8')
- sig = hmac.new(key, hmac_body, hashlib.sha1).hexdigest()
+ sig = hmac.new(key, hmac_body, hashlib.sha256).hexdigest()
req = self._make_request(
path, keys=[key],
environ={'REQUEST_METHOD': 'HEAD',
@@ -640,7 +650,7 @@ class TestTempURL(unittest.TestCase):
path = '/v1/a/c/o'
key = b'abc'
hmac_body = ('%s\n%i\n%s' % (method, expires, path)).encode('utf-8')
- sig = hmac.new(key, hmac_body, hashlib.sha1).hexdigest()
+ sig = hmac.new(key, hmac_body, hashlib.sha256).hexdigest()
req = self._make_request(
path, keys=[key],
environ={'REQUEST_METHOD': 'HEAD',
@@ -657,7 +667,7 @@ class TestTempURL(unittest.TestCase):
path = '/v1/a/c/o'
key = b'abc'
hmac_body = ('%s\n%i\n%s' % (method, expires, path)).encode('utf-8')
- sig = hmac.new(key, hmac_body, hashlib.sha1).hexdigest()
+ sig = hmac.new(key, hmac_body, hashlib.sha256).hexdigest()
req = self._make_request(
path, keys=[key],
environ={'REQUEST_METHOD': 'HEAD',
@@ -674,7 +684,7 @@ class TestTempURL(unittest.TestCase):
path = '/v1/a/c/o'
key = b'abc'
hmac_body = ('%s\n%i\n%s' % (method, expires, path)).encode('utf-8')
- sig = hmac.new(key, hmac_body, hashlib.sha1).hexdigest()
+ sig = hmac.new(key, hmac_body, hashlib.sha256).hexdigest()
# Deliberately fudge expires to show HEADs aren't just automatically
# allowed.
expires += 1
@@ -694,7 +704,7 @@ class TestTempURL(unittest.TestCase):
path = '/v1/a/c/o'
key = b'abc'
hmac_body = ('%s\n%i\n%s' % (method, expires, path)).encode('utf-8')
- sig = hmac.new(key, hmac_body, hashlib.sha1).hexdigest()
+ sig = hmac.new(key, hmac_body, hashlib.sha256).hexdigest()
req = self._make_request(
path, keys=[key],
environ={'REQUEST_METHOD': 'POST',
@@ -712,7 +722,7 @@ class TestTempURL(unittest.TestCase):
path = '/v1/a/c/o'
key = b'abc'
hmac_body = ('%s\n%i\n%s' % (method, expires, path)).encode('utf-8')
- sig = hmac.new(key, hmac_body, hashlib.sha1).hexdigest()
+ sig = hmac.new(key, hmac_body, hashlib.sha256).hexdigest()
req = self._make_request(
path, keys=[key],
environ={'REQUEST_METHOD': 'DELETE',
@@ -729,7 +739,7 @@ class TestTempURL(unittest.TestCase):
path = '/v1/a/c/o'
key = b'abc'
hmac_body = ('%s\n%i\n%s' % (method, expires, path)).encode('utf-8')
- sig = hmac.new(key, hmac_body, hashlib.sha1).hexdigest()
+ sig = hmac.new(key, hmac_body, hashlib.sha256).hexdigest()
req = self._make_request(
path, keys=[key],
environ={'REQUEST_METHOD': 'DELETE',
@@ -744,7 +754,7 @@ class TestTempURL(unittest.TestCase):
path = '/v1/a/c/o'
key = b'abc'
hmac_body = ('%s\n%i\n%s' % (method, expires, path)).encode('utf-8')
- sig = hmac.new(key, hmac_body, hashlib.sha1).hexdigest()
+ sig = hmac.new(key, hmac_body, hashlib.sha256).hexdigest()
req = self._make_request(
path, keys=[key],
environ={'REQUEST_METHOD': 'UNKNOWN',
@@ -772,7 +782,7 @@ class TestTempURL(unittest.TestCase):
path = '/v1/a/c/o'
hmac_body = ('%s\n%i\n%s' % (method, expires, path)).encode('utf-8')
- sig = hmac.new(b'account-key', hmac_body, hashlib.sha1).hexdigest()
+ sig = hmac.new(b'account-key', hmac_body, hashlib.sha256).hexdigest()
qs = '?temp_url_sig=%s&temp_url_expires=%s' % (sig, expires)
# make request will setup the environ cache for us
@@ -794,7 +804,7 @@ class TestTempURL(unittest.TestCase):
# the container level; a different container in the same account is
# out of scope and thus forbidden.
hmac_body = ('%s\n%i\n%s' % (method, expires, path)).encode('utf-8')
- sig = hmac.new(b'container-key', hmac_body, hashlib.sha1).hexdigest()
+ sig = hmac.new(b'container-key', hmac_body, hashlib.sha256).hexdigest()
qs = '?temp_url_sig=%s&temp_url_expires=%s' % (sig, expires)
req = self._make_request(path + qs, **key_kwargs)
@@ -815,7 +825,7 @@ class TestTempURL(unittest.TestCase):
# account-level tempurls by reusing one of the account's keys on a
# container.
hmac_body = ('%s\n%i\n%s' % (method, expires, path)).encode('utf-8')
- sig = hmac.new(b'shared-key', hmac_body, hashlib.sha1).hexdigest()
+ sig = hmac.new(b'shared-key', hmac_body, hashlib.sha256).hexdigest()
qs = '?temp_url_sig=%s&temp_url_expires=%s' % (sig, expires)
req = self._make_request(path + qs, **key_kwargs)
@@ -836,7 +846,7 @@ class TestTempURL(unittest.TestCase):
path = '/v1/a/c/o'
key = b'abc'
hmac_body = ('%s\n%i\n%s' % (method, expires, path)).encode('utf-8')
- sig = hmac.new(key, hmac_body, hashlib.sha1).hexdigest()
+ sig = hmac.new(key, hmac_body, hashlib.sha256).hexdigest()
req = self._make_request(
path + '2', keys=[key],
environ={'QUERY_STRING': 'temp_url_sig=%s&temp_url_expires=%s' % (
@@ -852,7 +862,7 @@ class TestTempURL(unittest.TestCase):
path = '/v1/a/c/o'
key = b'abc'
hmac_body = ('%s\n%i\n%s' % (method, expires, path)).encode('utf-8')
- sig = hmac.new(key, hmac_body, hashlib.sha1).hexdigest()
+ sig = hmac.new(key, hmac_body, hashlib.sha256).hexdigest()
if sig[-1] != '0':
sig = sig[:-1] + '0'
else:
@@ -872,7 +882,7 @@ class TestTempURL(unittest.TestCase):
path = '/v1/a/c/o'
key = b'abc'
hmac_body = ('%s\n%i\n%s' % (method, expires, path)).encode('utf-8')
- sig = hmac.new(key, hmac_body, hashlib.sha1).hexdigest()
+ sig = hmac.new(key, hmac_body, hashlib.sha256).hexdigest()
req = self._make_request(
path, keys=[key],
environ={'QUERY_STRING': 'temp_url_sig=%s&temp_url_expires=%s' % (
@@ -936,7 +946,7 @@ class TestTempURL(unittest.TestCase):
path = '/v1/a/c/o'
key = b'abc'
hmac_body = ('%s\n%i\n%s' % (method, expires, path)).encode('utf-8')
- sig = hmac.new(key, hmac_body, hashlib.sha1).hexdigest()
+ sig = hmac.new(key, hmac_body, hashlib.sha256).hexdigest()
req = self._make_request(
path, keys=[key + b'2'],
environ={'QUERY_STRING': 'temp_url_sig=%s&temp_url_expires=%s' % (
@@ -954,7 +964,7 @@ class TestTempURL(unittest.TestCase):
key = b'abc'
hmac_body = ('%s\n%i\n%s' %
(method, expires, sig_path)).encode('utf-8')
- sig = hmac.new(key, hmac_body, hashlib.sha1).hexdigest()
+ sig = hmac.new(key, hmac_body, hashlib.sha256).hexdigest()
req = self._make_request(
query_path, keys=[key],
environ={'QUERY_STRING':
@@ -972,7 +982,7 @@ class TestTempURL(unittest.TestCase):
key = b'abc'
hmac_body = ('%s\n%i\n%s' %
(method, expires, path)).encode('utf-8')
- sig = hmac.new(key, hmac_body, hashlib.sha1).hexdigest()
+ sig = hmac.new(key, hmac_body, hashlib.sha256).hexdigest()
req = self._make_request(
path, keys=[key],
environ={'QUERY_STRING':
@@ -993,7 +1003,7 @@ class TestTempURL(unittest.TestCase):
('X-Symlink-Target', 'cont/symlink')]:
hmac_body = ('%s\n%i\n%s' %
(method, expires, path)).encode('utf-8')
- sig = hmac.new(key, hmac_body, hashlib.sha1).hexdigest()
+ sig = hmac.new(key, hmac_body, hashlib.sha256).hexdigest()
req = self._make_request(
path, method=method, keys=[key],
headers={hdr: value},
@@ -1014,7 +1024,7 @@ class TestTempURL(unittest.TestCase):
path = '/v1/a/c/o'
key = b'abc'
hmac_body = ('%s\n%i\n%s' % (method, expires, path)).encode('utf-8')
- sig = hmac.new(key, hmac_body, hashlib.sha1).hexdigest()
+ sig = hmac.new(key, hmac_body, hashlib.sha256).hexdigest()
req = self._make_request(
path, keys=[key],
headers={'x-remove-this': 'value'},
@@ -1033,7 +1043,7 @@ class TestTempURL(unittest.TestCase):
path = '/v1/a/c/o'
key = b'abc'
hmac_body = ('%s\n%i\n%s' % (method, expires, path)).encode('utf-8')
- sig = hmac.new(key, hmac_body, hashlib.sha1).hexdigest()
+ sig = hmac.new(key, hmac_body, hashlib.sha256).hexdigest()
req = self._make_request(
path, keys=[key],
headers={'x-remove-this-one': 'value1',
@@ -1055,7 +1065,7 @@ class TestTempURL(unittest.TestCase):
path = '/v1/a/c/o'
key = b'abc'
hmac_body = ('%s\n%i\n%s' % (method, expires, path)).encode('utf-8')
- sig = hmac.new(key, hmac_body, hashlib.sha1).hexdigest()
+ sig = hmac.new(key, hmac_body, hashlib.sha256).hexdigest()
req = self._make_request(
path, keys=[key],
headers={'x-conflict-header': 'value'},
@@ -1074,7 +1084,7 @@ class TestTempURL(unittest.TestCase):
path = '/v1/a/c/o'
key = b'abc'
hmac_body = ('%s\n%i\n%s' % (method, expires, path)).encode('utf-8')
- sig = hmac.new(key, hmac_body, hashlib.sha1).hexdigest()
+ sig = hmac.new(key, hmac_body, hashlib.sha256).hexdigest()
req = self._make_request(
path, keys=[key],
headers={'x-conflict-header-test': 'value'},
@@ -1092,7 +1102,7 @@ class TestTempURL(unittest.TestCase):
path = '/v1/a/c/o'
key = b'abc'
hmac_body = ('%s\n%i\n%s' % (method, expires, path)).encode('utf-8')
- sig = hmac.new(key, hmac_body, hashlib.sha1).hexdigest()
+ sig = hmac.new(key, hmac_body, hashlib.sha256).hexdigest()
req = self._make_request(
path, keys=[key],
environ={'QUERY_STRING': 'temp_url_sig=%s&temp_url_expires=%s' % (
@@ -1111,7 +1121,7 @@ class TestTempURL(unittest.TestCase):
path = '/v1/a/c/o'
key = b'abc'
hmac_body = ('%s\n%i\n%s' % (method, expires, path)).encode('utf-8')
- sig = hmac.new(key, hmac_body, hashlib.sha1).hexdigest()
+ sig = hmac.new(key, hmac_body, hashlib.sha256).hexdigest()
req = self._make_request(
path, keys=[key],
environ={'QUERY_STRING': 'temp_url_sig=%s&temp_url_expires=%s' % (
@@ -1131,7 +1141,7 @@ class TestTempURL(unittest.TestCase):
path = '/v1/a/c/o'
key = b'abc'
hmac_body = ('%s\n%i\n%s' % (method, expires, path)).encode('utf-8')
- sig = hmac.new(key, hmac_body, hashlib.sha1).hexdigest()
+ sig = hmac.new(key, hmac_body, hashlib.sha256).hexdigest()
req = self._make_request(
path, keys=[key],
headers={},
@@ -1153,7 +1163,7 @@ class TestTempURL(unittest.TestCase):
path = '/v1/a/c/o'
key = b'abc'
hmac_body = ('%s\n%i\n%s' % (method, expires, path)).encode('utf-8')
- sig = hmac.new(key, hmac_body, hashlib.sha1).hexdigest()
+ sig = hmac.new(key, hmac_body, hashlib.sha256).hexdigest()
req = self._make_request(
path, keys=[key],
headers={},
@@ -1467,7 +1477,7 @@ class TestTempURL(unittest.TestCase):
ip_range = '127.0.0.0/29'
hmac_body = ('ip=%s\n%s\n%i\n%s' %
(ip_range, method, expires, path)).encode('utf-8')
- sig = hmac.new(key, hmac_body, hashlib.sha1).hexdigest()
+ sig = hmac.new(key, hmac_body, hashlib.sha256).hexdigest()
req = self._make_request(path, keys=[key], environ={
'QUERY_STRING': 'temp_url_sig=%s&temp_url_expires=%s&'
'temp_url_ip_range=%s' % (sig, expires, ip_range),
@@ -1491,7 +1501,7 @@ class TestTempURL(unittest.TestCase):
ip = '127.0.0.1'
hmac_body = ('ip=%s\n%s\n%i\n%s' %
(ip, method, expires, path)).encode('utf-8')
- sig = hmac.new(key, hmac_body, hashlib.sha1).hexdigest()
+ sig = hmac.new(key, hmac_body, hashlib.sha256).hexdigest()
req = self._make_request(path, keys=[key], environ={
'QUERY_STRING': 'temp_url_sig=%s&temp_url_expires=%s&'
'temp_url_ip_range=%s' % (sig, expires, ip),
@@ -1536,7 +1546,7 @@ class TestTempURL(unittest.TestCase):
ip = '2001:db8::'
hmac_body = ('ip=%s\n%s\n%i\n%s' %
(ip, method, expires, path)).encode('utf-8')
- sig = hmac.new(key, hmac_body, hashlib.sha1).hexdigest()
+ sig = hmac.new(key, hmac_body, hashlib.sha256).hexdigest()
req = self._make_request(path, keys=[key], environ={
'QUERY_STRING': 'temp_url_sig=%s&temp_url_expires=%s&'
'temp_url_ip_range=%s' % (sig, expires, ip),
@@ -1560,7 +1570,7 @@ class TestTempURL(unittest.TestCase):
ip_range = '2001:db8::/127'
hmac_body = ('ip=%s\n%s\n%i\n%s' %
(ip_range, method, expires, path)).encode('utf-8')
- sig = hmac.new(key, hmac_body, hashlib.sha1).hexdigest()
+ sig = hmac.new(key, hmac_body, hashlib.sha256).hexdigest()
req = self._make_request(path, keys=[key], environ={
'QUERY_STRING': 'temp_url_sig=%s&temp_url_expires=%s&'
'temp_url_ip_range=%s' % (sig, expires, ip_range),
@@ -1615,6 +1625,7 @@ class TestSwiftInfo(unittest.TestCase):
self.assertEqual(set(info['outgoing_allow_headers']),
set(('x-object-meta-public-*',)))
self.assertEqual(info['allowed_digests'], ['sha1', 'sha256', 'sha512'])
+ self.assertEqual(info['deprecated_digests'], ['sha1'])
def test_non_default_methods(self):
tempurl.filter_factory({
@@ -1623,7 +1634,7 @@ class TestSwiftInfo(unittest.TestCase):
'incoming_allow_headers': 'x-timestamp x-versions-location',
'outgoing_remove_headers': 'x-*',
'outgoing_allow_headers': 'x-object-meta-* content-type',
- 'allowed_digests': 'sha512 md5 not-a-valid-digest',
+ 'allowed_digests': 'sha1 sha512 md5 not-a-valid-digest',
})
swift_info = registry.get_swift_info()
self.assertIn('tempurl', swift_info)
@@ -1636,7 +1647,25 @@ class TestSwiftInfo(unittest.TestCase):
self.assertEqual(set(info['outgoing_remove_headers']), set(('x-*', )))
self.assertEqual(set(info['outgoing_allow_headers']),
set(('x-object-meta-*', 'content-type')))
- self.assertEqual(info['allowed_digests'], ['sha512'])
+ self.assertEqual(info['allowed_digests'], ['sha1', 'sha512'])
+ self.assertEqual(info['deprecated_digests'], ['sha1'])
+
+ def test_no_deprecated_digests(self):
+ tempurl.filter_factory({'allowed_digests': 'sha256 sha512'})
+ swift_info = registry.get_swift_info()
+ self.assertIn('tempurl', swift_info)
+ info = swift_info['tempurl']
+ self.assertEqual(set(info['methods']),
+ set(('GET', 'HEAD', 'PUT', 'POST', 'DELETE')))
+ self.assertEqual(set(info['incoming_remove_headers']),
+ set(('x-timestamp',)))
+ self.assertEqual(set(info['incoming_allow_headers']), set())
+ self.assertEqual(set(info['outgoing_remove_headers']),
+ set(('x-object-meta-*',)))
+ self.assertEqual(set(info['outgoing_allow_headers']),
+ set(('x-object-meta-public-*',)))
+ self.assertEqual(info['allowed_digests'], ['sha256', 'sha512'])
+ self.assertNotIn('deprecated_digests', info)
def test_bad_config(self):
with self.assertRaises(ValueError):
@@ -1645,5 +1674,19 @@ class TestSwiftInfo(unittest.TestCase):
})
+class TestTempurlWarning(unittest.TestCase):
+
+ def test_deprecation_warning(self):
+ logger = debug_logger()
+ with mock.patch('swift.common.middleware.tempurl.get_logger',
+ lambda *a, **kw: logger):
+ tempurl.filter_factory({'allowed_digests': 'sha1'})
+ log_lines = logger.get_lines_for_level('warning')
+ self.assertIn(
+ 'The following digest algorithms are configured but deprecated:'
+ ' sha1. Support will be removed in a future release.',
+ log_lines)
+
+
if __name__ == '__main__':
unittest.main()
diff --git a/test/unit/common/test_db_replicator.py b/test/unit/common/test_db_replicator.py
index 111b77f70..485773a0b 100644
--- a/test/unit/common/test_db_replicator.py
+++ b/test/unit/common/test_db_replicator.py
@@ -352,7 +352,17 @@ class TestDBReplicator(unittest.TestCase):
def other_req(method, path, body, headers):
raise Exception('blah')
conn.request = other_req
+
+ class Closeable(object):
+ closed = False
+
+ def close(self):
+ self.closed = True
+
+ conn.sock = fake_sock = Closeable()
self.assertIsNone(conn.replicate(1, 2, 3))
+ self.assertTrue(fake_sock.closed)
+ self.assertEqual(None, conn.sock)
def test_rsync_file(self):
replicator = TestReplicator({})
@@ -837,6 +847,51 @@ class TestDBReplicator(unittest.TestCase):
self.assertEqual(['/path/to/file'], self.delete_db_calls)
self.assertEqual(0, replicator.stats['failure'])
+ def test_handoff_delete(self):
+ def do_test(config, repl_to_node_results, expect_delete):
+ self.delete_db_calls = []
+ replicator = TestReplicator(config)
+ replicator.ring = FakeRingWithNodes().Ring('path')
+ replicator.brokerclass = FakeAccountBroker
+ mock_repl_to_node = mock.Mock()
+ mock_repl_to_node.side_effect = repl_to_node_results
+ replicator._repl_to_node = mock_repl_to_node
+ replicator.delete_db = self.stub_delete_db
+ orig_cleanup = replicator.cleanup_post_replicate
+ with mock.patch.object(replicator, 'cleanup_post_replicate',
+ side_effect=orig_cleanup) as mock_cleanup:
+ replicator._replicate_object('0', '/path/to/file', 'node_id')
+ mock_cleanup.assert_called_once_with(mock.ANY, mock.ANY,
+ repl_to_node_results)
+ self.assertIsInstance(mock_cleanup.call_args[0][0],
+ replicator.brokerclass)
+ if expect_delete:
+ self.assertEqual(['/path/to/file'], self.delete_db_calls)
+ else:
+ self.assertNotEqual(['/path/to/file'], self.delete_db_calls)
+
+ self.assertEqual(repl_to_node_results.count(True),
+ replicator.stats['success'])
+ self.assertEqual(repl_to_node_results.count(False),
+ replicator.stats['failure'])
+
+ for cfg, repl_results, expected_delete in (
+                # Start with the sanity check
+ ({}, [True] * 3, True),
+ ({}, [True, True, False], False),
+ ({'handoff_delete': 'auto'}, [True] * 3, True),
+ ({'handoff_delete': 'auto'}, [True, True, False], False),
+ ({'handoff_delete': 0}, [True] * 3, True),
+ ({'handoff_delete': 0}, [True, True, False], False),
+ # Now test a lower handoff delete
+ ({'handoff_delete': 2}, [True] * 3, True),
+ ({'handoff_delete': 2}, [True, True, False], True),
+ ({'handoff_delete': 2}, [True, False, False], False),
+ ({'handoff_delete': 1}, [True] * 3, True),
+ ({'handoff_delete': 1}, [True, True, False], True),
+ ({'handoff_delete': 1}, [True, False, False], True)):
+ do_test(cfg, repl_results, expected_delete)
+
def test_replicate_object_delete_delegated_to_cleanup_post_replicate(self):
replicator = TestReplicator({})
replicator.ring = FakeRingWithNodes().Ring('path')
@@ -1850,11 +1905,13 @@ class TestHandoffsOnly(unittest.TestCase):
self.assertEqual(
self.logger.get_lines_for_level('warning'),
- [('Starting replication pass with handoffs_only enabled. This '
- 'mode is not intended for normal operation; use '
- 'handoffs_only with care.'),
- ('Finished replication pass with handoffs_only enabled. '
- 'If handoffs_only is no longer required, disable it.')])
+ [('Starting replication pass with handoffs_only and/or '
+ 'handoffs_delete enabled. These '
+ 'modes are not intended for normal operation; use '
+ 'these options with care.'),
+ ('Finished replication pass with handoffs_only and/or '
+ 'handoffs_delete enabled. If these are no longer required, '
+ 'disable them.')])
def test_skips_primary_partitions(self):
replicator = TestReplicator({
@@ -2156,10 +2213,10 @@ class TestReplicatorSync(unittest.TestCase):
for node in self._ring.devs:
daemon = self._run_once(node)
if node['device'] == 'sdc':
- self.assertEqual(daemon._local_device_ids, set())
+ self.assertEqual(daemon._local_device_ids, {})
else:
self.assertEqual(daemon._local_device_ids,
- set([node['id']]))
+ {node['id']: node})
def test_clean_up_after_deleted_brokers(self):
broker = self._get_broker('a', 'c', node_index=0)
diff --git a/test/unit/common/test_digest.py b/test/unit/common/test_digest.py
new file mode 100644
index 000000000..d2105169f
--- /dev/null
+++ b/test/unit/common/test_digest.py
@@ -0,0 +1,191 @@
+# Copyright (c) 2022 NVIDIA
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import hashlib
+import unittest
+
+from swift.common import digest
+from test.debug_logger import debug_logger
+
+
+class TestDigestUtils(unittest.TestCase):
+    """Tests for swift.common.digest"""
+ def setUp(self):
+ self.logger = debug_logger('test_digest_utils')
+
+ def test_get_hmac(self):
+ self.assertEqual(
+ digest.get_hmac('GET', '/path', 1, 'abc'),
+ 'b17f6ff8da0e251737aa9e3ee69a881e3e092e2f')
+
+ def test_get_hmac_ip_range(self):
+ self.assertEqual(
+ digest.get_hmac('GET', '/path', 1, 'abc', ip_range='127.0.0.1'),
+ 'b30dde4d2b8562b8496466c3b46b2b9ac5054461')
+
+ def test_get_hmac_ip_range_non_binary_type(self):
+ self.assertEqual(
+ digest.get_hmac(
+ u'GET', u'/path', 1, u'abc', ip_range=u'127.0.0.1'),
+ 'b30dde4d2b8562b8496466c3b46b2b9ac5054461')
+
+ def test_get_hmac_digest(self):
+ self.assertEqual(
+ digest.get_hmac(u'GET', u'/path', 1, u'abc', digest='sha256'),
+ '64c5558394f86b042ce1e929b34907abd9d0a57f3e20cd3f93cffd83de0206a7')
+ self.assertEqual(
+ digest.get_hmac(
+ u'GET', u'/path', 1, u'abc', digest=hashlib.sha256),
+ '64c5558394f86b042ce1e929b34907abd9d0a57f3e20cd3f93cffd83de0206a7')
+
+ self.assertEqual(
+ digest.get_hmac(u'GET', u'/path', 1, u'abc', digest='sha512'),
+ '7e95af818aec1b69b53fc2cb6d69456ec64ebda6c17b8fc8b7303b78acc8ca'
+ '14fc4aed96c1614a8e9d6ff45a6237711d8be294cda679624825d79aa6959b'
+ '5229')
+ self.assertEqual(
+ digest.get_hmac(
+ u'GET', u'/path', 1, u'abc', digest=hashlib.sha512),
+ '7e95af818aec1b69b53fc2cb6d69456ec64ebda6c17b8fc8b7303b78acc8ca'
+ '14fc4aed96c1614a8e9d6ff45a6237711d8be294cda679624825d79aa6959b'
+ '5229')
+
+ def test_extract_digest_and_algorithm(self):
+ self.assertEqual(
+ digest.extract_digest_and_algorithm(
+ 'b17f6ff8da0e251737aa9e3ee69a881e3e092e2f'),
+ ('sha1', 'b17f6ff8da0e251737aa9e3ee69a881e3e092e2f'))
+ self.assertEqual(
+ digest.extract_digest_and_algorithm(
+ 'sha1:sw3eTSuFYrhJZGbDtGsrmsUFRGE='),
+ ('sha1', 'b30dde4d2b8562b8496466c3b46b2b9ac5054461'))
+ # also good with '=' stripped
+ self.assertEqual(
+ digest.extract_digest_and_algorithm(
+ 'sha1:sw3eTSuFYrhJZGbDtGsrmsUFRGE'),
+ ('sha1', 'b30dde4d2b8562b8496466c3b46b2b9ac5054461'))
+
+ self.assertEqual(
+ digest.extract_digest_and_algorithm(
+ 'b963712313cd4236696fb4c4cf11fc56'
+ 'ff4158e0bcbf1d4424df147783fd1045'),
+ ('sha256', 'b963712313cd4236696fb4c4cf11fc56'
+ 'ff4158e0bcbf1d4424df147783fd1045'))
+ self.assertEqual(
+ digest.extract_digest_and_algorithm(
+ 'sha256:uWNxIxPNQjZpb7TEzxH8Vv9BWOC8vx1EJN8Ud4P9EEU='),
+ ('sha256', 'b963712313cd4236696fb4c4cf11fc56'
+ 'ff4158e0bcbf1d4424df147783fd1045'))
+ self.assertEqual(
+ digest.extract_digest_and_algorithm(
+ 'sha256:uWNxIxPNQjZpb7TEzxH8Vv9BWOC8vx1EJN8Ud4P9EEU'),
+ ('sha256', 'b963712313cd4236696fb4c4cf11fc56'
+ 'ff4158e0bcbf1d4424df147783fd1045'))
+
+ self.assertEqual(
+ digest.extract_digest_and_algorithm(
+ '26df3d9d59da574d6f8d359cb2620b1b'
+ '86737215c38c412dfee0a410acea1ac4'
+ '285ad0c37229ca74e715c443979da17d'
+ '3d77a97d2ac79cc5e395b05bfa4bdd30'),
+ ('sha512', '26df3d9d59da574d6f8d359cb2620b1b'
+ '86737215c38c412dfee0a410acea1ac4'
+ '285ad0c37229ca74e715c443979da17d'
+ '3d77a97d2ac79cc5e395b05bfa4bdd30'))
+ self.assertEqual(
+ digest.extract_digest_and_algorithm(
+ 'sha512:Jt89nVnaV01vjTWcsmILG4ZzchXDjEEt/uCkEKzq'
+ 'GsQoWtDDcinKdOcVxEOXnaF9PXepfSrHnMXjlbBb+kvdMA=='),
+ ('sha512', '26df3d9d59da574d6f8d359cb2620b1b'
+ '86737215c38c412dfee0a410acea1ac4'
+ '285ad0c37229ca74e715c443979da17d'
+ '3d77a97d2ac79cc5e395b05bfa4bdd30'))
+ self.assertEqual(
+ digest.extract_digest_and_algorithm(
+ 'sha512:Jt89nVnaV01vjTWcsmILG4ZzchXDjEEt_uCkEKzq'
+ 'GsQoWtDDcinKdOcVxEOXnaF9PXepfSrHnMXjlbBb-kvdMA'),
+ ('sha512', '26df3d9d59da574d6f8d359cb2620b1b'
+ '86737215c38c412dfee0a410acea1ac4'
+ '285ad0c37229ca74e715c443979da17d'
+ '3d77a97d2ac79cc5e395b05bfa4bdd30'))
+
+ with self.assertRaises(ValueError):
+ digest.extract_digest_and_algorithm('')
+ with self.assertRaises(ValueError):
+ digest.extract_digest_and_algorithm(
+ 'exactly_forty_chars_but_not_hex_encoded!')
+ # Too short (md5)
+ with self.assertRaises(ValueError):
+ digest.extract_digest_and_algorithm(
+ 'd41d8cd98f00b204e9800998ecf8427e')
+ # but you can slip it in via the prefix notation!
+ self.assertEqual(
+ digest.extract_digest_and_algorithm(
+ 'md5:1B2M2Y8AsgTpgAmY7PhCfg'),
+ ('md5', 'd41d8cd98f00b204e9800998ecf8427e'))
+
+ def test_get_allowed_digests(self):
+ # start with defaults
+ allowed, deprecated = digest.get_allowed_digests(
+ ''.split(), self.logger)
+ self.assertEqual(allowed, {'sha256', 'sha512', 'sha1'})
+ self.assertEqual(deprecated, {'sha1'})
+ warning_lines = self.logger.get_lines_for_level('warning')
+ expected_warning_line = (
+ 'The following digest algorithms are allowed by default but '
+ 'deprecated: sha1. Support will be disabled by default in a '
+ 'future release, and later removed entirely.')
+ self.assertIn(expected_warning_line, warning_lines)
+ self.logger.clear()
+
+ # now with a subset
+ allowed, deprecated = digest.get_allowed_digests(
+ 'sha1 sha256'.split(), self.logger)
+ self.assertEqual(allowed, {'sha256', 'sha1'})
+ self.assertEqual(deprecated, {'sha1'})
+ warning_lines = self.logger.get_lines_for_level('warning')
+ expected_warning_line = (
+ 'The following digest algorithms are configured but '
+ 'deprecated: sha1. Support will be removed in a future release.')
+ self.assertIn(expected_warning_line, warning_lines)
+ self.logger.clear()
+
+ # Now also with an unsupported digest
+ allowed, deprecated = digest.get_allowed_digests(
+ 'sha1 sha256 md5'.split(), self.logger)
+ self.assertEqual(allowed, {'sha256', 'sha1'})
+ self.assertEqual(deprecated, {'sha1'})
+ warning_lines = self.logger.get_lines_for_level('warning')
+ self.assertIn(expected_warning_line, warning_lines)
+ expected_unsupported_warning_line = (
+ 'The following digest algorithms are configured but not '
+ 'supported: md5')
+ self.assertIn(expected_unsupported_warning_line, warning_lines)
+ self.logger.clear()
+
+ # Now with no deprecated digests
+ allowed, deprecated = digest.get_allowed_digests(
+ 'sha256 sha512'.split(), self.logger)
+ self.assertEqual(allowed, {'sha256', 'sha512'})
+ self.assertEqual(deprecated, set())
+ warning_lines = self.logger.get_lines_for_level('warning')
+ self.assertFalse(warning_lines)
+ self.logger.clear()
+
+        # Now with no valid digest at all: only the unsupported md5 is
+        # configured, so a ValueError is raised
+ with self.assertRaises(ValueError):
+ digest.get_allowed_digests(['md5'], self.logger)
+ warning_lines = self.logger.get_lines_for_level('warning')
+ self.assertIn(expected_unsupported_warning_line, warning_lines)
diff --git a/test/unit/common/test_internal_client.py b/test/unit/common/test_internal_client.py
index 2c8682932..d26ef0e2d 100644
--- a/test/unit/common/test_internal_client.py
+++ b/test/unit/common/test_internal_client.py
@@ -25,7 +25,8 @@ from textwrap import dedent
import six
from six.moves import range, zip_longest
from six.moves.urllib.parse import quote, parse_qsl
-from swift.common import exceptions, internal_client, request_helpers, swob
+from swift.common import exceptions, internal_client, request_helpers, swob, \
+ utils
from swift.common.header_key_dict import HeaderKeyDict
from swift.common.storage_policy import StoragePolicy
from swift.common.middleware.proxy_logging import ProxyLoggingMiddleware
@@ -439,6 +440,38 @@ class TestInternalClient(unittest.TestCase):
app=FakeApp(self))
client.make_request('GET', '/', {}, (200,))
+ def test_make_request_clears_txn_id_after_calling_app(self):
+ class InternalClient(internal_client.InternalClient):
+ def __init__(self, test, logger):
+ def fake_app(env, start_response):
+ self.app.logger.txn_id = 'foo'
+ self.app.logger.debug('Inside of request')
+ start_response('200 Ok', [('Content-Length', '0')])
+ return []
+
+ self.test = test
+ self.user_agent = 'some_agent'
+ self.app = fake_app
+ self.app.logger = logger
+ self.request_tries = 1
+ self.use_replication_network = False
+
+ fake_logger = debug_logger()
+ logger = utils.LogAdapter(fake_logger, 'test-server')
+ # Make sure there's no transaction ID set -- other tests may have
+ # polluted the logger
+ logger.txn_id = None
+ logger.debug('Before request')
+ client = InternalClient(self, logger)
+ client.make_request('GET', '/', {}, (200,))
+ logger.debug('After request')
+ self.assertEqual([(args[0], kwargs['extra'].get('txn_id'))
+ for args, kwargs in fake_logger.log_dict['debug']], [
+ ('Before request', None),
+ ('Inside of request', 'foo'),
+ ('After request', None),
+ ])
+
def test_make_request_defaults_replication_network_header(self):
class FakeApp(FakeSwift):
def __init__(self, test):
diff --git a/test/unit/common/test_utils.py b/test/unit/common/test_utils.py
index 69f90cfa0..c99c39a9f 100644
--- a/test/unit/common/test_utils.py
+++ b/test/unit/common/test_utils.py
@@ -1548,6 +1548,7 @@ class TestUtils(unittest.TestCase):
self.handleError(record)
logger = logging.getLogger()
+ logger.setLevel(logging.DEBUG)
handler = CrashyLogger()
logger.addHandler(handler)
@@ -3849,21 +3850,6 @@ cluster_dfw1 = http://dfw1.host/v1/
self.assertEqual(u'abc_%EC%9D%BC%EC%98%81',
utils.quote(u'abc_\uc77c\uc601'))
- def test_get_hmac(self):
- self.assertEqual(
- utils.get_hmac('GET', '/path', 1, 'abc'),
- 'b17f6ff8da0e251737aa9e3ee69a881e3e092e2f')
-
- def test_get_hmac_ip_range(self):
- self.assertEqual(
- utils.get_hmac('GET', '/path', 1, 'abc', ip_range='127.0.0.1'),
- 'b30dde4d2b8562b8496466c3b46b2b9ac5054461')
-
- def test_get_hmac_ip_range_non_binary_type(self):
- self.assertEqual(
- utils.get_hmac(u'GET', u'/path', 1, u'abc', ip_range=u'127.0.0.1'),
- 'b30dde4d2b8562b8496466c3b46b2b9ac5054461')
-
def test_parse_override_options(self):
# When override_<thing> is passed in, it takes precedence.
opts = utils.parse_override_options(
@@ -5447,6 +5433,8 @@ class TestStatsdLogging(unittest.TestCase):
conf = {'log_statsd_host': 'another.host.com'}
with warnings.catch_warnings(record=True) as cm:
+ if six.PY2:
+ getattr(utils, '__warningregistry__', {}).clear()
warnings.resetwarnings()
warnings.simplefilter('always', DeprecationWarning)
logger = utils.get_logger(
@@ -6119,7 +6107,7 @@ class TestStatsdLoggingDelegation(unittest.TestCase):
self.port = self.sock.getsockname()[1]
self.queue = Queue()
self.reader_thread = threading.Thread(target=self.statsd_reader)
- self.reader_thread.setDaemon(1)
+ self.reader_thread.daemon = True
self.reader_thread.start()
def tearDown(self):
diff --git a/test/unit/common/test_wsgi.py b/test/unit/common/test_wsgi.py
index 2471fd2de..731e70388 100644
--- a/test/unit/common/test_wsgi.py
+++ b/test/unit/common/test_wsgi.py
@@ -1604,6 +1604,35 @@ class TestServersPerPortStrategy(unittest.TestCase, CommonTestMixin):
# This is one of the workers for port 6006 that already got reaped.
self.assertIsNone(self.strategy.register_worker_exit(89))
+ def test_servers_per_port_in_container(self):
+ # normally there's no configured ring_ip
+ conf = {
+ 'bind_ip': '1.2.3.4',
+ }
+ self.strategy = wsgi.ServersPerPortStrategy(conf, self.logger, 1)
+ self.assertEqual(self.mock_cache_class.call_args,
+ mock.call('/etc/swift', '1.2.3.4'))
+ self.assertEqual({6006, 6007},
+ self.strategy.cache.all_bind_ports_for_node())
+ ports = {item[1][0] for item in self.strategy.new_worker_socks()}
+ self.assertEqual({6006, 6007}, ports)
+
+ # but in a container we can override it
+ conf = {
+ 'bind_ip': '1.2.3.4',
+ 'ring_ip': '2.3.4.5'
+ }
+ self.strategy = wsgi.ServersPerPortStrategy(conf, self.logger, 1)
+ # N.B. our fake BindPortsCache always returns {6006, 6007}, but a real
+ # BindPortsCache would only return ports for devices that match the ip
+ # address in the ring
+ self.assertEqual(self.mock_cache_class.call_args,
+ mock.call('/etc/swift', '2.3.4.5'))
+ self.assertEqual({6006, 6007},
+ self.strategy.cache.all_bind_ports_for_node())
+ ports = {item[1][0] for item in self.strategy.new_worker_socks()}
+ self.assertEqual({6006, 6007}, ports)
+
def test_shutdown_sockets(self):
pid = 88
for s, i in self.strategy.new_worker_socks():
diff --git a/test/unit/container/test_backend.py b/test/unit/container/test_backend.py
index c1ed3ca6d..206f3f0fa 100644
--- a/test/unit/container/test_backend.py
+++ b/test/unit/container/test_backend.py
@@ -304,7 +304,6 @@ class TestContainerBroker(unittest.TestCase):
# move to sharding state
broker.enable_sharding(next(self.ts))
self.assertTrue(broker.set_sharding_state())
- broker.delete_db(next(self.ts).internal)
self.assertTrue(broker.is_deleted())
# check object in retiring db is considered
@@ -3703,16 +3702,20 @@ class TestContainerBroker(unittest.TestCase):
@with_tempdir
def test_create_broker(self, tempdir):
- broker = ContainerBroker.create_broker(tempdir, 0, 'a', 'c')
+ broker, init = ContainerBroker.create_broker(tempdir, 0, 'a', 'c')
hsh = hash_path('a', 'c')
expected_path = os.path.join(
tempdir, 'containers', '0', hsh[-3:], hsh, hsh + '.db')
self.assertEqual(expected_path, broker.db_file)
self.assertTrue(os.path.isfile(expected_path))
+ self.assertTrue(init)
+ broker, init = ContainerBroker.create_broker(tempdir, 0, 'a', 'c')
+ self.assertEqual(expected_path, broker.db_file)
+ self.assertFalse(init)
ts = Timestamp.now()
- broker = ContainerBroker.create_broker(tempdir, 0, 'a', 'c1',
- put_timestamp=ts.internal)
+ broker, init = ContainerBroker.create_broker(tempdir, 0, 'a', 'c1',
+ put_timestamp=ts.internal)
hsh = hash_path('a', 'c1')
expected_path = os.path.join(
tempdir, 'containers', '0', hsh[-3:], hsh, hsh + '.db')
@@ -3720,15 +3723,17 @@ class TestContainerBroker(unittest.TestCase):
self.assertTrue(os.path.isfile(expected_path))
self.assertEqual(ts.internal, broker.get_info()['put_timestamp'])
self.assertEqual(0, broker.get_info()['storage_policy_index'])
+ self.assertTrue(init)
epoch = Timestamp.now()
- broker = ContainerBroker.create_broker(tempdir, 0, 'a', 'c3',
- epoch=epoch)
+ broker, init = ContainerBroker.create_broker(tempdir, 0, 'a', 'c3',
+ epoch=epoch)
hsh = hash_path('a', 'c3')
expected_path = os.path.join(
tempdir, 'containers', '0', hsh[-3:],
hsh, '%s_%s.db' % (hsh, epoch.internal))
self.assertEqual(expected_path, broker.db_file)
+ self.assertTrue(init)
@with_tempdir
def test_pending_file_name(self, tempdir):
@@ -4755,6 +4760,11 @@ class TestContainerBroker(unittest.TestCase):
self.assertTrue(os.path.exists(new_db_path))
self.assertEqual([], broker.get_objects())
self.assertEqual(objects, broker.get_brokers()[0].get_objects())
+ self.assertEqual(broker.get_reconciler_sync(), -1)
+ info = broker.get_info()
+ if info.get('x_container_sync_point1'):
+ self.assertEqual(info['x_container_sync_point1'], -1)
+ self.assertEqual(info['x_container_sync_point2'], -1)
check_sharding_state(broker)
# to confirm we're definitely looking at the shard db
@@ -4842,6 +4852,111 @@ class TestContainerBroker(unittest.TestCase):
do_revive_shard_delete(shard_ranges)
@with_tempdir
+ def test_set_sharding_state(self, tempdir):
+ db_path = os.path.join(
+ tempdir, 'containers', 'part', 'suffix', 'hash', 'container.db')
+ broker = ContainerBroker(db_path, account='a', container='c',
+ logger=debug_logger())
+ broker.initialize(next(self.ts).internal, 0)
+ broker.merge_items([{'name': 'obj_%d' % i,
+ 'created_at': next(self.ts).normal,
+ 'content_type': 'text/plain',
+ 'etag': 'etag_%d' % i,
+ 'size': 1024 * i,
+ 'deleted': 0,
+ 'storage_policy_index': 0,
+ } for i in range(1, 6)])
+ broker.set_x_container_sync_points(1, 2)
+ broker.update_reconciler_sync(3)
+ self.assertEqual(3, broker.get_reconciler_sync())
+ broker.reported(next(self.ts).internal, next(self.ts).internal,
+ next(self.ts).internal, next(self.ts).internal)
+ epoch = next(self.ts)
+ broker.enable_sharding(epoch)
+ self.assertEqual(UNSHARDED, broker.get_db_state())
+ self.assertFalse(broker.is_deleted())
+ retiring_info = broker.get_info()
+ self.assertEqual(1, len(broker.db_files))
+
+ self.assertTrue(broker.set_sharding_state())
+ broker = ContainerBroker(db_path, account='a', container='c',
+ logger=debug_logger())
+ self.assertEqual(SHARDING, broker.get_db_state())
+ fresh_info = broker.get_info()
+ for key in ('reported_put_timestamp', 'reported_delete_timestamp'):
+ retiring_info.pop(key)
+ self.assertEqual('0', fresh_info.pop(key), key)
+ for key in ('reported_object_count', 'reported_bytes_used'):
+ retiring_info.pop(key)
+ self.assertEqual(0, fresh_info.pop(key), key)
+ self.assertNotEqual(retiring_info.pop('id'), fresh_info.pop('id'))
+ self.assertNotEqual(retiring_info.pop('hash'), fresh_info.pop('hash'))
+ self.assertNotEqual(retiring_info.pop('x_container_sync_point1'),
+ fresh_info.pop('x_container_sync_point1'))
+ self.assertNotEqual(retiring_info.pop('x_container_sync_point2'),
+ fresh_info.pop('x_container_sync_point2'))
+ self.assertEqual(-1, broker.get_reconciler_sync())
+ self.assertEqual('unsharded', retiring_info.pop('db_state'))
+ self.assertEqual('sharding', fresh_info.pop('db_state'))
+ self.assertEqual(retiring_info, fresh_info)
+ self.assertFalse(broker.is_deleted())
+ self.assertEqual(2, len(broker.db_files))
+ self.assertEqual(db_path, broker.db_files[0])
+ fresh_db_path = os.path.join(
+ tempdir, 'containers', 'part', 'suffix', 'hash',
+ 'container_%s.db' % epoch.internal)
+ self.assertEqual(fresh_db_path, broker.db_files[1])
+
+ @with_tempdir
+ def test_set_sharding_state_deleted(self, tempdir):
+ db_path = os.path.join(
+ tempdir, 'containers', 'part', 'suffix', 'hash', 'container.db')
+ broker = ContainerBroker(db_path, account='a', container='c',
+ logger=debug_logger())
+ broker.initialize(next(self.ts).internal, 0)
+ broker.set_x_container_sync_points(1, 2)
+ broker.update_reconciler_sync(3)
+ self.assertEqual(3, broker.get_reconciler_sync())
+ broker.reported(next(self.ts).internal, next(self.ts).internal,
+ next(self.ts).internal, next(self.ts).internal)
+ epoch = next(self.ts)
+ broker.enable_sharding(epoch)
+ self.assertEqual(UNSHARDED, broker.get_db_state())
+ broker.delete_db(next(self.ts).internal)
+ self.assertTrue(broker.is_deleted())
+ retiring_info = broker.get_info()
+ self.assertEqual("DELETED", retiring_info['status'])
+ self.assertEqual(1, len(broker.db_files))
+
+ self.assertTrue(broker.set_sharding_state())
+ broker = ContainerBroker(db_path, account='a', container='c',
+ logger=debug_logger())
+ self.assertEqual(SHARDING, broker.get_db_state())
+ fresh_info = broker.get_info()
+ for key in ('reported_put_timestamp', 'reported_delete_timestamp'):
+ retiring_info.pop(key)
+ self.assertEqual('0', fresh_info.pop(key), key)
+ for key in ('reported_object_count', 'reported_bytes_used'):
+ retiring_info.pop(key)
+ self.assertEqual(0, fresh_info.pop(key), key)
+ self.assertNotEqual(retiring_info.pop('id'), fresh_info.pop('id'))
+ self.assertNotEqual(retiring_info.pop('x_container_sync_point1'),
+ fresh_info.pop('x_container_sync_point1'))
+ self.assertNotEqual(retiring_info.pop('x_container_sync_point2'),
+ fresh_info.pop('x_container_sync_point2'))
+ self.assertEqual(-1, broker.get_reconciler_sync())
+ self.assertEqual('unsharded', retiring_info.pop('db_state'))
+ self.assertEqual('sharding', fresh_info.pop('db_state'))
+ self.assertEqual(retiring_info, fresh_info)
+ self.assertTrue(broker.is_deleted())
+ self.assertEqual(2, len(broker.db_files))
+ self.assertEqual(db_path, broker.db_files[0])
+ fresh_db_path = os.path.join(
+ tempdir, 'containers', 'part', 'suffix', 'hash',
+ 'container_%s.db' % epoch.internal)
+ self.assertEqual(fresh_db_path, broker.db_files[1])
+
+ @with_tempdir
def test_set_sharding_state_errors(self, tempdir):
db_path = os.path.join(
tempdir, 'containers', 'part', 'suffix', 'hash', 'container.db')
@@ -4873,7 +4988,9 @@ class TestContainerBroker(unittest.TestCase):
res = broker.set_sharding_state()
self.assertFalse(res)
lines = broker.logger.get_lines_for_level('error')
- self.assertIn('Failed to set matching', lines[0])
+ self.assertIn(
+ 'Failed to sync the container_stat table/view with the fresh '
+ 'database', lines[0])
self.assertFalse(lines[1:])
@with_tempdir
diff --git a/test/unit/container/test_replicator.py b/test/unit/container/test_replicator.py
index 31fccd17d..37fb3304f 100644
--- a/test/unit/container/test_replicator.py
+++ b/test/unit/container/test_replicator.py
@@ -883,6 +883,10 @@ class TestReplicatorSync(test_db_replicator.TestReplicatorSync):
daemon = self._run_once(node)
# push to remote, and third node was missing (also maybe reconciler)
self.assertTrue(2 < daemon.stats['rsync'] <= 3, daemon.stats['rsync'])
+ self.assertEqual(
+ 1, self.logger.get_stats_counts().get('reconciler_db_created'))
+ self.assertFalse(
+ self.logger.get_stats_counts().get('reconciler_db_exists'))
# grab the rsynced instance of remote_broker
remote_broker = self._get_broker('a', 'c', node_index=1)
@@ -902,7 +906,12 @@ class TestReplicatorSync(test_db_replicator.TestReplicatorSync):
# and we should have also enqueued these rows in a single reconciler,
# since we forced the object timestamps to be in the same hour.
+ self.logger.clear()
reconciler = daemon.get_reconciler_broker(misplaced[0]['created_at'])
+ self.assertFalse(
+ self.logger.get_stats_counts().get('reconciler_db_created'))
+ self.assertEqual(
+ 1, self.logger.get_stats_counts().get('reconciler_db_exists'))
# but it may not be on the same node as us anymore though...
reconciler = self._get_broker(reconciler.account,
reconciler.container, node_index=0)
@@ -2635,6 +2644,75 @@ class TestReplicatorSync(test_db_replicator.TestReplicatorSync):
os.path.basename(rsync_calls[0][1]))
self.assertFalse(rsync_calls[1:])
+ @mock.patch('swift.common.ring.ring.Ring.get_part_nodes', return_value=[])
+ def test_find_local_handoff_for_part(self, mock_part_nodes):
+
+ with mock.patch(
+ 'swift.common.db_replicator.ring.Ring',
+ return_value=self._ring):
+ daemon = replicator.ContainerReplicator({}, logger=self.logger)
+
+ # First let's assume we find a primary node
+ ring_node1, ring_node2, ring_node3 = daemon.ring.devs[-3:]
+ mock_part_nodes.return_value = [ring_node1, ring_node2]
+ daemon._local_device_ids = {ring_node1['id']: ring_node1,
+ ring_node3['id']: ring_node3}
+ node = daemon.find_local_handoff_for_part(0)
+ self.assertEqual(node['id'], ring_node1['id'])
+
+ # And if we can't find one from the primaries get *some* local device
+ mock_part_nodes.return_value = []
+ daemon._local_device_ids = {ring_node3['id']: ring_node3}
+ node = daemon.find_local_handoff_for_part(0)
+ self.assertEqual(node['id'], ring_node3['id'])
+
+ # if there are more then 1 local_dev_id it'll randomly pick one, but
+ # not a zero-weight device
+ ring_node3['weight'] = 0
+ selected_node_ids = set()
+ local_dev_ids = {dev['id']: dev for dev in daemon.ring.devs[-3:]}
+ daemon._local_device_ids = local_dev_ids
+ for _ in range(15):
+ node = daemon.find_local_handoff_for_part(0)
+ self.assertIn(node['id'], local_dev_ids)
+ selected_node_ids.add(node['id'])
+ if len(selected_node_ids) == 3:
+ break # unexpected
+ self.assertEqual(len(selected_node_ids), 2)
+ self.assertEqual([1, 1], [local_dev_ids[dev_id]['weight']
+ for dev_id in selected_node_ids])
+ warning_lines = self.logger.get_lines_for_level('warning')
+ self.assertFalse(warning_lines)
+
+ # ...unless all devices have zero-weight
+ ring_node3['weight'] = 0
+ ring_node2['weight'] = 0
+ selected_node_ids = set()
+ local_dev_ids = {dev['id']: dev for dev in daemon.ring.devs[-2:]}
+ daemon._local_device_ids = local_dev_ids
+ for _ in range(15):
+ self.logger.clear()
+ node = daemon.find_local_handoff_for_part(0)
+ self.assertIn(node['id'], local_dev_ids)
+ selected_node_ids.add(node['id'])
+ if len(selected_node_ids) == 2:
+ break # expected
+ self.assertEqual(len(selected_node_ids), 2)
+ self.assertEqual([0, 0], [local_dev_ids[dev_id]['weight']
+ for dev_id in selected_node_ids])
+ warning_lines = self.logger.get_lines_for_level('warning')
+ self.assertEqual(1, len(warning_lines), warning_lines)
+ self.assertIn(
+ 'Could not find a non-zero weight device for handoff partition',
+ warning_lines[0])
+
+ # If there are also no local_dev_ids, then we'll get the RuntimeError
+ daemon._local_device_ids = {}
+ with self.assertRaises(RuntimeError) as dev_err:
+ daemon.find_local_handoff_for_part(0)
+ expected_error_string = 'Cannot find local handoff; no local devices'
+ self.assertEqual(str(dev_err.exception), expected_error_string)
+
if __name__ == '__main__':
unittest.main()
diff --git a/test/unit/container/test_sharder.py b/test/unit/container/test_sharder.py
index 1cdd6d250..1ad829f10 100644
--- a/test/unit/container/test_sharder.py
+++ b/test/unit/container/test_sharder.py
@@ -42,7 +42,8 @@ from swift.container.sharder import ContainerSharder, sharding_enabled, \
CleavingContext, DEFAULT_SHARDER_CONF, finalize_shrinking, \
find_shrinking_candidates, process_compactible_shard_sequences, \
find_compactible_shard_sequences, is_shrinking_candidate, \
- is_sharding_candidate, find_paths, rank_paths, ContainerSharderConf
+ is_sharding_candidate, find_paths, rank_paths, ContainerSharderConf, \
+ find_paths_with_gaps
from swift.common.utils import ShardRange, Timestamp, hash_path, \
encode_timestamps, parse_db_filename, quorum_size, Everything, md5
from test import annotate_failure
@@ -67,7 +68,7 @@ class BaseTestSharder(unittest.TestCase):
[dict(sr) for sr in actual])
def _make_broker(self, account='a', container='c', epoch=None,
- device='sda', part=0, hash_=None):
+ device='sda', part=0, hash_=None, put_timestamp=None):
hash_ = hash_ or md5(
container.encode('utf-8'), usedforsecurity=False).hexdigest()
datadir = os.path.join(
@@ -80,7 +81,7 @@ class BaseTestSharder(unittest.TestCase):
broker = ContainerBroker(
db_file, account=account, container=container,
logger=self.logger)
- broker.initialize()
+ broker.initialize(put_timestamp=put_timestamp)
return broker
def _make_old_style_sharding_broker(self, account='a', container='c',
@@ -431,13 +432,11 @@ class TestSharder(BaseTestSharder):
sharder._increment_stat('visited', 'failure')
sharder._increment_stat('visited', 'completed')
sharder._increment_stat('cleaved', 'success')
- sharder._increment_stat('scanned', 'found', step=4)
expected = {'success': 2,
'failure': 1,
'completed': 1}
self._assert_stats(expected, sharder, 'visited')
self._assert_stats({'success': 1}, sharder, 'cleaved')
- self._assert_stats({'found': 4}, sharder, 'scanned')
def test_increment_stats_with_statsd(self):
with self._mock_sharder() as sharder:
@@ -450,11 +449,23 @@ class TestSharder(BaseTestSharder):
'failure': 2,
'completed': 1}
self._assert_stats(expected, sharder, 'visited')
- counts = sharder.logger.get_increment_counts()
+ counts = sharder.logger.get_stats_counts()
self.assertEqual(2, counts.get('visited_success'))
self.assertEqual(1, counts.get('visited_failure'))
self.assertIsNone(counts.get('visited_completed'))
+ def test_update_stat(self):
+ with self._mock_sharder() as sharder:
+ sharder._update_stat('scanned', 'found', step=4)
+ self._assert_stats({'found': 4}, sharder, 'scanned')
+ with self._mock_sharder() as sharder:
+ sharder._update_stat('scanned', 'found', step=4)
+ sharder._update_stat('misplaced', 'placed', step=456, statsd=True)
+ self._assert_stats({'found': 4}, sharder, 'scanned')
+ self._assert_stats({'placed': 456}, sharder, 'misplaced')
+ self.assertEqual({'misplaced_placed': 456},
+ sharder.logger.get_stats_counts())
+
def test_run_forever(self):
conf = {'recon_cache_path': self.tempdir,
'devices': self.tempdir}
@@ -681,14 +692,14 @@ class TestSharder(BaseTestSharder):
with mock.patch('eventlet.sleep'), mock.patch.object(
sharder, '_process_broker'
) as mock_process_broker:
- sharder._local_device_ids = {'stale_node_id'}
+ sharder._local_device_ids = {'stale_node_id': {}}
sharder._one_shard_cycle(Everything(), Everything())
lines = sharder.logger.get_lines_for_level('warning')
expected = 'Skipping %s as it is not mounted' % \
unmounted_dev['device']
self.assertIn(expected, lines[0])
- self.assertEqual(device_ids, sharder._local_device_ids)
+ self.assertEqual(device_ids, set(sharder._local_device_ids.keys()))
self.assertEqual(2, mock_process_broker.call_count)
processed_paths = [call[0][0].path
for call in mock_process_broker.call_args_list]
@@ -740,14 +751,14 @@ class TestSharder(BaseTestSharder):
with mock.patch('eventlet.sleep'), mock.patch.object(
sharder, '_process_broker', side_effect=mock_processing
) as mock_process_broker:
- sharder._local_device_ids = {'stale_node_id'}
+ sharder._local_device_ids = {'stale_node_id': {}}
sharder._one_shard_cycle(Everything(), Everything())
lines = sharder.logger.get_lines_for_level('warning')
expected = 'Skipping %s as it is not mounted' % \
unmounted_dev['device']
self.assertIn(expected, lines[0])
- self.assertEqual(device_ids, sharder._local_device_ids)
+ self.assertEqual(device_ids, set(sharder._local_device_ids.keys()))
self.assertEqual(3, mock_process_broker.call_count)
processed_paths = [call[0][0].path
for call in mock_process_broker.call_args_list]
@@ -798,10 +809,10 @@ class TestSharder(BaseTestSharder):
with mock.patch('eventlet.sleep'), mock.patch.object(
sharder, '_process_broker'
) as mock_process_broker:
- sharder._local_device_ids = {999}
+ sharder._local_device_ids = {999: {}}
sharder._one_shard_cycle(Everything(), Everything())
- self.assertEqual(device_ids, sharder._local_device_ids)
+ self.assertEqual(device_ids, set(sharder._local_device_ids.keys()))
self.assertEqual(3, mock_process_broker.call_count)
processed_paths = [call[0][0].path
for call in mock_process_broker.call_args_list]
@@ -825,7 +836,7 @@ class TestSharder(BaseTestSharder):
with mock.patch('eventlet.sleep'), mock.patch.object(
sharder, '_process_broker'
) as mock_process_broker:
- sharder._local_device_ids = {999}
+ sharder._local_device_ids = {999: {}}
sharder._one_shard_cycle(Everything(), Everything())
expected_in_progress_stats = {
@@ -865,7 +876,7 @@ class TestSharder(BaseTestSharder):
with mock.patch('eventlet.sleep'), \
mock.patch.object(sharder, '_process_broker') \
as mock_process_broker, mock_timestamp_now(ts_now):
- sharder._local_device_ids = {999}
+ sharder._local_device_ids = {999: {}}
sharder._one_shard_cycle(Everything(), Everything())
expected_in_progress_stats = {
@@ -895,7 +906,7 @@ class TestSharder(BaseTestSharder):
with mock.patch('eventlet.sleep'), \
mock.patch.object(sharder, '_process_broker') \
as mock_process_broker, mock_timestamp_now(ts_now):
- sharder._local_device_ids = {999}
+ sharder._local_device_ids = {999: {}}
sharder._one_shard_cycle(Everything(), Everything())
self._assert_stats(
expected_in_progress_stats, sharder, 'sharding_in_progress')
@@ -907,7 +918,7 @@ class TestSharder(BaseTestSharder):
with mock.patch('eventlet.sleep'), \
mock.patch.object(sharder, '_process_broker') \
as mock_process_broker, mock_timestamp_now(ts_now):
- sharder._local_device_ids = {999}
+ sharder._local_device_ids = {999: {}}
sharder._one_shard_cycle(Everything(), Everything())
expected_in_progress_stats = {
@@ -1004,7 +1015,8 @@ class TestSharder(BaseTestSharder):
'swift.common.db_replicator.ring.Ring',
return_value=fake_ring):
sharder = ContainerSharder(conf, logger=self.logger)
- sharder._local_device_ids = {0, 1, 2}
+ sharder._local_device_ids = {dev['id']: dev
+ for dev in fake_ring.devs}
sharder._replicate_object = mock.MagicMock(
return_value=(True, [True] * sharder.ring.replica_count))
yield sharder
@@ -1242,11 +1254,16 @@ class TestSharder(BaseTestSharder):
self.assertFalse(sharder._cleave(broker))
expected = {'attempted': 1, 'success': 1, 'failure': 0,
- 'min_time': mock.ANY, 'max_time': mock.ANY}
+ 'min_time': mock.ANY, 'max_time': mock.ANY,
+ 'db_created': 1, 'db_exists': 0}
stats = self._assert_stats(expected, sharder, 'cleaved')
self.assertIsInstance(stats['min_time'], float)
self.assertIsInstance(stats['max_time'], float)
self.assertLessEqual(stats['min_time'], stats['max_time'])
+ self.assertEqual(
+ 1, sharder.logger.get_stats_counts().get('cleaved_db_created'))
+ self.assertFalse(
+ sharder.logger.get_stats_counts().get('cleaved_db_exists'))
self.assertEqual(SHARDING, broker.get_db_state())
sharder._replicate_object.assert_called_once_with(
0, expected_shard_dbs[0], 0)
@@ -1304,6 +1321,15 @@ class TestSharder(BaseTestSharder):
sharder._replicate_object.assert_called_once_with(
0, expected_shard_dbs[1], 0)
+ expected = {'attempted': 1, 'success': 0, 'failure': 1,
+ 'min_time': mock.ANY, 'max_time': mock.ANY,
+ 'db_created': 1, 'db_exists': 0}
+ self._assert_stats(expected, sharder, 'cleaved')
+ self.assertEqual(
+ 1, sharder.logger.get_stats_counts().get('cleaved_db_created'))
+ self.assertFalse(
+ sharder.logger.get_stats_counts().get('cleaved_db_exists'))
+
# cleaving state is unchanged
updated_shard_ranges = broker.get_shard_ranges()
self.assertEqual(4, len(updated_shard_ranges))
@@ -1332,11 +1358,16 @@ class TestSharder(BaseTestSharder):
self.assertFalse(sharder._cleave(broker))
expected = {'attempted': 2, 'success': 2, 'failure': 0,
- 'min_time': mock.ANY, 'max_time': mock.ANY}
+ 'min_time': mock.ANY, 'max_time': mock.ANY,
+ 'db_created': 1, 'db_exists': 1}
stats = self._assert_stats(expected, sharder, 'cleaved')
self.assertIsInstance(stats['min_time'], float)
self.assertIsInstance(stats['max_time'], float)
self.assertLessEqual(stats['min_time'], stats['max_time'])
+ self.assertEqual(
+ 1, sharder.logger.get_stats_counts().get('cleaved_db_created'))
+ self.assertEqual(
+ 1, sharder.logger.get_stats_counts().get('cleaved_db_exists'))
self.assertEqual(SHARDING, broker.get_db_state())
sharder._replicate_object.assert_has_calls(
@@ -1393,11 +1424,16 @@ class TestSharder(BaseTestSharder):
self.assertFalse(sharder._cleave(broker))
expected = {'attempted': 1, 'success': 1, 'failure': 0,
- 'min_time': mock.ANY, 'max_time': mock.ANY}
+ 'min_time': mock.ANY, 'max_time': mock.ANY,
+ 'db_created': 1, 'db_exists': 0}
stats = self._assert_stats(expected, sharder, 'cleaved')
self.assertIsInstance(stats['min_time'], float)
self.assertIsInstance(stats['max_time'], float)
self.assertLessEqual(stats['min_time'], stats['max_time'])
+ self.assertEqual(
+ 1, sharder.logger.get_stats_counts().get('cleaved_db_created'))
+ self.assertFalse(
+ sharder.logger.get_stats_counts().get('cleaved_db_exists'))
self.assertEqual(SHARDING, broker.get_db_state())
sharder._replicate_object.assert_called_once_with(
@@ -1461,11 +1497,16 @@ class TestSharder(BaseTestSharder):
self.assertTrue(sharder._cleave(broker))
expected = {'attempted': 1, 'success': 1, 'failure': 0,
- 'min_time': mock.ANY, 'max_time': mock.ANY}
+ 'min_time': mock.ANY, 'max_time': mock.ANY,
+ 'db_created': 1, 'db_exists': 0}
stats = self._assert_stats(expected, sharder, 'cleaved')
self.assertIsInstance(stats['min_time'], float)
self.assertIsInstance(stats['max_time'], float)
self.assertLessEqual(stats['min_time'], stats['max_time'])
+ self.assertEqual(
+ 1, sharder.logger.get_stats_counts().get('cleaved_db_created'))
+ self.assertFalse(
+ sharder.logger.get_stats_counts().get('cleaved_db_exists'))
sharder._replicate_object.assert_called_once_with(
0, expected_shard_dbs[4], 0)
@@ -1662,6 +1703,9 @@ class TestSharder(BaseTestSharder):
self.assertEqual(cleaving_context.ranges_todo, 0)
self.assertTrue(cleaving_context.cleaving_done)
+ self.assertEqual([ShardRange.CLEAVED] * 3,
+ [sr.state for sr in broker.get_shard_ranges()])
+
def test_cleave_root_empty_db_with_pre_existing_shard_db_handoff(self):
broker = self._make_broker()
broker.enable_sharding(Timestamp.now())
@@ -1694,6 +1738,10 @@ class TestSharder(BaseTestSharder):
self.assertEqual(cleaving_context.ranges_todo, 2)
self.assertFalse(cleaving_context.cleaving_done)
+ self.assertEqual(
+ [ShardRange.CLEAVED, ShardRange.CREATED, ShardRange.CREATED],
+ [sr.state for sr in broker.get_shard_ranges()])
+
def test_cleave_shard_range_no_own_shard_range(self):
# create an unsharded broker that has shard ranges but no
# own_shard_range, verify that it does not cleave...
@@ -3289,10 +3337,19 @@ class TestSharder(BaseTestSharder):
sharder._move_misplaced_objects(broker)
sharder._replicate_object.assert_not_called()
expected_stats = {'attempted': 1, 'success': 1, 'failure': 0,
- 'found': 0, 'placed': 0, 'unplaced': 0}
+ 'found': 0, 'placed': 0, 'unplaced': 0,
+ 'db_created': 0, 'db_exists': 0}
self._assert_stats(expected_stats, sharder, 'misplaced')
self.assertFalse(
- sharder.logger.get_increment_counts().get('misplaced_found'))
+ sharder.logger.get_stats_counts().get('misplaced_found'))
+ self.assertFalse(
+ sharder.logger.get_stats_counts().get('misplaced_placed'))
+ self.assertFalse(
+ sharder.logger.get_stats_counts().get('misplaced_unplaced'))
+ self.assertFalse(
+ sharder.logger.get_stats_counts().get('misplaced_db_created'))
+ self.assertFalse(
+ sharder.logger.get_stats_counts().get('misplaced_db_exists'))
# sharding - no misplaced objects
self.assertTrue(broker.set_sharding_state())
@@ -3301,7 +3358,15 @@ class TestSharder(BaseTestSharder):
sharder._replicate_object.assert_not_called()
self._assert_stats(expected_stats, sharder, 'misplaced')
self.assertFalse(
- sharder.logger.get_increment_counts().get('misplaced_found'))
+ sharder.logger.get_stats_counts().get('misplaced_found'))
+ self.assertFalse(
+ sharder.logger.get_stats_counts().get('misplaced_placed'))
+ self.assertFalse(
+ sharder.logger.get_stats_counts().get('misplaced_unplaced'))
+ self.assertFalse(
+ sharder.logger.get_stats_counts().get('misplaced_db_created'))
+ self.assertFalse(
+ sharder.logger.get_stats_counts().get('misplaced_db_exists'))
# pretend we cleaved up to end of second shard range
context = CleavingContext.load(broker)
@@ -3312,7 +3377,15 @@ class TestSharder(BaseTestSharder):
sharder._replicate_object.assert_not_called()
self._assert_stats(expected_stats, sharder, 'misplaced')
self.assertFalse(
- sharder.logger.get_increment_counts().get('misplaced_found'))
+ sharder.logger.get_stats_counts().get('misplaced_found'))
+ self.assertFalse(
+ sharder.logger.get_stats_counts().get('misplaced_placed'))
+ self.assertFalse(
+ sharder.logger.get_stats_counts().get('misplaced_unplaced'))
+ self.assertFalse(
+ sharder.logger.get_stats_counts().get('misplaced_db_created'))
+ self.assertFalse(
+ sharder.logger.get_stats_counts().get('misplaced_db_exists'))
# sharding - misplaced objects
for obj in objects:
@@ -3325,7 +3398,15 @@ class TestSharder(BaseTestSharder):
sharder._replicate_object.assert_not_called()
self._assert_stats(expected_stats, sharder, 'misplaced')
self.assertFalse(
- sharder.logger.get_increment_counts().get('misplaced_found'))
+ sharder.logger.get_stats_counts().get('misplaced_found'))
+ self.assertFalse(
+ sharder.logger.get_stats_counts().get('misplaced_placed'))
+ self.assertFalse(
+ sharder.logger.get_stats_counts().get('misplaced_unplaced'))
+ self.assertFalse(
+ sharder.logger.get_stats_counts().get('misplaced_db_created'))
+ self.assertFalse(
+ sharder.logger.get_stats_counts().get('misplaced_db_exists'))
self.assertFalse(os.path.exists(expected_shard_dbs[0]))
self.assertFalse(os.path.exists(expected_shard_dbs[1]))
self.assertFalse(os.path.exists(expected_shard_dbs[2]))
@@ -3341,10 +3422,18 @@ class TestSharder(BaseTestSharder):
sharder._replicate_object.assert_called_once_with(
0, expected_shard_dbs[1], 0)
expected_stats = {'attempted': 1, 'success': 1, 'failure': 0,
- 'found': 1, 'placed': 2, 'unplaced': 0}
+ 'found': 1, 'placed': 2, 'unplaced': 0,
+ 'db_created': 1, 'db_exists': 0}
self._assert_stats(expected_stats, sharder, 'misplaced')
self.assertEqual(
- 1, sharder.logger.get_increment_counts()['misplaced_found'])
+ 1, sharder.logger.get_stats_counts()['misplaced_found'])
+ self.assertEqual(
+ 2, sharder.logger.get_stats_counts()['misplaced_placed'])
+ self.assertEqual(
+ 1, sharder.logger.get_stats_counts()['misplaced_db_created'])
+ self.assertFalse(
+ sharder.logger.get_stats_counts().get('misplaced_db_exists'))
+
# check misplaced objects were moved
self._check_objects(objects[:2], expected_shard_dbs[1])
# ... and removed from the source db
@@ -3371,7 +3460,8 @@ class TestSharder(BaseTestSharder):
with self._mock_sharder(conf={'cleave_row_batch_size': 2}) as sharder:
sharder._move_misplaced_objects(broker)
expected_stats = {'attempted': 1, 'success': 1, 'failure': 0,
- 'found': 1, 'placed': 4, 'unplaced': 0}
+ 'found': 1, 'placed': 4, 'unplaced': 0,
+ 'db_created': 3, 'db_exists': 0}
self._assert_stats(expected_stats, sharder, 'misplaced')
sharder._replicate_object.assert_has_calls(
[mock.call(0, db, 0) for db in expected_shard_dbs[2:4]],
@@ -3379,7 +3469,13 @@ class TestSharder(BaseTestSharder):
)
self._assert_stats(expected_stats, sharder, 'misplaced')
self.assertEqual(
- 1, sharder.logger.get_increment_counts()['misplaced_found'])
+ 1, sharder.logger.get_stats_counts()['misplaced_found'])
+ self.assertEqual(
+ 4, sharder.logger.get_stats_counts()['misplaced_placed'])
+ self.assertEqual(
+ 3, sharder.logger.get_stats_counts()['misplaced_db_created'])
+ self.assertFalse(
+ sharder.logger.get_stats_counts().get('misplaced_db_exists'))
# check misplaced objects were moved
self._check_objects(new_objects, expected_shard_dbs[0])
@@ -3396,10 +3492,17 @@ class TestSharder(BaseTestSharder):
sharder._move_misplaced_objects(broker)
sharder._replicate_object.assert_not_called()
expected_stats = {'attempted': 1, 'success': 1, 'failure': 0,
- 'found': 0, 'placed': 0, 'unplaced': 0}
+ 'found': 0, 'placed': 0, 'unplaced': 0,
+ 'db_created': 0, 'db_exists': 0}
self._assert_stats(expected_stats, sharder, 'misplaced')
self.assertFalse(
- sharder.logger.get_increment_counts().get('misplaced_found'))
+ sharder.logger.get_stats_counts().get('misplaced_found'))
+ self.assertFalse(
+ sharder.logger.get_stats_counts().get('misplaced_placed'))
+ self.assertFalse(
+ sharder.logger.get_stats_counts().get('misplaced_db_created'))
+ self.assertFalse(
+ sharder.logger.get_stats_counts().get('misplaced_db_exists'))
# and then more misplaced updates arrive
newer_objects = [
@@ -3420,11 +3523,21 @@ class TestSharder(BaseTestSharder):
for db in (expected_shard_dbs[0], expected_shard_dbs[-1])],
any_order=True
)
+ # shard broker for first shard range was already created but not
+ # removed due to mocked _replicate_object so expect one created and one
+ # existed db stat...
expected_stats = {'attempted': 1, 'success': 1, 'failure': 0,
- 'found': 1, 'placed': 3, 'unplaced': 0}
+ 'found': 1, 'placed': 3, 'unplaced': 0,
+ 'db_created': 1, 'db_exists': 1}
self._assert_stats(expected_stats, sharder, 'misplaced')
self.assertEqual(
- 1, sharder.logger.get_increment_counts()['misplaced_found'])
+ 1, sharder.logger.get_stats_counts()['misplaced_found'])
+ self.assertEqual(
+ 3, sharder.logger.get_stats_counts()['misplaced_placed'])
+ self.assertEqual(
+ 1, sharder.logger.get_stats_counts()['misplaced_db_created'])
+ self.assertEqual(
+ 1, sharder.logger.get_stats_counts()['misplaced_db_exists'])
# check new misplaced objects were moved
self._check_objects(newer_objects[:1] + new_objects,
@@ -3615,7 +3728,7 @@ class TestSharder(BaseTestSharder):
'found': 1, 'placed': 4, 'unplaced': 0}
self._assert_stats(expected_stats, sharder, 'misplaced')
self.assertEqual(
- 1, sharder.logger.get_increment_counts()['misplaced_found'])
+ 1, sharder.logger.get_stats_counts()['misplaced_found'])
# check misplaced objects were moved
self._check_objects(objects[:2], expected_dbs[1])
self._check_objects(objects[2:3], expected_dbs[2])
@@ -3648,7 +3761,7 @@ class TestSharder(BaseTestSharder):
'placed': 4, 'unplaced': 0}
self._assert_stats(expected_stats, sharder, 'misplaced')
self.assertEqual(
- 1, sharder.logger.get_increment_counts()['misplaced_found'])
+ 1, sharder.logger.get_stats_counts()['misplaced_found'])
# check misplaced objects were moved to shard dbs
self._check_objects(objects[:2], expected_dbs[1])
self._check_objects(objects[2:3], expected_dbs[2])
@@ -3681,7 +3794,7 @@ class TestSharder(BaseTestSharder):
'placed': 4, 'unplaced': 0}
self._assert_stats(expected_stats, sharder, 'misplaced')
self.assertEqual(
- 1, sharder.logger.get_increment_counts()['misplaced_found'])
+ 1, sharder.logger.get_stats_counts()['misplaced_found'])
# check misplaced objects were moved to shard dbs
self._check_objects(objects[:2], expected_dbs[1])
self._check_objects(objects[2:3], expected_dbs[2])
@@ -3714,7 +3827,7 @@ class TestSharder(BaseTestSharder):
'placed': 4, 'unplaced': 0}
self._assert_stats(expected_stats, sharder, 'misplaced')
self.assertEqual(
- 1, sharder.logger.get_increment_counts()['misplaced_found'])
+ 1, sharder.logger.get_stats_counts()['misplaced_found'])
# check misplaced objects were moved to shard dbs
self._check_objects(objects[:2], expected_dbs[1])
self._check_objects(objects[2:3], expected_dbs[2])
@@ -3767,8 +3880,16 @@ class TestSharder(BaseTestSharder):
expected_stats = {'attempted': 1, 'success': 1, 'failure': 0,
'found': 0, 'placed': 0, 'unplaced': 0}
self._assert_stats(expected_stats, sharder, 'misplaced')
+ self.assertEqual(
+ 1, sharder.logger.get_stats_counts().get('misplaced_success'))
+ self.assertFalse(
+ sharder.logger.get_stats_counts().get('misplaced_failure'))
self.assertFalse(
- sharder.logger.get_increment_counts().get('misplaced_found'))
+ sharder.logger.get_stats_counts().get('misplaced_found'))
+ self.assertFalse(
+ sharder.logger.get_stats_counts().get('misplaced_placed'))
+ self.assertFalse(
+ sharder.logger.get_stats_counts().get('misplaced_unplaced'))
self.assertFalse(sharder.logger.get_lines_for_level('warning'))
# now put objects
@@ -3795,8 +3916,16 @@ class TestSharder(BaseTestSharder):
expected_stats = {'attempted': 1, 'success': 0, 'failure': 1,
'found': 1, 'placed': 2, 'unplaced': 2}
self._assert_stats(expected_stats, sharder, 'misplaced')
+ self.assertFalse(
+ sharder.logger.get_stats_counts().get('misplaced_success'))
+ self.assertEqual(
+ 1, sharder.logger.get_stats_counts().get('misplaced_failure'))
self.assertEqual(
- 1, sharder.logger.get_increment_counts()['misplaced_found'])
+ 1, sharder.logger.get_stats_counts()['misplaced_found'])
+ self.assertEqual(
+ 2, sharder.logger.get_stats_counts().get('misplaced_placed'))
+ self.assertEqual(
+ 2, sharder.logger.get_stats_counts().get('misplaced_unplaced'))
# some misplaced objects could not be moved...
warning_lines = sharder.logger.get_lines_for_level('warning')
self.assertIn(
@@ -3832,7 +3961,15 @@ class TestSharder(BaseTestSharder):
'found': 1, 'placed': 2, 'unplaced': 0}
self._assert_stats(expected_stats, sharder, 'misplaced')
self.assertEqual(
- 1, sharder.logger.get_increment_counts()['misplaced_found'])
+ 1, sharder.logger.get_stats_counts().get('misplaced_success'))
+ self.assertFalse(
+ sharder.logger.get_stats_counts().get('misplaced_failure'))
+ self.assertEqual(
+ 1, sharder.logger.get_stats_counts()['misplaced_found'])
+ self.assertEqual(
+ 2, sharder.logger.get_stats_counts().get('misplaced_placed'))
+ self.assertFalse(
+ sharder.logger.get_stats_counts().get('misplaced_unplaced'))
self.assertFalse(sharder.logger.get_lines_for_level('warning'))
# check misplaced objects were moved
@@ -3855,9 +3992,17 @@ class TestSharder(BaseTestSharder):
expected_stats = {'attempted': 1, 'success': 1, 'failure': 0,
'found': 0, 'placed': 0, 'unplaced': 0}
self._assert_stats(expected_stats, sharder, 'misplaced')
+ self.assertEqual(
+ 1, sharder.logger.get_stats_counts().get('misplaced_success'))
+ self.assertFalse(
+ sharder.logger.get_stats_counts().get('misplaced_failure'))
self.assertFalse(
- sharder.logger.get_increment_counts().get('misplaced_found'))
+ sharder.logger.get_stats_counts().get('misplaced_found'))
self.assertFalse(sharder.logger.get_lines_for_level('warning'))
+ self.assertFalse(
+ sharder.logger.get_stats_counts().get('misplaced_placed'))
+ self.assertFalse(
+ sharder.logger.get_stats_counts().get('misplaced_unplaced'))
# and then more misplaced updates arrive
new_objects = [
@@ -3891,7 +4036,15 @@ class TestSharder(BaseTestSharder):
'found': 1, 'placed': 2, 'unplaced': 0}
self._assert_stats(expected_stats, sharder, 'misplaced')
self.assertEqual(
- 1, sharder.logger.get_increment_counts()['misplaced_found'])
+ 1, sharder.logger.get_stats_counts().get('misplaced_success'))
+ self.assertFalse(
+ sharder.logger.get_stats_counts().get('misplaced_failure'))
+ self.assertEqual(
+ 1, sharder.logger.get_stats_counts()['misplaced_found'])
+ self.assertEqual(
+ 2, sharder.logger.get_stats_counts().get('misplaced_placed'))
+ self.assertFalse(
+ sharder.logger.get_stats_counts().get('misplaced_unplaced'))
self.assertFalse(sharder.logger.get_lines_for_level('warning'))
# check new misplaced objects were moved
@@ -3978,7 +4131,7 @@ class TestSharder(BaseTestSharder):
'found': 1, 'placed': 1, 'unplaced': 2}
self._assert_stats(expected_stats, sharder, 'misplaced')
self.assertEqual(
- 1, sharder.logger.get_increment_counts()['misplaced_found'])
+ 1, sharder.logger.get_stats_counts()['misplaced_found'])
warning_lines = sharder.logger.get_lines_for_level('warning')
self.assertIn(
'Failed to find destination for at least 2 misplaced objects',
@@ -4013,7 +4166,7 @@ class TestSharder(BaseTestSharder):
'found': 1, 'placed': 2, 'unplaced': 0}
self._assert_stats(expected_stats, sharder, 'misplaced')
self.assertEqual(
- 1, sharder.logger.get_increment_counts()['misplaced_found'])
+ 1, sharder.logger.get_stats_counts()['misplaced_found'])
self.assertFalse(sharder.logger.get_lines_for_level('warning'))
# check misplaced objects were moved
@@ -4064,7 +4217,7 @@ class TestSharder(BaseTestSharder):
'found': 1, 'placed': 5, 'unplaced': 0}
self._assert_stats(expected_stats, sharder, 'misplaced')
self.assertEqual(
- 1, sharder.logger.get_increment_counts()['misplaced_found'])
+ 1, sharder.logger.get_stats_counts()['misplaced_found'])
self.assertFalse(sharder.logger.get_lines_for_level('warning'))
# check *all* the misplaced objects were moved
@@ -4120,7 +4273,7 @@ class TestSharder(BaseTestSharder):
'found': 1, 'placed': 2, 'unplaced': 0}
self._assert_stats(expected_stats, sharder, 'misplaced')
self.assertEqual(
- 1, sharder.logger.get_increment_counts()['misplaced_found'])
+ 1, sharder.logger.get_stats_counts()['misplaced_found'])
# check new misplaced objects were moved
self._check_objects(objects[:1], expected_shard_dbs[0])
@@ -4147,7 +4300,7 @@ class TestSharder(BaseTestSharder):
)
self._assert_stats(expected_stats, sharder, 'misplaced')
self.assertEqual(
- 1, sharder.logger.get_increment_counts()['misplaced_found'])
+ 1, sharder.logger.get_stats_counts()['misplaced_found'])
# check older misplaced objects were not merged to shard brokers
self._check_objects(objects[:1], expected_shard_dbs[0])
@@ -4186,7 +4339,7 @@ class TestSharder(BaseTestSharder):
)
self._assert_stats(expected_stats, sharder, 'misplaced')
self.assertEqual(
- 1, sharder.logger.get_increment_counts()['misplaced_found'])
+ 1, sharder.logger.get_stats_counts()['misplaced_found'])
# check only the newer misplaced object was moved
self._check_objects([newer_object], expected_shard_dbs[0])
@@ -4776,37 +4929,62 @@ class TestSharder(BaseTestSharder):
self.assertFalse(broker.logger.get_lines_for_level('error'))
broker.logger.clear()
- def _check_process_broker_sharding_no_others(self, state):
+ def _check_process_broker_sharding_no_others(self, start_state, deleted):
# verify that when existing own_shard_range has given state and there
- # are other shard ranges then the sharding process will begin
- broker = self._make_broker(hash_='hash%s' % state)
+ # are other shard ranges then the sharding process will complete
+ broker = self._make_broker(hash_='hash%s%s' % (start_state, deleted))
node = {'ip': '1.2.3.4', 'port': 6040, 'device': 'sda5', 'id': '2',
'index': 0}
own_sr = broker.get_own_shard_range()
- self.assertTrue(own_sr.update_state(state))
- epoch = Timestamp.now()
+ self.assertTrue(own_sr.update_state(start_state))
+ epoch = next(self.ts_iter)
own_sr.epoch = epoch
shard_ranges = self._make_shard_ranges((('', 'm'), ('m', '')))
broker.merge_shard_ranges([own_sr] + shard_ranges)
+ if deleted:
+ broker.delete_db(next(self.ts_iter).internal)
with self._mock_sharder() as sharder:
with mock.patch.object(
- sharder, '_create_shard_containers', return_value=0):
- with mock_timestamp_now() as now:
+ sharder, '_send_shard_ranges', return_value=True):
+ with mock_timestamp_now_with_iter(self.ts_iter):
sharder._audit_container = mock.MagicMock()
sharder._process_broker(broker, node, 99)
- final_own_sr = broker.get_own_shard_range(no_default=True)
- self.assertEqual(dict(own_sr, meta_timestamp=now),
- dict(final_own_sr))
- self.assertEqual(SHARDING, broker.get_db_state())
+ final_own_sr = broker.get_own_shard_range(no_default=True)
+ self.assertEqual(SHARDED, broker.get_db_state())
self.assertEqual(epoch.normal, parse_db_filename(broker.db_file)[1])
self.assertFalse(broker.logger.get_lines_for_level('warning'))
self.assertFalse(broker.logger.get_lines_for_level('error'))
+ # self.assertEqual(deleted, broker.is_deleted())
+ return own_sr, final_own_sr
def test_process_broker_sharding_with_own_shard_range_no_others(self):
- self._check_process_broker_sharding_no_others(ShardRange.SHARDING)
- self._check_process_broker_sharding_no_others(ShardRange.SHRINKING)
+ own_sr, final_own_sr = self._check_process_broker_sharding_no_others(
+ ShardRange.SHARDING, False)
+ exp_own_sr = dict(own_sr, state=ShardRange.SHARDED,
+ meta_timestamp=mock.ANY)
+ self.assertEqual(exp_own_sr, dict(final_own_sr))
+
+ # verify that deleted DBs will be sharded
+ own_sr, final_own_sr = self._check_process_broker_sharding_no_others(
+ ShardRange.SHARDING, True)
+ exp_own_sr = dict(own_sr, state=ShardRange.SHARDED,
+ meta_timestamp=mock.ANY)
+ self.assertEqual(exp_own_sr, dict(final_own_sr))
+
+ own_sr, final_own_sr = self._check_process_broker_sharding_no_others(
+ ShardRange.SHRINKING, False)
+ exp_own_sr = dict(own_sr, state=ShardRange.SHRUNK,
+ meta_timestamp=mock.ANY)
+ self.assertEqual(exp_own_sr, dict(final_own_sr))
+
+ # verify that deleted DBs will be shrunk
+ own_sr, final_own_sr = self._check_process_broker_sharding_no_others(
+ ShardRange.SHRINKING, True)
+ exp_own_sr = dict(own_sr, state=ShardRange.SHRUNK,
+ meta_timestamp=mock.ANY)
+ self.assertEqual(exp_own_sr, dict(final_own_sr))
def test_process_broker_not_sharding_others(self):
# verify that sharding process will not start when own shard range is
@@ -4899,6 +5077,83 @@ class TestSharder(BaseTestSharder):
self._check_process_broker_sharding_others(ShardRange.SHRINKING)
self._check_process_broker_sharding_others(ShardRange.SHARDED)
+ def test_process_broker_leader_auto_shard(self):
+ # verify conditions for acting as auto-shard leader
+ broker = self._make_broker(put_timestamp=next(self.ts_iter).internal)
+ objects = [
+ ['obj%3d' % i, self.ts_encoded(), i, 'text/plain',
+ 'etag%s' % i, 0] for i in range(10)]
+ for obj in objects:
+ broker.put_object(*obj)
+ self.assertEqual(10, broker.get_info()['object_count'])
+ node = {'ip': '1.2.3.4', 'port': 6040, 'device': 'sda5', 'id': '2',
+ 'index': 0}
+
+ def do_process(conf):
+ with self._mock_sharder(conf) as sharder:
+ with mock_timestamp_now():
+ # we're not testing rest of the process here so prevent any
+ # attempt to progress shard range states
+ sharder._create_shard_containers = lambda *args: 0
+ sharder._process_broker(broker, node, 99)
+
+ # auto shard disabled
+ conf = {'shard_container_threshold': 10,
+ 'rows_per_shard': 5,
+ 'shrink_threshold': 1,
+ 'auto_shard': False}
+ do_process(conf)
+ self.assertEqual(UNSHARDED, broker.get_db_state())
+ own_sr = broker.get_own_shard_range(no_default=True)
+ self.assertIsNone(own_sr)
+
+ # auto shard enabled, not node 0
+ conf['auto_shard'] = True
+ node['index'] = 1
+ do_process(conf)
+ self.assertEqual(UNSHARDED, broker.get_db_state())
+ own_sr = broker.get_own_shard_range(no_default=True)
+ self.assertIsNone(own_sr)
+
+ # auto shard enabled, node 0 -> start sharding
+ node['index'] = 0
+ do_process(conf)
+ self.assertEqual(SHARDING, broker.get_db_state())
+ own_sr = broker.get_own_shard_range(no_default=True)
+ self.assertIsNotNone(own_sr)
+ self.assertEqual(ShardRange.SHARDING, own_sr.state)
+ self.assertEqual(own_sr.epoch.normal,
+ parse_db_filename(broker.db_file)[1])
+ self.assertEqual(2, len(broker.get_shard_ranges()))
+
+ def test_process_broker_leader_auto_shard_deleted_db(self):
+ # verify no auto-shard leader if broker is deleted
+ conf = {'shard_container_threshold': 10,
+ 'rows_per_shard': 5,
+ 'shrink_threshold': 1,
+ 'auto_shard': True}
+ broker = self._make_broker(put_timestamp=next(self.ts_iter).internal)
+ broker.delete_db(next(self.ts_iter).internal)
+ self.assertTrue(broker.is_deleted()) # sanity check
+ node = {'ip': '1.2.3.4', 'port': 6040, 'device': 'sda5', 'id': '2',
+ 'index': 0}
+
+ with self._mock_sharder(conf) as sharder:
+ with mock_timestamp_now():
+ with mock.patch.object(
+ sharder, '_find_and_enable_sharding_candidates'
+ ) as mock_find_and_enable:
+ sharder._process_broker(broker, node, 99)
+
+ self.assertEqual(UNSHARDED, broker.get_db_state())
+ own_sr = broker.get_own_shard_range(no_default=True)
+ self.assertIsNone(own_sr)
+ # this is the only concrete assertion that verifies the leader actions
+ # are not taken; no shard ranges would actually be found for an empty
+ # deleted db so there's no other way to differentiate from an undeleted
+ # db being processed...
+ mock_find_and_enable.assert_not_called()
+
def check_shard_ranges_sent(self, broker, expected_sent):
bodies = []
servers = []
@@ -5448,9 +5703,10 @@ class TestSharder(BaseTestSharder):
self.assertEqual([], self.logger.get_lines_for_level('warning'))
# advance time
- with mock.patch('swift.container.sharder.time.time') as fake_time, \
- self._mock_sharder() as sharder:
- fake_time.return_value = 6048000 + float(delete_ts)
+ future_time = 6048000 + float(delete_ts)
+ with mock.patch(
+ 'swift.container.sharder.time.time',
+ return_value=future_time), self._mock_sharder() as sharder:
sharder._audit_container(broker)
message = 'Reclaimable db stuck waiting for shrinking: %s (%s)' % (
broker.db_file, broker.path)
@@ -5464,9 +5720,9 @@ class TestSharder(BaseTestSharder):
broker.merge_shard_ranges(shard_ranges)
# no more warning
- with mock.patch('swift.container.sharder.time.time') as fake_time, \
- self._mock_sharder() as sharder:
- fake_time.return_value = 6048000 + float(delete_ts)
+ with mock.patch(
+ 'swift.container.sharder.time.time',
+ return_value=future_time), self._mock_sharder() as sharder:
sharder._audit_container(broker)
self.assertEqual([], self.logger.get_lines_for_level('warning'))
@@ -5939,7 +6195,7 @@ class TestSharder(BaseTestSharder):
with mock.patch.object(
sharder, '_process_broker') as mock_process_broker:
sharder.run_once()
- self.assertEqual(dev_ids, set(sharder._local_device_ids))
+ self.assertEqual(dev_ids, set(sharder._local_device_ids.keys()))
self.assertEqual(set(container_data),
set((call[0][0].path, call[0][1]['id'], call[0][2])
for call in mock_process_broker.call_args_list))
@@ -5951,7 +6207,7 @@ class TestSharder(BaseTestSharder):
with mock.patch.object(
sharder, '_process_broker') as mock_process_broker:
sharder.run_once(partitions='0')
- self.assertEqual(dev_ids, set(sharder._local_device_ids))
+ self.assertEqual(dev_ids, set(sharder._local_device_ids.keys()))
self.assertEqual(set([container_data[0]]),
set((call[0][0].path, call[0][1]['id'], call[0][2])
for call in mock_process_broker.call_args_list))
@@ -5963,7 +6219,7 @@ class TestSharder(BaseTestSharder):
with mock.patch.object(
sharder, '_process_broker') as mock_process_broker:
sharder.run_once(partitions='2,0')
- self.assertEqual(dev_ids, set(sharder._local_device_ids))
+ self.assertEqual(dev_ids, set(sharder._local_device_ids.keys()))
self.assertEqual(set([container_data[0], container_data[2]]),
set((call[0][0].path, call[0][1]['id'], call[0][2])
for call in mock_process_broker.call_args_list))
@@ -5975,7 +6231,7 @@ class TestSharder(BaseTestSharder):
with mock.patch.object(
sharder, '_process_broker') as mock_process_broker:
sharder.run_once(partitions='2,0', devices='sdc')
- self.assertEqual(dev_ids, set(sharder._local_device_ids))
+ self.assertEqual(dev_ids, set(sharder._local_device_ids.keys()))
self.assertEqual(set([container_data[2]]),
set((call[0][0].path, call[0][1]['id'], call[0][2])
for call in mock_process_broker.call_args_list))
@@ -5987,7 +6243,7 @@ class TestSharder(BaseTestSharder):
with mock.patch.object(
sharder, '_process_broker') as mock_process_broker:
sharder.run_once(devices='sdb,sdc')
- self.assertEqual(dev_ids, set(sharder._local_device_ids))
+ self.assertEqual(dev_ids, set(sharder._local_device_ids.keys()))
self.assertEqual(set(container_data[1:]),
set((call[0][0].path, call[0][1]['id'], call[0][2])
for call in mock_process_broker.call_args_list))
@@ -6293,6 +6549,56 @@ class TestSharder(BaseTestSharder):
self._assert_recon_stats(expected_shrinking_candidates_data,
sharder, 'shrinking_candidates')
+ @mock.patch('swift.common.ring.ring.Ring.get_part_nodes', return_value=[])
+ @mock.patch('swift.common.ring.ring.Ring.get_more_nodes', return_value=[])
+ def test_get_shard_broker_no_local_handoff_for_part(
+ self, mock_part_nodes, mock_more_nodes):
+ broker = self._make_broker()
+ broker.enable_sharding(Timestamp.now())
+
+ shard_bounds = (('', 'd'), ('d', 'x'), ('x', ''))
+ shard_ranges = self._make_shard_ranges(
+ shard_bounds, state=ShardRange.CREATED)
+
+ broker.merge_shard_ranges(shard_ranges)
+ self.assertTrue(broker.set_sharding_state())
+
+ # first, let's assume there local_handoff_for_part fails because the
+ # node we're on is at zero weight for all disks. So it wont appear in
+ # the replica2part2dev table, meaning we wont get a node back.
+ # in this case, we'll fall back to one of our own devices which we
+ # determine from the ring.devs not the replica2part2dev table.
+ with self._mock_sharder() as sharder:
+ local_dev_ids = {dev['id']: dev for dev in sharder.ring.devs[-1:]}
+ sharder._local_device_ids = local_dev_ids
+ part, shard_broker, node_id, _ = sharder._get_shard_broker(
+ shard_ranges[0], broker.root_path, 0)
+ self.assertIn(node_id, local_dev_ids)
+
+ # if there are more then 1 local_dev_id it'll randomly pick one
+ selected_node_ids = set()
+ for _ in range(10):
+ with self._mock_sharder() as sharder:
+ local_dev_ids = {dev['id']: dev
+ for dev in sharder.ring.devs[-2:]}
+ sharder._local_device_ids = local_dev_ids
+ part, shard_broker, node_id, _ = sharder._get_shard_broker(
+ shard_ranges[0], broker.root_path, 0)
+ self.assertIn(node_id, local_dev_ids)
+ selected_node_ids.add(node_id)
+ if len(selected_node_ids) == 2:
+ break
+ self.assertEqual(len(selected_node_ids), 2)
+
+ # If there are also no local_dev_ids, then we'll get the RuntimeError
+ with self._mock_sharder() as sharder:
+ sharder._local_device_ids = {}
+ with self.assertRaises(RuntimeError) as dev_err:
+ sharder._get_shard_broker(shard_ranges[0], broker.root_path, 0)
+
+ expected_error_string = 'Cannot find local handoff; no local devices'
+ self.assertEqual(str(dev_err.exception), expected_error_string)
+
class TestCleavingContext(BaseTestSharder):
def test_init(self):
@@ -7446,6 +7752,40 @@ class TestSharderFunctions(BaseTestSharder):
rank_paths(paths, own_sr)
)
+ def test_find_paths_with_gaps(self):
+ bounds = (
+ # gap
+ ('a', 'f'),
+ ('f', 'm'), # overlap
+ ('k', 'p'),
+ # gap
+ ('q', 'y')
+ # gap
+ )
+ ranges = self._make_shard_ranges(
+ bounds, ShardRange.ACTIVE,
+ timestamp=next(self.ts_iter), object_count=1)
+ paths_with_gaps = find_paths_with_gaps(ranges)
+ self.assertEqual(3, len(paths_with_gaps))
+ self.assertEqual(
+ [(ShardRange.MIN, ShardRange.MIN),
+ (ShardRange.MIN, 'a'),
+ ('a', 'm')],
+ [(r.lower, r.upper) for r in paths_with_gaps[0]]
+ )
+ self.assertEqual(
+ [('k', 'p'),
+ ('p', 'q'),
+ ('q', 'y')],
+ [(r.lower, r.upper) for r in paths_with_gaps[1]]
+ )
+ self.assertEqual(
+ [('q', 'y'),
+ ('y', ShardRange.MAX),
+ (ShardRange.MAX, ShardRange.MAX)],
+ [(r.lower, r.upper) for r in paths_with_gaps[2]]
+ )
+
class TestContainerSharderConf(unittest.TestCase):
def test_default(self):
diff --git a/test/unit/obj/test_diskfile.py b/test/unit/obj/test_diskfile.py
index 029197f4f..37dbb6be9 100644
--- a/test/unit/obj/test_diskfile.py
+++ b/test/unit/obj/test_diskfile.py
@@ -8560,6 +8560,59 @@ class TestSuffixHashes(unittest.TestCase):
)
self.assertTrue(os.path.exists(quarantine_path))
+ def test_auditor_hashdir_not_listable(self):
+ def list_locations(dirname, datadir):
+ return [(loc.path, loc.device, loc.partition, loc.policy)
+ for loc in diskfile.object_audit_location_generator(
+ devices=dirname, datadir=datadir, mount_check=False)]
+
+ real_listdir = os.listdir
+
+ def splode_if_endswith(suffix, err):
+ def sploder(path):
+ if path.endswith(suffix):
+ raise OSError(err, os.strerror(err))
+ else:
+ return real_listdir(path)
+
+ return sploder
+
+ with temptree([]) as tmpdir:
+ hashdir1 = os.path.join(tmpdir, "sdf", "objects", "2607", "b54",
+ "fe450ec990a88cc4b252b181bab04b54")
+ os.makedirs(hashdir1)
+ with open(os.path.join(hashdir1, '1656032666.98003.ts'), 'w'):
+ pass
+ hashdir2 = os.path.join(tmpdir, "sdf", "objects", "2809", "afd",
+ "7089ab48d955ab0851fc51cc17a34afd")
+ os.makedirs(hashdir2)
+ with open(os.path.join(hashdir2, '1656080624.31899.ts'), 'w'):
+ pass
+
+ expected = [(hashdir2, 'sdf', '2809', POLICIES[0])]
+
+ # Parts that look like files are just skipped
+ with mock.patch('os.listdir', splode_if_endswith(
+ "2607", errno.ENOTDIR)):
+ self.assertEqual(expected, list_locations(tmpdir, 'objects'))
+ diskfile.clear_auditor_status(tmpdir, 'objects')
+ # ENODATA on a suffix is ok
+ with mock.patch('os.listdir', splode_if_endswith(
+ "b54", errno.ENODATA)):
+ self.assertEqual(expected, list_locations(tmpdir, 'objects'))
+ diskfile.clear_auditor_status(tmpdir, 'objects')
+
+ # sanity the other way
+ expected = [(hashdir1, 'sdf', '2607', POLICIES[0])]
+ with mock.patch('os.listdir', splode_if_endswith(
+ "2809", errno.ENODATA)):
+ self.assertEqual(expected, list_locations(tmpdir, 'objects'))
+ diskfile.clear_auditor_status(tmpdir, 'objects')
+ with mock.patch('os.listdir', splode_if_endswith(
+ "afd", errno.ENOTDIR)):
+ self.assertEqual(expected, list_locations(tmpdir, 'objects'))
+ diskfile.clear_auditor_status(tmpdir, 'objects')
+
def test_hash_suffix_cleanup_ondisk_files_enodata_quarantined(self):
for policy in self.iter_policies():
df = self.df_router[policy].get_diskfile(
diff --git a/test/unit/obj/test_reconstructor.py b/test/unit/obj/test_reconstructor.py
index c1876b93b..7d698d2fd 100644
--- a/test/unit/obj/test_reconstructor.py
+++ b/test/unit/obj/test_reconstructor.py
@@ -271,7 +271,7 @@ class TestGlobalSetupObjectReconstructor(unittest.TestCase):
policy=policy, frag_index=scenarios[part_num](obj_set),
timestamp=utils.Timestamp(t))
- ips = utils.whataremyips(self.reconstructor.bind_ip)
+ ips = utils.whataremyips(self.reconstructor.ring_ip)
for policy in [p for p in POLICIES if p.policy_type == EC_POLICY]:
self.ec_policy = policy
self.ec_obj_ring = self.reconstructor.load_object_ring(
@@ -1246,7 +1246,7 @@ class TestGlobalSetupObjectReconstructor(unittest.TestCase):
self.reconstructor.reconstruct()
for context in ssync_calls:
if context['job']['job_type'] == REVERT:
- self.assertTrue(True, context.get('include_non_durable'))
+ self.assertTrue(context.get('include_non_durable'))
data_file_tail = ('#%s.data'
% context['node']['index'])
for dirpath, files in visit_obj_dirs(context):
@@ -1286,7 +1286,7 @@ class TestGlobalSetupObjectReconstructor(unittest.TestCase):
# verify reconstructor only deletes reverted nondurable fragments older
# commit_window
shutil.rmtree(self.ec_obj_path)
- ips = utils.whataremyips(self.reconstructor.bind_ip)
+ ips = utils.whataremyips(self.reconstructor.ring_ip)
local_devs = [dev for dev in self.ec_obj_ring.devs
if dev and dev['replication_ip'] in ips and
dev['replication_port'] ==
@@ -1318,7 +1318,7 @@ class TestGlobalSetupObjectReconstructor(unittest.TestCase):
self.reconstructor.reconstruct()
for context in ssync_calls:
self.assertEqual(REVERT, context['job']['job_type'])
- self.assertTrue(True, context.get('include_non_durable'))
+ self.assertTrue(context.get('include_non_durable'))
# neither nondurable should be removed yet with default commit_window
# because their mtimes are too recent
self.assertTrue(os.path.exists(datafile_recent))
@@ -1337,7 +1337,7 @@ class TestGlobalSetupObjectReconstructor(unittest.TestCase):
self.reconstructor.reconstruct()
for context in ssync_calls:
self.assertEqual(REVERT, context['job']['job_type'])
- self.assertTrue(True, context.get('include_non_durable'))
+ self.assertTrue(context.get('include_non_durable'))
# ...now the nondurables get purged
self.assertFalse(os.path.exists(datafile_recent))
@@ -1348,7 +1348,7 @@ class TestGlobalSetupObjectReconstructor(unittest.TestCase):
# visited by the reconstructor, despite having timestamp older than
# reclaim_age
shutil.rmtree(self.ec_obj_path)
- ips = utils.whataremyips(self.reconstructor.bind_ip)
+ ips = utils.whataremyips(self.reconstructor.ring_ip)
local_devs = [dev for dev in self.ec_obj_ring.devs
if dev and dev['replication_ip'] in ips and
dev['replication_port'] ==
@@ -1396,7 +1396,7 @@ class TestGlobalSetupObjectReconstructor(unittest.TestCase):
# reclaim_age and commit_window is zero; this test illustrates the
# potential data loss bug that commit_window addresses
shutil.rmtree(self.ec_obj_path)
- ips = utils.whataremyips(self.reconstructor.bind_ip)
+ ips = utils.whataremyips(self.reconstructor.ring_ip)
local_devs = [dev for dev in self.ec_obj_ring.devs
if dev and dev['replication_ip'] in ips and
dev['replication_port'] ==
@@ -1446,7 +1446,7 @@ class TestGlobalSetupObjectReconstructor(unittest.TestCase):
# survive being visited by the reconstructor if its timestamp is older
# than reclaim_age
shutil.rmtree(self.ec_obj_path)
- ips = utils.whataremyips(self.reconstructor.bind_ip)
+ ips = utils.whataremyips(self.reconstructor.ring_ip)
local_devs = [dev for dev in self.ec_obj_ring.devs
if dev and dev['replication_ip'] in ips and
dev['replication_port'] ==
@@ -1494,7 +1494,7 @@ class TestGlobalSetupObjectReconstructor(unittest.TestCase):
# verify reconstructor only deletes objects that were actually reverted
# when ssync is limited by max_objects_per_revert
shutil.rmtree(self.ec_obj_path)
- ips = utils.whataremyips(self.reconstructor.bind_ip)
+ ips = utils.whataremyips(self.reconstructor.ring_ip)
local_devs = [dev for dev in self.ec_obj_ring.devs
if dev and dev['replication_ip'] in ips and
dev['replication_port'] ==
@@ -3084,6 +3084,36 @@ class BaseTestObjectReconstructor(unittest.TestCase):
class TestObjectReconstructor(BaseTestObjectReconstructor):
+ def test_ring_ip_and_bind_ip(self):
+ # make clean base_conf
+ base_conf = dict(self.conf)
+ for key in ('bind_ip', 'ring_ip'):
+ base_conf.pop(key, None)
+
+ # default ring_ip is always 0.0.0.0
+ self.conf = base_conf
+ self._configure_reconstructor()
+ self.assertEqual('0.0.0.0', self.reconstructor.ring_ip)
+
+ # bind_ip works fine for legacy configs
+ self.conf = dict(base_conf)
+ self.conf['bind_ip'] = '192.168.1.42'
+ self._configure_reconstructor()
+ self.assertEqual('192.168.1.42', self.reconstructor.ring_ip)
+
+ # ring_ip works fine by-itself
+ self.conf = dict(base_conf)
+ self.conf['ring_ip'] = '192.168.1.43'
+ self._configure_reconstructor()
+ self.assertEqual('192.168.1.43', self.reconstructor.ring_ip)
+
+ # if you have both ring_ip wins
+ self.conf = dict(base_conf)
+ self.conf['bind_ip'] = '192.168.1.44'
+ self.conf['ring_ip'] = '192.168.1.45'
+ self._configure_reconstructor()
+ self.assertEqual('192.168.1.45', self.reconstructor.ring_ip)
+
def test_handoffs_only_default(self):
# sanity neither option added to default conf
self.conf.pop('handoffs_first', None)
@@ -3276,7 +3306,7 @@ class TestObjectReconstructor(BaseTestObjectReconstructor):
'replication_ip': '127.0.0.88', # not local via IP
'replication_port': self.port,
})
- self.reconstructor.bind_ip = '0.0.0.0' # use whataremyips
+ self.reconstructor.ring_ip = '0.0.0.0' # use whataremyips
with mock.patch('swift.obj.reconstructor.whataremyips',
return_value=[self.ip]), \
mock.patch.object(self.policy.object_ring, '_devs',
@@ -3331,7 +3361,7 @@ class TestObjectReconstructor(BaseTestObjectReconstructor):
'replication_ip': '127.0.0.88', # not local via IP
'replication_port': self.port,
})
- self.reconstructor.bind_ip = '0.0.0.0' # use whataremyips
+ self.reconstructor.ring_ip = '0.0.0.0' # use whataremyips
with mock.patch('swift.obj.reconstructor.whataremyips',
return_value=[self.ip]), \
mock.patch.object(self.policy.object_ring, '_devs',
@@ -3372,7 +3402,7 @@ class TestObjectReconstructor(BaseTestObjectReconstructor):
'replication_ip': self.ip,
'replication_port': self.port,
} for i, dev in enumerate(local_devs)]
- self.reconstructor.bind_ip = '0.0.0.0' # use whataremyips
+ self.reconstructor.ring_ip = '0.0.0.0' # use whataremyips
with mock.patch('swift.obj.reconstructor.whataremyips',
return_value=[self.ip]), \
mock.patch.object(self.policy.object_ring, '_devs',
diff --git a/test/unit/obj/test_replicator.py b/test/unit/obj/test_replicator.py
index d25c25490..53eac6db2 100644
--- a/test/unit/obj/test_replicator.py
+++ b/test/unit/obj/test_replicator.py
@@ -13,6 +13,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
+import errno
+import io
import json
import unittest
import os
@@ -260,6 +262,36 @@ class TestObjectReplicator(unittest.TestCase):
rmtree(self.testdir, ignore_errors=1)
rmtree(self.recon_cache, ignore_errors=1)
+ def test_ring_ip_and_bind_ip(self):
+ # make clean base_conf
+ base_conf = dict(self.conf)
+ for key in ('bind_ip', 'ring_ip'):
+ base_conf.pop(key, None)
+
+ # default ring_ip is always 0.0.0.0
+ self.conf = base_conf
+ self._create_replicator()
+ self.assertEqual('0.0.0.0', self.replicator.ring_ip)
+
+ # bind_ip works fine for legacy configs
+ self.conf = dict(base_conf)
+ self.conf['bind_ip'] = '192.168.1.42'
+ self._create_replicator()
+ self.assertEqual('192.168.1.42', self.replicator.ring_ip)
+
+ # ring_ip works fine by-itself
+ self.conf = dict(base_conf)
+ self.conf['ring_ip'] = '192.168.1.43'
+ self._create_replicator()
+ self.assertEqual('192.168.1.43', self.replicator.ring_ip)
+
+ # if you have both ring_ip wins
+ self.conf = dict(base_conf)
+ self.conf['bind_ip'] = '192.168.1.44'
+ self.conf['ring_ip'] = '192.168.1.45'
+ self._create_replicator()
+ self.assertEqual('192.168.1.45', self.replicator.ring_ip)
+
def test_handoff_replication_setting_warnings(self):
conf_tests = [
# (config, expected_warning)
@@ -1431,6 +1463,45 @@ class TestObjectReplicator(unittest.TestCase):
override_partitions=[1])
self.assertFalse(os.access(part_path, os.F_OK))
+ def _make_OSError(self, err):
+ return OSError(err, os.strerror(err))
+
+ def test_delete_partition_override_params_os_not_empty_error(self):
+ part_path = os.path.join(self.objects, '1')
+ with mock.patch('swift.obj.replicator.shutil.rmtree') as mockrmtree:
+ mockrmtree.side_effect = self._make_OSError(errno.ENOTEMPTY)
+ self.replicator.replicate(override_devices=['sda'],
+ override_partitions=[1],
+ override_policies=[0])
+ error_lines = self.replicator.logger.get_lines_for_level('error')
+ self.assertFalse(error_lines)
+ self.assertTrue(os.path.exists(part_path))
+ self.assertEqual([mock.call(part_path)], mockrmtree.call_args_list)
+
+ def test_delete_partition_ignores_os_no_entity_error(self):
+ part_path = os.path.join(self.objects, '1')
+ with mock.patch('swift.obj.replicator.shutil.rmtree') as mockrmtree:
+ mockrmtree.side_effect = self._make_OSError(errno.ENOENT)
+ self.replicator.replicate(override_devices=['sda'],
+ override_partitions=[1],
+ override_policies=[0])
+ error_lines = self.replicator.logger.get_lines_for_level('error')
+ self.assertFalse(error_lines)
+ self.assertTrue(os.path.exists(part_path))
+ self.assertEqual([mock.call(part_path)], mockrmtree.call_args_list)
+
+ def test_delete_partition_ignores_os_no_data_error(self):
+ part_path = os.path.join(self.objects, '1')
+ with mock.patch('swift.obj.replicator.shutil.rmtree') as mockrmtree:
+ mockrmtree.side_effect = self._make_OSError(errno.ENODATA)
+ self.replicator.replicate(override_devices=['sda'],
+ override_partitions=[1],
+ override_policies=[0])
+ error_lines = self.replicator.logger.get_lines_for_level('error')
+ self.assertFalse(error_lines)
+ self.assertTrue(os.path.exists(part_path))
+ self.assertEqual([mock.call(part_path)], mockrmtree.call_args_list)
+
def test_delete_policy_override_params(self):
df0 = self.df_mgr.get_diskfile('sda', '99', 'a', 'c', 'o',
policy=POLICIES.legacy)
@@ -2096,6 +2167,157 @@ class TestObjectReplicator(unittest.TestCase):
self.assertEqual(expected_reqs, [
(r['method'], r['ip'], r['path']) for r in request_log.requests])
+ def test_rsync_failure_logging(self):
+ with mock.patch('swift.obj.replicator.subprocess.Popen') as mock_popen:
+ mock_popen.return_value.stdout = io.BytesIO(b'\n'.join([
+ b'',
+ b'cd+++++++++ suf',
+ b'cd+++++++++ suf/hash1',
+ b'<f+++++++++ suf/hash1/1637956993.28907.data',
+ b'',
+ b'cd+++++++++ suf/hash2',
+ b'<f+++++++++ suf/hash2/1615174984.55017.data',
+ b'',
+ b'cd+++++++++ suf/hash3',
+ b'<f+++++++++ suf/hash3/1616276756.37760.data',
+ b'<f+++++++++ suf/hash3/1637954870.98055.meta',
+ b'',
+ b'Oh no, some error!',
+ ]))
+ mock_popen.return_value.wait.return_value = 5
+ self.assertEqual(5, self.replicator._rsync([
+ 'rsync', '--recursive', '--whole-file', '--human-readable',
+ '--xattrs', '--itemize-changes', '--ignore-existing',
+ '--timeout=30', '--contimeout=30', '--bwlimit=100M',
+ '--exclude=rsync-tempfile-pattern',
+ '/srv/node/d1/objects/part/suf',
+ '192.168.50.30::object/d8/objects/241']))
+ error_lines = self.logger.get_lines_for_level('error')
+ self.assertEqual(error_lines[:5], [
+ '<f+++++++++ suf/hash1/1637956993.28907.data',
+ '<f+++++++++ suf/hash2/1615174984.55017.data',
+ '<f+++++++++ suf/hash3/1616276756.37760.data',
+ '<f+++++++++ suf/hash3/1637954870.98055.meta',
+ 'Oh no, some error!',
+ ])
+ expected_start = "Bad rsync return code: 5 <- ['rsync', '--recursive'"
+ self.assertEqual(error_lines[5][:len(expected_start)], expected_start,
+ 'Expected %r to start with %r' % (error_lines[5],
+ expected_start))
+ self.assertFalse(error_lines[6:])
+ self.assertFalse(self.logger.get_lines_for_level('info'))
+ self.assertFalse(self.logger.get_lines_for_level('debug'))
+
+ def test_rsync_failure_logging_no_transfer(self):
+ with mock.patch('swift.obj.replicator.subprocess.Popen') as mock_popen:
+ mock_popen.return_value.stdout = io.BytesIO(b'\n'.join([
+ b'',
+ b'cd+++++++++ suf',
+ b'cd+++++++++ suf/hash1',
+ b'<f+++++++++ suf/hash1/1637956993.28907.data',
+ b'',
+ b'cd+++++++++ suf/hash2',
+ b'<f+++++++++ suf/hash2/1615174984.55017.data',
+ b'',
+ b'cd+++++++++ suf/hash3',
+ b'<f+++++++++ suf/hash3/1616276756.37760.data',
+ b'<f+++++++++ suf/hash3/1637954870.98055.meta',
+ b'',
+ b'Oh no, some error!',
+ ]))
+ mock_popen.return_value.wait.return_value = 5
+ self.replicator.log_rsync_transfers = False
+ self.assertEqual(5, self.replicator._rsync([
+ 'rsync', '--recursive', '--whole-file', '--human-readable',
+ '--xattrs', '--itemize-changes', '--ignore-existing',
+ '--timeout=30', '--contimeout=30', '--bwlimit=100M',
+ '--exclude=rsync-tempfile-pattern',
+ '/srv/node/d1/objects/part/suf',
+ '192.168.50.30::object/d8/objects/241']))
+ error_lines = self.logger.get_lines_for_level('error')
+ self.assertEqual(error_lines[0], 'Oh no, some error!')
+ expected_start = "Bad rsync return code: 5 <- ['rsync', '--recursive'"
+ self.assertEqual(error_lines[1][:len(expected_start)], expected_start,
+ 'Expected %r to start with %r' % (error_lines[1],
+ expected_start))
+ self.assertFalse(error_lines[2:])
+ self.assertFalse(self.logger.get_lines_for_level('info'))
+ self.assertFalse(self.logger.get_lines_for_level('debug'))
+
+ def test_rsync_success_logging(self):
+ with mock.patch('swift.obj.replicator.subprocess.Popen') as mock_popen:
+ mock_popen.return_value.stdout = io.BytesIO(b'\n'.join([
+ b'',
+ b'cd+++++++++ suf',
+ b'cd+++++++++ suf/hash1',
+ b'<f+++++++++ suf/hash1/1637956993.28907.data',
+ b'',
+ b'cd+++++++++ suf/hash2',
+ b'<f+++++++++ suf/hash2/1615174984.55017.data',
+ b'',
+ b'cd+++++++++ suf/hash3',
+ b'<f+++++++++ suf/hash3/1616276756.37760.data',
+ b'<f+++++++++ suf/hash3/1637954870.98055.meta',
+ b'',
+ b'Yay! It worked!',
+ ]))
+ mock_popen.return_value.wait.return_value = 0
+ self.assertEqual(0, self.replicator._rsync([
+ 'rsync', '--recursive', '--whole-file', '--human-readable',
+ '--xattrs', '--itemize-changes', '--ignore-existing',
+ '--timeout=30', '--contimeout=30', '--bwlimit=100M',
+ '--exclude=rsync-tempfile-pattern',
+ '/srv/node/d1/objects/part/suf',
+ '192.168.50.30::object/d8/objects/241']))
+ self.assertFalse(self.logger.get_lines_for_level('error'))
+ debug_lines = self.logger.get_lines_for_level('debug')
+ self.assertEqual(debug_lines, [
+ '<f+++++++++ suf/hash1/1637956993.28907.data',
+ '<f+++++++++ suf/hash2/1615174984.55017.data',
+ '<f+++++++++ suf/hash3/1616276756.37760.data',
+ '<f+++++++++ suf/hash3/1637954870.98055.meta',
+ 'Yay! It worked!',
+ ])
+ info_lines = self.logger.get_lines_for_level('info')
+ self.assertEqual(info_lines, [
+ 'Successful rsync of /srv/node/d1/objects/part/... to '
+ '192.168.50.30::object/d8/objects/241 (0.000)'])
+
+ def test_rsync_success_logging_no_transfer(self):
+ with mock.patch('swift.obj.replicator.subprocess.Popen') as mock_popen:
+ mock_popen.return_value.stdout = io.BytesIO(b'\n'.join([
+ b'',
+ b'cd+++++++++ sf1',
+ b'cd+++++++++ sf1/hash1',
+ b'<f+++++++++ sf1/hash1/1637956993.28907.data',
+ b'',
+ b'cd+++++++++ sf1/hash2',
+ b'<f+++++++++ sf1/hash2/1615174984.55017.data',
+ b'',
+ b'cd+++++++++ sf2/hash3',
+ b'<f+++++++++ sf2/hash3/1616276756.37760.data',
+ b'<f+++++++++ sf2/hash3/1637954870.98055.meta',
+ b'',
+ b'Yay! It worked!',
+ ]))
+ mock_popen.return_value.wait.return_value = 0
+ self.replicator.log_rsync_transfers = False
+ self.assertEqual(0, self.replicator._rsync([
+ 'rsync', '--recursive', '--whole-file', '--human-readable',
+ '--xattrs', '--itemize-changes', '--ignore-existing',
+ '--timeout=30', '--contimeout=30', '--bwlimit=100M',
+ '--exclude=rsync-tempfile-pattern',
+ '/srv/node/d1/objects/part/sf1',
+ '/srv/node/d1/objects/part/sf2',
+ '192.168.50.30::object/d8/objects/241']))
+ self.assertFalse(self.logger.get_lines_for_level('error'))
+ debug_lines = self.logger.get_lines_for_level('debug')
+ self.assertEqual(debug_lines, ['Yay! It worked!'])
+ info_lines = self.logger.get_lines_for_level('info')
+ self.assertEqual(info_lines, [
+ 'Successful rsync of /srv/node/d1/objects/part/... to '
+ '192.168.50.30::object/d8/objects/241 (0.000)'])
+
def test_do_listdir(self):
# Test if do_listdir is enabled for every 10th partition to rehash
# First number is the number of partitions in the job, list entries
@@ -2135,6 +2357,7 @@ class TestObjectReplicator(unittest.TestCase):
def test_replicate_skipped_partpower_increase(self):
_create_test_rings(self.testdir, next_part_power=4)
+ self.replicator.get_local_devices() # refresh rings
self.replicator.replicate()
self.assertEqual(0, self.replicator.job_count)
self.assertEqual(0, self.replicator.total_stats.attempted)
diff --git a/test/unit/proxy/controllers/test_info.py b/test/unit/proxy/controllers/test_info.py
index 560d4d660..5ddd76f3d 100644
--- a/test/unit/proxy/controllers/test_info.py
+++ b/test/unit/proxy/controllers/test_info.py
@@ -20,7 +20,7 @@ from mock import Mock
from swift.proxy.controllers import InfoController
from swift.proxy.server import Application as ProxyApp
-from swift.common import utils, registry
+from swift.common import registry, digest
from swift.common.swob import Request, HTTPException
from test.debug_logger import debug_logger
@@ -133,7 +133,7 @@ class TestInfoController(unittest.TestCase):
registry._swift_admin_info = {'qux': {'quux': 'corge'}}
expires = int(time.time() + 86400)
- sig = utils.get_hmac('GET', '/info', expires, '')
+ sig = digest.get_hmac('GET', '/info', expires, '')
path = '/info?swiftinfo_sig={sig}&swiftinfo_expires={expires}'.format(
sig=sig, expires=expires)
req = Request.blank(
@@ -149,7 +149,7 @@ class TestInfoController(unittest.TestCase):
registry._swift_admin_info = {'qux': {'quux': 'corge'}}
expires = int(time.time() + 86400)
- sig = utils.get_hmac('GET', '/info', expires, 'secret-admin-key')
+ sig = digest.get_hmac('GET', '/info', expires, 'secret-admin-key')
path = '/info?swiftinfo_sig={sig}&swiftinfo_expires={expires}'.format(
sig=sig, expires=expires)
req = Request.blank(
@@ -170,7 +170,7 @@ class TestInfoController(unittest.TestCase):
registry._swift_admin_info = {'qux': {'quux': 'corge'}}
expires = int(time.time() + 86400)
- sig = utils.get_hmac('GET', '/info', expires, 'secret-admin-key')
+ sig = digest.get_hmac('GET', '/info', expires, 'secret-admin-key')
path = '/info?swiftinfo_sig={sig}&swiftinfo_expires={expires}'.format(
sig=sig, expires=expires)
req = Request.blank(
@@ -180,7 +180,7 @@ class TestInfoController(unittest.TestCase):
self.assertEqual('200 OK', str(resp))
expires = int(time.time() + 86400)
- sig = utils.get_hmac('HEAD', '/info', expires, 'secret-admin-key')
+ sig = digest.get_hmac('HEAD', '/info', expires, 'secret-admin-key')
path = '/info?swiftinfo_sig={sig}&swiftinfo_expires={expires}'.format(
sig=sig, expires=expires)
req = Request.blank(
@@ -196,7 +196,7 @@ class TestInfoController(unittest.TestCase):
registry._swift_admin_info = {'qux': {'quux': 'corge'}}
expires = int(time.time() + 86400)
- sig = utils.get_hmac('HEAD', '/info', expires, 'secret-admin-key')
+ sig = digest.get_hmac('HEAD', '/info', expires, 'secret-admin-key')
path = '/info?swiftinfo_sig={sig}&swiftinfo_expires={expires}'.format(
sig=sig, expires=expires)
req = Request.blank(
@@ -212,7 +212,7 @@ class TestInfoController(unittest.TestCase):
registry._swift_admin_info = {'qux': {'quux': 'corge'}}
expires = 1
- sig = utils.get_hmac('GET', '/info', expires, 'secret-admin-key')
+ sig = digest.get_hmac('GET', '/info', expires, 'secret-admin-key')
path = '/info?swiftinfo_sig={sig}&swiftinfo_expires={expires}'.format(
sig=sig, expires=expires)
req = Request.blank(
@@ -222,7 +222,7 @@ class TestInfoController(unittest.TestCase):
self.assertEqual('401 Unauthorized', str(resp))
expires = 'abc'
- sig = utils.get_hmac('GET', '/info', expires, 'secret-admin-key')
+ sig = digest.get_hmac('GET', '/info', expires, 'secret-admin-key')
path = '/info?swiftinfo_sig={sig}&swiftinfo_expires={expires}'.format(
sig=sig, expires=expires)
req = Request.blank(
@@ -238,7 +238,7 @@ class TestInfoController(unittest.TestCase):
registry._swift_admin_info = {'qux': {'quux': 'corge'}}
expires = int(time.time() + 86400)
- sig = utils.get_hmac('GET', '/foo', expires, 'secret-admin-key')
+ sig = digest.get_hmac('GET', '/foo', expires, 'secret-admin-key')
path = '/info?swiftinfo_sig={sig}&swiftinfo_expires={expires}'.format(
sig=sig, expires=expires)
req = Request.blank(
@@ -254,7 +254,7 @@ class TestInfoController(unittest.TestCase):
registry._swift_admin_info = {'qux': {'quux': 'corge'}}
expires = int(time.time() + 86400)
- sig = utils.get_hmac('GET', '/foo', expires, 'invalid-admin-key')
+ sig = digest.get_hmac('GET', '/foo', expires, 'invalid-admin-key')
path = '/info?swiftinfo_sig={sig}&swiftinfo_expires={expires}'.format(
sig=sig, expires=expires)
req = Request.blank(
@@ -272,7 +272,7 @@ class TestInfoController(unittest.TestCase):
registry._swift_admin_info = {'qux': {'quux': 'corge'}}
expires = int(time.time() + 86400)
- sig = utils.get_hmac('GET', '/info', expires, 'secret-admin-key')
+ sig = digest.get_hmac('GET', '/info', expires, 'secret-admin-key')
path = '/info?swiftinfo_sig={sig}&swiftinfo_expires={expires}'.format(
sig=sig, expires=expires)
req = Request.blank(
diff --git a/tools/playbooks/ceph-s3tests/run.yaml b/tools/playbooks/ceph-s3tests/run.yaml
index 618c6821a..45bb1745e 100644
--- a/tools/playbooks/ceph-s3tests/run.yaml
+++ b/tools/playbooks/ceph-s3tests/run.yaml
@@ -14,15 +14,6 @@
# limitations under the License.
- hosts: all
tasks:
- - name: Shutdown main swift services
- shell: "swift-init stop main"
- become: true
- ignore_errors: true
-
- - name: Start main swift services
- shell: "swift-init start main"
- become: true
-
- name: Clone s3compat repository
git:
repo: "https://github.com/tipabu/s3compat.git"
diff --git a/tools/playbooks/common/restart_swift.yaml b/tools/playbooks/common/restart_swift.yaml
new file mode 100644
index 000000000..4223e3774
--- /dev/null
+++ b/tools/playbooks/common/restart_swift.yaml
@@ -0,0 +1,24 @@
+# Copyright (c) 2018 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+- hosts: all
+ tasks:
+ - name: Shutdown main swift services
+ shell: "swift-init stop main"
+ become: true
+ ignore_errors: true
+
+ - name: Start main swift services
+ shell: "swift-init start main"
+ become: true
diff --git a/tools/playbooks/multinode_setup/run.yaml b/tools/playbooks/multinode_setup/run.yaml
index a186124c2..072b157cc 100644
--- a/tools/playbooks/multinode_setup/run.yaml
+++ b/tools/playbooks/multinode_setup/run.yaml
@@ -39,4 +39,5 @@
vars:
tox_envlist: func-py3
tox_environment:
+ TOX_CONSTRAINTS_FILE: https://releases.openstack.org/constraints/upper/yoga
SWIFT_TEST_CONFIG_FILE: /home/{{ ansible_ssh_user }}/test.conf
diff --git a/tools/playbooks/s3api-tests/run.yaml b/tools/playbooks/s3api-tests/run.yaml
new file mode 100644
index 000000000..d73c2d840
--- /dev/null
+++ b/tools/playbooks/s3api-tests/run.yaml
@@ -0,0 +1,23 @@
+# Copyright (c) 2022 NVIDIA
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+- hosts: all
+ roles:
+ - ensure-tox
+ tasks:
+ - name: Run s3api tests
+ include_role:
+ name: tox
+ vars:
+ tox_envlist: s3api
diff --git a/tox.ini b/tox.ini
index c6acc4f53..3b7e5f30b 100644
--- a/tox.ini
+++ b/tox.ini
@@ -23,12 +23,28 @@ allowlist_externals =
find
passenv = SWIFT_* *_proxy
+[testenv:s3api]
+usedevelop = False
+deps = {[testenv:py36]deps}
+commands =
+ nosetests {posargs:test/s3api}
+passenv = SWIFT_* *_proxy
+
[testenv:py27]
deps =
-c{toxinidir}/py2-constraints.txt
-r{toxinidir}/requirements.txt
-r{toxinidir}/test-requirements.txt
+[testenv:py36]
+deps =
+ -c{toxinidir}/py36-constraints.txt
+ -r{toxinidir}/requirements.txt
+ -r{toxinidir}/test-requirements.txt
+
+[testenv:py37]
+deps = {[testenv:py36]deps}
+
[testenv:cover]
setenv = VIRTUAL_ENV={envdir}
NOSE_WITH_COVERAGE=1
@@ -46,16 +62,19 @@ commands =
[testenv:func-py3]
basepython = python3
+deps = {[testenv:py36]deps}
commands = ./.functests {posargs}
[testenv:func-ec-py3]
basepython = python3
+deps = {[testenv:py36]deps}
commands = ./.functests {posargs}
setenv = SWIFT_TEST_IN_PROCESS=1
SWIFT_TEST_IN_PROCESS_CONF_LOADER=ec
[testenv:func-encryption-py3]
basepython = python3
+deps = {[testenv:py36]deps}
commands = ./.functests {posargs}
setenv = SWIFT_TEST_IN_PROCESS=1
SWIFT_TEST_IN_PROCESS_CONF_LOADER=encryption