-rwxr-xr-x  bin/swift-dispersion-populate                             3
-rwxr-xr-x  bin/swift-dispersion-report                               3
-rw-r--r--  doc/source/admin_guide.rst                                4
-rw-r--r--  doc/source/associated_projects.rst                        4
-rw-r--r--  doc/source/index.rst                                      1
-rw-r--r--  doc/source/overview_encryption.rst                      373
-rw-r--r--  doc/source/overview_erasure_code.rst                      8
-rw-r--r--  etc/dispersion.conf-sample                                4
-rw-r--r--  etc/proxy-server.conf-sample                              2
-rw-r--r--  swift/common/constraints.py                               1
-rw-r--r--  swift/common/middleware/decrypter.py                      3
-rw-r--r--  swift/common/middleware/encrypter.py                     16
-rw-r--r--  swift/common/middleware/keymaster.py                    147
-rw-r--r--  swift/common/middleware/tempurl.py                        7
-rw-r--r--  swift/common/wsgi.py                                      2
-rw-r--r--  swift/locale/de/LC_MESSAGES/swift.po                    125
-rw-r--r--  swift/locale/es/LC_MESSAGES/swift.po                     57
-rw-r--r--  swift/locale/fr/LC_MESSAGES/swift.po                     57
-rw-r--r--  swift/locale/it/LC_MESSAGES/swift.po                     58
-rw-r--r--  swift/locale/ja/LC_MESSAGES/swift.po                     57
-rw-r--r--  swift/locale/ko_KR/LC_MESSAGES/swift.po                  56
-rw-r--r--  swift/locale/pt_BR/LC_MESSAGES/swift.po                  57
-rw-r--r--  swift/locale/ru/LC_MESSAGES/swift.po                     56
-rw-r--r--  swift/locale/swift.pot                                 1370
-rw-r--r--  swift/locale/tr_TR/LC_MESSAGES/swift.po                  56
-rw-r--r--  swift/locale/zh_CN/LC_MESSAGES/swift.po                  52
-rw-r--r--  swift/locale/zh_TW/LC_MESSAGES/swift.po                  52
-rw-r--r--  swift/obj/server.py                                      20
-rw-r--r--  swift/proxy/controllers/obj.py                            2
-rw-r--r--  test/probe/common.py                                     30
-rwxr-xr-x  test/probe/test_account_failures.py                       8
-rwxr-xr-x  test/probe/test_container_failures.py                    21
-rw-r--r--  test/probe/test_container_merge_policy_index.py          86
-rwxr-xr-x  test/probe/test_empty_device_handoff.py                  27
-rwxr-xr-x  test/probe/test_object_async_update.py                  224
-rwxr-xr-x  test/probe/test_object_failures.py                       19
-rwxr-xr-x  test/probe/test_object_handoff.py                        52
-rw-r--r--  test/unit/common/middleware/crypto_helpers.py             3
-rw-r--r--  test/unit/common/middleware/test_decrypter.py             2
-rw-r--r--  test/unit/common/middleware/test_encrypter.py             6
-rw-r--r--  test/unit/common/middleware/test_encrypter_decrypter.py   9
-rw-r--r--  test/unit/common/middleware/test_keymaster.py           114
-rw-r--r--  test/unit/common/middleware/test_tempurl.py              39
-rwxr-xr-x  test/unit/obj/test_server.py                             22
44 files changed, 956 insertions, 2359 deletions
diff --git a/bin/swift-dispersion-populate b/bin/swift-dispersion-populate
index 465966160..afdc7cd32 100755
--- a/bin/swift-dispersion-populate
+++ b/bin/swift-dispersion-populate
@@ -134,6 +134,7 @@ Usage: %%prog [options] [conf_file]
retries = int(conf.get('retries', 5))
concurrency = int(conf.get('concurrency', 25))
endpoint_type = str(conf.get('endpoint_type', 'publicURL'))
+ region_name = str(conf.get('region_name', ''))
user_domain_name = str(conf.get('user_domain_name', ''))
project_domain_name = str(conf.get('project_domain_name', ''))
project_name = str(conf.get('project_name', ''))
@@ -157,6 +158,8 @@ Usage: %%prog [options] [conf_file]
os_options['project_domain_name'] = project_domain_name
if project_name:
os_options['project_name'] = project_name
+ if region_name:
+ os_options['region_name'] = region_name
url, token = get_auth(conf['auth_url'], conf['auth_user'],
conf['auth_key'],
diff --git a/bin/swift-dispersion-report b/bin/swift-dispersion-report
index 48dff80a8..813156cf5 100755
--- a/bin/swift-dispersion-report
+++ b/bin/swift-dispersion-report
@@ -353,6 +353,7 @@ Usage: %%prog [options] [conf_file]
retries = int(conf.get('retries', 5))
concurrency = int(conf.get('concurrency', 25))
endpoint_type = str(conf.get('endpoint_type', 'publicURL'))
+ region_name = str(conf.get('region_name', ''))
if options.dump_json or config_true_value(conf.get('dump_json', 'no')):
json_output = True
container_report = config_true_value(conf.get('container_report', 'yes')) \
@@ -378,6 +379,8 @@ Usage: %%prog [options] [conf_file]
os_options['project_domain_name'] = project_domain_name
if project_name:
os_options['project_name'] = project_name
+ if region_name:
+ os_options['region_name'] = region_name
url, token = get_auth(conf['auth_url'], conf['auth_user'],
conf['auth_key'],
diff --git a/doc/source/admin_guide.rst b/doc/source/admin_guide.rst
index 392ccdf9d..c0190880e 100644
--- a/doc/source/admin_guide.rst
+++ b/doc/source/admin_guide.rst
@@ -425,7 +425,7 @@ cluster: region 1 in San Francisco (SF), and region 2 in New York
read_affinity
~~~~~~~~~~~~~
-This setting makes the proxy server prefer local backend servers for
+This setting, combined with the sorting_method setting, makes the proxy server prefer local backend servers for
GET and HEAD requests over non-local ones. For example, it is
preferable for an SF proxy server to service object GET requests
by talking to SF object servers, as the client will receive lower
@@ -440,6 +440,7 @@ This is where the read_affinity setting comes in.
Example::
[app:proxy-server]
+ sorting_method = affinity
read_affinity = r1=100
This will make the proxy attempt to service GET and HEAD requests from
@@ -451,6 +452,7 @@ fall back to backend servers in other regions.
Example::
[app:proxy-server]
+ sorting_method = affinity
read_affinity = r1z1=100, r1=200
This will make the proxy attempt to service GET and HEAD requests from
diff --git a/doc/source/associated_projects.rst b/doc/source/associated_projects.rst
index 46e023856..10f061fab 100644
--- a/doc/source/associated_projects.rst
+++ b/doc/source/associated_projects.rst
@@ -109,8 +109,8 @@ Other
* `Better Staticweb <https://github.com/CloudVPS/better-staticweb>`_ - Makes swift containers accessible by default.
* `Django Swiftbrowser <https://github.com/cschwede/django-swiftbrowser>`_ - Simple Django web app to access OpenStack Swift.
* `Swift-account-stats <https://github.com/enovance/swift-account-stats>`_ - Swift-account-stats is a tool to report statistics on Swift usage at tenant and global levels.
-* `PyECLib <https://bitbucket.org/kmgreen2/pyeclib>`_ - High Level Erasure Code library used by Swift
-* `liberasurecode <http://www.bytebucket.org/tsg-/liberasurecode>`_ - Low Level Erasure Code library used by PyECLib
+* `PyECLib <https://github.com/openstack/pyeclib>`_ - High Level Erasure Code library used by Swift
+* `liberasurecode <https://github.com/openstack/liberasurecode>`_ - Low Level Erasure Code library used by PyECLib
* `Swift Browser <https://github.com/zerovm/swift-browser>`_ - JavaScript interface for Swift
* `swift-ui <https://github.com/fanatic/swift-ui>`_ - OpenStack Swift web browser
* `Swift Durability Calculator <https://github.com/enovance/swift-durability-calculator>`_ - Data Durability Calculation Tool for Swift
diff --git a/doc/source/index.rst b/doc/source/index.rst
index c648d0af4..cc86a02c3 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -59,6 +59,7 @@ Overview and Concepts
overview_erasure_code
overview_backing_store
ring_background
+ overview_encryption
associated_projects
Developer Documentation
diff --git a/doc/source/overview_encryption.rst b/doc/source/overview_encryption.rst
new file mode 100644
index 000000000..127b2b699
--- /dev/null
+++ b/doc/source/overview_encryption.rst
@@ -0,0 +1,373 @@
+=================
+Object Encryption
+=================
+
+Swift supports the optional encryption of object data at rest on storage nodes.
+The encryption of object data is intended to mitigate the risk of users' data
+being read if an unauthorised party were to gain physical access to a disk.
+
+Encryption of data at rest is implemented by a set of three middleware modules
+that may be included in the proxy server WSGI pipeline. The feature is internal
+to a Swift cluster and not exposed through the API. Clients are unaware that
+data is encrypted by this feature; internally encrypted data should never be
+returned to clients via the Swift API.
+
+The following data are encrypted while at rest in Swift:
+
+* Object content i.e. the content of an object PUT request's body
+* The entity tag (ETag) of the object
+* All custom user metadata values i.e. metadata sent using X-Object-Meta-
+ prefixed headers with PUT or POST requests
+
+Any data not included in the list above are not encrypted, including:
+
+* Account, container and object names
+* Account and container custom user metadata
+* Custom user metadata names
+* Object Content-Type values
+* Object size
+* System metadata
+
+------------------------
+Deployment and operation
+------------------------
+
+Encryption at rest is deployed by adding three middleware filters to the proxy
+server WSGI pipeline and including their respective filter configuration
+sections in the `proxy-server.conf` file::
+
+ ... decrypter keymaster encrypter proxy-logging proxy-server
+
+ [filter:decrypter]
+ use = egg:swift#decrypter
+
+ [filter:keymaster]
+ use = egg:swift#keymaster
+ encryption_root_secret = your_secret
+
+ [filter:encrypter]
+ use = egg:swift#encrypter
+ # disable_encryption = False
+
+See the example pipeline in `proxy-server.conf-sample` for further details on
+the positioning of those middlewares relative to other middleware.
+
+The keymaster config option ``encryption_root_secret`` MUST be set to a value
+of at least 44 valid base-64 characters before the middleware is used and
+should be consistent across all proxy servers.
+
+.. note::
+
+ The ``encryption_root_secret`` option holds the master secret key used for
+ encryption. The security of all encrypted data critically depends on this
+ key, therefore it should be set to a high-entropy value. For example, a
+ suitable ``encryption_root_secret`` may be obtained by base-64 encoding a
+ 32 byte (or longer) value generated by a cryptographically secure random
+ number generator.
+
+ The ``encryption_root_secret`` value is necessary to recover any encrypted
+ data from the storage system, and therefore, it must be guarded against
+ accidental loss. Its value (and consequently, the proxy-server.conf file)
+ should not be stored on any disk that is in any account, container or
+ object ring.
+
+One method for generating a suitable value for ``encryption_root_secret`` is to
+use the ``openssl`` command line tool::
+
+ openssl rand -base64 32
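+
+Alternatively, the following minimal Python sketch (shown for illustration
+only) generates an equivalent value using the operating system's
+cryptographically secure random number generator::
+
+ import base64, os
+
+ # 32 random bytes, base-64 encoded to 44 characters (including padding)
+ print(base64.b64encode(os.urandom(32)).decode('ascii'))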
+
+Once deployed, the encrypter will by default encrypt object data and metadata
+when PUT and POST requests are made to the proxy server and the decrypter will
+decrypt object data and metadata when handling GET and HEAD requests.
+
+Objects that existed in the cluster prior to the encryption middlewares being
+deployed are still readable with GET and HEAD requests. The content of those
+objects will not be encrypted unless they are written again by a PUT or COPY
+request. Any user metadata of those objects will not be encrypted unless it is
+written again by a PUT, POST or COPY request.
+
+Once deployed, the encryption middlewares should not be removed from the
+pipeline. To do so might cause encrypted object data and/or metadata to be
+returned in response to GET or HEAD requests.
+
+Encryption of inbound object data may be disabled by setting the encrypter
+``disable_encryption`` option to ``True``, in which case existing encrypted
+objects will remain encrypted but new data written with PUT, POST or COPY
+requests will not be encrypted. The encryption middlewares should remain in the
+pipeline even when encryption of new objects is not required. The encrypter
+middleware is needed to handle conditional GET requests that may be for
+previously encrypted objects. The decrypter middleware is needed to handle all
+GET requests that are for encrypted objects. The keymaster is needed to provide
+keys for those requests.
+
+.. _container_sync_client_config:
+
+Container sync
+--------------
+
+If container sync is being used then the encryption middlewares must be added
+to the container sync internal client pipeline. The following configuration
+steps are required:
+
+#. Create a custom internal client configuration file for container sync (if
+ one is not already in use) based on the sample file
+ `internal-client.conf-sample`. For example, copy
+ `internal-client.conf-sample` to `/etc/swift/container-sync-client.conf`.
+#. Modify this file to include the encryption middlewares in the
+ pipeline in the same way as described above for the proxy server.
+#. Modify the container-sync section of all container server config files to
+ point to this internal client config file using the
+ ``internal_client_conf_path`` option. For example::
+
+ internal_client_conf_path = /etc/swift/container-sync-client.conf
+
+--------------------------
+Performance Considerations
+--------------------------
+
+TODO
+
+--------------
+Implementation
+--------------
+
+Encryption scheme
+-----------------
+
+Plaintext data is encrypted to a ciphertext using the AES cipher with 256-bit
+keys. The cipher is used in counter mode so that any byte or range of bytes in
+the ciphertext may be decrypted independently of any other bytes in the
+ciphertext. This enables very simple handling of ranged GETs.
+
+In general an item of plaintext data is transformed to a ciphertext::
+
+ ciphertext = E(plaintext, k, iv)
+
+where ``E`` is the encryption function, ``k`` is an encryption key and ``iv``
+is a unique initialization vector (IV) chosen for each encryption operation.
+The IV is stored as metadata of the encrypted item so that it is available for
+decryption::
+
+ plaintext = D(ciphertext, k, iv)
+
+where ``D`` is the decryption function.
+
+In general any encrypted item has accompanying crypto-metadata that describes
+the IV and the cipher algorithm used for the encryption::
+
+ crypto_metadata = {"iv": <16 byte value>,
+ "cipher": "AES_CTR_256"}
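+
+The following sketch (illustration only, not the actual Swift crypto module)
+shows such an encryption and decryption round trip using the third-party
+``cryptography`` library, with ``crypto_metadata`` built as above::
+
+ import os
+
+ from cryptography.hazmat.backends import default_backend
+ from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
+
+ def encrypt(plaintext, k, iv):
+     # AES-256 in CTR mode; any byte range of the ciphertext can later be
+     # decrypted independently of the rest
+     encryptor = Cipher(algorithms.AES(k), modes.CTR(iv),
+                        backend=default_backend()).encryptor()
+     return encryptor.update(plaintext) + encryptor.finalize()
+
+ def decrypt(ciphertext, k, iv):
+     decryptor = Cipher(algorithms.AES(k), modes.CTR(iv),
+                        backend=default_backend()).decryptor()
+     return decryptor.update(ciphertext) + decryptor.finalize()
+
+ k = os.urandom(32)   # 256-bit key
+ iv = os.urandom(16)  # unique IV chosen for this encryption operation
+ crypto_metadata = {"iv": iv, "cipher": "AES_CTR_256"}
+ assert decrypt(encrypt(b"object data", k, iv), k, iv) == b"object data"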
+
+Key management
+--------------
+
+A keymaster middleware is responsible for providing the keys required for each
+encryption and decryption operation. The keymaster middleware should provide
+different keys for each object and container. These are made available to the
+encrypter and decrypter via a callback function that the keymaster installs in
+the WSGI request environ.
+
+The current keymaster implementation derives container and object keys from the
+``encryption_root_secret`` in a deterministic way by constructing an SHA256
+HMAC using the ``encryption_root_secret`` as a key and the container or object
+path as a message, for example::
+
+ object_key = HMAC(encryption_root_secret, "/a/c/o")
+
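+A minimal sketch of this derivation, assuming the root secret has already been
+base-64 decoded (the names here are illustrative rather than the exact Swift
+code)::
+
+ import hashlib
+ import hmac
+ import os
+
+ root_secret = os.urandom(32)  # stand-in for the decoded encryption_root_secret
+
+ def create_key(root_secret, path):
+     # HMAC-SHA256 of the path, keyed with the root secret, yields a
+     # 32 byte value suitable for use as an AES-256 key
+     return hmac.new(root_secret, path.encode('utf-8'), hashlib.sha256).digest()
+
+ container_key = create_key(root_secret, "/a/c")
+ object_key = create_key(root_secret, "/a/c/o")
+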
+Other strategies for providing object and container keys may be employed by
+future implementations of alternative keymaster middleware.
+
+The encrypter uses the object key to `wrap` other randomly generated keys that
+are used to encrypt object data. A random key is `wrapped` by encrypting it
+using the object key provided by the keymaster. This makes it safe to then
+store the wrapped key alongside object data and metadata.
+
+This process of `key wrapping` is performed to enable more efficient re-keying
+events when the object key may need to be replaced and consequently any data
+encrypted using that key must be re-encrypted. Key wrapping minimizes the
+amount of data encrypted using those keys to just other randomly chosen keys
+which can be re-wrapped efficiently without needing to re-encrypt the larger
+amounts of data that were encrypted using the random keys.
+
+For example, as described below, the object body is encrypted using a random
+key which is then wrapped using the object key. If re-keying requires the
+object key to be replaced then only the random key needs to be re-encrypted and
+not the object body, which is potentially a large amount of data.
+
+.. note::
+
+ Re-keying is not currently implemented. Key wrapping is implemented
+ in anticipation of future re-keying operations.
+
+
+Encrypter operation
+-------------------
+
+Custom user metadata
+++++++++++++++++++++
+
+The encrypter encrypts each item of custom user metadata using the object key
+provided by the keymaster and an IV that is randomly chosen for that metadata
+item. For example::
+
+ X-Object-Meta-Private1: value1
+ X-Object-Meta-Private2: value2
+
+are transformed to::
+
+ X-Object-Meta-Private1: E(value1, object_key, header_iv_1)
+ X-Object-Meta-Private2: E(value2, object_key, header_iv_2)
+
+For each custom user metadata header the encrypter stores the associated
+crypto-metadata using an ``X-Object-Transient-Sysmeta-`` header. For the same
+example::
+
+ X-Object-Transient-Sysmeta-Crypto-Meta-Private1:{"iv": header_iv_1,
+ "cipher": "AES_CTR_256"}
+ X-Object-Transient-Sysmeta-Crypto-Meta-Private2:{"iv": header_iv_2,
+ "cipher": "AES_CTR_256"}
+
+Object body
++++++++++++
+
+Encryption of an object body is performed using a randomly chosen body key
+and a randomly chosen IV::
+
+ body_ciphertext = E(body_plaintext, body_key, body_iv)
+
+The body_key is wrapped using the object key provided by the keymaster and a
+randomly chosen IV::
+
+ wrapped_body_key = E(body_key, object_key, body_key_iv)
+
+The encrypter stores the associated crypto metadata in a system metadata
+header::
+
+ X-Object-Sysmeta-Crypto-Meta:
+ {"iv": body_iv,
+ "cipher": "AES_CTR_256",
+ "body_key": {"key": wrapped_body_key,
+ "iv": body_key_iv}}
+
+Note that in this case there is an extra item of crypto metadata which stores
+the wrapped body key and its IV.
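+
+Continuing the illustrative sketches above (``encrypt`` and ``object_key`` are
+the hypothetical helper and key from the earlier examples), the body encryption
+and key wrapping steps might look like::
+
+ import os
+
+ body_plaintext = b"the object data"
+
+ # randomly chosen key and IVs for this object
+ body_key = os.urandom(32)
+ body_iv = os.urandom(16)
+ body_key_iv = os.urandom(16)
+
+ body_ciphertext = encrypt(body_plaintext, body_key, body_iv)
+
+ # wrap (encrypt) the body key with the object key so that re-keying only
+ # requires re-wrapping this small value, not re-encrypting the whole body
+ wrapped_body_key = encrypt(body_key, object_key, body_key_iv)
+
+ crypto_meta = {"iv": body_iv,
+                "cipher": "AES_CTR_256",
+                "body_key": {"key": wrapped_body_key,
+                             "iv": body_key_iv}}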
+
+Entity tag
+++++++++++
+
+While encrypting the object body the encrypter also calculates the ETag (md5
+digest) of the plaintext body. This value is encrypted using the
+keymaster-provided container key and an IV derived from the object's path, and
+is saved as an item of system metadata::
+
+ X-Object-Sysmeta-Crypto-Etag: E(md5(plaintext), container_key, F(path))
+
+The encrypter stores the associated crypto metadata in a system metadata
+header::
+
+ X-Object-Sysmeta-Crypto-Meta-Etag: {"iv": F(path),
+ "cipher": "AES_CTR_256"}
+
+The reason for using the container key for this encryption is that the
+encrypted ETag must also be included in the object update to the container
+server, and will be included in container listings. The decrypter must be able
+to decrypt the ETags in container listings using only the container key (since
+object keys may not be available when handling a container request), so the
+ETags must be encrypted using the container key.
+
+The encrypter forces the encrypted plaintext ETag to be sent with container
+updates by adding an update override header to the PUT request, which also has
+the associated crypto metadata appended to the encrypted ETag value::
+
+ X-Object-Sysmeta-Container-Update-Override-Etag:
+ E(md5(plaintext), container_key, F(path));
+ meta={"iv": F(path), "cipher": "AES_CTR_256"}
+
+The reason an IV derived from the object's path is used when encrypting the
+ETag is to allow the encrypter to perform the same transformation on ETag
+values specified in subsequent conditional GET or HEAD requests, so that they
+can be compared against the encrypted object ETag when the object server
+evaluates the conditional request. So, when handling a conditional GET or HEAD
+request, the encrypter updates ``If[-None]-Match`` headers::
+
+ If[-None]-Match: E(ETag, container_key, F(path))
+
+Since the plaintext ETag value is only known once the encrypter has completed
+processing the entire object body, the ``X-Object-Sysmeta-Crypto-Etag``,
+``X-Object-Sysmeta-Crypto-Meta-Etag`` and
+``X-Object-Sysmeta-Container-Update-Override-Etag`` headers are sent after the
+encrypted object body using the proxy server's support for request footers.
+
+
+Decrypter operation
+-------------------
+
+For each GET or HEAD request to an object, the decrypter inspects the response
+for encrypted items (revealed by crypto metadata headers), and if any are
+discovered then it will:
+
+#. Fetch container and object keys from the keymaster via its callback
+#. Decrypt the ``X-Object-Sysmeta-Crypto-Etag`` value using the container
+ key and the IV found in the ``X-Object-Sysmeta-Crypto-Meta-Etag`` header
+#. Decrypt metadata headers using the object key
+#. Decrypt the wrapped body key found in ``X-Object-Sysmeta-Crypto-Meta``
+#. Decrypt the body using the body key
+
+For each GET request to a container that includes a format param, the
+decrypter will:
+
+#. GET the container listing
+#. Fetch container key from the keymaster via its callback
+#. Decrypt the response body ETag entries using the container key
+
+
+Impact on other Swift services
+------------------------------
+
+`Container Sync` uses an internal client to GET objects that are to be sync'd.
+This internal client must be configured to use the encryption middlewares as
+described `above`__.
+
+.. __: container_sync_client_config_
+
+Encryption has no impact on the `object-auditor` service. Since the ETag
+header saved with the object at rest is the md5 sum of the encrypted object
+body, the auditor will verify that the encrypted data is valid.
+
+Encryption has no impact on the `object-expirer` service. ``X-Delete-At`` and
+``X-Delete-After`` headers are not encrypted.
+
+Encryption has no impact on the `object-replicator` and `object-reconstructor`
+services. These services are unaware of the object or EC fragment data being
+encrypted.
+
+Encryption has no impact on the `container-reconciler` service. The
+`container-reconciler` uses an internal client to move objects between
+different policy rings. The destination object has the same URL as the source
+object and the object is moved without re-encryption.
+
+
+Considerations for developers
+-----------------------------
+
+Developers should be aware that encryption middlewares rely on the path of an
+object remaining unchanged. The keymaster derives keys for containers and
+objects based on their paths. The encrypter also uses the object path to derive
+an IV for encrypting the ETag. As explained above, this choice of IV is
+made to enable conditional request ETag values to be encrypted in an
+identical fashion prior to matching with the object ETag.
+
+Developers should therefore give careful consideration to any new features that
+would relocate object data and metadata within a Swift cluster by means that do
+not cause the object data and metadata to pass through the encryption
+middlewares in the proxy pipeline and be re-encrypted.
+
+The keymaster does persist the path that was used to derive keys as an item of
+system metadata named ``X-Object-Sysmeta-Crypto-Id``. This metadata has been
+included in anticipation of future scenarios when it may be necessary to
+decrypt an object that has been relocated without re-encrypting, in which case
+the value of ``X-Object-Sysmeta-Crypto-Id`` could be used to derive the keys
+that were used for encryption. However, this alone is not sufficient to handle
+conditional requests and to decrypt container listings where objects have been
+relocated, and further work will be required to solve those issues.
diff --git a/doc/source/overview_erasure_code.rst b/doc/source/overview_erasure_code.rst
index 64ce5621f..68c3d89a5 100644
--- a/doc/source/overview_erasure_code.rst
+++ b/doc/source/overview_erasure_code.rst
@@ -96,7 +96,7 @@ advantage of many well-known C libraries such as:
PyECLib uses a C based library called liberasurecode to implement the plug in
infrastructure; liberasure code is available at:
-* liberasurecode: https://bitbucket.org/tsg-/liberasurecode
+* liberasurecode: https://github.com/openstack/liberasurecode
PyECLib itself therefore allows for not only choice but further extensibility as
well. PyECLib also comes with a handy utility to help determine the best
@@ -105,7 +105,7 @@ configurations may vary in performance per algorithm). More on this will be
covered in the configuration section. PyECLib is included as a Swift
requirement.
-For complete details see `PyECLib <https://bitbucket.org/kmgreen2/pyeclib>`_
+For complete details see `PyECLib <https://github.com/openstack/pyeclib>`_
------------------------------
Storing and Retrieving Objects
@@ -215,7 +215,7 @@ Let's take a closer look at each configuration parameter:
PyECLib back-end. This specifies the EC scheme that is to be used. For
example the option shown here selects Vandermonde Reed-Solomon encoding while
an option of ``flat_xor_hd_3`` would select Flat-XOR based HD combination
- codes. See the `PyECLib <https://bitbucket.org/kmgreen2/pyeclib>`_ page for
+ codes. See the `PyECLib <https://github.com/openstack/pyeclib>`_ page for
full details.
* ``ec_num_data_fragments``: The total number of fragments that will be
comprised of data.
@@ -270,7 +270,7 @@ Region Support
--------------
For at least the initial version of EC, it is not recommended that an EC scheme
-span beyond a single region, neither performance nor functional validation has
+span beyond a single region, neither performance nor functional validation has
been done in such a configuration.
--------------
diff --git a/etc/dispersion.conf-sample b/etc/dispersion.conf-sample
index 865e80fec..eae777a08 100644
--- a/etc/dispersion.conf-sample
+++ b/etc/dispersion.conf-sample
@@ -24,6 +24,10 @@ auth_key = testing
# user_domain_name = user_domain
#
# endpoint_type = publicURL
+#
+# NOTE: If you have only 1 region with a swift endpoint, no need to specify it
+# region_name =
+#
# keystone_api_insecure = no
#
# swift_dir = /etc/swift
diff --git a/etc/proxy-server.conf-sample b/etc/proxy-server.conf-sample
index cade60751..a871b70af 100644
--- a/etc/proxy-server.conf-sample
+++ b/etc/proxy-server.conf-sample
@@ -769,7 +769,6 @@ use = egg:swift#copy
# Note: To enable encryption, add the following 3 dependent pieces of
# crypto middleware to the proxy-server pipeline as follows:
# ... decrypter keymaster encrypter proxy-logging (end of pipeline)
-
[filter:decrypter]
use = egg:swift#decrypter
@@ -788,6 +787,5 @@ use = egg:swift#keymaster
# to the devstack proxy-config so that gate tests can pass.
# base64 encoding of "dontEverUseThisIn_PRODUCTION_xxxxxxxxxxxxxxx"
encryption_root_secret = ZG9udEV2ZXJVc2VUaGlzSW5fUFJPRFVDVElPTl94eHh4eHh4eHh4eHh4eHg=
-
[filter:encrypter]
use = egg:swift#encrypter
diff --git a/swift/common/constraints.py b/swift/common/constraints.py
index a575e5eb2..aa00f930c 100644
--- a/swift/common/constraints.py
+++ b/swift/common/constraints.py
@@ -38,7 +38,6 @@ CONTAINER_LISTING_LIMIT = 10000
ACCOUNT_LISTING_LIMIT = 10000
MAX_ACCOUNT_NAME_LENGTH = 256
MAX_CONTAINER_NAME_LENGTH = 256
-ETAG_LENGTH = 32
VALID_API_VERSIONS = ["v1", "v1.0"]
EXTRA_HEADER_COUNT = 0
diff --git a/swift/common/middleware/decrypter.py b/swift/common/middleware/decrypter.py
index d775e881a..c0cb6f099 100644
--- a/swift/common/middleware/decrypter.py
+++ b/swift/common/middleware/decrypter.py
@@ -397,9 +397,6 @@ class DecrypterContContext(BaseDecrypterContext):
def decrypt_obj_dict(self, obj_dict, key):
ciphertext = obj_dict['hash']
obj_dict['hash'] = self.decrypt_value_with_meta(ciphertext, key)
-
- # TODO - did we need to use the length to decide to decrypt?
- # if etag and (len(etag) > constraints.ETAG_LENGTH):
return obj_dict
def process_xml_resp(self, key, resp_iter):
diff --git a/swift/common/middleware/encrypter.py b/swift/common/middleware/encrypter.py
index 39dead47b..29713f498 100644
--- a/swift/common/middleware/encrypter.py
+++ b/swift/common/middleware/encrypter.py
@@ -57,8 +57,6 @@ class EncInputWrapper(object):
self.crypto = crypto
self.body_crypto_ctxt = None
self.keys = keys
- # remove any Etag from headers, it won't be valid for ciphertext and
- # we'll send the ciphertext Etag later in footer metadata
self.plaintext_md5 = None
self.ciphertext_md5 = None
self.logger = logger
@@ -72,6 +70,7 @@ class EncInputWrapper(object):
# wrap the body key with object key
self.body_crypto_meta['body_key'] = self.crypto.wrap_key(
self.keys['object'], body_key)
+ self.body_crypto_meta['key_id'] = self.keys['id']
self.body_crypto_ctxt = self.crypto.create_encryption_ctxt(
body_key, self.body_crypto_meta.get('iv'))
self.plaintext_md5 = md5()
@@ -81,6 +80,8 @@ class EncInputWrapper(object):
# the proxy controller will call back for footer metadata after
# body has been sent
inner_callback = req.environ.get('swift.callback.update_footers')
+ # remove any Etag from headers, it won't be valid for ciphertext and
+ # we'll send the ciphertext Etag later in footer metadata
client_etag = req.headers.pop('etag', None)
container_listing_etag_header = req.headers.get(
'X-Object-Sysmeta-Container-Update-Override-Etag')
@@ -142,13 +143,13 @@ class EncInputWrapper(object):
# Encrypt the container-listing etag using the container key
# and use it to override the container update value, with the
# crypto parameters appended.
-
- val = append_crypto_meta(*encrypt_header_val(
+ val, crypto_meta = encrypt_header_val(
self.crypto, container_listing_etag,
self.keys['container'],
- iv_base=os.path.join(os.sep, path_for_base)))
+ iv_base=os.path.join(os.sep, path_for_base))
+ crypto_meta['key_id'] = self.keys['id']
footers['X-Object-Sysmeta-Container-Update-Override-Etag'] = \
- val
+ append_crypto_meta(val, crypto_meta)
req.environ['swift.callback.update_footers'] = footers_callback
@@ -198,12 +199,11 @@ class EncrypterObjContext(CryptoWSGIContext):
if is_user_meta(self.server_type, name) and val:
req.headers[name], meta = encrypt_header_val(
self.crypto, val, keys[self.server_type])
+ meta['key_id'] = keys['id']
# short_name is extracted in order to use it for naming the
# corresponding x-object-transient-sysmeta-crypto- header
short_name = strip_user_meta_prefix(self.server_type, name)
req.headers[prefix + short_name] = dump_crypto_meta(meta)
- self.logger.debug("encrypted user meta %s: %s",
- name, req.headers[name])
def PUT(self, req, start_response):
self._check_headers(req)
diff --git a/swift/common/middleware/keymaster.py b/swift/common/middleware/keymaster.py
index 7ef8e53c7..eec2dceed 100644
--- a/swift/common/middleware/keymaster.py
+++ b/swift/common/middleware/keymaster.py
@@ -28,11 +28,8 @@ import hashlib
import hmac
import os
-from swift.common.middleware.crypto_utils import (
- is_crypto_meta, CRYPTO_KEY_CALLBACK)
-from swift.common.request_helpers import get_sys_meta_prefix
-from swift.common.swob import Request, HTTPException, HTTPUnprocessableEntity
-from swift.common.utils import get_logger, split_path
+from swift.common.middleware.crypto_utils import CRYPTO_KEY_CALLBACK
+from swift.common.swob import Request, HTTPException
from swift.common.wsgi import WSGIContext
@@ -46,35 +43,52 @@ class KeyMasterContext(WSGIContext):
"""
super(KeyMasterContext, self).__init__(keymaster.app)
self.keymaster = keymaster
- self.logger = keymaster.logger
self.account = account
self.container = container
self.obj = obj
- self._init_keys()
+ self._keys = None
- def _init_keys(self):
+ def fetch_crypto_keys(self):
"""
- Setup default container and object keys based on the request path.
+ Setup container and object keys based on the request path.
+
+ Keys are derived from the request path. The 'id' entry in the results dict
+ includes the part of the path used to derive keys. Other keymaster
+ implementations may use a different strategy to generate keys and may
+ include a different type of 'id', so callers should treat the 'id' as
+ opaque keymaster-specific data.
+
+ :returns: A dict containing encryption keys for 'object' and
+ 'container' and a key 'id'.
"""
- self.keys = {}
- self.account_path = os.path.join(os.sep, self.account)
- self.container_path = self.obj_path = None
- self.server_type = 'account'
+ if self._keys:
+ return self._keys
+
+ self._keys = {}
+ account_path = os.path.join(os.sep, self.account)
if self.container:
- self.server_type = 'container'
- self.container_path = os.path.join(self.account_path,
- self.container)
- self.keys['container'] = self.keymaster.create_key(
- self.container_path)
+ path = os.path.join(account_path, self.container)
+ self._keys['container'] = self.keymaster.create_key(path)
if self.obj:
- self.server_type = 'object'
- self.obj_path = os.path.join(self.container_path, self.obj)
- self.keys['object'] = self.keymaster.create_key(
- self.obj_path)
-
- def _handle_post_or_put(self, req, start_response):
+ path = os.path.join(path, self.obj)
+ self._keys['object'] = self.keymaster.create_key(path)
+
+ # For future-proofing include a keymaster version number and the
+ # path used to derive keys in the 'id' entry of the results. The
+ # encrypter will persist this as part of the crypto-meta for
+ # encrypted data and metadata. If we ever change the way keys are
+ # generated then the decrypter could pass the persisted 'id' value
+ # when it calls fetch_crypto_keys to inform the keymaster as to how
+ # that particular data or metadata had its keys generated.
+ # Currently we have no need to do that, so we are simply persisting
+ # this information for future use.
+ self._keys['id'] = {'v': '1', 'path': base64.b64encode(path)}
+
+ return self._keys
+
+ def _handle_request(self, req, start_response):
req.environ[CRYPTO_KEY_CALLBACK] = self.fetch_crypto_keys
resp = self._app_call(req.environ)
start_response(self._response_status, self._response_headers,
@@ -82,99 +96,22 @@ class KeyMasterContext(WSGIContext):
return resp
def PUT(self, req, start_response):
- if self.obj_path:
- # TODO: re-examine need for this special handling once COPY has
- # been moved to middleware.
- # For object PUT we save a key_id as obj sysmeta so that if the
- # object is copied to another location we can use the key_id
- # (rather than its new path) to calculate its key for a GET or
- # HEAD.
- id_name = "%scrypto-id" % get_sys_meta_prefix(self.server_type)
- req.headers[id_name] = \
- base64.b64encode(self.obj_path)
-
- return self._handle_post_or_put(req, start_response)
+ return self._handle_request(req, start_response)
def POST(self, req, start_response):
- return self._handle_post_or_put(req, start_response)
+ return self._handle_request(req, start_response)
def GET(self, req, start_response):
- return self._handle_get_or_head(req, start_response)
+ return self._handle_request(req, start_response)
def HEAD(self, req, start_response):
- return self._handle_get_or_head(req, start_response)
-
- def _handle_get_or_head(self, req, start_response):
- # To get if-match requests working, we now need to provide the keys
- # before we get a response from the object server. There might be
- # a better way of doing this.
- self.provide_keys_get_or_head(req, False)
- resp = self._app_call(req.environ)
- self.provide_keys_get_or_head(req, True)
- start_response(self._response_status, self._response_headers,
- self._response_exc_info)
- return resp
-
- def error_if_need_keys(self, req):
- # Determine if keys will actually be needed
- # Look for any crypto-meta headers
- if not hasattr(self, '_response_headers'):
- return
- for (h, v) in self._response_headers:
- if is_crypto_meta(h, self.server_type):
- raise HTTPUnprocessableEntity(
- "Cannot get keys for path %s" % req.path)
-
- self.logger.debug("No keys necessary for path %s", req.path)
-
- def provide_keys_get_or_head(self, req, rederive):
- if rederive and self.obj_path:
- # TODO: re-examine need for this special handling once COPY has
- # been moved to middleware.
- # For object GET or HEAD we look for a key_id that may have been
- # stored in the object sysmeta during a PUT and use that to
- # calculate the object key, in case the object has been copied to a
- # new path.
- try:
- id_name = \
- "%scrypto-id" % get_sys_meta_prefix(self.server_type)
- obj_key_path = self._response_header_value(id_name)
- if not obj_key_path:
- raise ValueError('No object key was found.')
- try:
- obj_key_path = base64.b64decode(obj_key_path)
- except TypeError:
- self.logger.warning("path %s could not be decoded",
- obj_key_path)
- raise ValueError("path %s could not be decoded" %
- obj_key_path)
- path_acc, path_cont, path_obj = \
- split_path(obj_key_path, 3, 3, True)
- cont_key_path = os.path.join(os.sep, path_acc, path_cont)
- self.keys['container'] = self.keymaster.create_key(
- cont_key_path)
- self.logger.debug("obj key id: %s", obj_key_path)
- self.logger.debug("cont key id: %s", cont_key_path)
- self.keys['object'] = self.keymaster.create_key(
- obj_key_path)
- except ValueError:
- req.environ['swift.crypto.override'] = True
- self.error_if_need_keys(req)
-
- if not req.environ.get('swift.crypto.override'):
- req.environ[CRYPTO_KEY_CALLBACK] = self.fetch_crypto_keys
- else:
- req.environ.pop(CRYPTO_KEY_CALLBACK, None)
-
- def fetch_crypto_keys(self):
- return self.keys
+ return self._handle_request(req, start_response)
class KeyMaster(object):
def __init__(self, app, conf):
self.app = app
- self.logger = get_logger(conf, log_route="keymaster")
self.root_secret = conf.get('encryption_root_secret')
try:
self.root_secret = base64.b64decode(self.root_secret)
diff --git a/swift/common/middleware/tempurl.py b/swift/common/middleware/tempurl.py
index 234820791..f2863aa9f 100644
--- a/swift/common/middleware/tempurl.py
+++ b/swift/common/middleware/tempurl.py
@@ -163,7 +163,7 @@ __all__ = ['TempURL', 'filter_factory',
from os.path import basename
-from time import time
+from time import time, strftime, gmtime
from six.moves.urllib.parse import parse_qs
from six.moves.urllib.parse import urlencode
@@ -425,6 +425,11 @@ class TempURL(object):
# newline into existing_disposition
value = disposition_value.replace('\n', '%0A')
out_headers.append(('Content-Disposition', value))
+
+ # include Expires header for better cache-control
+ out_headers.append(('Expires', strftime(
+ "%a, %d %b %Y %H:%M:%S GMT",
+ gmtime(temp_url_expires))))
headers = out_headers
return start_response(status, headers, exc_info)
diff --git a/swift/common/wsgi.py b/swift/common/wsgi.py
index d78dec011..6676d3835 100644
--- a/swift/common/wsgi.py
+++ b/swift/common/wsgi.py
@@ -1101,7 +1101,7 @@ def make_env(env, method=None, path=None, agent='Swift', query_string=None,
'swift.trans_id', 'swift.authorize_override',
'swift.authorize', 'HTTP_X_USER_ID', 'HTTP_X_PROJECT_ID',
'HTTP_REFERER', 'swift.orig_req_method', 'swift.log_info',
- 'swift.infocache', 'swift.metadata.checked'):
+ 'swift.infocache'):
if name in env:
newenv[name] = env[name]
if method:
diff --git a/swift/locale/de/LC_MESSAGES/swift.po b/swift/locale/de/LC_MESSAGES/swift.po
index ab9dc8aa8..162d3b900 100644
--- a/swift/locale/de/LC_MESSAGES/swift.po
+++ b/swift/locale/de/LC_MESSAGES/swift.po
@@ -9,14 +9,14 @@
# Andreas Jaeger <jaegerandi@gmail.com>, 2016. #zanata
msgid ""
msgstr ""
-"Project-Id-Version: swift 2.7.1.dev50\n"
+"Project-Id-Version: swift 2.7.1.dev175\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2016-04-17 21:20+0000\n"
+"POT-Creation-Date: 2016-06-03 04:47+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
-"PO-Revision-Date: 2016-03-24 03:15+0000\n"
-"Last-Translator: Monika Wolf <vcomas3@de.ibm.com>\n"
+"PO-Revision-Date: 2016-06-02 07:02+0000\n"
+"Last-Translator: Andreas Jaeger <jaegerandi@gmail.com>\n"
"Language: de\n"
"Plural-Forms: nplurals=2; plural=(n != 1);\n"
"Generated-By: Babel 2.0\n"
@@ -72,6 +72,10 @@ msgstr ""
"%(time).2fs (%(rate).2f/s, %(remaining)s verbleibend)"
#, python-format
+msgid "%(server)s (%(pid)s) appears to have stopped"
+msgstr "%(server)s (%(pid)s) scheinbar gestoppt"
+
+#, python-format
msgid "%(success)s successes, %(failure)s failures"
msgstr "%(success)s Erfolge, %(failure)s Fehlschläge"
@@ -80,6 +84,10 @@ msgid "%(type)s returning 503 for %(statuses)s"
msgstr "%(type)s gab 503 für %(statuses)s zurück"
#, python-format
+msgid "%(type)s: %(value)s"
+msgstr "%(type)s: %(value)s"
+
+#, python-format
msgid "%s already started..."
msgstr "%s bereits gestartet..."
@@ -134,6 +142,10 @@ msgid "Account"
msgstr "Konto"
#, python-format
+msgid "Account %(account)s has not been reaped since %(time)s"
+msgstr "Konto %(account)s wurde nicht aufgeräumt seit %(time)s"
+
+#, python-format
msgid "Account audit \"once\" mode completed: %.02fs"
msgstr "Kontoprüfungsmodus \"once\" abgeschlossen: %.02fs"
@@ -149,6 +161,10 @@ msgstr ""
"(%(rate).5f/s)"
#, python-format
+msgid "Audit Failed for %(path)s: %(err)s"
+msgstr "Prüfung fehlgeschlagen für %(path)s: %(err)s"
+
+#, python-format
msgid "Bad rsync return code: %(ret)d <- %(args)s"
msgstr "Falscher rsync-Rückgabecode: %(ret)d <- %(args)s"
@@ -200,6 +216,14 @@ msgid "Can not load profile data from %s."
msgstr "Die Profildaten von %s können nicht geladen werden."
#, python-format
+msgid "Cannot read %(auditor_status)s (%(err)s)"
+msgstr "%(auditor_status)s (%(err)s) kann nicht gelesen werden."
+
+#, python-format
+msgid "Cannot write %(auditor_status)s (%(err)s)"
+msgstr "Schreiben von %(auditor_status)s (%(err)s) nicht möglich."
+
+#, python-format
msgid "Client did not read from proxy within %ss"
msgstr "Client konnte nicht innerhalb von %ss vom Proxy lesen"
@@ -273,12 +297,15 @@ msgstr ""
"Änderungen"
#, python-format
-msgid "Could not bind to %s:%s after trying for %s seconds"
-msgstr "Keine Bindung an %s:%s möglich nach Versuch über %s Sekunden"
+msgid ""
+"Could not bind to %(addr)s:%(port)s after trying for %(timeout)s seconds"
+msgstr ""
+"Keine Bindung an %(addr)s:%(port)s möglich nach Versuch über %(timeout)s "
+"Sekunden"
#, python-format
-msgid "Could not load %r: %s"
-msgstr "Konnte %r nicht laden: %s"
+msgid "Could not load %(conf)r: %(error)s"
+msgstr "%(conf)r konnte nicht geladen werden: %(error)s"
#, python-format
msgid "Data download error: %s"
@@ -289,9 +316,10 @@ msgid "Devices pass completed: %.02fs"
msgstr "Gerätedurchgang abgeschlossen: %.02fs"
#, python-format
-msgid "Directory %r does not map to a valid policy (%s)"
+msgid "Directory %(directory)r does not map to a valid policy (%(error)s)"
msgstr ""
-"Das Verzeichnis %r kann keiner gültigen Richtlinie (%s) zugeordnet werden."
+"Das Verzeichnis %(directory)r kann keiner gültigen Richtlinie (%(error)s) "
+"zugeordnet werden."
#, python-format
msgid "ERROR %(db_file)s: %(validate_sync_to_err)s"
@@ -325,10 +353,10 @@ msgstr ""
#, python-format
msgid ""
"ERROR Account update failed: different numbers of hosts and devices in "
-"request: \"%s\" vs \"%s\""
+"request: \"%(hosts)s\" vs \"%(devices)s\""
msgstr ""
"FEHLER Kontoaktualisierung fehlgeschlagen: Unterschiedliche Anzahl von Hosts "
-"und Einheiten in der Anforderung: \"%s\" contra \"%s\""
+"und Einheiten in der Anforderung: \"%(hosts)s\" contra \"%(devices)s\""
#, python-format
msgid "ERROR Bad response %(status)s from %(host)s"
@@ -348,14 +376,6 @@ msgstr ""
"von %(ip)s:%(port)s/%(dev)s"
#, python-format
-msgid ""
-"ERROR Container update failed: different numbers of hosts and devices in "
-"request: \"%s\" vs \"%s\""
-msgstr ""
-"FEHLER Containeraktualisierung fehlgeschlagen: Unterschiedliche Anzahl von "
-"Hosts und Einheiten in der Anforderung: \"%s\" contra \"%s\""
-
-#, python-format
msgid "ERROR Could not get account info %s"
msgstr "FEHLER Kontoinfo %s konnte nicht abgerufen werden"
@@ -514,8 +534,8 @@ msgid "Error hashing suffix"
msgstr "Fehler beim Hashing des Suffix"
#, python-format
-msgid "Error in %r with mtime_check_interval: %s"
-msgstr "Fehler in %r mit mtime_check_interval: %s"
+msgid "Error in %(conf)r with mtime_check_interval: %(error)s"
+msgstr "Fehler in %(conf)r mit mtime_check_interval: %(error)s"
#, python-format
msgid "Error limiting server %s"
@@ -656,6 +676,10 @@ msgstr ""
msgid "Killing long-running rsync: %s"
msgstr "Lange laufendes rsync wird gekillt: %s"
+#, python-format
+msgid "Loading JSON from %(auditor_status)s failed (%(err)s)"
+msgstr "Laden von JSON aus %(auditor_status)s fehlgeschlagen: (%(err)s)"
+
msgid "Lockup detected.. killing live coros."
msgstr "Suche erkannt. Live-Coros werden gelöscht."
@@ -668,8 +692,8 @@ msgid "No %s running"
msgstr "Kein %s läuft"
#, python-format
-msgid "No cluster endpoint for %r %r"
-msgstr "Kein Cluster-Endpunkt für %r %r"
+msgid "No cluster endpoint for %(realm)r %(cluster)r"
+msgstr "Kein Cluster-Endpunkt für %(realm)r %(cluster)r"
#, python-format
msgid "No permission to signal PID %d"
@@ -684,6 +708,10 @@ msgid "No realm key for %r"
msgstr "Kein Bereichsschlüssel für %r"
#, python-format
+msgid "No space left on device for %(file)s (%(err)s)"
+msgstr "Kein freier Speicherplatz im Gerät für %(file)s (%(err)s) vorhanden."
+
+#, python-format
msgid "Node error limited %(ip)s:%(port)s (%(device)s)"
msgstr "Knotenfehler begrenzt %(ip)s:%(port)s (%(device)s)"
@@ -800,22 +828,14 @@ msgid "Partition times: max %(max).4fs, min %(min).4fs, med %(med).4fs"
msgstr ""
"Partitionszeiten: max. %(max).4fs, min. %(min).4fs, durchschnittl. %(med).4fs"
-#, python-format
-msgid "Pass beginning; %s possible containers; %s possible objects"
-msgstr "Durchlauf wird gestartet; %s mögliche Container; %s mögliche Objekte"
-
-#, python-format
-msgid "Pass completed in %ds; %d objects expired"
-msgstr "Durchgang abgeschlossen in %ds; %d Objekte abgelaufen"
-
-#, python-format
-msgid "Pass so far %ds; %d objects expired"
-msgstr "Bisherige Durchgänge %ds; %d Objekte abgelaufen"
-
msgid "Path required in X-Container-Sync-To"
msgstr "Pfad in X-Container-Sync-To ist erforderlich"
#, python-format
+msgid "Problem cleaning up %(datadir)s (%(err)s)"
+msgstr "Problem bei der Bereinigung von %(datadir)s (%(err)s)"
+
+#, python-format
msgid "Problem cleaning up %s"
msgstr "Problem bei der Bereinigung von %s"
@@ -904,6 +924,10 @@ msgid "Running object replicator in script mode."
msgstr "Objektreplikator läuft im Skriptmodus."
#, python-format
+msgid "Signal %(server)s pid: %(pid)s signal: %(signal)s"
+msgstr "Signal %(server)s PID: %(pid)s Signal: %(signal)s"
+
+#, python-format
msgid ""
"Since %(time)s: %(sync)s synced [%(delete)s deletes, %(put)s puts], %(skip)s "
"skipped, %(fail)s failed"
@@ -1007,9 +1031,15 @@ msgid "UNCAUGHT EXCEPTION"
msgstr "NICHT ABGEFANGENE AUSNAHME"
#, python-format
-msgid "Unable to load internal client from config: %r (%s)"
+msgid "Unable to find %(section)s config section in %(conf)s"
+msgstr ""
+"%(section)s-Konfigurationsabschnitt in %(conf)s kann nicht gefunden werden"
+
+#, python-format
+msgid "Unable to load internal client from config: %(conf)r (%(error)s)"
msgstr ""
-"Interner Client konnte nicht aus der Konfiguration geladen werden: %r (%s)"
+"Interner Client konnte nicht aus der Konfiguration geladen werden: %(conf)r "
+"(%(error)s)"
#, python-format
msgid "Unable to locate %s in libc. Leaving as a no-op."
@@ -1020,6 +1050,10 @@ msgstr ""
msgid "Unable to locate config for %s"
msgstr "Konfiguration für %s wurde nicht gefunden."
+#, python-format
+msgid "Unable to locate config number %(number)s for %(server)s"
+msgstr "Konfigurationsnummer %(number)s für %(server)s wurde nicht gefunden."
+
msgid ""
"Unable to locate fallocate, posix_fallocate in libc. Leaving as a no-op."
msgstr ""
@@ -1027,6 +1061,10 @@ msgstr ""
"Nullbefehl verlassen."
#, python-format
+msgid "Unable to perform fsync() on directory %(dir)s: %(err)s"
+msgstr "fsync() kann für Verzeichnis %(dir)s nicht ausgeführt werden: %(err)s"
+
+#, python-format
msgid "Unable to read config from %s"
msgstr "Konfiguration aus %s kann nicht gelesen werden"
@@ -1076,6 +1114,17 @@ msgstr ""
"WARNUNG: Grenzwert für Speicher kann nicht geändert werden. Wird nicht als "
"Root ausgeführt?"
+#, python-format
+msgid "Waited %(kill_wait)s seconds for %(server)s to die; giving up"
+msgstr ""
+"Hat %(kill_wait)s Sekunden für %(server)s zum Erlöschen gewartet; Gibt auf"
+
+#, python-format
+msgid "Waited %(kill_wait)s seconds for %(server)s to die; killing"
+msgstr ""
+"Hat %(kill_wait)s Sekunden für %(server)s zum Erlöschen gewartet. Wird "
+"abgebrochen."
+
msgid "Warning: Cannot ratelimit without a memcached client"
msgstr ""
"Warnung: Geschwindigkeitsbegrenzung kann nicht ohne memcached-Client "
diff --git a/swift/locale/es/LC_MESSAGES/swift.po b/swift/locale/es/LC_MESSAGES/swift.po
index 9a137fbb2..34aca4ae0 100644
--- a/swift/locale/es/LC_MESSAGES/swift.po
+++ b/swift/locale/es/LC_MESSAGES/swift.po
@@ -6,9 +6,9 @@
# Andreas Jaeger <jaegerandi@gmail.com>, 2016. #zanata
msgid ""
msgstr ""
-"Project-Id-Version: swift 2.7.1.dev50\n"
+"Project-Id-Version: swift 2.7.1.dev169\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2016-04-17 21:20+0000\n"
+"POT-Creation-Date: 2016-06-02 04:58+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
@@ -268,14 +268,6 @@ msgstr ""
"cambios"
#, python-format
-msgid "Could not bind to %s:%s after trying for %s seconds"
-msgstr "No se puede enlazar a %s:%s después de intentar por %s segundos"
-
-#, python-format
-msgid "Could not load %r: %s"
-msgstr "No se ha podido cargar %r: %s"
-
-#, python-format
msgid "Data download error: %s"
msgstr "Error de descarga de datos: %s"
@@ -284,10 +276,6 @@ msgid "Devices pass completed: %.02fs"
msgstr "Paso de dispositivos finalizado: %.02fs"
#, python-format
-msgid "Directory %r does not map to a valid policy (%s)"
-msgstr "El directory %r no está correlacionado con una política válida (%s)"
-
-#, python-format
msgid "ERROR %(db_file)s: %(validate_sync_to_err)s"
msgstr "ERROR %(db_file)s: %(validate_sync_to_err)s"
@@ -318,14 +306,6 @@ msgstr ""
"%(device)s (se volverá a intentar más tarde): Respuesta %(status)s %(reason)s"
#, python-format
-msgid ""
-"ERROR Account update failed: different numbers of hosts and devices in "
-"request: \"%s\" vs \"%s\""
-msgstr ""
-"ERROR La actualización de la cuenta ha fallado: hay números distintos de "
-"hosts y dispositivos en la solicitud: \"%s\" frente a \"%s\""
-
-#, python-format
msgid "ERROR Bad response %(status)s from %(host)s"
msgstr "ERROR Respuesta errónea %(status)s desde %(host)s"
@@ -343,14 +323,6 @@ msgstr ""
"%(port)s/%(dev)s"
#, python-format
-msgid ""
-"ERROR Container update failed: different numbers of hosts and devices in "
-"request: \"%s\" vs \"%s\""
-msgstr ""
-"ERROR La actualización del contenedor ha fallado: hay números distintos de "
-"hosts y dispositivos en la solicitud: \"%s\" frente a \"%s\""
-
-#, python-format
msgid "ERROR Could not get account info %s"
msgstr "ERROR No se ha podido obtener la información de cuenta %s"
@@ -509,10 +481,6 @@ msgid "Error hashing suffix"
msgstr "Error en el hash del sufijo"
#, python-format
-msgid "Error in %r with mtime_check_interval: %s"
-msgstr "Error en %r con mtime_check_interval: %s"
-
-#, python-format
msgid "Error limiting server %s"
msgstr "Error al limitar el servidor %s"
@@ -663,10 +631,6 @@ msgid "No %s running"
msgstr "Ningún %s en ejecución"
#, python-format
-msgid "No cluster endpoint for %r %r"
-msgstr "No hay punto final de clúster para %r %r"
-
-#, python-format
msgid "No permission to signal PID %d"
msgstr "No hay permiso para señalar el PID %d"
@@ -796,18 +760,6 @@ msgid "Partition times: max %(max).4fs, min %(min).4fs, med %(med).4fs"
msgstr ""
"Tiempos de partición: máximo %(max).4fs, mínimo %(min).4fs, medio %(med).4fs"
-#, python-format
-msgid "Pass beginning; %s possible containers; %s possible objects"
-msgstr "Inicio del paso; %s posibles contenedores; %s posibles objetos"
-
-#, python-format
-msgid "Pass completed in %ds; %d objects expired"
-msgstr "Paso completado en %ds; %d objetos caducados"
-
-#, python-format
-msgid "Pass so far %ds; %d objects expired"
-msgstr "Paso hasta ahora %ds; %d objetos caducados"
-
msgid "Path required in X-Container-Sync-To"
msgstr "Vía de acceso necesaria en X-Container-Sync-To"
@@ -1004,11 +956,6 @@ msgid "UNCAUGHT EXCEPTION"
msgstr "UNCAUGHT EXCEPTION"
#, python-format
-msgid "Unable to load internal client from config: %r (%s)"
-msgstr ""
-"No se puede cargar el cliente interno a partir de la configuración: %r (%s)"
-
-#, python-format
msgid "Unable to locate %s in libc. Leaving as a no-op."
msgstr "No se ha podido localizar %s en libc. Se dejará como no operativo."
diff --git a/swift/locale/fr/LC_MESSAGES/swift.po b/swift/locale/fr/LC_MESSAGES/swift.po
index 138c49e57..8c93eff9e 100644
--- a/swift/locale/fr/LC_MESSAGES/swift.po
+++ b/swift/locale/fr/LC_MESSAGES/swift.po
@@ -7,9 +7,9 @@
# Andreas Jaeger <jaegerandi@gmail.com>, 2016. #zanata
msgid ""
msgstr ""
-"Project-Id-Version: swift 2.7.1.dev50\n"
+"Project-Id-Version: swift 2.7.1.dev169\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2016-04-17 21:20+0000\n"
+"POT-Creation-Date: 2016-06-02 04:58+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
@@ -271,14 +271,6 @@ msgstr ""
"inchangé(s)"
#, python-format
-msgid "Could not bind to %s:%s after trying for %s seconds"
-msgstr "Liaison impossible à %s:%s après une tentative de %s secondes"
-
-#, python-format
-msgid "Could not load %r: %s"
-msgstr "Impossible de charger %r: %s"
-
-#, python-format
msgid "Data download error: %s"
msgstr "Erreur de téléchargement des données: %s"
@@ -287,10 +279,6 @@ msgid "Devices pass completed: %.02fs"
msgstr "Session d'audit d'unité terminée : %.02fs"
#, python-format
-msgid "Directory %r does not map to a valid policy (%s)"
-msgstr "Le répertoire %r n'est pas mappé à une stratégie valide (%s)"
-
-#, python-format
msgid "ERROR %(db_file)s: %(validate_sync_to_err)s"
msgstr "ERREUR %(db_file)s : %(validate_sync_to_err)s"
@@ -323,14 +311,6 @@ msgstr ""
"%(reason)s"
#, python-format
-msgid ""
-"ERROR Account update failed: different numbers of hosts and devices in "
-"request: \"%s\" vs \"%s\""
-msgstr ""
-"ERREUR Echec de la mise à jour du compte. Le nombre d'hôtes et le nombre "
-"d'unités diffèrent dans la demande : \"%s\" / \"%s\""
-
-#, python-format
msgid "ERROR Bad response %(status)s from %(host)s"
msgstr "ERREUR Réponse incorrecte %(status)s de %(host)s"
@@ -348,14 +328,6 @@ msgstr ""
"%(dev)s"
#, python-format
-msgid ""
-"ERROR Container update failed: different numbers of hosts and devices in "
-"request: \"%s\" vs \"%s\""
-msgstr ""
-"ERREUR Echec de la mise à jour du conteneur. Le nombre d'hôtes et le nombre "
-"d'unités diffèrent dans la demande : \"%s\" / \"%s\""
-
-#, python-format
msgid "ERROR Could not get account info %s"
msgstr "ERREUR Impossible d'obtenir les infos de compte %s"
@@ -516,10 +488,6 @@ msgid "Error hashing suffix"
msgstr "Erreur suffixe hashing"
#, python-format
-msgid "Error in %r with mtime_check_interval: %s"
-msgstr "Erreur dans %r liée à mtime_check_interval : %s"
-
-#, python-format
msgid "Error limiting server %s"
msgstr "Erreur limitation du serveur %s"
@@ -672,10 +640,6 @@ msgid "No %s running"
msgstr "Non démarré %s"
#, python-format
-msgid "No cluster endpoint for %r %r"
-msgstr "Aucun noeud final de cluster pour %r %r"
-
-#, python-format
msgid "No permission to signal PID %d"
msgstr "Aucun droit pour signaler le PID %d"
@@ -812,18 +776,6 @@ msgstr ""
"Temps de partition : maximum %(max).4fs, minimum %(min).4fs, moyenne "
"%(med).4fs"
-#, python-format
-msgid "Pass beginning; %s possible containers; %s possible objects"
-msgstr "Début de session. %s conteneur(s) possible(s). %s objet(s) possible(s)"
-
-#, python-format
-msgid "Pass completed in %ds; %d objects expired"
-msgstr "Session terminée dans %ds. %d objet(s) arrivé(s) à expiration"
-
-#, python-format
-msgid "Pass so far %ds; %d objects expired"
-msgstr "Session jusqu'à %ds. %d objet(s) arrivé(s) à expiration"
-
msgid "Path required in X-Container-Sync-To"
msgstr "Chemin requis dans X-Container-Sync-To"
@@ -1021,11 +973,6 @@ msgid "UNCAUGHT EXCEPTION"
msgstr "EXCEPTION NON INTERCEPTEE"
#, python-format
-msgid "Unable to load internal client from config: %r (%s)"
-msgstr ""
-"Impossible de charger le client interne depuis la configuration : %r (%s)"
-
-#, python-format
msgid "Unable to locate %s in libc. Leaving as a no-op."
msgstr ""
"Impossible de localiser %s dans libc. Laissé comme action nulle (no-op)."
diff --git a/swift/locale/it/LC_MESSAGES/swift.po b/swift/locale/it/LC_MESSAGES/swift.po
index abba03b38..e69a5a1b8 100644
--- a/swift/locale/it/LC_MESSAGES/swift.po
+++ b/swift/locale/it/LC_MESSAGES/swift.po
@@ -6,9 +6,9 @@
# Andreas Jaeger <jaegerandi@gmail.com>, 2016. #zanata
msgid ""
msgstr ""
-"Project-Id-Version: swift 2.7.1.dev50\n"
+"Project-Id-Version: swift 2.7.1.dev169\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2016-04-17 21:20+0000\n"
+"POT-Creation-Date: 2016-06-02 04:58+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
@@ -273,15 +273,6 @@ msgstr ""
"senza modifiche"
#, python-format
-msgid "Could not bind to %s:%s after trying for %s seconds"
-msgstr ""
-"Impossibile effettuare il bind a %s:%s dopo aver provato per %s secondi"
-
-#, python-format
-msgid "Could not load %r: %s"
-msgstr "Impossibile caricare %r: %s"
-
-#, python-format
msgid "Data download error: %s"
msgstr "Errore di download dei dati: %s"
@@ -290,10 +281,6 @@ msgid "Devices pass completed: %.02fs"
msgstr "Trasmissione dei dispositivi completata: %.02fs"
#, python-format
-msgid "Directory %r does not map to a valid policy (%s)"
-msgstr "La directory %r non è associata ad una politica valida (%s)"
-
-#, python-format
msgid "ERROR %(db_file)s: %(validate_sync_to_err)s"
msgstr "ERRORE %(db_file)s: %(validate_sync_to_err)s"
@@ -324,14 +311,6 @@ msgstr ""
"%(status)s %(reason)s"
#, python-format
-msgid ""
-"ERROR Account update failed: different numbers of hosts and devices in "
-"request: \"%s\" vs \"%s\""
-msgstr ""
-"ERRORE Aggiornamento dell'account non riuscito: numero differente di host e "
-"dispositivi nella richiesta: \"%s\" vs \"%s\""
-
-#, python-format
msgid "ERROR Bad response %(status)s from %(host)s"
msgstr "ERRORE Risposta errata %(status)s da %(host)s"
@@ -349,14 +328,6 @@ msgstr ""
"%(port)s/%(dev)s"
#, python-format
-msgid ""
-"ERROR Container update failed: different numbers of hosts and devices in "
-"request: \"%s\" vs \"%s\""
-msgstr ""
-"ERRORE Aggiornamento del contenitore non riuscito: numero differente di host "
-"e dispositivi nella richiesta: \"%s\" vs \"%s\""
-
-#, python-format
msgid "ERROR Could not get account info %s"
msgstr "ERRORE Impossibile ottenere le informazioni sull'account %s"
@@ -513,10 +484,6 @@ msgid "Error hashing suffix"
msgstr "Errore durante l'hash del suffisso"
#, python-format
-msgid "Error in %r with mtime_check_interval: %s"
-msgstr "Errore in %r con mtime_check_interval: %s"
-
-#, python-format
msgid "Error limiting server %s"
msgstr "Errore durante la limitazione del server %s"
@@ -671,10 +638,6 @@ msgid "No %s running"
msgstr "Nessun %s in esecuzione"
#, python-format
-msgid "No cluster endpoint for %r %r"
-msgstr "Nessun endpoint del cluster per %r %r"
-
-#, python-format
msgid "No permission to signal PID %d"
msgstr "Nessuna autorizzazione per la segnalazione del PID %d"
@@ -799,19 +762,6 @@ msgstr "Parametri, query e frammenti non consentiti in X-Container-Sync-To"
msgid "Partition times: max %(max).4fs, min %(min).4fs, med %(med).4fs"
msgstr "Tempi partizione: max %(max).4fs, min %(min).4fs, med %(med).4fs"
-#, python-format
-msgid "Pass beginning; %s possible containers; %s possible objects"
-msgstr ""
-"Avvio della trasmissione; %s contenitori possibili; %s oggetti possibili"
-
-#, python-format
-msgid "Pass completed in %ds; %d objects expired"
-msgstr "Trasmissione completata in %ds; %d oggetti scaduti"
-
-#, python-format
-msgid "Pass so far %ds; %d objects expired"
-msgstr "Trasmissione eseguita fino ad ora %ds; %d oggetti scaduti"
-
msgid "Path required in X-Container-Sync-To"
msgstr "Percorso richiesto in X-Container-Sync-To"
@@ -1010,10 +960,6 @@ msgid "UNCAUGHT EXCEPTION"
msgstr "ECCEZIONE NON RILEVATA"
#, python-format
-msgid "Unable to load internal client from config: %r (%s)"
-msgstr "Impossibile caricare il client interno dalla configurazione: %r (%s)"
-
-#, python-format
msgid "Unable to locate %s in libc. Leaving as a no-op."
msgstr "Impossibile individuare %s in libc. Lasciato come no-op."
diff --git a/swift/locale/ja/LC_MESSAGES/swift.po b/swift/locale/ja/LC_MESSAGES/swift.po
index 88815803a..bfcd353e3 100644
--- a/swift/locale/ja/LC_MESSAGES/swift.po
+++ b/swift/locale/ja/LC_MESSAGES/swift.po
@@ -7,9 +7,9 @@
# Andreas Jaeger <jaegerandi@gmail.com>, 2016. #zanata
msgid ""
msgstr ""
-"Project-Id-Version: swift 2.7.1.dev50\n"
+"Project-Id-Version: swift 2.7.1.dev169\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2016-04-17 21:20+0000\n"
+"POT-Creation-Date: 2016-06-02 04:58+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
@@ -263,14 +263,6 @@ msgstr ""
"%(success)sã€å¤±æ•— %(fail)sã€æœªå¤‰æ›´ %(no_change)s"
#, python-format
-msgid "Could not bind to %s:%s after trying for %s seconds"
-msgstr "%s 秒間ã®è©¦è¡Œå¾Œã« %s:%s ã«ãƒã‚¤ãƒ³ãƒ‰ã§ãã¾ã›ã‚“ã§ã—ãŸ"
-
-#, python-format
-msgid "Could not load %r: %s"
-msgstr "%r をロードã§ãã¾ã›ã‚“ã§ã—ãŸ: %s"
-
-#, python-format
msgid "Data download error: %s"
msgstr "データダウンロードエラー: %s"
@@ -279,10 +271,6 @@ msgid "Devices pass completed: %.02fs"
msgstr "デãƒã‚¤ã‚¹ã®å‡¦ç†ãŒå®Œäº†ã—ã¾ã—ãŸ: %.02fs"
#, python-format
-msgid "Directory %r does not map to a valid policy (%s)"
-msgstr "ディレクトリー %r ã¯æœ‰åŠ¹ãªãƒãƒªã‚·ãƒ¼ã«ãƒžãƒƒãƒ—ã—ã¦ã„ã¾ã›ã‚“ (%s) "
-
-#, python-format
msgid "ERROR %(db_file)s: %(validate_sync_to_err)s"
msgstr "エラー %(db_file)s: %(validate_sync_to_err)s"
@@ -311,14 +299,6 @@ msgstr ""
"ã•ã‚Œã¾ã™): 応答 %(status)s %(reason)s"
#, python-format
-msgid ""
-"ERROR Account update failed: different numbers of hosts and devices in "
-"request: \"%s\" vs \"%s\""
-msgstr ""
-"エラー: アカウント更新ã«å¤±æ•—ã—ã¾ã—ãŸã€‚è¦æ±‚内ã®ãƒ›ã‚¹ãƒˆæ•°ãŠã‚ˆã³ãƒ‡ãƒã‚¤ã‚¹æ•°ãŒç•°ãª"
-"ã‚Šã¾ã™: 「%sã€vs「%sã€"
-
-#, python-format
msgid "ERROR Bad response %(status)s from %(host)s"
msgstr "エラー: ホスト %(host)s ã‹ã‚‰ã®å¿œç­” %(status)s ãŒæ­£ã—ãã‚ã‚Šã¾ã›ã‚“"
@@ -335,14 +315,6 @@ msgstr ""
"%(port)s/%(dev)s ã‹ã‚‰ã® %(status)d 応答"
#, python-format
-msgid ""
-"ERROR Container update failed: different numbers of hosts and devices in "
-"request: \"%s\" vs \"%s\""
-msgstr ""
-"エラー: コンテナー更新ã«å¤±æ•—ã—ã¾ã—ãŸã€‚è¦æ±‚内ã®ãƒ›ã‚¹ãƒˆæ•°ãŠã‚ˆã³ãƒ‡ãƒã‚¤ã‚¹æ•°ãŒç•°ãª"
-"ã‚Šã¾ã™: 「%sã€vs「%sã€"
-
-#, python-format
msgid "ERROR Could not get account info %s"
msgstr "ERROR アカウント情報 %s ãŒå–å¾—ã§ãã¾ã›ã‚“ã§ã—ãŸ"
@@ -497,10 +469,6 @@ msgid "Error hashing suffix"
msgstr "サフィックスã®ãƒãƒƒã‚·ãƒ¥ã‚¨ãƒ©ãƒ¼"
#, python-format
-msgid "Error in %r with mtime_check_interval: %s"
-msgstr "mtime_check_interval 㧠%r ã«ã‚¨ãƒ©ãƒ¼ãŒã‚ã‚Šã¾ã™: %s"
-
-#, python-format
msgid "Error limiting server %s"
msgstr "サーãƒãƒ¼ %s ã®åˆ¶é™ã‚¨ãƒ©ãƒ¼"
@@ -652,10 +620,6 @@ msgid "No %s running"
msgstr "%s ãŒå®Ÿè¡Œã•ã‚Œã¦ã„ã¾ã›ã‚“"
#, python-format
-msgid "No cluster endpoint for %r %r"
-msgstr "%r %r ã®ã‚¨ãƒ³ãƒ‰ãƒã‚¤ãƒ³ãƒˆã‚¯ãƒ©ã‚¹ã‚¿ãƒ¼ãŒã‚ã‚Šã¾ã›ã‚“"
-
-#, python-format
msgid "No permission to signal PID %d"
msgstr "PID %d ã«ã‚·ã‚°ãƒŠãƒ«é€šçŸ¥ã™ã‚‹è¨±å¯ãŒã‚ã‚Šã¾ã›ã‚“"
@@ -783,19 +747,6 @@ msgstr ""
msgid "Partition times: max %(max).4fs, min %(min).4fs, med %(med).4fs"
msgstr "パーティション時間: 最大 %(max).4fsã€æœ€å° %(min).4fsã€ä¸­é–“ %(med).4fs"
-#, python-format
-msgid "Pass beginning; %s possible containers; %s possible objects"
-msgstr ""
-"パスã®é–‹å§‹ä¸­ã€‚%s コンテナーãŠã‚ˆã³ %s オブジェクトãŒå­˜åœ¨ã™ã‚‹å¯èƒ½æ€§ãŒã‚ã‚Šã¾ã™"
-
-#, python-format
-msgid "Pass completed in %ds; %d objects expired"
-msgstr "%d ã§ãƒ‘スãŒå®Œäº†ã—ã¾ã—ãŸã€‚%d オブジェクトã®æœ‰åŠ¹æœŸé™ãŒåˆ‡ã‚Œã¾ã—ãŸ"
-
-#, python-format
-msgid "Pass so far %ds; %d objects expired"
-msgstr "ç¾åœ¨ã¾ã§ã®ãƒ‘ス %d。%d オブジェクトã®æœ‰åŠ¹æœŸé™ãŒåˆ‡ã‚Œã¾ã—ãŸ"
-
msgid "Path required in X-Container-Sync-To"
msgstr "X-Container-Sync-To ã«ãƒ‘スãŒå¿…è¦ã§ã™"
@@ -985,10 +936,6 @@ msgid "UNCAUGHT EXCEPTION"
msgstr "キャッãƒã•ã‚Œã¦ã„ãªã„例外"
#, python-format
-msgid "Unable to load internal client from config: %r (%s)"
-msgstr "設定ã‹ã‚‰å†…部クライアントをロードã§ãã¾ã›ã‚“: %r (%s)"
-
-#, python-format
msgid "Unable to locate %s in libc. Leaving as a no-op."
msgstr "%s ㌠libc ã«è¦‹ã¤ã‹ã‚Šã¾ã›ã‚“。no-op ã¨ã—ã¦çµ‚了ã—ã¾ã™ã€‚"
diff --git a/swift/locale/ko_KR/LC_MESSAGES/swift.po b/swift/locale/ko_KR/LC_MESSAGES/swift.po
index 84889e2e1..760bde07e 100644
--- a/swift/locale/ko_KR/LC_MESSAGES/swift.po
+++ b/swift/locale/ko_KR/LC_MESSAGES/swift.po
@@ -8,9 +8,9 @@
# Andreas Jaeger <jaegerandi@gmail.com>, 2016. #zanata
msgid ""
msgstr ""
-"Project-Id-Version: swift 2.7.1.dev50\n"
+"Project-Id-Version: swift 2.7.1.dev169\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2016-04-17 21:20+0000\n"
+"POT-Creation-Date: 2016-06-02 04:58+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
@@ -264,14 +264,6 @@ msgstr ""
"%(fail)sê°œ 실패, %(no_change)sê°œ 변경 ì—†ìŒ"
#, python-format
-msgid "Could not bind to %s:%s after trying for %s seconds"
-msgstr "%sì´ˆ ë™ì•ˆ ì‹œë„í•œ 후 %s:%sì— ë°”ì¸ë“œí•  수 ì—†ìŒ"
-
-#, python-format
-msgid "Could not load %r: %s"
-msgstr "%rì„(를) 로드할 수 ì—†ìŒ: %s"
-
-#, python-format
msgid "Data download error: %s"
msgstr "ë°ì´í„° 다운로드 오류: %s"
@@ -280,10 +272,6 @@ msgid "Devices pass completed: %.02fs"
msgstr "장치 패스 완료 : %.02fs"
#, python-format
-msgid "Directory %r does not map to a valid policy (%s)"
-msgstr "%r 디렉토리가 올바른 ì •ì±…(%s)ì— ë§µí•‘ë˜ì§€ ì•ŠìŒ"
-
-#, python-format
msgid "ERROR %(db_file)s: %(validate_sync_to_err)s"
msgstr "오류 %(db_file)s: %(validate_sync_to_err)s"
@@ -312,14 +300,6 @@ msgstr ""
"ì‘답 %(status)s %(reason)s"
#, python-format
-msgid ""
-"ERROR Account update failed: different numbers of hosts and devices in "
-"request: \"%s\" vs \"%s\""
-msgstr ""
-"오류. 계정 ì—…ë°ì´íŠ¸ 실패: ë‹¤ìŒ ìš”ì²­ì—ì„œ 호스트 ë° ë””ë°”ì´ìŠ¤ 수가 서로 다름: "
-"\"%s\" 대 \"%s\""
-
-#, python-format
msgid "ERROR Bad response %(status)s from %(host)s"
msgstr "오류. %(host)sì˜ ìž˜ëª»ëœ ì‘답 %(status)s"
@@ -336,14 +316,6 @@ msgstr ""
"답. 출처: %(ip)s:%(port)s/%(dev)s"
#, python-format
-msgid ""
-"ERROR Container update failed: different numbers of hosts and devices in "
-"request: \"%s\" vs \"%s\""
-msgstr ""
-"오류. 컨테ì´ë„ˆ ì—…ë°ì´íŠ¸ 실패: ë‹¤ìŒ ìš”ì²­ì—ì„œ 호스트 ë° ë””ë°”ì´ìŠ¤ 수가 서로 다"
-"름: \"%s\" 대 \"%s\""
-
-#, python-format
msgid "ERROR Could not get account info %s"
msgstr "오류는 %sì˜ ê³„ì • 정보를 ì–»ì„ ìˆ˜ 없습니다"
@@ -494,10 +466,6 @@ msgid "Error hashing suffix"
msgstr "접미부를 해싱하는 중 오류 ë°œìƒ"
#, python-format
-msgid "Error in %r with mtime_check_interval: %s"
-msgstr "%rì—ì„œ mtime_check_interval 오류 ë°œìƒ: %s"
-
-#, python-format
msgid "Error limiting server %s"
msgstr "서버 %s 제한 오류"
@@ -647,10 +615,6 @@ msgid "No %s running"
msgstr "%sì´(ê°€) 실행ë˜ì§€ ì•ŠìŒ"
#, python-format
-msgid "No cluster endpoint for %r %r"
-msgstr "%r %rì— ëŒ€í•œ í´ëŸ¬ìŠ¤í„° 엔드í¬ì¸íŠ¸ê°€ ì—†ìŒ"
-
-#, python-format
msgid "No permission to signal PID %d"
msgstr "PID %dì„(를) 표시할 ê¶Œí•œì´ ì—†ìŒ"
@@ -775,18 +739,6 @@ msgstr "X-Container-Sync-To에 매개변수, 조회, 단편이 허용되지 않
msgid "Partition times: max %(max).4fs, min %(min).4fs, med %(med).4fs"
msgstr "파티션 시간: 최대 %(max).4f초, 최소 %(min).4f초, 중간 %(med).4f초"
-#, python-format
-msgid "Pass beginning; %s possible containers; %s possible objects"
-msgstr "전달 시작, %sê°œì˜ ì»¨í…Œì´ë„ˆ 사용 가능, %sê°œì˜ ì˜¤ë¸Œì íŠ¸ 사용 가능"
-
-#, python-format
-msgid "Pass completed in %ds; %d objects expired"
-msgstr "%dì´ˆ ì•ˆì— ì „ë‹¬ì´ ì™„ë£Œë¨. %dê°œì˜ ì˜¤ë¸Œì íŠ¸ê°€ 만료ë¨"
-
-#, python-format
-msgid "Pass so far %ds; %d objects expired"
-msgstr "현재 %dê°œ 전달, %dê°œì˜ ì˜¤ë¸Œì íŠ¸ê°€ 만료ë¨"
-
msgid "Path required in X-Container-Sync-To"
msgstr "X-Container-Sync-Toì— ê²½ë¡œê°€ 필요함"
@@ -971,10 +923,6 @@ msgid "UNCAUGHT EXCEPTION"
msgstr "미발견 예외"
#, python-format
-msgid "Unable to load internal client from config: %r (%s)"
-msgstr "구성ì—ì„œ 내부 í´ë¼ì´ì–¸íŠ¸ë¥¼ 로드할 수 ì—†ìŒ: %r (%s)"
-
-#, python-format
msgid "Unable to locate %s in libc. Leaving as a no-op."
msgstr "libcì—ì„œ %sì„(를) ì°¾ì„ ìˆ˜ 없습니다. no-opë¡œ 남겨 둡니다."
diff --git a/swift/locale/pt_BR/LC_MESSAGES/swift.po b/swift/locale/pt_BR/LC_MESSAGES/swift.po
index ffcdab0de..11ed05097 100644
--- a/swift/locale/pt_BR/LC_MESSAGES/swift.po
+++ b/swift/locale/pt_BR/LC_MESSAGES/swift.po
@@ -10,9 +10,9 @@
# Andreas Jaeger <jaegerandi@gmail.com>, 2016. #zanata
msgid ""
msgstr ""
-"Project-Id-Version: swift 2.7.1.dev50\n"
+"Project-Id-Version: swift 2.7.1.dev169\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2016-04-17 21:20+0000\n"
+"POT-Creation-Date: 2016-06-02 04:58+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
@@ -268,14 +268,6 @@ msgstr ""
"sem alterações"
#, python-format
-msgid "Could not bind to %s:%s after trying for %s seconds"
-msgstr "Não foi possível conectar a %s:%s após tentar por %s segundos"
-
-#, python-format
-msgid "Could not load %r: %s"
-msgstr "Não é possível carregar %r: %s"
-
-#, python-format
msgid "Data download error: %s"
msgstr "Erro ao fazer download de dados: %s"
@@ -284,10 +276,6 @@ msgid "Devices pass completed: %.02fs"
msgstr "Dispositivos finalizados: %.02fs"
#, python-format
-msgid "Directory %r does not map to a valid policy (%s)"
-msgstr "O diretório %r não está mapeado para uma política válida (%s)"
-
-#, python-format
msgid "ERROR %(db_file)s: %(validate_sync_to_err)s"
msgstr "ERRO %(db_file)s: %(validate_sync_to_err)s"
@@ -316,14 +304,6 @@ msgstr ""
"novamente mais tarde): Resposta %(status)s %(reason)s"
#, python-format
-msgid ""
-"ERROR Account update failed: different numbers of hosts and devices in "
-"request: \"%s\" vs \"%s\""
-msgstr ""
-"ERRO A atualização da conta falhou: números diferentes de hosts e "
-"dispositivos na solicitação: \"%s\" vs \"%s\""
-
-#, python-format
msgid "ERROR Bad response %(status)s from %(host)s"
msgstr "ERRO Resposta inválida %(status)s a partir de %(host)s"
@@ -340,14 +320,6 @@ msgstr ""
"posterior): %(status)d resposta do %(ip)s:%(port)s/%(dev)s"
#, python-format
-msgid ""
-"ERROR Container update failed: different numbers of hosts and devices in "
-"request: \"%s\" vs \"%s\""
-msgstr ""
-"ERRO A atualização do contêiner falhou: números diferentes de hosts e "
-"dispositivos na solicitação: \"%s\" vs \"%s\""
-
-#, python-format
msgid "ERROR Could not get account info %s"
msgstr "ERRO Não foi possível recuperar as informações da conta %s"
@@ -500,10 +472,6 @@ msgid "Error hashing suffix"
msgstr "Erro ao efetuar hash do sufixo"
#, python-format
-msgid "Error in %r with mtime_check_interval: %s"
-msgstr "Erro em %r com mtime_check_interval: %s"
-
-#, python-format
msgid "Error limiting server %s"
msgstr "Erro ao limitar o servidor %s"
@@ -653,10 +621,6 @@ msgid "No %s running"
msgstr "Nenhum %s rodando"
#, python-format
-msgid "No cluster endpoint for %r %r"
-msgstr "Nenhum terminal de cluster para %r %r"
-
-#, python-format
msgid "No permission to signal PID %d"
msgstr "Nenhuma permissão para PID do sinal %d"
@@ -786,18 +750,6 @@ msgid "Partition times: max %(max).4fs, min %(min).4fs, med %(med).4fs"
msgstr ""
"Tempos de partição: máximo %(max).4fs, mínimo %(min).4fs, médio %(med).4fs"
-#, python-format
-msgid "Pass beginning; %s possible containers; %s possible objects"
-msgstr "Início da aprovação; %s contêineres possíveis; %s objetos possíveis"
-
-#, python-format
-msgid "Pass completed in %ds; %d objects expired"
-msgstr "Aprovação concluída em %ds; %d objetos expirados"
-
-#, python-format
-msgid "Pass so far %ds; %d objects expired"
-msgstr "Aprovados até aqui %ds; %d objetos expirados"
-
msgid "Path required in X-Container-Sync-To"
msgstr "Caminho necessário em X-Container-Sync-To"
@@ -992,11 +944,6 @@ msgid "UNCAUGHT EXCEPTION"
msgstr "EXCEÇÃO NÃO CAPTURADA"
#, python-format
-msgid "Unable to load internal client from config: %r (%s)"
-msgstr ""
-"Não é possível carregar cliente interno a partir da configuração: %r (%s)"
-
-#, python-format
msgid "Unable to locate %s in libc. Leaving as a no-op."
msgstr "Não é possível localizar %s em libc. Saindo como um não operacional."
diff --git a/swift/locale/ru/LC_MESSAGES/swift.po b/swift/locale/ru/LC_MESSAGES/swift.po
index 95e8c7e76..0bb2e8f3c 100644
--- a/swift/locale/ru/LC_MESSAGES/swift.po
+++ b/swift/locale/ru/LC_MESSAGES/swift.po
@@ -6,9 +6,9 @@
# Andreas Jaeger <jaegerandi@gmail.com>, 2016. #zanata
msgid ""
msgstr ""
-"Project-Id-Version: swift 2.7.1.dev50\n"
+"Project-Id-Version: swift 2.7.1.dev169\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2016-04-17 21:20+0000\n"
+"POT-Creation-Date: 2016-06-02 04:58+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
@@ -266,14 +266,6 @@ msgstr ""
"%(no_change)s"
#, python-format
-msgid "Could not bind to %s:%s after trying for %s seconds"
-msgstr "Ðе удалоÑÑŒ подключитьÑÑ Ðº порту %s:%s по иÑтечении %s Ñекунд"
-
-#, python-format
-msgid "Could not load %r: %s"
-msgstr "Ðе удалоÑÑŒ загрузить %r: %s"
-
-#, python-format
msgid "Data download error: %s"
msgstr "Ошибка загрузки данных: %s"
@@ -282,10 +274,6 @@ msgid "Devices pass completed: %.02fs"
msgstr "Проход уÑтройÑтв выполнен: %.02fs"
#, python-format
-msgid "Directory %r does not map to a valid policy (%s)"
-msgstr "Каталог %r не ÑвÑзан Ñо Ñтратегией policy (%s)"
-
-#, python-format
msgid "ERROR %(db_file)s: %(validate_sync_to_err)s"
msgstr "Ошибка %(db_file)s: %(validate_sync_to_err)s"
@@ -316,14 +304,6 @@ msgstr ""
"%(device)s (Ð¾Ð¿ÐµÑ€Ð°Ñ†Ð¸Ñ Ð±ÑƒÐ´ÐµÑ‚ повторена позднее): Ответ: %(status)s %(reason)s"
#, python-format
-msgid ""
-"ERROR Account update failed: different numbers of hosts and devices in "
-"request: \"%s\" vs \"%s\""
-msgstr ""
-"Ошибка: обновление учетной запиÑи не выполнено, в запроÑе указано разное "
-"чиÑло хоÑтов и уÑтройÑтв: \"%s\" и \"%s\""
-
-#, python-format
msgid "ERROR Bad response %(status)s from %(host)s"
msgstr "Ошибка: Ðеправильный Ð·Ð°Ð¿Ñ€Ð¾Ñ %(status)s из %(host)s"
@@ -341,14 +321,6 @@ msgstr ""
"%(dev)s"
#, python-format
-msgid ""
-"ERROR Container update failed: different numbers of hosts and devices in "
-"request: \"%s\" vs \"%s\""
-msgstr ""
-"Ошибка: обновление контейнера не выполнено, в запроÑе указано разное чиÑло "
-"хоÑтов и уÑтройÑтв: \"%s\" и \"%s\""
-
-#, python-format
msgid "ERROR Could not get account info %s"
msgstr "Ошибка: не удалоÑÑŒ получить ÑÐ²ÐµÐ´ÐµÐ½Ð¸Ñ Ð¾Ð± учетной запиÑи %s"
@@ -507,10 +479,6 @@ msgid "Error hashing suffix"
msgstr "Ошибка Ñ…ÑÑˆÐ¸Ñ€Ð¾Ð²Ð°Ð½Ð¸Ñ ÑуффикÑа"
#, python-format
-msgid "Error in %r with mtime_check_interval: %s"
-msgstr "Ошибка в %r Ñ mtime_check_interval: %s"
-
-#, python-format
msgid "Error limiting server %s"
msgstr "Ошибка Ð¾Ð³Ñ€Ð°Ð½Ð¸Ñ‡ÐµÐ½Ð¸Ñ Ñервера %s"
@@ -663,10 +631,6 @@ msgid "No %s running"
msgstr "%s не выполнÑетÑÑ"
#, python-format
-msgid "No cluster endpoint for %r %r"
-msgstr "ОтÑутÑтвует ÐºÐ¾Ð½ÐµÑ‡Ð½Ð°Ñ Ñ‚Ð¾Ñ‡ÐºÐ° клаÑтера Ð´Ð»Ñ %r %r"
-
-#, python-format
msgid "No permission to signal PID %d"
msgstr "Ðет прав доÑтупа Ð´Ð»Ñ Ð¾Ñ‚Ð¿Ñ€Ð°Ð²ÐºÐ¸ Ñигнала в PID %d"
@@ -794,18 +758,6 @@ msgid "Partition times: max %(max).4fs, min %(min).4fs, med %(med).4fs"
msgstr ""
"Ð’Ñ€ÐµÐ¼Ñ Ñ€Ð°Ð·Ð´ÐµÐ»Ð°: макÑимум: %(max).4fs, минимум: %(min).4fs, Ñреднее: %(med).4fs"
-#, python-format
-msgid "Pass beginning; %s possible containers; %s possible objects"
-msgstr "Проход запущен; возможных контейнеров: %s; возможных объектов: %s"
-
-#, python-format
-msgid "Pass completed in %ds; %d objects expired"
-msgstr "Проход выполнен за %ds; уÑтарело объектов: %d"
-
-#, python-format
-msgid "Pass so far %ds; %d objects expired"
-msgstr "Проход выполнÑетÑÑ Ð´Ð¾ наÑтоÑщего времени %ds; уÑтарело объектов: %d"
-
msgid "Path required in X-Container-Sync-To"
msgstr "ТребуетÑÑ Ð¿ÑƒÑ‚ÑŒ в X-Container-Sync-To"
@@ -1003,10 +955,6 @@ msgid "UNCAUGHT EXCEPTION"
msgstr "ÐÐµÐ¾Ð±Ñ€Ð°Ð±Ð°Ñ‚Ñ‹Ð²Ð°ÐµÐ¼Ð°Ñ Ð¸ÑÐºÐ»ÑŽÑ‡Ð¸Ñ‚ÐµÐ»ÑŒÐ½Ð°Ñ ÑитуациÑ"
#, python-format
-msgid "Unable to load internal client from config: %r (%s)"
-msgstr "Ðе удалоÑÑŒ загрузить клиент из конфигурации: %r (%s)"
-
-#, python-format
msgid "Unable to locate %s in libc. Leaving as a no-op."
msgstr "Ðе удалоÑÑŒ найти %s в libc. ОÑтавлено как no-op."
diff --git a/swift/locale/swift.pot b/swift/locale/swift.pot
deleted file mode 100644
index b4b78d11e..000000000
--- a/swift/locale/swift.pot
+++ /dev/null
@@ -1,1370 +0,0 @@
-# Translations template for swift.
-# Copyright (C) 2016 ORGANIZATION
-# This file is distributed under the same license as the swift project.
-# FIRST AUTHOR <EMAIL@ADDRESS>, 2016.
-#
-#, fuzzy
-msgid ""
-msgstr ""
-"Project-Id-Version: swift 2.7.1.dev50\n"
-"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2016-04-18 06:31+0000\n"
-"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
-"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
-"Language-Team: LANGUAGE <LL@li.org>\n"
-"MIME-Version: 1.0\n"
-"Content-Type: text/plain; charset=utf-8\n"
-"Content-Transfer-Encoding: 8bit\n"
-"Generated-By: Babel 2.2.0\n"
-
-#: swift/account/auditor.py:59
-#, python-format
-msgid ""
-"Since %(time)s: Account audits: %(passed)s passed audit,%(failed)s failed"
-" audit"
-msgstr ""
-
-#: swift/account/auditor.py:82
-msgid "Begin account audit pass."
-msgstr ""
-
-#: swift/account/auditor.py:88 swift/container/auditor.py:86
-msgid "ERROR auditing"
-msgstr ""
-
-#: swift/account/auditor.py:93
-#, python-format
-msgid "Account audit pass completed: %.02fs"
-msgstr ""
-
-#: swift/account/auditor.py:99
-msgid "Begin account audit \"once\" mode"
-msgstr ""
-
-#: swift/account/auditor.py:104
-#, python-format
-msgid "Account audit \"once\" mode completed: %.02fs"
-msgstr ""
-
-#: swift/account/auditor.py:123
-#, python-format
-msgid ""
-"The total %(key)s for the container (%(total)s) does not match the sum of"
-" %(key)s across policies (%(sum)s)"
-msgstr ""
-
-#: swift/account/auditor.py:148
-#, python-format
-msgid "Audit Failed for %(path)s: %(err)s"
-msgstr ""
-
-#: swift/account/auditor.py:153
-#, python-format
-msgid "ERROR Could not get account info %s"
-msgstr ""
-
-#: swift/account/reaper.py:139 swift/common/utils.py:2392
-#: swift/obj/diskfile.py:361 swift/obj/updater.py:88 swift/obj/updater.py:132
-#, python-format
-msgid "Skipping %s as it is not mounted"
-msgstr ""
-
-#: swift/account/reaper.py:143
-msgid "Exception in top-level account reaper loop"
-msgstr ""
-
-#: swift/account/reaper.py:146
-#, python-format
-msgid "Devices pass completed: %.02fs"
-msgstr ""
-
-#: swift/account/reaper.py:256
-#, python-format
-msgid "Beginning pass on account %s"
-msgstr ""
-
-#: swift/account/reaper.py:281
-#, python-format
-msgid "Exception with containers for account %s"
-msgstr ""
-
-#: swift/account/reaper.py:288
-#, python-format
-msgid "Exception with account %s"
-msgstr ""
-
-#: swift/account/reaper.py:289
-#, python-format
-msgid "Incomplete pass on account %s"
-msgstr ""
-
-#: swift/account/reaper.py:291
-#, python-format
-msgid ", %s containers deleted"
-msgstr ""
-
-#: swift/account/reaper.py:293
-#, python-format
-msgid ", %s objects deleted"
-msgstr ""
-
-#: swift/account/reaper.py:295
-#, python-format
-msgid ", %s containers remaining"
-msgstr ""
-
-#: swift/account/reaper.py:298
-#, python-format
-msgid ", %s objects remaining"
-msgstr ""
-
-#: swift/account/reaper.py:300
-#, python-format
-msgid ", %s containers possibly remaining"
-msgstr ""
-
-#: swift/account/reaper.py:303
-#, python-format
-msgid ", %s objects possibly remaining"
-msgstr ""
-
-#: swift/account/reaper.py:306
-msgid ", return codes: "
-msgstr ""
-
-#: swift/account/reaper.py:310
-#, python-format
-msgid ", elapsed: %.02fs"
-msgstr ""
-
-#: swift/account/reaper.py:317
-#, python-format
-msgid "Account %(account)s has not been reaped since %(time)s"
-msgstr ""
-
-#: swift/account/reaper.py:376 swift/account/reaper.py:430
-#: swift/account/reaper.py:506 swift/container/updater.py:308
-#, python-format
-msgid "Exception with %(ip)s:%(port)s/%(device)s"
-msgstr ""
-
-#: swift/account/reaper.py:383 swift/account/reaper.py:439
-#: swift/account/reaper.py:517
-#, python-format
-msgid "Timeout Exception with %(ip)s:%(port)s/%(device)s"
-msgstr ""
-
-#: swift/account/reaper.py:400
-#, python-format
-msgid "Exception with objects for container %(container)s for account %(account)s"
-msgstr ""
-
-#: swift/account/server.py:276 swift/container/server.py:607
-#: swift/obj/server.py:1038
-#, python-format
-msgid "ERROR __call__ error with %(method)s %(path)s "
-msgstr ""
-
-#: swift/common/bufferedhttp.py:206 swift/common/bufferedhttp.py:211
-#, python-format
-msgid "Error encoding to UTF-8: %s"
-msgstr ""
-
-#: swift/common/container_sync_realms.py:60
-#: swift/common/container_sync_realms.py:69
-#, python-format
-msgid "Could not load %r: %s"
-msgstr ""
-
-#: swift/common/container_sync_realms.py:82
-#, python-format
-msgid "Error in %r with mtime_check_interval: %s"
-msgstr ""
-
-#: swift/common/db.py:353
-#, python-format
-msgid "Quarantined %(db_dir)s to %(quar_path)s due to %(exc_hint)s database"
-msgstr ""
-
-#: swift/common/db.py:410
-msgid "Broker error trying to rollback locked connection"
-msgstr ""
-
-#: swift/common/db.py:613
-#, python-format
-msgid "Invalid pending entry %(file)s: %(entry)s"
-msgstr ""
-
-#: swift/common/db_replicator.py:144
-#, python-format
-msgid "ERROR reading HTTP response from %s"
-msgstr ""
-
-#: swift/common/db_replicator.py:208
-#, python-format
-msgid "Attempted to replicate %(count)d dbs in %(time).5f seconds (%(rate).5f/s)"
-msgstr ""
-
-#: swift/common/db_replicator.py:214
-#, python-format
-msgid "Removed %(remove)d dbs"
-msgstr ""
-
-#: swift/common/db_replicator.py:215 swift/obj/replicator.py:517
-#, python-format
-msgid "%(success)s successes, %(failure)s failures"
-msgstr ""
-
-#: swift/common/db_replicator.py:262
-#, python-format
-msgid "ERROR rsync failed with %(code)s: %(args)s"
-msgstr ""
-
-#: swift/common/db_replicator.py:326
-#, python-format
-msgid "ERROR Bad response %(status)s from %(host)s"
-msgstr ""
-
-#: swift/common/db_replicator.py:496 swift/common/db_replicator.py:766
-#, python-format
-msgid "Quarantining DB %s"
-msgstr ""
-
-#: swift/common/db_replicator.py:499
-#, python-format
-msgid "ERROR reading db %s"
-msgstr ""
-
-#: swift/common/db_replicator.py:552
-#, python-format
-msgid "ERROR Remote drive not mounted %s"
-msgstr ""
-
-#: swift/common/db_replicator.py:554
-#, python-format
-msgid "ERROR syncing %(file)s with node %(node)s"
-msgstr ""
-
-#: swift/common/db_replicator.py:593
-#, python-format
-msgid "ERROR while trying to clean up %s"
-msgstr ""
-
-#: swift/common/db_replicator.py:621
-msgid "ERROR Failed to get my own IPs?"
-msgstr ""
-
-#: swift/common/db_replicator.py:637
-#, python-format
-msgid "Skipping %(device)s as it is not mounted"
-msgstr ""
-
-#: swift/common/db_replicator.py:650
-msgid "Beginning replication run"
-msgstr ""
-
-#: swift/common/db_replicator.py:655
-msgid "Replication run OVER"
-msgstr ""
-
-#: swift/common/db_replicator.py:668
-msgid "ERROR trying to replicate"
-msgstr ""
-
-#: swift/common/internal_client.py:196
-#, python-format
-msgid "Unexpected response: %s"
-msgstr ""
-
-#: swift/common/manager.py:68
-msgid "WARNING: Unable to modify file descriptor limit. Running as non-root?"
-msgstr ""
-
-#: swift/common/manager.py:75
-msgid "WARNING: Unable to modify memory limit. Running as non-root?"
-msgstr ""
-
-#: swift/common/manager.py:82
-msgid "WARNING: Unable to modify max process limit. Running as non-root?"
-msgstr ""
-
-#: swift/common/manager.py:241
-msgid ""
-"\n"
-"user quit"
-msgstr ""
-
-#: swift/common/manager.py:278 swift/common/manager.py:630
-#, python-format
-msgid "No %s running"
-msgstr ""
-
-#: swift/common/manager.py:291
-#, python-format
-msgid "%(server)s (%(pid)s) appears to have stopped"
-msgstr ""
-
-#: swift/common/manager.py:304
-#, python-format
-msgid "Waited %(kill_wait)s seconds for %(server)s to die; killing"
-msgstr ""
-
-#: swift/common/manager.py:309 swift/common/manager.py:565
-#, python-format
-msgid "Signal %(server)s pid: %(pid)s signal: %(signal)s"
-msgstr ""
-
-#: swift/common/manager.py:321
-#, python-format
-msgid "Waited %(kill_wait)s seconds for %(server)s to die; giving up"
-msgstr ""
-
-#: swift/common/manager.py:506
-#, python-format
-msgid "Unable to locate config number %(number)s for %(server)s"
-msgstr ""
-
-#: swift/common/manager.py:510
-#, python-format
-msgid "Unable to locate config for %s"
-msgstr ""
-
-#: swift/common/manager.py:513
-msgid "Found configs:"
-msgstr ""
-
-#: swift/common/manager.py:560
-#, python-format
-msgid "Removing pid file %s with invalid pid"
-msgstr ""
-
-#: swift/common/manager.py:571
-#, python-format
-msgid "Removing pid file %(pid_file)s with wrong pid %(pid)d"
-msgstr ""
-
-#: swift/common/manager.py:578
-#, python-format
-msgid "Removing stale pid file %s"
-msgstr ""
-
-#: swift/common/manager.py:581
-#, python-format
-msgid "No permission to signal PID %d"
-msgstr ""
-
-#: swift/common/manager.py:626
-#, python-format
-msgid "%(server)s #%(number)d not running (%(conf)s)"
-msgstr ""
-
-#: swift/common/manager.py:634 swift/common/manager.py:728
-#, python-format
-msgid "%(server)s running (%(pid)s - %(conf)s)"
-msgstr ""
-
-#: swift/common/manager.py:732
-#, python-format
-msgid "%(server)s running (%(pid)s - %(pid_file)s)"
-msgstr ""
-
-#: swift/common/manager.py:737
-#, python-format
-msgid "%s already started..."
-msgstr ""
-
-#: swift/common/manager.py:746
-#, python-format
-msgid "Running %s once"
-msgstr ""
-
-#: swift/common/manager.py:748
-#, python-format
-msgid "Starting %s"
-msgstr ""
-
-#: swift/common/manager.py:755
-#, python-format
-msgid "%s does not exist"
-msgstr ""
-
-#: swift/common/memcached.py:166
-#, python-format
-msgid "Timeout %(action)s to memcached: %(server)s"
-msgstr ""
-
-#: swift/common/memcached.py:169
-#, python-format
-msgid "Error %(action)s to memcached: %(server)s"
-msgstr ""
-
-#: swift/common/memcached.py:194
-#, python-format
-msgid "Error limiting server %s"
-msgstr ""
-
-#: swift/common/request_helpers.py:109
-#, python-format
-msgid "No policy with index %s"
-msgstr ""
-
-#: swift/common/request_helpers.py:456
-msgid "ERROR: An error occurred while retrieving segments"
-msgstr ""
-
-#: swift/common/utils.py:397
-#, python-format
-msgid "Unable to locate %s in libc. Leaving as a no-op."
-msgstr ""
-
-#: swift/common/utils.py:591
-msgid "Unable to locate fallocate, posix_fallocate in libc. Leaving as a no-op."
-msgstr ""
-
-#: swift/common/utils.py:675
-#, python-format
-msgid "Unable to perform fsync() on directory %(dir)s: %(err)s"
-msgstr ""
-
-#: swift/common/utils.py:1271
-#, python-format
-msgid "%s: Connection reset by peer"
-msgstr ""
-
-#: swift/common/utils.py:1273 swift/common/utils.py:1284
-#, python-format
-msgid "%(type)s: %(value)s"
-msgstr ""
-
-#: swift/common/utils.py:1536
-msgid "Connection refused"
-msgstr ""
-
-#: swift/common/utils.py:1538
-msgid "Host unreachable"
-msgstr ""
-
-#: swift/common/utils.py:1540
-msgid "Connection timeout"
-msgstr ""
-
-#: swift/common/utils.py:1818
-msgid "UNCAUGHT EXCEPTION"
-msgstr ""
-
-#: swift/common/utils.py:1873
-msgid "Error: missing config path argument"
-msgstr ""
-
-#: swift/common/utils.py:1878
-#, python-format
-msgid "Error: unable to locate %s"
-msgstr ""
-
-#: swift/common/utils.py:2250
-#, python-format
-msgid "Unable to read config from %s"
-msgstr ""
-
-#: swift/common/utils.py:2256
-#, python-format
-msgid "Unable to find %(section)s config section in %(conf)s"
-msgstr ""
-
-#: swift/common/utils.py:2641
-#, python-format
-msgid "Invalid X-Container-Sync-To format %r"
-msgstr ""
-
-#: swift/common/utils.py:2646
-#, python-format
-msgid "No realm key for %r"
-msgstr ""
-
-#: swift/common/utils.py:2650
-#, python-format
-msgid "No cluster endpoint for %r %r"
-msgstr ""
-
-#: swift/common/utils.py:2659
-#, python-format
-msgid ""
-"Invalid scheme %r in X-Container-Sync-To, must be \"//\", \"http\", or "
-"\"https\"."
-msgstr ""
-
-#: swift/common/utils.py:2663
-msgid "Path required in X-Container-Sync-To"
-msgstr ""
-
-#: swift/common/utils.py:2666
-msgid "Params, queries, and fragments not allowed in X-Container-Sync-To"
-msgstr ""
-
-#: swift/common/utils.py:2671
-#, python-format
-msgid "Invalid host %r in X-Container-Sync-To"
-msgstr ""
-
-#: swift/common/utils.py:2865
-msgid "Exception dumping recon cache"
-msgstr ""
-
-#: swift/common/wsgi.py:199
-#, python-format
-msgid "Could not bind to %s:%s after trying for %s seconds"
-msgstr ""
-
-#: swift/common/wsgi.py:209
-msgid ""
-"WARNING: SSL should only be enabled for testing purposes. Use external "
-"SSL termination for a production deployment."
-msgstr ""
-
-#: swift/common/middleware/catch_errors.py:43
-msgid "Error: An error occurred"
-msgstr ""
-
-#: swift/common/middleware/cname_lookup.py:146
-#, python-format
-msgid "Mapped %(given_domain)s to %(found_domain)s"
-msgstr ""
-
-#: swift/common/middleware/cname_lookup.py:158
-#, python-format
-msgid "Following CNAME chain for %(given_domain)s to %(found_domain)s"
-msgstr ""
-
-#: swift/common/middleware/ratelimit.py:248
-#, python-format
-msgid "Returning 497 because of blacklisting: %s"
-msgstr ""
-
-#: swift/common/middleware/ratelimit.py:263
-#, python-format
-msgid "Ratelimit sleep log: %(sleep)s for %(account)s/%(container)s/%(object)s"
-msgstr ""
-
-#: swift/common/middleware/ratelimit.py:271
-#, python-format
-msgid ""
-"Returning 498 for %(meth)s to %(acc)s/%(cont)s/%(obj)s . Ratelimit (Max "
-"Sleep) %(e)s"
-msgstr ""
-
-#: swift/common/middleware/ratelimit.py:293
-msgid "Warning: Cannot ratelimit without a memcached client"
-msgstr ""
-
-#: swift/common/middleware/recon.py:85
-msgid "Error reading recon cache file"
-msgstr ""
-
-#: swift/common/middleware/recon.py:87
-msgid "Error parsing recon cache file"
-msgstr ""
-
-#: swift/common/middleware/recon.py:89
-msgid "Error retrieving recon data"
-msgstr ""
-
-#: swift/common/middleware/recon.py:163
-msgid "Error listing devices"
-msgstr ""
-
-#: swift/common/middleware/recon.py:265
-msgid "Error reading ringfile"
-msgstr ""
-
-#: swift/common/middleware/recon.py:279
-msgid "Error reading swift.conf"
-msgstr ""
-
-#: swift/common/middleware/xprofile.py:226
-#, python-format
-msgid "Error on render profiling results: %s"
-msgstr ""
-
-#: swift/common/middleware/x_profile/exceptions.py:25
-#, python-format
-msgid "Profiling Error: %s"
-msgstr ""
-
-#: swift/common/middleware/x_profile/html_viewer.py:306
-#, python-format
-msgid "method %s is not allowed."
-msgstr ""
-
-#: swift/common/middleware/x_profile/html_viewer.py:317
-#, python-format
-msgid "Can not load profile data from %s."
-msgstr ""
-
-#: swift/common/middleware/x_profile/html_viewer.py:369
-#: swift/common/middleware/x_profile/html_viewer.py:399
-msgid "no log file found"
-msgstr ""
-
-#: swift/common/middleware/x_profile/html_viewer.py:392
-#, python-format
-msgid "Data download error: %s"
-msgstr ""
-
-#: swift/common/middleware/x_profile/html_viewer.py:397
-msgid "python-matplotlib not installed."
-msgstr ""
-
-#: swift/common/middleware/x_profile/html_viewer.py:433
-#, python-format
-msgid "plotting results failed due to %s"
-msgstr ""
-
-#: swift/common/middleware/x_profile/html_viewer.py:444
-msgid "The file type are forbidden to access!"
-msgstr ""
-
-#: swift/common/middleware/x_profile/html_viewer.py:465
-#, python-format
-msgid "Can not access the file %s."
-msgstr ""
-
-#: swift/common/middleware/x_profile/profile_model.py:128
-msgid "odfpy not installed."
-msgstr ""
-
-#: swift/container/auditor.py:58
-#, python-format
-msgid ""
-"Since %(time)s: Container audits: %(pass)s passed audit, %(fail)s failed "
-"audit"
-msgstr ""
-
-#: swift/container/auditor.py:80
-msgid "Begin container audit pass."
-msgstr ""
-
-#: swift/container/auditor.py:91
-#, python-format
-msgid "Container audit pass completed: %.02fs"
-msgstr ""
-
-#: swift/container/auditor.py:97
-msgid "Begin container audit \"once\" mode"
-msgstr ""
-
-#: swift/container/auditor.py:102
-#, python-format
-msgid "Container audit \"once\" mode completed: %.02fs"
-msgstr ""
-
-#: swift/container/auditor.py:123
-#, python-format
-msgid "ERROR Could not get container info %s"
-msgstr ""
-
-#: swift/container/server.py:186
-#, python-format
-msgid ""
-"ERROR Account update failed: different numbers of hosts and devices in "
-"request: \"%s\" vs \"%s\""
-msgstr ""
-
-#: swift/container/server.py:231
-#, python-format
-msgid ""
-"ERROR Account update failed with %(ip)s:%(port)s/%(device)s (will retry "
-"later): Response %(status)s %(reason)s"
-msgstr ""
-
-#: swift/container/server.py:240
-#, python-format
-msgid ""
-"ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry "
-"later)"
-msgstr ""
-
-#: swift/container/sync.py:218
-msgid ""
-"Configuration option internal_client_conf_path not defined. Using default"
-" configuration, See internal-client.conf-sample for options"
-msgstr ""
-
-#: swift/container/sync.py:231
-#, python-format
-msgid "Unable to load internal client from config: %r (%s)"
-msgstr ""
-
-#: swift/container/sync.py:253
-msgid "Begin container sync \"once\" mode"
-msgstr ""
-
-#: swift/container/sync.py:262
-#, python-format
-msgid "Container sync \"once\" mode completed: %.02fs"
-msgstr ""
-
-#: swift/container/sync.py:270
-#, python-format
-msgid ""
-"Since %(time)s: %(sync)s synced [%(delete)s deletes, %(put)s puts], "
-"%(skip)s skipped, %(fail)s failed"
-msgstr ""
-
-#: swift/container/sync.py:336
-#, python-format
-msgid "ERROR %(db_file)s: %(validate_sync_to_err)s"
-msgstr ""
-
-#: swift/container/sync.py:390
-#, python-format
-msgid "ERROR Syncing %s"
-msgstr ""
-
-#: swift/container/sync.py:539
-#, python-format
-msgid "Unknown exception trying to GET: %(account)r %(container)r %(object)r"
-msgstr ""
-
-#: swift/container/sync.py:564
-#, python-format
-msgid "Unauth %(sync_from)r => %(sync_to)r"
-msgstr ""
-
-#: swift/container/sync.py:570
-#, python-format
-msgid ""
-"Not found %(sync_from)r => %(sync_to)r - object "
-"%(obj_name)r"
-msgstr ""
-
-#: swift/container/sync.py:577 swift/container/sync.py:584
-#, python-format
-msgid "ERROR Syncing %(db_file)s %(row)s"
-msgstr ""
-
-#: swift/container/updater.py:78
-#, python-format
-msgid "ERROR: Failed to get paths to drive partitions: %s"
-msgstr ""
-
-#: swift/container/updater.py:92 swift/obj/reconstructor.py:822
-#: swift/obj/replicator.py:601 swift/obj/replicator.py:718
-#, python-format
-msgid "%s is not mounted"
-msgstr ""
-
-#: swift/container/updater.py:111
-#, python-format
-msgid "ERROR with loading suppressions from %s: "
-msgstr ""
-
-#: swift/container/updater.py:121
-msgid "Begin container update sweep"
-msgstr ""
-
-#: swift/container/updater.py:156
-#, python-format
-msgid ""
-"Container update sweep of %(path)s completed: %(elapsed).02fs, "
-"%(success)s successes, %(fail)s failures, %(no_change)s with no changes"
-msgstr ""
-
-#: swift/container/updater.py:170
-#, python-format
-msgid "Container update sweep completed: %.02fs"
-msgstr ""
-
-#: swift/container/updater.py:182
-msgid "Begin container update single threaded sweep"
-msgstr ""
-
-#: swift/container/updater.py:190
-#, python-format
-msgid ""
-"Container update single threaded sweep completed: %(elapsed).02fs, "
-"%(success)s successes, %(fail)s failures, %(no_change)s with no changes"
-msgstr ""
-
-#: swift/container/updater.py:245
-#, python-format
-msgid "Update report sent for %(container)s %(dbfile)s"
-msgstr ""
-
-#: swift/container/updater.py:254
-#, python-format
-msgid "Update report failed for %(container)s %(dbfile)s"
-msgstr ""
-
-#: swift/container/updater.py:296
-#, python-format
-msgid ""
-"ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry "
-"later): "
-msgstr ""
-
-#: swift/obj/auditor.py:104
-#, python-format
-msgid " - parallel, %s"
-msgstr ""
-
-#: swift/obj/auditor.py:106
-#, python-format
-msgid " - %s"
-msgstr ""
-
-#: swift/obj/auditor.py:107
-#, python-format
-msgid "Begin object audit \"%(mode)s\" mode (%(audi_type)s%(description)s)"
-msgstr ""
-
-#: swift/obj/auditor.py:138
-#, python-format
-msgid ""
-"Object audit (%(type)s). Since %(start_time)s: Locally: %(passes)d "
-"passed, %(quars)d quarantined, %(errors)d errors, files/sec: %(frate).2f,"
-" bytes/sec: %(brate).2f, Total time: %(total).2f, Auditing time: "
-"%(audit).2f, Rate: %(audit_rate).2f"
-msgstr ""
-
-#: swift/obj/auditor.py:172
-#, python-format
-msgid ""
-"Object audit (%(type)s) \"%(mode)s\" mode completed: %(elapsed).02fs. "
-"Total quarantined: %(quars)d, Total errors: %(errors)d, Total files/sec: "
-"%(frate).2f, Total bytes/sec: %(brate).2f, Auditing time: %(audit).2f, "
-"Rate: %(audit_rate).2f"
-msgstr ""
-
-#: swift/obj/auditor.py:187
-#, python-format
-msgid "Object audit stats: %s"
-msgstr ""
-
-#: swift/obj/auditor.py:218
-#, python-format
-msgid "ERROR Trying to audit %s"
-msgstr ""
-
-#: swift/obj/auditor.py:258
-#, python-format
-msgid "ERROR Object %(obj)s failed audit and was quarantined: %(err)s"
-msgstr ""
-
-#: swift/obj/auditor.py:319
-#, python-format
-msgid "ERROR: Unable to run auditing: %s"
-msgstr ""
-
-#: swift/obj/auditor.py:397 swift/obj/auditor.py:418
-#, python-format
-msgid "ERROR auditing: %s"
-msgstr ""
-
-#: swift/obj/diskfile.py:370
-#, python-format
-msgid "Skipping %(dir)s: %(err)s"
-msgstr ""
-
-#: swift/obj/diskfile.py:380 swift/obj/updater.py:163
-#, python-format
-msgid "Directory %r does not map to a valid policy (%s)"
-msgstr ""
-
-#: swift/obj/diskfile.py:422
-#, python-format
-msgid "Cannot read %(auditor_status)s (%(err)s)"
-msgstr ""
-
-#: swift/obj/diskfile.py:428
-#, python-format
-msgid "Loading JSON from %(auditor_status)s failed (%(err)s)"
-msgstr ""
-
-#: swift/obj/diskfile.py:444
-#, python-format
-msgid "Cannot write %(auditor_status)s (%(err)s)"
-msgstr ""
-
-#: swift/obj/diskfile.py:918
-#, python-format
-msgid "Quarantined %(hsh_path)s to %(quar_path)s because it is not a directory"
-msgstr ""
-
-#: swift/obj/diskfile.py:1038
-msgid "Error hashing suffix"
-msgstr ""
-
-#: swift/obj/diskfile.py:1202
-#, python-format
-msgid "Quarantined %(object_path)s to %(quar_path)s because it is not a directory"
-msgstr ""
-
-#: swift/obj/diskfile.py:1455
-#, python-format
-msgid "Problem cleaning up %s"
-msgstr ""
-
-#: swift/obj/diskfile.py:1800
-#, python-format
-msgid "ERROR DiskFile %(data_file)s close failure: %(exc)s : %(stack)s"
-msgstr ""
-
-#: swift/obj/diskfile.py:2128
-#, python-format
-msgid ""
-"Client path %(client)s does not match path stored in object metadata "
-"%(meta)s"
-msgstr ""
-
-#: swift/obj/diskfile.py:2536
-#, python-format
-msgid "No space left on device for %(file)s (%(err)s)"
-msgstr ""
-
-#: swift/obj/diskfile.py:2545
-#, python-format
-msgid "Problem cleaning up %(datadir)s (%(err)s)"
-msgstr ""
-
-#: swift/obj/diskfile.py:2548
-#, python-format
-msgid "Problem writing durable state file %(file)s (%(err)s)"
-msgstr ""
-
-#: swift/obj/expirer.py:80
-#, python-format
-msgid "Pass completed in %ds; %d objects expired"
-msgstr ""
-
-#: swift/obj/expirer.py:87
-#, python-format
-msgid "Pass so far %ds; %d objects expired"
-msgstr ""
-
-#: swift/obj/expirer.py:171
-#, python-format
-msgid "Pass beginning; %s possible containers; %s possible objects"
-msgstr ""
-
-#: swift/obj/expirer.py:197
-#, python-format
-msgid "Exception while deleting container %(container)s %(err)s"
-msgstr ""
-
-#: swift/obj/expirer.py:203 swift/obj/expirer.py:220
-msgid "Unhandled exception"
-msgstr ""
-
-#: swift/obj/expirer.py:270
-#, python-format
-msgid "Exception while deleting object %(container)s %(obj)s %(err)s"
-msgstr ""
-
-#: swift/obj/reconstructor.py:213 swift/obj/reconstructor.py:499
-#, python-format
-msgid "Invalid response %(resp)s from %(full_path)s"
-msgstr ""
-
-#: swift/obj/reconstructor.py:221
-#, python-format
-msgid "Trying to GET %(full_path)s"
-msgstr ""
-
-#: swift/obj/reconstructor.py:328
-#, python-format
-msgid "Error trying to rebuild %(path)s policy#%(policy)d frag#%(frag_index)s"
-msgstr ""
-
-#: swift/obj/reconstructor.py:355
-#, python-format
-msgid ""
-"%(reconstructed)d/%(total)d (%(percentage).2f%%) partitions of "
-"%(device)d/%(dtotal)d (%(dpercentage).2f%%) devices reconstructed in "
-"%(time).2fs (%(rate).2f/sec, %(remaining)s remaining)"
-msgstr ""
-
-#: swift/obj/reconstructor.py:376 swift/obj/replicator.py:522
-#, python-format
-msgid ""
-"%(checked)d suffixes checked - %(hashed).2f%% hashed, %(synced).2f%% "
-"synced"
-msgstr ""
-
-#: swift/obj/reconstructor.py:383 swift/obj/replicator.py:529
-#, python-format
-msgid "Partition times: max %(max).4fs, min %(min).4fs, med %(med).4fs"
-msgstr ""
-
-#: swift/obj/reconstructor.py:391
-#, python-format
-msgid "Nothing reconstructed for %s seconds."
-msgstr ""
-
-#: swift/obj/reconstructor.py:420 swift/obj/replicator.py:566
-msgid "Lockup detected.. killing live coros."
-msgstr ""
-
-#: swift/obj/reconstructor.py:467
-#, python-format
-msgid "Trying to sync suffixes with %s"
-msgstr ""
-
-#: swift/obj/reconstructor.py:492
-#, python-format
-msgid "%s responded as unmounted"
-msgstr ""
-
-#: swift/obj/reconstructor.py:893 swift/obj/replicator.py:372
-#, python-format
-msgid "Removing partition: %s"
-msgstr ""
-
-#: swift/obj/reconstructor.py:909
-msgid "Ring change detected. Aborting current reconstruction pass."
-msgstr ""
-
-#: swift/obj/reconstructor.py:928
-msgid "Exception in top-levelreconstruction loop"
-msgstr ""
-
-#: swift/obj/reconstructor.py:938
-msgid "Running object reconstructor in script mode."
-msgstr ""
-
-#: swift/obj/reconstructor.py:947
-#, python-format
-msgid "Object reconstruction complete (once). (%.02f minutes)"
-msgstr ""
-
-#: swift/obj/reconstructor.py:954
-msgid "Starting object reconstructor in daemon mode."
-msgstr ""
-
-#: swift/obj/reconstructor.py:958
-msgid "Starting object reconstruction pass."
-msgstr ""
-
-#: swift/obj/reconstructor.py:963
-#, python-format
-msgid "Object reconstruction complete. (%.02f minutes)"
-msgstr ""
-
-#: swift/obj/replicator.py:185
-#, python-format
-msgid "Killing long-running rsync: %s"
-msgstr ""
-
-#: swift/obj/replicator.py:199
-#, python-format
-msgid "Bad rsync return code: %(ret)d <- %(args)s"
-msgstr ""
-
-#: swift/obj/replicator.py:206 swift/obj/replicator.py:210
-#, python-format
-msgid "Successful rsync of %(src)s at %(dst)s (%(time).03f)"
-msgstr ""
-
-#: swift/obj/replicator.py:338
-#, python-format
-msgid "Removing %s objects"
-msgstr ""
-
-#: swift/obj/replicator.py:359
-msgid "Error syncing handoff partition"
-msgstr ""
-
-#: swift/obj/replicator.py:437
-#, python-format
-msgid "%(ip)s/%(device)s responded as unmounted"
-msgstr ""
-
-#: swift/obj/replicator.py:444
-#, python-format
-msgid "Invalid response %(resp)s from %(ip)s"
-msgstr ""
-
-#: swift/obj/replicator.py:488
-#, python-format
-msgid "Error syncing with node: %s"
-msgstr ""
-
-#: swift/obj/replicator.py:493
-msgid "Error syncing partition"
-msgstr ""
-
-#: swift/obj/replicator.py:508
-#, python-format
-msgid ""
-"%(replicated)d/%(total)d (%(percentage).2f%%) partitions replicated in "
-"%(time).2fs (%(rate).2f/sec, %(remaining)s remaining)"
-msgstr ""
-
-#: swift/obj/replicator.py:537
-#, python-format
-msgid "Nothing replicated for %s seconds."
-msgstr ""
-
-#: swift/obj/replicator.py:724
-msgid ""
-"Handoffs first mode still has handoffs remaining. Aborting current "
-"replication pass."
-msgstr ""
-
-#: swift/obj/replicator.py:730
-msgid "Ring change detected. Aborting current replication pass."
-msgstr ""
-
-#: swift/obj/replicator.py:758
-msgid "Exception in top-level replication loop"
-msgstr ""
-
-#: swift/obj/replicator.py:768
-msgid "Running object replicator in script mode."
-msgstr ""
-
-#: swift/obj/replicator.py:786
-#, python-format
-msgid "Object replication complete (once). (%.02f minutes)"
-msgstr ""
-
-#: swift/obj/replicator.py:797
-msgid "Starting object replicator in daemon mode."
-msgstr ""
-
-#: swift/obj/replicator.py:801
-msgid "Starting object replication pass."
-msgstr ""
-
-#: swift/obj/replicator.py:806
-#, python-format
-msgid "Object replication complete. (%.02f minutes)"
-msgstr ""
-
-#: swift/obj/server.py:241
-#, python-format
-msgid ""
-"ERROR Container update failed (saving for async update later): %(status)d"
-" response from %(ip)s:%(port)s/%(dev)s"
-msgstr ""
-
-#: swift/obj/server.py:248
-#, python-format
-msgid ""
-"ERROR container update failed with %(ip)s:%(port)s/%(dev)s (saving for "
-"async update later)"
-msgstr ""
-
-#: swift/obj/server.py:284
-#, python-format
-msgid ""
-"ERROR Container update failed: different numbers of hosts and devices in "
-"request: \"%s\" vs \"%s\""
-msgstr ""
-
-#: swift/obj/updater.py:63
-#, python-format
-msgid "ERROR: Unable to access %(path)s: %(error)s"
-msgstr ""
-
-#: swift/obj/updater.py:78
-msgid "Begin object update sweep"
-msgstr ""
-
-#: swift/obj/updater.py:105
-#, python-format
-msgid ""
-"Object update sweep of %(device)s completed: %(elapsed).02fs, %(success)s"
-" successes, %(fail)s failures"
-msgstr ""
-
-#: swift/obj/updater.py:114
-#, python-format
-msgid "Object update sweep completed: %.02fs"
-msgstr ""
-
-#: swift/obj/updater.py:123
-msgid "Begin object update single threaded sweep"
-msgstr ""
-
-#: swift/obj/updater.py:137
-#, python-format
-msgid ""
-"Object update single threaded sweep completed: %(elapsed).02fs, "
-"%(success)s successes, %(fail)s failures"
-msgstr ""
-
-#: swift/obj/updater.py:181
-#, python-format
-msgid "ERROR async pending file with unexpected name %s"
-msgstr ""
-
-#: swift/obj/updater.py:211
-#, python-format
-msgid "ERROR Pickle problem, quarantining %s"
-msgstr ""
-
-#: swift/obj/updater.py:276
-#, python-format
-msgid "ERROR with remote server %(ip)s:%(port)s/%(device)s"
-msgstr ""
-
-#: swift/proxy/server.py:418
-msgid "ERROR Unhandled exception in request"
-msgstr ""
-
-#: swift/proxy/server.py:473
-#, python-format
-msgid "Node error limited %(ip)s:%(port)s (%(device)s)"
-msgstr ""
-
-#: swift/proxy/server.py:490 swift/proxy/server.py:508
-#, python-format
-msgid "%(msg)s %(ip)s:%(port)s/%(device)s"
-msgstr ""
-
-#: swift/proxy/server.py:531
-#, python-format
-msgid "ERROR with %(type)s server %(ip)s:%(port)s/%(device)s re: %(info)s"
-msgstr ""
-
-#: swift/proxy/controllers/account.py:67
-msgid "Account"
-msgstr ""
-
-#: swift/proxy/controllers/base.py:813 swift/proxy/controllers/base.py:852
-#: swift/proxy/controllers/base.py:948 swift/proxy/controllers/obj.py:340
-#: swift/proxy/controllers/obj.py:885 swift/proxy/controllers/obj.py:934
-#: swift/proxy/controllers/obj.py:948 swift/proxy/controllers/obj.py:1764
-#: swift/proxy/controllers/obj.py:2003 swift/proxy/controllers/obj.py:2166
-#: swift/proxy/controllers/obj.py:2400
-msgid "Object"
-msgstr ""
-
-#: swift/proxy/controllers/base.py:814 swift/proxy/controllers/base.py:853
-msgid "Trying to read during GET (retrying)"
-msgstr ""
-
-#: swift/proxy/controllers/base.py:949
-msgid "Trying to read during GET"
-msgstr ""
-
-#: swift/proxy/controllers/base.py:953
-#, python-format
-msgid "Client did not read from proxy within %ss"
-msgstr ""
-
-#: swift/proxy/controllers/base.py:958
-msgid "Client disconnected on read"
-msgstr ""
-
-#: swift/proxy/controllers/base.py:960
-msgid "Trying to send to client"
-msgstr ""
-
-#: swift/proxy/controllers/base.py:1002 swift/proxy/controllers/base.py:1441
-#, python-format
-msgid "Trying to %(method)s %(path)s"
-msgstr ""
-
-#: swift/proxy/controllers/base.py:1041 swift/proxy/controllers/base.py:1429
-#: swift/proxy/controllers/obj.py:364 swift/proxy/controllers/obj.py:925
-#: swift/proxy/controllers/obj.py:2158 swift/proxy/controllers/obj.py:2445
-msgid "ERROR Insufficient Storage"
-msgstr ""
-
-#: swift/proxy/controllers/base.py:1044
-#, python-format
-msgid "ERROR %(status)d %(body)s From %(type)s Server"
-msgstr ""
-
-#: swift/proxy/controllers/base.py:1432
-#, python-format
-msgid "ERROR %(status)d Trying to %(method)s %(path)sFrom Container Server"
-msgstr ""
-
-#: swift/proxy/controllers/base.py:1562
-#, python-format
-msgid "%(type)s returning 503 for %(statuses)s"
-msgstr ""
-
-#: swift/proxy/controllers/container.py:100
-msgid "Container"
-msgstr ""
-
-#: swift/proxy/controllers/obj.py:341
-#, python-format
-msgid "Trying to get final status of PUT to %s"
-msgstr ""
-
-#: swift/proxy/controllers/obj.py:368 swift/proxy/controllers/obj.py:2450
-#, python-format
-msgid "ERROR %(status)d %(body)s From Object Server re: %(path)s"
-msgstr ""
-
-#: swift/proxy/controllers/obj.py:579
-#, python-format
-msgid "Object PUT returning 412, %(statuses)r"
-msgstr ""
-
-#: swift/proxy/controllers/obj.py:592
-#, python-format
-msgid "Object PUT returning 202 for 409: %(req_timestamp)s <= %(timestamps)r"
-msgstr ""
-
-#: swift/proxy/controllers/obj.py:929 swift/proxy/controllers/obj.py:2161
-#, python-format
-msgid "ERROR %(status)d Expect: 100-continue From Object Server"
-msgstr ""
-
-#: swift/proxy/controllers/obj.py:935 swift/proxy/controllers/obj.py:2167
-#, python-format
-msgid "Expect: 100-continue on %s"
-msgstr ""
-
-#: swift/proxy/controllers/obj.py:949 swift/proxy/controllers/obj.py:1765
-#, python-format
-msgid "Trying to write to %s"
-msgstr ""
-
-#: swift/proxy/controllers/obj.py:1000 swift/proxy/controllers/obj.py:2332
-#, python-format
-msgid "ERROR Client read timeout (%ss)"
-msgstr ""
-
-#: swift/proxy/controllers/obj.py:1008 swift/proxy/controllers/obj.py:2338
-msgid "Client disconnected without sending last chunk"
-msgstr ""
-
-#: swift/proxy/controllers/obj.py:1013 swift/proxy/controllers/obj.py:2345
-msgid "ERROR Exception causing client disconnect"
-msgstr ""
-
-#: swift/proxy/controllers/obj.py:1017 swift/proxy/controllers/obj.py:2349
-#, python-format
-msgid "ERROR Exception transferring data to object servers %s"
-msgstr ""
-
-#: swift/proxy/controllers/obj.py:1023 swift/proxy/controllers/obj.py:2263
-msgid "Client disconnected without sending enough data"
-msgstr ""
-
-#: swift/proxy/controllers/obj.py:1069
-#, python-format
-msgid "Object servers returned %s mismatched etags"
-msgstr ""
-
-#: swift/proxy/controllers/obj.py:1073 swift/proxy/controllers/obj.py:2309
-#: swift/proxy/controllers/obj.py:2534
-msgid "Object PUT"
-msgstr ""
-
-#: swift/proxy/controllers/obj.py:2302
-#, python-format
-msgid "Not enough object servers ack'ed (got %d)"
-msgstr ""
-
-#: swift/proxy/controllers/obj.py:2401
-#, python-format
-msgid "Trying to get %(status_type)s status of PUT to %(path)s"
-msgstr ""
-
diff --git a/swift/locale/tr_TR/LC_MESSAGES/swift.po b/swift/locale/tr_TR/LC_MESSAGES/swift.po
index 4e0582008..3b4b8abe1 100644
--- a/swift/locale/tr_TR/LC_MESSAGES/swift.po
+++ b/swift/locale/tr_TR/LC_MESSAGES/swift.po
@@ -7,9 +7,9 @@
# Andreas Jaeger <jaegerandi@gmail.com>, 2016. #zanata
msgid ""
msgstr ""
-"Project-Id-Version: swift 2.7.1.dev50\n"
+"Project-Id-Version: swift 2.7.1.dev169\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2016-04-17 21:20+0000\n"
+"POT-Creation-Date: 2016-06-02 04:58+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
@@ -260,14 +260,6 @@ msgstr ""
"%(success)s başarılı, %(fail)s başarısız, %(no_change)s değişiklik yok"
#, python-format
-msgid "Could not bind to %s:%s after trying for %s seconds"
-msgstr "%s:%s'e bağlanılamadı, %s saniye beklendi"
-
-#, python-format
-msgid "Could not load %r: %s"
-msgstr "%r yüklenemedi: %s"
-
-#, python-format
msgid "Data download error: %s"
msgstr "Veri indirme hatası: %s"
@@ -276,10 +268,6 @@ msgid "Devices pass completed: %.02fs"
msgstr "Aygıtlar geçişi tamamlandı: %.02fs"
#, python-format
-msgid "Directory %r does not map to a valid policy (%s)"
-msgstr "Dizin %r geçerli bir ilkeye eşleştirilmemiş (%s)"
-
-#, python-format
msgid "ERROR %(db_file)s: %(validate_sync_to_err)s"
msgstr "HATA %(db_file)s: %(validate_sync_to_err)s"
@@ -308,14 +296,6 @@ msgstr ""
"denenecek): Yanıt %(status)s %(reason)s"
#, python-format
-msgid ""
-"ERROR Account update failed: different numbers of hosts and devices in "
-"request: \"%s\" vs \"%s\""
-msgstr ""
-"HATA Hesap güncelleme başarısız: istekte farklı sayıda istemci ve aygıt "
-"var: \"%s\" \"%s\""
-
-#, python-format
msgid "ERROR Bad response %(status)s from %(host)s"
msgstr "HATA %(host)s dan kötü yanıt %(status)s"
@@ -332,14 +312,6 @@ msgstr ""
"kaydediliyor): %(ip)s:%(port)s/%(dev)s den %(status)d yanıtı"
#, python-format
-msgid ""
-"ERROR Container update failed: different numbers of hosts and devices in "
-"request: \"%s\" vs \"%s\""
-msgstr ""
-"HATA Kap güncelleme başarısız: istekte farklı sayıda istemci ve aygıt var: "
-"\"%s\" e karşı \"%s\""
-
-#, python-format
msgid "ERROR Could not get account info %s"
msgstr "HATA hesap bilgisi %s alınamadı"
@@ -489,10 +461,6 @@ msgid "Error hashing suffix"
msgstr "Sonek özetini çıkarmada hata"
#, python-format
-msgid "Error in %r with mtime_check_interval: %s"
-msgstr "mtime_check_interval ile %r de hata: %s"
-
-#, python-format
msgid "Error limiting server %s"
msgstr "%s sunucusu sınırlandırılırken hata"
@@ -634,10 +602,6 @@ msgid "No %s running"
msgstr "Çalışan %s yok"
#, python-format
-msgid "No cluster endpoint for %r %r"
-msgstr "%r %r için küme uç noktası yok"
-
-#, python-format
msgid "No permission to signal PID %d"
msgstr "%d PID'ine sinyalleme izni yok"
@@ -750,18 +714,6 @@ msgid "Partition times: max %(max).4fs, min %(min).4fs, med %(med).4fs"
msgstr ""
"Bölüm zamanları: azami %(max).4fs, asgari %(min).4fs, ortalama %(med).4fs"
-#, python-format
-msgid "Pass beginning; %s possible containers; %s possible objects"
-msgstr "Geçiş başlıyor; %s olası kap; %s olası nesne"
-
-#, python-format
-msgid "Pass completed in %ds; %d objects expired"
-msgstr "Geçiş %ds de tamamlandı; %d nesnenin süresi doldu"
-
-#, python-format
-msgid "Pass so far %ds; %d objects expired"
-msgstr "Şimdiye kadarki geçiş %ds; %d nesnenin süresi doldu"
-
msgid "Path required in X-Container-Sync-To"
msgstr "X-Container-Sync-To'de yol gerekli"
@@ -948,10 +900,6 @@ msgid "UNCAUGHT EXCEPTION"
msgstr "YAKALANMAYAN Ä°STÄ°SNA"
#, python-format
-msgid "Unable to load internal client from config: %r (%s)"
-msgstr "Yapılandırmadan dahili istemci yüklenemedi: %r (%s)"
-
-#, python-format
msgid "Unable to locate %s in libc. Leaving as a no-op."
msgstr "%s libc'de bulunamadı. No-op olarak çıkılıyor."
diff --git a/swift/locale/zh_CN/LC_MESSAGES/swift.po b/swift/locale/zh_CN/LC_MESSAGES/swift.po
index a72ca8d57..b47427299 100644
--- a/swift/locale/zh_CN/LC_MESSAGES/swift.po
+++ b/swift/locale/zh_CN/LC_MESSAGES/swift.po
@@ -7,9 +7,9 @@
# Andreas Jaeger <jaegerandi@gmail.com>, 2016. #zanata
msgid ""
msgstr ""
-"Project-Id-Version: swift 2.7.1.dev50\n"
+"Project-Id-Version: swift 2.7.1.dev169\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2016-04-17 21:20+0000\n"
+"POT-Creation-Date: 2016-06-02 04:58+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
@@ -258,14 +258,6 @@ msgstr ""
"%(fail)s 失败, %(no_change)s 无更改"
#, python-format
-msgid "Could not bind to %s:%s after trying for %s seconds"
-msgstr "å°è¯•è¿‡%s秒åŽæ— æ³•æ†ç»‘%s:%s"
-
-#, python-format
-msgid "Could not load %r: %s"
-msgstr "无法下载%r: %s"
-
-#, python-format
msgid "Data download error: %s"
msgstr "æ•°æ®ä¸‹è½½é”™è¯¯ï¼š%s"
@@ -274,10 +266,6 @@ msgid "Devices pass completed: %.02fs"
msgstr "设备通过完æˆï¼š %.02fs"
#, python-format
-msgid "Directory %r does not map to a valid policy (%s)"
-msgstr "目录 %r 未映射至有效策略 (%s)"
-
-#, python-format
msgid "ERROR %(db_file)s: %(validate_sync_to_err)s"
msgstr "错误 %(db_file)s: %(validate_sync_to_err)s"
@@ -306,12 +294,6 @@ msgstr ""
"%(status)s %(reason)s"
#, python-format
-msgid ""
-"ERROR Account update failed: different numbers of hosts and devices in "
-"request: \"%s\" vs \"%s\""
-msgstr "出现错误 è´¦å·æ›´æ–°å¤±è´¥ï¼šæœ¬æœºæ•°é‡ä¸Žè®¾å¤‡æ•°é‡ä¸ç¬¦: \"%s\" vs \"%s\""
-
-#, python-format
msgid "ERROR Bad response %(status)s from %(host)s"
msgstr "失败å“应错误%(status)sæ¥è‡ª%(host)s"
@@ -328,12 +310,6 @@ msgstr ""
"%(dev)s"
#, python-format
-msgid ""
-"ERROR Container update failed: different numbers of hosts and devices in "
-"request: \"%s\" vs \"%s\""
-msgstr "错误 容器更新失败:主机数é‡å’Œè®¾å¤‡æ•°é‡ä¸ç¬¦åˆè¯·æ±‚: \"%s\" vs \"%s\""
-
-#, python-format
msgid "ERROR Could not get account info %s"
msgstr "错误:无法获å–è´¦å·ä¿¡æ¯%s"
@@ -480,10 +456,6 @@ msgid "Error hashing suffix"
msgstr "执行HashingåŽç¼€æ—¶å‘生错误"
#, python-format
-msgid "Error in %r with mtime_check_interval: %s"
-msgstr "%r中mtime_check_interval出现错误:%s"
-
-#, python-format
msgid "Error limiting server %s"
msgstr "æœåŠ¡å™¨å‡ºçŽ°é”™è¯¯%s "
@@ -628,10 +600,6 @@ msgid "No %s running"
msgstr "æ— %sè´¦å·è¿è¡Œ"
#, python-format
-msgid "No cluster endpoint for %r %r"
-msgstr "%r %r的集群节点ä¸å­˜åœ¨"
-
-#, python-format
msgid "No permission to signal PID %d"
msgstr "æ— æƒé™å‘é€ä¿¡å·PID%d"
@@ -753,18 +721,6 @@ msgstr "在X-Container-Sync-To中,变量，查询和碎片不被允许"
msgid "Partition times: max %(max).4fs, min %(min).4fs, med %(med).4fs"
msgstr "分区时间: max %(max).4fs, min %(min).4fs, med %(med).4fs"
-#, python-format
-msgid "Pass beginning; %s possible containers; %s possible objects"
-msgstr "开始通过;%så¯èƒ½å®¹å™¨ï¼›%så¯èƒ½å¯¹è±¡"
-
-#, python-format
-msgid "Pass completed in %ds; %d objects expired"
-msgstr "%ds通过完æˆ; %d对象过期"
-
-#, python-format
-msgid "Pass so far %ds; %d objects expired"
-msgstr "%dsç›®å‰é€šè¿‡ï¼›%d对象过期"
-
msgid "Path required in X-Container-Sync-To"
msgstr "在X-Container-Sync-To中路径是必须的"
@@ -946,10 +902,6 @@ msgid "UNCAUGHT EXCEPTION"
msgstr "未æ•èŽ·çš„异常"
#, python-format
-msgid "Unable to load internal client from config: %r (%s)"
-msgstr "无法从é…置装入内部客户机:%r (%s)"
-
-#, python-format
msgid "Unable to locate %s in libc. Leaving as a no-op."
msgstr "无法查询到%s ä¿ç•™ä¸ºno-op"
diff --git a/swift/locale/zh_TW/LC_MESSAGES/swift.po b/swift/locale/zh_TW/LC_MESSAGES/swift.po
index b9f27fbe9..64e38b13b 100644
--- a/swift/locale/zh_TW/LC_MESSAGES/swift.po
+++ b/swift/locale/zh_TW/LC_MESSAGES/swift.po
@@ -6,9 +6,9 @@
# Andreas Jaeger <jaegerandi@gmail.com>, 2016. #zanata
msgid ""
msgstr ""
-"Project-Id-Version: swift 2.7.1.dev50\n"
+"Project-Id-Version: swift 2.7.1.dev169\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2016-04-17 21:20+0000\n"
+"POT-Creation-Date: 2016-06-02 04:58+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
@@ -258,14 +258,6 @@ msgstr ""
"功,%(fail)s 個失敗,%(no_change)s 個無變更"
#, python-format
-msgid "Could not bind to %s:%s after trying for %s seconds"
-msgstr "嘗試 %s 秒後ä»ç„¡æ³•é€£çµè‡³ %s:%s"
-
-#, python-format
-msgid "Could not load %r: %s"
-msgstr "無法載入 %r:%s"
-
-#, python-format
msgid "Data download error: %s"
msgstr "資料下載錯誤:%s"
@@ -274,10 +266,6 @@ msgid "Devices pass completed: %.02fs"
msgstr "è£ç½®é€šéŽå·²å®Œæˆï¼š%.02fs"
#, python-format
-msgid "Directory %r does not map to a valid policy (%s)"
-msgstr "目錄 %r 未å°æ˜ è‡³æœ‰æ•ˆçš„原則 (%s)"
-
-#, python-format
msgid "ERROR %(db_file)s: %(validate_sync_to_err)s"
msgstr "錯誤:%(db_file)s:%(validate_sync_to_err)s"
@@ -306,12 +294,6 @@ msgstr ""
"%(status)s %(reason)s"
#, python-format
-msgid ""
-"ERROR Account update failed: different numbers of hosts and devices in "
-"request: \"%s\" vs \"%s\""
-msgstr "錯誤:帳戶更新失敗:è¦æ±‚中的主機與è£ç½®æ•¸ç›®ä¸åŒï¼š\"%s\" å° \"%s\""
-
-#, python-format
msgid "ERROR Bad response %(status)s from %(host)s"
msgstr "錯誤:來自 %(host)s 的回應 %(status)s ä¸ç•¶"
@@ -328,12 +310,6 @@ msgstr ""
"%(ip)s:%(port)s/%(dev)s)"
#, python-format
-msgid ""
-"ERROR Container update failed: different numbers of hosts and devices in "
-"request: \"%s\" vs \"%s\""
-msgstr "錯誤:儲存器更新失敗:è¦æ±‚中的主機與è£ç½®æ•¸ç›®ä¸åŒï¼š\"%s\" å° \"%s\""
-
-#, python-format
msgid "ERROR Could not get account info %s"
msgstr "錯誤:無法å–得帳戶資訊 %s"
@@ -481,10 +457,6 @@ msgid "Error hashing suffix"
msgstr "æ··åˆå­—尾時發生錯誤"
#, python-format
-msgid "Error in %r with mtime_check_interval: %s"
-msgstr "在 mtime_check_interval 中,%r 發生錯誤:%s"
-
-#, python-format
msgid "Error limiting server %s"
msgstr "é™åˆ¶ä¼ºæœå™¨ %s 時發生錯誤"
@@ -629,10 +601,6 @@ msgid "No %s running"
msgstr "沒有 %s 在執行中"
#, python-format
-msgid "No cluster endpoint for %r %r"
-msgstr "沒有 %r %r çš„å¢é›†ç«¯é»ž"
-
-#, python-format
msgid "No permission to signal PID %d"
msgstr "沒有信號 PID %d 的許å¯æ¬Š"
@@ -755,18 +723,6 @@ msgstr "X-Container-Sync-To 中不容許參數、查詢及片段"
msgid "Partition times: max %(max).4fs, min %(min).4fs, med %(med).4fs"
msgstr "分割å€æ™‚間:最大 %(max).4fsï¼Œæœ€å° %(min).4fs,中間 %(med).4fs"
-#, python-format
-msgid "Pass beginning; %s possible containers; %s possible objects"
-msgstr "通éŽæ­£åœ¨é–‹å§‹ï¼›%s 個å¯èƒ½å„²å­˜å™¨ï¼›%s 個å¯èƒ½ç‰©ä»¶"
-
-#, python-format
-msgid "Pass completed in %ds; %d objects expired"
-msgstr "已完æˆé€šéŽ %ds 個;%d 個物件已éŽæœŸ"
-
-#, python-format
-msgid "Pass so far %ds; %d objects expired"
-msgstr "ç›®å‰ç‚ºæ­¢é€šéŽ %ds 個;%d 個物件éŽæœŸ"
-
msgid "Path required in X-Container-Sync-To"
msgstr "X-Container-Sync-To 中需è¦è·¯å¾‘"
@@ -951,10 +907,6 @@ msgid "UNCAUGHT EXCEPTION"
msgstr "未æ•æ‰çš„異常狀æ³"
#, python-format
-msgid "Unable to load internal client from config: %r (%s)"
-msgstr "無法從é…置載入內部用戶端:%r (%s)"
-
-#, python-format
msgid "Unable to locate %s in libc. Leaving as a no-op."
msgstr "在 libc 中找ä¸åˆ° %s。ä¿ç•™ç‚º no-op。"
diff --git a/swift/obj/server.py b/swift/obj/server.py
index 7b025cdcf..0188ad00d 100644
--- a/swift/obj/server.py
+++ b/swift/obj/server.py
@@ -559,8 +559,8 @@ class ObjectController(BaseStorageServer):
except (DiskFileXattrNotSupported, DiskFileNoSpace):
return HTTPInsufficientStorage(drive=device, request=request)
- if (content_type_headers['Content-Type-Timestamp'] !=
- disk_file.data_timestamp):
+ if (content_type_headers['Content-Type-Timestamp']
+ != disk_file.data_timestamp):
# Current content-type is not from the datafile, but the datafile
# content-type may have a swift_bytes param that was appended by
# SLO and we must continue to send that with the container update.
@@ -583,15 +583,19 @@ class ObjectController(BaseStorageServer):
'x-meta-timestamp': metadata['X-Timestamp'],
'x-etag': orig_metadata['ETag']})
+ # Special cases for backwards compatibility.
+ # For EC policy, send X-Object-Sysmeta-Ec-Etag which is the same as the
+ # X-Backend-Container-Update-Override-Etag value sent with the original
+ # PUT. Similarly send X-Object-Sysmeta-Ec-Content-Length which is the
+ # same as the X-Backend-Container-Update-Override-Size value. We have
+ # to send Etag and size with a POST container update because the
+ # original PUT container update may have failed or be in async_pending.
if 'X-Object-Sysmeta-Ec-Etag' in orig_metadata:
- # Special case for backwards compatibility.
- # For EC policy, send X-Object-Sysmeta-Ec-Etag which is same as the
- # X-Backend-Container-Update-Override-Etag value sent with the
- # original PUT. We have to send Etag (and size etc) with a POST
- # container update because the original PUT container update may
- # have failed or be in async_pending.
update_headers['X-Etag'] = orig_metadata[
'X-Object-Sysmeta-Ec-Etag']
+ if 'X-Object-Sysmeta-Ec-Content-Length' in orig_metadata:
+ update_headers['X-Size'] = orig_metadata[
+ 'X-Object-Sysmeta-Ec-Content-Length']
self._check_container_override(update_headers, orig_metadata)
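A minimal sketch (not part of the patch) of the POST-time mapping the comment above describes, assuming stored EC sysmeta named as in the hunk; the literal values are invented for illustration:

    # Hypothetical stored metadata for an EC object (values are placeholders).
    orig_metadata = {
        'X-Object-Sysmeta-Ec-Etag': '65552af4dcbf3d4ee0b6b32d1c1cfb1a',
        'X-Object-Sysmeta-Ec-Content-Length': '1999',
    }
    update_headers = {}
    # Copy the whole-object etag/size into the container update so a POST can
    # repair the container row even if the PUT update is still in async_pending.
    if 'X-Object-Sysmeta-Ec-Etag' in orig_metadata:
        update_headers['X-Etag'] = orig_metadata['X-Object-Sysmeta-Ec-Etag']
    if 'X-Object-Sysmeta-Ec-Content-Length' in orig_metadata:
        update_headers['X-Size'] = orig_metadata[
            'X-Object-Sysmeta-Ec-Content-Length']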
diff --git a/swift/proxy/controllers/obj.py b/swift/proxy/controllers/obj.py
index 2629ff765..6eefa4f25 100644
--- a/swift/proxy/controllers/obj.py
+++ b/swift/proxy/controllers/obj.py
@@ -1793,7 +1793,7 @@ def trailing_metadata(policy, client_obj_hasher,
fragment_archive_index):
return {
# etag and size values are being added twice here.
- # The container update header is used to update the container db
+ # The container override header is used to update the container db
# with these values as they represent the correct etag and size for
# the whole object and not just the FA.
# The object sysmeta headers will be saved on each FA of the object.
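For context, a hedged sketch of the duplication this comment describes; the header names appear elsewhere in this patch, and the values are placeholders:

    # Whole-object etag/size travel twice with an EC PUT: once as container
    # update override headers (used for the container DB row) and once as EC
    # sysmeta persisted on each fragment archive (FA).
    whole_object_etag = 'd41d8cd98f00b204e9800998ecf8427e'  # placeholder
    whole_object_size = '1999'                              # placeholder
    trailing = {
        'X-Backend-Container-Update-Override-Etag': whole_object_etag,
        'X-Backend-Container-Update-Override-Size': whole_object_size,
        'X-Object-Sysmeta-Ec-Etag': whole_object_etag,
        'X-Object-Sysmeta-Ec-Content-Length': whole_object_size,
    }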
diff --git a/test/probe/common.py b/test/probe/common.py
index d792ccf9b..5bb4b5d54 100644
--- a/test/probe/common.py
+++ b/test/probe/common.py
@@ -58,18 +58,16 @@ def get_server_number(ipport, ipport2server):
return server, number
-def start_server(ipport, ipport2server, pids, check=True):
+def start_server(ipport, ipport2server):
server, number = get_server_number(ipport, ipport2server)
err = Manager([server]).start(number=number, wait=False)
if err:
raise Exception('unable to start %s' % (
server if not number else '%s%s' % (server, number)))
- if check:
- return check_server(ipport, ipport2server, pids)
- return None
+ return check_server(ipport, ipport2server)
-def check_server(ipport, ipport2server, pids, timeout=CHECK_SERVER_TIMEOUT):
+def check_server(ipport, ipport2server):
server = ipport2server[ipport]
if server[:-1] in ('account', 'container', 'object'):
if int(server[-1]) > 4:
@@ -79,7 +77,7 @@ def check_server(ipport, ipport2server, pids, timeout=CHECK_SERVER_TIMEOUT):
path += '/3'
elif server[:-1] == 'object':
path += '/3/4'
- try_until = time() + timeout
+ try_until = time() + CHECK_SERVER_TIMEOUT
while True:
try:
conn = HTTPConnection(*ipport)
@@ -95,11 +93,11 @@ def check_server(ipport, ipport2server, pids, timeout=CHECK_SERVER_TIMEOUT):
if time() > try_until:
print(err)
print('Giving up on %s:%s after %s seconds.' % (
- server, ipport, timeout))
+ server, ipport, CHECK_SERVER_TIMEOUT))
raise err
sleep(0.1)
else:
- try_until = time() + timeout
+ try_until = time() + CHECK_SERVER_TIMEOUT
while True:
try:
url, token = get_auth('http://%s:%d/auth/v1.0' % ipport,
@@ -116,7 +114,7 @@ def check_server(ipport, ipport2server, pids, timeout=CHECK_SERVER_TIMEOUT):
return None
-def kill_server(ipport, ipport2server, pids):
+def kill_server(ipport, ipport2server):
server, number = get_server_number(ipport, ipport2server)
err = Manager([server]).kill(number=number)
if err:
@@ -136,7 +134,7 @@ def kill_server(ipport, ipport2server, pids):
sleep(0.1)
-def kill_nonprimary_server(primary_nodes, ipport2server, pids):
+def kill_nonprimary_server(primary_nodes, ipport2server):
primary_ipports = [(n['ip'], n['port']) for n in primary_nodes]
for ipport, server in ipport2server.items():
if ipport in primary_ipports:
@@ -146,7 +144,7 @@ def kill_nonprimary_server(primary_nodes, ipport2server, pids):
raise Exception('Cannot figure out server type for %r' % primary_nodes)
for ipport, server in list(ipport2server.items()):
if server[:-1] == server_type and ipport not in primary_ipports:
- kill_server(ipport, ipport2server, pids)
+ kill_server(ipport, ipport2server)
return ipport
@@ -322,7 +320,6 @@ class ProbeTest(unittest.TestCase):
def setUp(self):
resetswift()
- self.pids = {}
try:
self.ipport2server = {}
self.configs = defaultdict(dict)
@@ -354,11 +351,11 @@ class ProbeTest(unittest.TestCase):
Manager(['main']).start(wait=False)
for ipport in self.ipport2server:
- check_server(ipport, self.ipport2server, self.pids)
+ check_server(ipport, self.ipport2server)
proxy_ipport = ('127.0.0.1', 8080)
self.ipport2server[proxy_ipport] = 'proxy'
self.url, self.token, self.account = check_server(
- proxy_ipport, self.ipport2server, self.pids)
+ proxy_ipport, self.ipport2server)
self.replicators = Manager(
['account-replicator', 'container-replicator',
'object-replicator'])
@@ -443,10 +440,13 @@ class ProbeTest(unittest.TestCase):
swift_dir = /etc/swift
[pipeline:main]
- pipeline = catch_errors cache proxy-server
+ pipeline = catch_errors cache copy proxy-server
[app:proxy-server]
use = egg:swift#proxy
+
+ [filter:copy]
+ use = egg:swift#copy
object_post_as_copy = %s
[filter:cache]
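For reference, the template above renders to an internal-client config along these lines (a sketch assuming object_post_as_copy is interpolated as false; the trailing filter sections follow the same pattern as the per-test config removed later in this patch):

    [DEFAULT]
    swift_dir = /etc/swift

    [pipeline:main]
    pipeline = catch_errors cache copy proxy-server

    [app:proxy-server]
    use = egg:swift#proxy

    [filter:copy]
    use = egg:swift#copy
    object_post_as_copy = false

    [filter:cache]
    use = egg:swift#memcache

    [filter:catch_errors]
    use = egg:swift#catch_errors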
diff --git a/test/probe/test_account_failures.py b/test/probe/test_account_failures.py
index 9f7adc0a3..23292ed2f 100755
--- a/test/probe/test_account_failures.py
+++ b/test/probe/test_account_failures.py
@@ -97,9 +97,8 @@ class TestAccountFailures(ReplProbeTest):
self.assertTrue(found2)
apart, anodes = self.account_ring.get_nodes(self.account)
- kill_nonprimary_server(anodes, self.ipport2server, self.pids)
- kill_server((anodes[0]['ip'], anodes[0]['port']),
- self.ipport2server, self.pids)
+ kill_nonprimary_server(anodes, self.ipport2server)
+ kill_server((anodes[0]['ip'], anodes[0]['port']), self.ipport2server)
# Kill account servers excepting two of the primaries
# Delete container1
@@ -147,8 +146,7 @@ class TestAccountFailures(ReplProbeTest):
self.assertTrue(found2)
# Restart other primary account server
- start_server((anodes[0]['ip'], anodes[0]['port']),
- self.ipport2server, self.pids)
+ start_server((anodes[0]['ip'], anodes[0]['port']), self.ipport2server)
# Assert that server doesn't know about container1's deletion or the
# new container2/object2 yet
diff --git a/test/probe/test_container_failures.py b/test/probe/test_container_failures.py
index d8c132c53..7451833fa 100755
--- a/test/probe/test_container_failures.py
+++ b/test/probe/test_container_failures.py
@@ -49,16 +49,14 @@ class TestContainerFailures(ReplProbeTest):
client.put_container(self.url, self.token, container1)
# Kill container1 servers excepting two of the primaries
- kill_nonprimary_server(cnodes, self.ipport2server, self.pids)
- kill_server((cnodes[0]['ip'], cnodes[0]['port']),
- self.ipport2server, self.pids)
+ kill_nonprimary_server(cnodes, self.ipport2server)
+ kill_server((cnodes[0]['ip'], cnodes[0]['port']), self.ipport2server)
# Delete container1
client.delete_container(self.url, self.token, container1)
# Restart other container1 primary server
- start_server((cnodes[0]['ip'], cnodes[0]['port']),
- self.ipport2server, self.pids)
+ start_server((cnodes[0]['ip'], cnodes[0]['port']), self.ipport2server)
# Create container1/object1 (allowed because at least one server thinks the
# container exists)
@@ -89,12 +87,11 @@ class TestContainerFailures(ReplProbeTest):
client.put_container(self.url, self.token, container1)
# Kill container1 servers excepting one of the primaries
- cnp_ipport = kill_nonprimary_server(cnodes, self.ipport2server,
- self.pids)
+ cnp_ipport = kill_nonprimary_server(cnodes, self.ipport2server)
kill_server((cnodes[0]['ip'], cnodes[0]['port']),
- self.ipport2server, self.pids)
+ self.ipport2server)
kill_server((cnodes[1]['ip'], cnodes[1]['port']),
- self.ipport2server, self.pids)
+ self.ipport2server)
# Delete container1 directly to the one primary still up
direct_client.direct_delete_container(cnodes[2], cpart, self.account,
@@ -102,10 +99,10 @@ class TestContainerFailures(ReplProbeTest):
# Restart other container1 servers
start_server((cnodes[0]['ip'], cnodes[0]['port']),
- self.ipport2server, self.pids)
+ self.ipport2server)
start_server((cnodes[1]['ip'], cnodes[1]['port']),
- self.ipport2server, self.pids)
- start_server(cnp_ipport, self.ipport2server, self.pids)
+ self.ipport2server)
+ start_server(cnp_ipport, self.ipport2server)
# Get to a final state
self.get_to_final_state()
diff --git a/test/probe/test_container_merge_policy_index.py b/test/probe/test_container_merge_policy_index.py
index 829329a7e..1bd405a49 100644
--- a/test/probe/test_container_merge_policy_index.py
+++ b/test/probe/test_container_merge_policy_index.py
@@ -46,6 +46,24 @@ class TestContainerMergePolicyIndex(ReplProbeTest):
self.brain = BrainSplitter(self.url, self.token, self.container_name,
self.object_name, 'container')
+ def _get_object_patiently(self, policy_index):
+ # use proxy to access object (bad container info might be cached...)
+ timeout = time.time() + TIMEOUT
+ while time.time() < timeout:
+ try:
+ return client.get_object(self.url, self.token,
+ self.container_name,
+ self.object_name)
+ except ClientException as err:
+ if err.http_status != HTTP_NOT_FOUND:
+ raise
+ time.sleep(1)
+ else:
+ self.fail('could not GET /%s/%s/%s/ from policy %s '
+ 'after %s seconds.' % (
+ self.account, self.container_name, self.object_name,
+ int(policy_index), TIMEOUT))
+
def test_merge_storage_policy_index(self):
# generic split brain
self.brain.stop_primary_half()
@@ -53,7 +71,9 @@ class TestContainerMergePolicyIndex(ReplProbeTest):
self.brain.start_primary_half()
self.brain.stop_handoff_half()
self.brain.put_container()
- self.brain.put_object()
+ client.put_object(self.url, self.token, self.container_name,
+ self.object_name, contents='VERIFY',
+ headers={'x-object-meta-test': 'custom-meta'})
self.brain.start_handoff_half()
# make sure we have some manner of split brain
container_part, container_nodes = self.container_ring.get_nodes(
@@ -127,24 +147,10 @@ class TestContainerMergePolicyIndex(ReplProbeTest):
self.fail('Found /%s/%s/%s in %s' % (
self.account, self.container_name, self.object_name,
orig_policy_index))
- # use proxy to access object (bad container info might be cached...)
- timeout = time.time() + TIMEOUT
- while time.time() < timeout:
- try:
- metadata = client.head_object(self.url, self.token,
- self.container_name,
- self.object_name)
- except ClientException as err:
- if err.http_status != HTTP_NOT_FOUND:
- raise
- time.sleep(1)
- else:
- break
- else:
- self.fail('could not HEAD /%s/%s/%s/ from policy %s '
- 'after %s seconds.' % (
- self.account, self.container_name, self.object_name,
- expected_policy_index, TIMEOUT))
+ # verify that the object data read by external client is correct
+ headers, data = self._get_object_patiently(expected_policy_index)
+ self.assertEqual('VERIFY', data)
+ self.assertEqual('custom-meta', headers['x-object-meta-test'])
def test_reconcile_delete(self):
# generic split brain
@@ -399,17 +405,19 @@ class TestContainerMergePolicyIndex(ReplProbeTest):
self.assertEqual(2, len(old_container_node_ids))
# hopefully memcache still has the new policy cached
- self.brain.put_object()
+ client.put_object(self.url, self.token, self.container_name,
+ self.object_name, contents='VERIFY',
+ headers={'x-object-meta-test': 'custom-meta'})
# double-check object correctly written to new policy
conf_files = []
for server in Manager(['container-reconciler']).servers:
conf_files.extend(server.conf_files())
conf_file = conf_files[0]
- client = InternalClient(conf_file, 'probe-test', 3)
- client.get_object_metadata(
+ int_client = InternalClient(conf_file, 'probe-test', 3)
+ int_client.get_object_metadata(
self.account, self.container_name, self.object_name,
headers={'X-Backend-Storage-Policy-Index': int(new_policy)})
- client.get_object_metadata(
+ int_client.get_object_metadata(
self.account, self.container_name, self.object_name,
acceptable_statuses=(4,),
headers={'X-Backend-Storage-Policy-Index': int(old_policy)})
@@ -423,9 +431,9 @@ class TestContainerMergePolicyIndex(ReplProbeTest):
tuple(server.once(number=n + 1) for n in old_container_node_ids)
# verify entry in the queue for the "misplaced" new_policy
- for container in client.iter_containers('.misplaced_objects'):
- for obj in client.iter_objects('.misplaced_objects',
- container['name']):
+ for container in int_client.iter_containers('.misplaced_objects'):
+ for obj in int_client.iter_objects('.misplaced_objects',
+ container['name']):
expected = '%d:/%s/%s/%s' % (new_policy, self.account,
self.container_name,
self.object_name)
@@ -434,12 +442,12 @@ class TestContainerMergePolicyIndex(ReplProbeTest):
Manager(['container-reconciler']).once()
# verify object in old_policy
- client.get_object_metadata(
+ int_client.get_object_metadata(
self.account, self.container_name, self.object_name,
headers={'X-Backend-Storage-Policy-Index': int(old_policy)})
# verify object is *not* in new_policy
- client.get_object_metadata(
+ int_client.get_object_metadata(
self.account, self.container_name, self.object_name,
acceptable_statuses=(4,),
headers={'X-Backend-Storage-Policy-Index': int(new_policy)})
@@ -447,10 +455,9 @@ class TestContainerMergePolicyIndex(ReplProbeTest):
self.get_to_final_state()
# verify entry in the queue
- client = InternalClient(conf_file, 'probe-test', 3)
- for container in client.iter_containers('.misplaced_objects'):
- for obj in client.iter_objects('.misplaced_objects',
- container['name']):
+ for container in int_client.iter_containers('.misplaced_objects'):
+ for obj in int_client.iter_objects('.misplaced_objects',
+ container['name']):
expected = '%d:/%s/%s/%s' % (old_policy, self.account,
self.container_name,
self.object_name)
@@ -459,21 +466,26 @@ class TestContainerMergePolicyIndex(ReplProbeTest):
Manager(['container-reconciler']).once()
# and now it flops back
- client.get_object_metadata(
+ int_client.get_object_metadata(
self.account, self.container_name, self.object_name,
headers={'X-Backend-Storage-Policy-Index': int(new_policy)})
- client.get_object_metadata(
+ int_client.get_object_metadata(
self.account, self.container_name, self.object_name,
acceptable_statuses=(4,),
headers={'X-Backend-Storage-Policy-Index': int(old_policy)})
# make sure the queue is settled
self.get_to_final_state()
- for container in client.iter_containers('.misplaced_objects'):
- for obj in client.iter_objects('.misplaced_objects',
- container['name']):
+ for container in int_client.iter_containers('.misplaced_objects'):
+ for obj in int_client.iter_objects('.misplaced_objects',
+ container['name']):
self.fail('Found unexpected object %r in the queue' % obj)
+ # verify that the object data read by external client is correct
+ headers, data = self._get_object_patiently(int(new_policy))
+ self.assertEqual('VERIFY', data)
+ self.assertEqual('custom-meta', headers['x-object-meta-test'])
+
if __name__ == "__main__":
unittest.main()
diff --git a/test/probe/test_empty_device_handoff.py b/test/probe/test_empty_device_handoff.py
index c3138be05..e1f8ade50 100755
--- a/test/probe/test_empty_device_handoff.py
+++ b/test/probe/test_empty_device_handoff.py
@@ -58,8 +58,7 @@ class TestEmptyDevice(ReplProbeTest):
onode = onodes[0]
# Kill one container/obj primary server
- kill_server((onode['ip'], onode['port']),
- self.ipport2server, self.pids)
+ kill_server((onode['ip'], onode['port']), self.ipport2server)
# Delete the default data directory for objects on the primary server
obj_dir = '%s/%s' % (self._get_objects_dir(onode),
@@ -74,11 +73,17 @@ class TestEmptyDevice(ReplProbeTest):
raise Exception('Object GET did not return VERIFY, instead it '
'returned: %s' % repr(odata))
+ # Stash the on disk data from a primary for future comparison with the
+ # handoff - this may not equal 'VERIFY' if for example the proxy has
+ # crypto enabled
+ direct_get_data = direct_client.direct_get_object(
+ onodes[1], opart, self.account, container, obj, headers={
+ 'X-Backend-Storage-Policy-Index': self.policy.idx})[-1]
+
# Kill other two container/obj primary servers
# to ensure GET handoff works
for node in onodes[1:]:
- kill_server((node['ip'], node['port']),
- self.ipport2server, self.pids)
+ kill_server((node['ip'], node['port']), self.ipport2server)
# Indirectly through proxy assert we can get container/obj
odata = client.get_object(self.url, self.token, container, obj)[-1]
@@ -87,8 +92,7 @@ class TestEmptyDevice(ReplProbeTest):
'returned: %s' % repr(odata))
# Restart those other two container/obj primary servers
for node in onodes[1:]:
- start_server((node['ip'], node['port']),
- self.ipport2server, self.pids)
+ start_server((node['ip'], node['port']), self.ipport2server)
self.assertFalse(os.path.exists(obj_dir))
# We've indirectly verified the handoff node has the object, but
# let's directly verify it.
@@ -98,9 +102,7 @@ class TestEmptyDevice(ReplProbeTest):
odata = direct_client.direct_get_object(
another_onode, opart, self.account, container, obj,
headers={'X-Backend-Storage-Policy-Index': self.policy.idx})[-1]
- if odata != 'VERIFY':
- raise Exception('Direct object GET did not return VERIFY, instead '
- 'it returned: %s' % repr(odata))
+ self.assertEqual(direct_get_data, odata)
# Assert container listing (via proxy and directly) has container/obj
objs = [o['name'] for o in
@@ -127,8 +129,7 @@ class TestEmptyDevice(ReplProbeTest):
missing)
# Bring the first container/obj primary server back up
- start_server((onode['ip'], onode['port']),
- self.ipport2server, self.pids)
+ start_server((onode['ip'], onode['port']), self.ipport2server)
# Assert that it doesn't have container/obj yet
self.assertFalse(os.path.exists(obj_dir))
@@ -159,9 +160,7 @@ class TestEmptyDevice(ReplProbeTest):
odata = direct_client.direct_get_object(
onode, opart, self.account, container, obj, headers={
'X-Backend-Storage-Policy-Index': self.policy.idx})[-1]
- if odata != 'VERIFY':
- raise Exception('Direct object GET did not return VERIFY, instead '
- 'it returned: %s' % repr(odata))
+ self.assertEqual(direct_get_data, odata)
# Assert the handoff server no longer has container/obj
try:
diff --git a/test/probe/test_object_async_update.py b/test/probe/test_object_async_update.py
index 3553e2666..0738c933d 100755
--- a/test/probe/test_object_async_update.py
+++ b/test/probe/test_object_async_update.py
@@ -14,21 +14,16 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-import os
-import shutil
-
from io import StringIO
-from tempfile import mkdtemp
-from textwrap import dedent
from unittest import main
from uuid import uuid4
from swiftclient import client
-from swift.common import direct_client, internal_client
+from swift.common import direct_client
from swift.common.manager import Manager
from test.probe.common import kill_nonprimary_server, \
- kill_server, ReplProbeTest, start_server
+ kill_server, ReplProbeTest, start_server, ECProbeTest
class TestObjectAsyncUpdate(ReplProbeTest):
@@ -41,17 +36,15 @@ class TestObjectAsyncUpdate(ReplProbeTest):
# Kill container servers excepting two of the primaries
cpart, cnodes = self.container_ring.get_nodes(self.account, container)
cnode = cnodes[0]
- kill_nonprimary_server(cnodes, self.ipport2server, self.pids)
- kill_server((cnode['ip'], cnode['port']),
- self.ipport2server, self.pids)
+ kill_nonprimary_server(cnodes, self.ipport2server)
+ kill_server((cnode['ip'], cnode['port']), self.ipport2server)
# Create container/obj
obj = 'object-%s' % uuid4()
client.put_object(self.url, self.token, container, obj, '')
# Restart other primary server
- start_server((cnode['ip'], cnode['port']),
- self.ipport2server, self.pids)
+ start_server((cnode['ip'], cnode['port']), self.ipport2server)
# Assert it does not know about container/obj
self.assertFalse(direct_client.direct_get_container(
@@ -73,39 +66,9 @@ class TestUpdateOverrides(ReplProbeTest):
Verify that the update override headers take effect and override
values propagate to the container server.
"""
- def setUp(self):
- """
- Reset all environment and start all servers.
- """
- super(TestUpdateOverrides, self).setUp()
- self.tempdir = mkdtemp()
- conf_path = os.path.join(self.tempdir, 'internal_client.conf')
- conf_body = """
- [DEFAULT]
- swift_dir = /etc/swift
-
- [pipeline:main]
- pipeline = catch_errors cache proxy-server
-
- [app:proxy-server]
- use = egg:swift#proxy
-
- [filter:cache]
- use = egg:swift#memcache
-
- [filter:catch_errors]
- use = egg:swift#catch_errors
- """
- with open(conf_path, 'w') as f:
- f.write(dedent(conf_body))
- self.int_client = internal_client.InternalClient(conf_path, 'test', 1)
-
- def tearDown(self):
- super(TestUpdateOverrides, self).tearDown()
- shutil.rmtree(self.tempdir)
-
def _test_update_override_headers(self, override_headers):
# verify that update override headers are sent in container updates
+ int_client = self.make_internal_client()
container_name = 'c-%s' % uuid4()
obj_name = 'o-%s' % uuid4()
client.put_container(self.url, self.token, container_name,
@@ -113,24 +76,26 @@ class TestUpdateOverrides(ReplProbeTest):
self.policy.name})
override_headers['Content-Type'] = 'text/plain'
- self.int_client.upload_object(StringIO(u'stuff'), self.account,
- container_name, obj_name,
- override_headers)
+ int_client.upload_object(StringIO(u'stuff'), self.account,
+ container_name, obj_name,
+ override_headers)
# Run the object-updaters to be sure updates are done
Manager(['object-updater']).once()
- meta = self.int_client.get_object_metadata(
+ meta = int_client.get_object_metadata(
self.account, container_name, obj_name)
self.assertEqual('text/plain', meta['content-type'])
self.assertEqual('c13d88cb4cb02003daedb8a84e5d272a', meta['etag'])
+ self.assertEqual('5', meta['content-length'])
- obj_iter = self.int_client.iter_objects(self.account, container_name)
+ obj_iter = int_client.iter_objects(self.account, container_name)
for obj in obj_iter:
if obj['name'] == obj_name:
self.assertEqual('override-etag', obj['hash'])
self.assertEqual('override-type', obj['content_type'])
+ self.assertEqual(1999, obj['bytes'])
break
else:
self.fail('Failed to find object %s in listing for %s' %
@@ -140,7 +105,8 @@ class TestUpdateOverrides(ReplProbeTest):
headers = {
'X-Object-Sysmeta-Container-Update-Override-Etag': 'override-etag',
'X-Object-Sysmeta-Container-Update-Override-Content-Type':
- 'override-type'
+ 'override-type',
+ 'X-Object-Sysmeta-Container-Update-Override-Size': '1999'
}
self._test_update_override_headers(headers)
@@ -148,9 +114,167 @@ class TestUpdateOverrides(ReplProbeTest):
# older proxies used these headers to override container update values
headers = {
'X-Backend-Container-Update-Override-Etag': 'override-etag',
- 'X-Backend-Container-Update-Override-Content-Type': 'override-type'
+ 'X-Backend-Container-Update-Override-Content-Type':
+ 'override-type',
+ 'X-Backend-Container-Update-Override-Size': '1999'
}
self._test_update_override_headers(headers)
+
+class TestUpdateOverridesEC(ECProbeTest):
+ # verify that the container update overrides used with EC policies make
+ # it to the container servers when container updates are sync or async
+ # and possibly re-ordered with respect to object PUT and POST requests.
+ def test_async_update_after_PUT(self):
+ cpart, cnodes = self.container_ring.get_nodes(self.account, 'c1')
+ client.put_container(self.url, self.token, 'c1',
+ headers={'X-Storage-Policy':
+ self.policy.name})
+
+ # put an object while one container server is stopped so that we force
+ # an async update to it
+ kill_server((cnodes[0]['ip'], cnodes[0]['port']), self.ipport2server)
+ content = u'stuff'
+ client.put_object(self.url, self.token, 'c1', 'o1', contents=content)
+ meta = client.head_object(self.url, self.token, 'c1', 'o1')
+
+ # re-start the container server and assert that it does not yet know
+ # about the object
+ start_server((cnodes[0]['ip'], cnodes[0]['port']), self.ipport2server)
+ self.assertFalse(direct_client.direct_get_container(
+ cnodes[0], cpart, self.account, 'c1')[1])
+
+ # Run the object-updaters to be sure updates are done
+ Manager(['object-updater']).once()
+
+ # check the re-started container server got the same update as others.
+ # we cannot assert the actual etag value because it may be encrypted
+ listing_etags = set()
+ for cnode in cnodes:
+ obj = direct_client.direct_get_container(
+ cnode, cpart, self.account, 'c1')[1][0]
+ self.assertEqual(len(content), obj['bytes'])
+ listing_etags.add(obj['hash'])
+ self.assertEqual(1, len(listing_etags))
+
+ # check that listing meta returned to client is consistent with object
+ # meta returned to client
+ hdrs, listing = client.get_container(self.url, self.token, 'c1')
+ self.assertEqual(1, len(listing))
+ self.assertEqual('o1', listing[0]['name'])
+ self.assertEqual(len(content), listing[0]['bytes'])
+ self.assertEqual(meta['etag'], listing[0]['hash'])
+
+ def test_update_during_POST_only(self):
+ # verify correct update values when PUT update is missed but then a
+ # POST update succeeds *before* the PUT async pending update is sent
+ cpart, cnodes = self.container_ring.get_nodes(self.account, 'c1')
+ client.put_container(self.url, self.token, 'c1',
+ headers={'X-Storage-Policy':
+ self.policy.name})
+
+ # put an object while one container server is stopped so that we force
+ # an async update to it
+ kill_server((cnodes[0]['ip'], cnodes[0]['port']), self.ipport2server)
+ content = u'stuff'
+ client.put_object(self.url, self.token, 'c1', 'o1', contents=content)
+ meta = client.head_object(self.url, self.token, 'c1', 'o1')
+
+ # re-start the container server and assert that it does not yet know
+ # about the object
+ start_server((cnodes[0]['ip'], cnodes[0]['port']), self.ipport2server)
+ self.assertFalse(direct_client.direct_get_container(
+ cnodes[0], cpart, self.account, 'c1')[1])
+
+ # use internal client for POST so we can force fast-post mode
+ int_client = self.make_internal_client(object_post_as_copy=False)
+ int_client.set_object_metadata(
+ self.account, 'c1', 'o1', {'X-Object-Meta-Fruit': 'Tomato'})
+ self.assertEqual(
+ 'Tomato',
+ int_client.get_object_metadata(self.account, 'c1', 'o1')
+ ['x-object-meta-fruit']) # sanity
+
+ # check the re-started container server got the same update as others.
+ # we cannot assert the actual etag value because it may be encrypted
+ listing_etags = set()
+ for cnode in cnodes:
+ obj = direct_client.direct_get_container(
+ cnode, cpart, self.account, 'c1')[1][0]
+ self.assertEqual(len(content), obj['bytes'])
+ listing_etags.add(obj['hash'])
+ self.assertEqual(1, len(listing_etags))
+
+ # check that listing meta returned to client is consistent with object
+ # meta returned to client
+ hdrs, listing = client.get_container(self.url, self.token, 'c1')
+ self.assertEqual(1, len(listing))
+ self.assertEqual('o1', listing[0]['name'])
+ self.assertEqual(len(content), listing[0]['bytes'])
+ self.assertEqual(meta['etag'], listing[0]['hash'])
+
+ # Run the object-updaters to send the async pending from the PUT
+ Manager(['object-updater']).once()
+
+ # check container listing metadata is still correct
+ for cnode in cnodes:
+ obj = direct_client.direct_get_container(
+ cnode, cpart, self.account, 'c1')[1][0]
+ self.assertEqual(len(content), obj['bytes'])
+ listing_etags.add(obj['hash'])
+ self.assertEqual(1, len(listing_etags))
+
+ def test_async_updates_after_PUT_and_POST(self):
+ # verify correct update values when PUT update and POST updates are
+ # missed but then async updates are sent
+ cpart, cnodes = self.container_ring.get_nodes(self.account, 'c1')
+ client.put_container(self.url, self.token, 'c1',
+ headers={'X-Storage-Policy':
+ self.policy.name})
+
+ # PUT and POST to object while one container server is stopped so that
+ # we force async updates to it
+ kill_server((cnodes[0]['ip'], cnodes[0]['port']), self.ipport2server)
+ content = u'stuff'
+ client.put_object(self.url, self.token, 'c1', 'o1', contents=content)
+ meta = client.head_object(self.url, self.token, 'c1', 'o1')
+
+ # use internal client for POST so we can force fast-post mode
+ int_client = self.make_internal_client(object_post_as_copy=False)
+ int_client.set_object_metadata(
+ self.account, 'c1', 'o1', {'X-Object-Meta-Fruit': 'Tomato'})
+ self.assertEqual(
+ 'Tomato',
+ int_client.get_object_metadata(self.account, 'c1', 'o1')
+ ['x-object-meta-fruit']) # sanity
+
+ # re-start the container server and assert that it does not yet know
+ # about the object
+ start_server((cnodes[0]['ip'], cnodes[0]['port']), self.ipport2server)
+ self.assertFalse(direct_client.direct_get_container(
+ cnodes[0], cpart, self.account, 'c1')[1])
+
+ # Run the object-updaters to send the async pendings
+ Manager(['object-updater']).once()
+
+ # check the re-started container server got the same update as others.
+ # we cannot assert the actual etag value because it may be encrypted
+ listing_etags = set()
+ for cnode in cnodes:
+ obj = direct_client.direct_get_container(
+ cnode, cpart, self.account, 'c1')[1][0]
+ self.assertEqual(len(content), obj['bytes'])
+ listing_etags.add(obj['hash'])
+ self.assertEqual(1, len(listing_etags))
+
+ # check that listing meta returned to client is consistent with object
+ # meta returned to client
+ hdrs, listing = client.get_container(self.url, self.token, 'c1')
+ self.assertEqual(1, len(listing))
+ self.assertEqual('o1', listing[0]['name'])
+ self.assertEqual(len(content), listing[0]['bytes'])
+ self.assertEqual(meta['etag'], listing[0]['hash'])
+
+
if __name__ == '__main__':
main()
diff --git a/test/probe/test_object_failures.py b/test/probe/test_object_failures.py
index ba5317774..1850b2750 100755
--- a/test/probe/test_object_failures.py
+++ b/test/probe/test_object_failures.py
@@ -77,6 +77,12 @@ class TestObjectFailures(ReplProbeTest):
obj = 'object-%s' % uuid4()
onode, opart, data_file = self._setup_data_file(container, obj,
'VERIFY')
+ # Stash the on disk data for future comparison - this may not equal
+ # 'VERIFY' if for example the proxy has crypto enabled
+ backend_data = direct_client.direct_get_object(
+ onode, opart, self.account, container, obj, headers={
+ 'X-Backend-Storage-Policy-Index': self.policy.idx})[-1]
+
metadata = read_metadata(data_file)
metadata['ETag'] = 'badetag'
write_metadata(data_file, metadata)
@@ -84,7 +90,7 @@ class TestObjectFailures(ReplProbeTest):
odata = direct_client.direct_get_object(
onode, opart, self.account, container, obj, headers={
'X-Backend-Storage-Policy-Index': self.policy.idx})[-1]
- self.assertEqual(odata, 'VERIFY')
+ self.assertEqual(odata, backend_data)
try:
direct_client.direct_get_object(
onode, opart, self.account, container, obj, headers={
@@ -98,14 +104,19 @@ class TestObjectFailures(ReplProbeTest):
obj = 'object-range-%s' % uuid4()
onode, opart, data_file = self._setup_data_file(container, obj,
'RANGE')
+ # Stash the on disk data for future comparison - this may not equal
+ # 'RANGE' if for example the proxy has crypto enabled
+ backend_data = direct_client.direct_get_object(
+ onode, opart, self.account, container, obj, headers={
+ 'X-Backend-Storage-Policy-Index': self.policy.idx})[-1]
metadata = read_metadata(data_file)
metadata['ETag'] = 'badetag'
write_metadata(data_file, metadata)
base_headers = {'X-Backend-Storage-Policy-Index': self.policy.idx}
- for header, result in [({'Range': 'bytes=0-2'}, 'RAN'),
- ({'Range': 'bytes=1-11'}, 'ANGE'),
- ({'Range': 'bytes=0-11'}, 'RANGE')]:
+ for header, result in [({'Range': 'bytes=0-2'}, backend_data[0:3]),
+ ({'Range': 'bytes=1-11'}, backend_data[1:]),
+ ({'Range': 'bytes=0-11'}, backend_data)]:
req_headers = base_headers.copy()
req_headers.update(header)
odata = direct_client.direct_get_object(
diff --git a/test/probe/test_object_handoff.py b/test/probe/test_object_handoff.py
index a360021b7..ca0b3d0e0 100755
--- a/test/probe/test_object_handoff.py
+++ b/test/probe/test_object_handoff.py
@@ -46,8 +46,7 @@ class TestObjectHandoff(ReplProbeTest):
opart, onodes = self.object_ring.get_nodes(
self.account, container, obj)
onode = onodes[0]
- kill_server((onode['ip'], onode['port']),
- self.ipport2server, self.pids)
+ kill_server((onode['ip'], onode['port']), self.ipport2server)
# Create container/obj (goes to two primary servers and one handoff)
client.put_object(self.url, self.token, container, obj, 'VERIFY')
@@ -56,11 +55,17 @@ class TestObjectHandoff(ReplProbeTest):
raise Exception('Object GET did not return VERIFY, instead it '
'returned: %s' % repr(odata))
+ # Stash the on disk data from a primary for future comparison with the
+ # handoff - this may not equal 'VERIFY' if for example the proxy has
+ # crypto enabled
+ direct_get_data = direct_client.direct_get_object(
+ onodes[1], opart, self.account, container, obj, headers={
+ 'X-Backend-Storage-Policy-Index': self.policy.idx})[-1]
+
# Kill other two container/obj primary servers
# to ensure GET handoff works
for node in onodes[1:]:
- kill_server((node['ip'], node['port']),
- self.ipport2server, self.pids)
+ kill_server((node['ip'], node['port']), self.ipport2server)
# Indirectly through proxy assert we can get container/obj
odata = client.get_object(self.url, self.token, container, obj)[-1]
@@ -70,8 +75,7 @@ class TestObjectHandoff(ReplProbeTest):
# Restart those other two container/obj primary servers
for node in onodes[1:]:
- start_server((node['ip'], node['port']),
- self.ipport2server, self.pids)
+ start_server((node['ip'], node['port']), self.ipport2server)
# We've indirectly verified the handoff node has the container/object,
# but let's directly verify it.
@@ -79,9 +83,7 @@ class TestObjectHandoff(ReplProbeTest):
odata = direct_client.direct_get_object(
another_onode, opart, self.account, container, obj, headers={
'X-Backend-Storage-Policy-Index': self.policy.idx})[-1]
- if odata != 'VERIFY':
- raise Exception('Direct object GET did not return VERIFY, instead '
- 'it returned: %s' % repr(odata))
+ self.assertEqual(direct_get_data, odata)
# drop a tempfile in the handoff's datadir, like it might have
# had if there was an rsync failure while it was previously a
@@ -114,8 +116,7 @@ class TestObjectHandoff(ReplProbeTest):
(cnode['ip'], cnode['port']))
# Bring the first container/obj primary server back up
- start_server((onode['ip'], onode['port']),
- self.ipport2server, self.pids)
+ start_server((onode['ip'], onode['port']), self.ipport2server)
# Assert that it doesn't have container/obj yet
try:
@@ -147,9 +148,7 @@ class TestObjectHandoff(ReplProbeTest):
odata = direct_client.direct_get_object(
onode, opart, self.account, container, obj, headers={
'X-Backend-Storage-Policy-Index': self.policy.idx})[-1]
- if odata != 'VERIFY':
- raise Exception('Direct object GET did not return VERIFY, instead '
- 'it returned: %s' % repr(odata))
+ self.assertEqual(direct_get_data, odata)
# and that it does *not* have a temporary rsync dropping!
found_data_filename = False
@@ -177,8 +176,7 @@ class TestObjectHandoff(ReplProbeTest):
# Kill the first container/obj primary server again (we have two
# primaries and the handoff up now)
- kill_server((onode['ip'], onode['port']),
- self.ipport2server, self.pids)
+ kill_server((onode['ip'], onode['port']), self.ipport2server)
# Delete container/obj
try:
@@ -215,8 +213,7 @@ class TestObjectHandoff(ReplProbeTest):
(cnode['ip'], cnode['port']))
# Restart the first container/obj primary server again
- start_server((onode['ip'], onode['port']),
- self.ipport2server, self.pids)
+ start_server((onode['ip'], onode['port']), self.ipport2server)
# Assert it still has container/obj
direct_client.direct_get_object(
@@ -279,6 +276,14 @@ class TestECObjectHandoffOverwrite(ECProbeTest):
# shutdown one of the primary data nodes
failed_primary = random.choice(onodes)
failed_primary_device_path = self.device_dir('object', failed_primary)
+ # first read its ec etag value for future reference - this may not
+ # equal old_contents.etag if for example the proxy has crypto enabled
+ req_headers = {'X-Backend-Storage-Policy-Index': int(self.policy)}
+ headers = direct_client.direct_head_object(
+ failed_primary, opart, self.account, container_name,
+ object_name, headers=req_headers)
+ old_backend_etag = headers['X-Object-Sysmeta-EC-Etag']
+
self.kill_drive(failed_primary_device_path)
# overwrite our object with some new data
@@ -296,13 +301,18 @@ class TestECObjectHandoffOverwrite(ECProbeTest):
failed_primary, opart, self.account, container_name,
object_name, headers=req_headers)
self.assertEqual(headers['X-Object-Sysmeta-EC-Etag'],
- old_contents.etag)
+ old_backend_etag)
# we have 1 primary with wrong old etag, and we should have 5 with
# new etag plus a handoff with the new etag, so killing 2 other
# primaries forces proxy to try to GET from all primaries plus handoff.
other_nodes = [n for n in onodes if n != failed_primary]
random.shuffle(other_nodes)
+ # grab the value of the new content's ec etag for future reference
+ headers = direct_client.direct_head_object(
+ other_nodes[0], opart, self.account, container_name,
+ object_name, headers=req_headers)
+ new_backend_etag = headers['X-Object-Sysmeta-EC-Etag']
for node in other_nodes[:2]:
self.kill_drive(self.device_dir('object', node))
@@ -320,8 +330,8 @@ class TestECObjectHandoffOverwrite(ECProbeTest):
continue
found_frags[headers['X-Object-Sysmeta-EC-Etag']] += 1
self.assertEqual(found_frags, {
- new_contents.etag: 4, # this should be enough to rebuild!
- old_contents.etag: 1,
+ new_backend_etag: 4, # this should be enough to rebuild!
+ old_backend_etag: 1,
})
# clear node error limiting
diff --git a/test/unit/common/middleware/crypto_helpers.py b/test/unit/common/middleware/crypto_helpers.py
index 72868e71f..5b6136fe3 100644
--- a/test/unit/common/middleware/crypto_helpers.py
+++ b/test/unit/common/middleware/crypto_helpers.py
@@ -21,7 +21,8 @@ from swift.common.middleware.crypto_utils import Crypto
def fetch_crypto_keys():
return {'account': 'This is an account key 012345678',
'container': 'This is a container key 01234567',
- 'object': 'This is an object key 0123456789'}
+ 'object': 'This is an object key 0123456789',
+ 'id': {'v': 'fake', 'path': '/a/c/fake'}}
def md5hex(s):
diff --git a/test/unit/common/middleware/test_decrypter.py b/test/unit/common/middleware/test_decrypter.py
index 5b987980d..f405c2f1d 100644
--- a/test/unit/common/middleware/test_decrypter.py
+++ b/test/unit/common/middleware/test_decrypter.py
@@ -528,8 +528,6 @@ class TestDecrypterObjectRequests(unittest.TestCase):
resp = req.get_response(self.decrypter)
self.assertEqual('3456789a', resp.body)
self.assertEqual('200 OK', resp.status)
- # TODO - how do we validate the range body if etag is for whole? Is
- # the test actually faking the correct Etag in response?
self.assertEqual(plaintext_etag, resp.headers['Etag'])
self.assertEqual('text/plain', resp.headers['Content-Type'])
diff --git a/test/unit/common/middleware/test_encrypter.py b/test/unit/common/middleware/test_encrypter.py
index e0a273282..48ebba741 100644
--- a/test/unit/common/middleware/test_encrypter.py
+++ b/test/unit/common/middleware/test_encrypter.py
@@ -84,6 +84,7 @@ class TestEncrypter(unittest.TestCase):
base64.b64decode(actual['body_key']['key']))
self.assertEqual(FAKE_IV,
base64.b64decode(actual['body_key']['iv']))
+ self.assertEqual(fetch_crypto_keys()['id'], actual['key_id'])
# verify etag
self.assertEqual(ciphertext_etag, req_hdrs['Etag'])
@@ -119,6 +120,7 @@ class TestEncrypter(unittest.TestCase):
actual = json.loads(urllib.unquote_plus(param[len(crypto_meta_tag):]))
self.assertEqual(Crypto().get_cipher(), actual['cipher'])
self.assertEqual(etag_iv, base64.b64decode(actual['iv']))
+ self.assertEqual(fetch_crypto_keys()['id'], actual['key_id'])
# content-type is not encrypted
self.assertEqual('text/plain', req_hdrs['Content-Type'])
@@ -139,6 +141,7 @@ class TestEncrypter(unittest.TestCase):
actual = json.loads(urllib.unquote_plus(actual))
self.assertEqual(Crypto().get_cipher(), actual['cipher'])
self.assertEqual(FAKE_IV, base64.b64decode(actual['iv']))
+ self.assertEqual(fetch_crypto_keys()['id'], actual['key_id'])
# sysmeta is not encrypted
self.assertEqual('do not encrypt me',
@@ -300,6 +303,7 @@ class TestEncrypter(unittest.TestCase):
base64.b64decode(actual['body_key']['key']))
self.assertEqual(FAKE_IV,
base64.b64decode(actual['body_key']['iv']))
+ self.assertEqual(fetch_crypto_keys()['id'], actual['key_id'])
def test_PUT_with_etag_override_in_headers(self):
# verify handling of another middleware's
@@ -345,6 +349,7 @@ class TestEncrypter(unittest.TestCase):
actual = json.loads(urllib.unquote_plus(param[len(crypto_meta_tag):]))
self.assertEqual(Crypto().get_cipher(), actual['cipher'])
self.assertEqual(etag_iv, base64.b64decode(actual['iv']))
+ self.assertEqual(fetch_crypto_keys()['id'], actual['key_id'])
def test_PUT_with_bad_etag_in_other_footers(self):
# verify that etag supplied in footers from other middleware overrides
@@ -471,6 +476,7 @@ class TestEncrypter(unittest.TestCase):
actual = json.loads(urllib.unquote_plus(actual))
self.assertEqual(Crypto().get_cipher(), actual['cipher'])
self.assertEqual(FAKE_IV, base64.b64decode(actual['iv']))
+ self.assertEqual(fetch_crypto_keys()['id'], actual['key_id'])
# sysmeta is not encrypted
self.assertEqual('do not encrypt me',
diff --git a/test/unit/common/middleware/test_encrypter_decrypter.py b/test/unit/common/middleware/test_encrypter_decrypter.py
index 64b431c6e..9f7e0a6f2 100644
--- a/test/unit/common/middleware/test_encrypter_decrypter.py
+++ b/test/unit/common/middleware/test_encrypter_decrypter.py
@@ -260,6 +260,15 @@ class TestCryptoPipelineChanges(unittest.TestCase):
self._check_match_requests('HEAD', self.crypto_app)
self._check_listing(self.crypto_app)
+ def test_put_without_crypto_post_with_crypto_read_with_crypto(self):
+ self._create_container(self.proxy_app, policy_name='one')
+ self._put_object(self.proxy_app, self.plaintext)
+ self._post_object(self.crypto_app)
+ self._check_GET_and_HEAD(self.crypto_app)
+ self._check_match_requests('GET', self.crypto_app)
+ self._check_match_requests('HEAD', self.crypto_app)
+ self._check_listing(self.crypto_app)
+
def test_write_without_crypto_read_with_crypto(self):
self._create_container(self.proxy_app, policy_name='one')
self._put_object(self.proxy_app, self.plaintext)
diff --git a/test/unit/common/middleware/test_keymaster.py b/test/unit/common/middleware/test_keymaster.py
index c7da6de7d..3c7a4f3a6 100644
--- a/test/unit/common/middleware/test_keymaster.py
+++ b/test/unit/common/middleware/test_keymaster.py
@@ -39,19 +39,19 @@ class TestKeymaster(unittest.TestCase):
def setUp(self):
super(TestKeymaster, self).setUp()
self.swift = FakeSwift()
+ self.app = keymaster.KeyMaster(self.swift, TEST_KEYMASTER_CONF)
def test_object_path(self):
self.verify_keys_for_path(
- '/v1/a/c/o', expected_keys=('object', 'container'),
+ '/a/c/o', expected_keys=('object', 'container'),
key_id=base64.b64encode('/a/c/o'))
def test_container_path(self):
self.verify_keys_for_path(
- '/v1/a/c', expected_keys=('container',))
+ '/a/c', expected_keys=('container',))
def verify_keys_for_path(self, path, expected_keys, key_id=None):
put_keys = None
- app = keymaster.KeyMaster(self.swift, TEST_KEYMASTER_CONF)
for method, resp_class, status in (
('PUT', swob.HTTPCreated, '201'),
('POST', swob.HTTPAccepted, '202'),
@@ -60,16 +60,22 @@ class TestKeymaster(unittest.TestCase):
resp_headers = {}
if key_id is not None:
resp_headers.update({'X-Object-Sysmeta-Crypto-Id': key_id})
- self.swift.register(method, path, resp_class, resp_headers, '')
- req = Request.blank(path, environ={'REQUEST_METHOD': method})
+ self.swift.register(
+ method, '/v1' + path, resp_class, resp_headers, '')
+ req = Request.blank(
+ '/v1' + path, environ={'REQUEST_METHOD': method})
start_response, calls = capture_start_response()
- app(req.environ, start_response)
+ self.app(req.environ, start_response)
self.assertEqual(1, len(calls))
self.assertTrue(calls[0][0].startswith(status))
self.assertNotIn('swift.crypto.override', req.environ)
self.assertIn(CRYPTO_KEY_CALLBACK, req.environ,
'%s not set in env' % CRYPTO_KEY_CALLBACK)
keys = req.environ.get(CRYPTO_KEY_CALLBACK)()
+ self.assertIn('id', keys)
+ id = keys.pop('id')
+ self.assertEqual(base64.b64encode(path), id['path'])
+ self.assertEqual('1', id['v'])
self.assertListEqual(sorted(expected_keys), sorted(keys.keys()),
'%s %s got keys %r, but expected %r'
% (method, path, keys.keys(), expected_keys))
@@ -85,7 +91,7 @@ class TestKeymaster(unittest.TestCase):
ref_path_parts = ('a1', 'c1', 'o1')
path = '/' + '/'.join(ref_path_parts)
ref_keys = self.verify_keys_for_path(
- '/v1' + path, expected_keys=('object', 'container'),
+ path, expected_keys=('object', 'container'),
key_id=base64.b64encode(path))
# for same path and for each differing path check that keys are unique
@@ -95,7 +101,7 @@ class TestKeymaster(unittest.TestCase):
for o in ('o1', 'o2')]:
path = '/' + '/'.join(path_parts)
keys = self.verify_keys_for_path(
- '/v1' + path, expected_keys=('object', 'container'),
+ path, expected_keys=('object', 'container'),
key_id=base64.b64encode(path))
# object keys should only be equal when complete paths are equal
self.assertEqual(path_parts == ref_path_parts,
@@ -109,98 +115,6 @@ class TestKeymaster(unittest.TestCase):
'Path %s keys:\n%s\npath %s keys\n%s' %
(ref_path_parts, ref_keys, path_parts, keys))
- def test_object_with_different_key_id(self):
- # object was put using different path; stored key_id should be used
- # to generate keys, not the GET path
- path = '/v1/a/c/o'
- key_id = base64.b64encode('/a/c/o')
- resp_headers = {'X-Object-Sysmeta-Crypto-Id': key_id}
- # first get keys when path matches key_id
- method = 'HEAD'
- self.swift.register(method, path, swob.HTTPOk, resp_headers, '')
- app = keymaster.KeyMaster(self.swift, TEST_KEYMASTER_CONF)
- req = Request.blank(path, environ={'REQUEST_METHOD': method})
- start_response, calls = capture_start_response()
- app(req.environ, start_response)
- self.assertEqual(1, len(calls))
- self.assertEqual('200 OK', calls[0][0])
- self.assertIn(CRYPTO_KEY_CALLBACK, req.environ)
- expected_keys = req.environ.get(CRYPTO_KEY_CALLBACK)()
-
- # now change path but verify that keys match key_id, not path
- path = '/v1/a/got/relocated'
- for method in ('HEAD', 'GET'):
- self.swift.register(method, path, swob.HTTPOk, resp_headers, '')
- app = keymaster.KeyMaster(self.swift, TEST_KEYMASTER_CONF)
- req = Request.blank(path, environ={'REQUEST_METHOD': method})
- start_response, calls = capture_start_response()
- app(req.environ, start_response)
- self.assertEqual(1, len(calls))
- self.assertEqual('200 OK', calls[0][0])
- self.assertIn(CRYPTO_KEY_CALLBACK, req.environ)
- actual_keys = req.environ.get(CRYPTO_KEY_CALLBACK)()
- self.assertDictEqual(expected_keys, actual_keys)
-
- def test_object_with_no_key_id(self):
- # object was not put using keymaster so has no key id, that's ok
- for method in ('HEAD', 'GET'):
- path = '/v1/a/c/o'
- self.swift.register(method, path, swob.HTTPOk, {}, '')
- app = keymaster.KeyMaster(self.swift, TEST_KEYMASTER_CONF)
- req = Request.blank(path, environ={'REQUEST_METHOD': method})
- start_response, calls = capture_start_response()
- app(req.environ, start_response)
- self.assertEqual(1, len(calls))
- self.assertEqual('200 OK', calls[0][0])
- self.assertIn('swift.crypto.override', req.environ)
- self.assertNotIn(CRYPTO_KEY_CALLBACK, req.environ)
-
- def test_object_with_no_key_id_but_crypto_meta(self):
- # object should have a key id if it has any
- # x-object-sysmeta-crypto-meta or
- # x-object-transient-sysmeta-crypto-meta- header
- path = '/v1/a/c/o'
- for method in ('HEAD', 'GET'):
- # object has x-object-transient-sysmeta-crypto header but no key id
- self.swift.register(
- method, path, swob.HTTPOk,
- {'x-object-transient-sysmeta-crypto-meta-foo': 'gotcha',
- 'x-object-meta-foo': 'ciphertext of user meta value'},
- '')
- app = keymaster.KeyMaster(self.swift, TEST_KEYMASTER_CONF)
- req = Request.blank(path, environ={'REQUEST_METHOD': method})
- start_response, calls = capture_start_response()
- app(req.environ, start_response)
- self.assertEqual(1, len(calls))
- self.assertEqual('422 Unprocessable Entity', calls[0][0])
-
- # object has x-object-sysmeta-crypto-meta but no key id
- self.swift.register(
- method, path, swob.HTTPOk,
- {'x-object-sysmeta-crypto-meta': 'gotcha'},
- '')
- req = Request.blank(path, environ={'REQUEST_METHOD': method})
- start_response, calls = capture_start_response()
- app(req.environ, start_response)
- self.assertEqual(1, len(calls))
- self.assertEqual('422 Unprocessable Entity', calls[0][0])
-
- # but "crypto-meta" in other headers is ok
- path = '/v1/a/c/o'
- for method in ('HEAD', 'GET'):
- self.swift.register(method, path, swob.HTTPOk,
- {'x-object-sysmeta-foo-crypto-meta': 'ok',
- 'x-object-sysmeta-foo-crypto-metabolic': 'ok',
- 'x-object-meta-crypto-meta': 'no probs',
- 'crypto-meta': 'pas de problem'},
- '')
- app = keymaster.KeyMaster(self.swift, TEST_KEYMASTER_CONF)
- req = Request.blank(path, environ={'REQUEST_METHOD': method})
- start_response, calls = capture_start_response()
- app(req.environ, start_response)
- self.assertEqual(1, len(calls))
- self.assertEqual('200 OK', calls[0][0])
-
def test_filter(self):
factory = keymaster.filter_factory(TEST_KEYMASTER_CONF)
self.assertTrue(callable(factory))
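The keymaster tests deleted above exercised a key id that was stored as object sysmeta at PUT time and then preferred over the request path on GET/HEAD. A minimal sketch of the setup those tests built, with the values taken from the deleted lines (Python 2 idiom, so b64encode takes a str); this is illustration only, not part of the patch:

import base64

key_id = base64.b64encode('/a/c/o')                    # key id recorded from the original PUT path
resp_headers = {'X-Object-Sysmeta-Crypto-Id': key_id}  # sysmeta the old keymaster read back on GET/HEAD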
diff --git a/test/unit/common/middleware/test_tempurl.py b/test/unit/common/middleware/test_tempurl.py
index 0d5ed0711..c845b408e 100644
--- a/test/unit/common/middleware/test_tempurl.py
+++ b/test/unit/common/middleware/test_tempurl.py
@@ -30,9 +30,10 @@
import hmac
import itertools
+import mock
import unittest
from hashlib import sha1
-from time import time
+from time import time, strftime, gmtime
from swift.common.middleware import tempauth, tempurl
from swift.common.header_key_dict import HeaderKeyDict
@@ -135,6 +136,9 @@ class TestTempURL(unittest.TestCase):
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.headers['content-disposition'],
'attachment; filename="o"; ' + "filename*=UTF-8''o")
+ self.assertEqual(resp.headers['expires'],
+ strftime('%a, %d %b %Y %H:%M:%S GMT',
+ gmtime(expires)))
self.assertEqual(req.environ['swift.authorize_override'], True)
self.assertEqual(req.environ['REMOTE_USER'], '.wsgi.tempurl')
@@ -183,9 +187,10 @@ class TestTempURL(unittest.TestCase):
for sig in (sig1, sig2):
self.assert_valid_sig(expires, path, account_keys, sig, environ)
- def test_get_valid_with_filename(self):
+ @mock.patch('swift.common.middleware.tempurl.time', return_value=0)
+ def test_get_valid_with_filename(self, mock_time):
method = 'GET'
- expires = int(time() + 86400)
+ expires = (((24 + 1) * 60 + 1) * 60) + 1
path = '/v1/a/c/o'
key = 'abc'
hmac_body = '%s\n%s\n%s' % (method, expires, path)
@@ -199,10 +204,13 @@ class TestTempURL(unittest.TestCase):
self.assertEqual(resp.headers['content-disposition'],
'attachment; filename="bob %22killer%22.txt"; ' +
"filename*=UTF-8''bob%20%22killer%22.txt")
+ self.assertIn('expires', resp.headers)
+ self.assertEqual('Fri, 02 Jan 1970 01:01:01 GMT',
+ resp.headers['expires'])
self.assertEqual(req.environ['swift.authorize_override'], True)
self.assertEqual(req.environ['REMOTE_USER'], '.wsgi.tempurl')
- def test_head_valid(self):
+ def test_head_valid_with_filename(self):
method = 'HEAD'
expires = int(time() + 86400)
path = '/v1/a/c/o'
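The mocked clock in the hunk above makes the new Expires assertion deterministic: tempurl's time() is patched to return 0 and the expiry is set to 25 hours, 1 minute and 1 second past the epoch, so the formatted header value is fixed. A quick check of that arithmetic, using the same strftime pattern the test imports; illustration only, not part of the patch:

from time import gmtime, strftime

expires = (((24 + 1) * 60 + 1) * 60) + 1   # 25h 1m 1s past the epoch = 90061 seconds
print(strftime('%a, %d %b %Y %H:%M:%S GMT', gmtime(expires)))
# Fri, 02 Jan 1970 01:01:01 GMT  -- the literal asserted against resp.headers['expires']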
@@ -211,11 +219,14 @@ class TestTempURL(unittest.TestCase):
sig = hmac.new(key, hmac_body, sha1).hexdigest()
req = self._make_request(path, keys=[key], environ={
'REQUEST_METHOD': 'HEAD',
- 'QUERY_STRING': 'temp_url_sig=%s&temp_url_expires=%s'
- % (sig, expires)})
+ 'QUERY_STRING': 'temp_url_sig=%s&temp_url_expires=%s&'
+ 'filename=bob_killer.txt' % (sig, expires)})
self.tempurl.app = FakeApp(iter([('200 Ok', (), '123')]))
resp = req.get_response(self.tempurl)
self.assertEqual(resp.status_int, 200)
+ self.assertEqual(resp.headers['content-disposition'],
+ 'attachment; filename="bob_killer.txt"; ' +
+ "filename*=UTF-8''bob_killer.txt")
def test_head_and_get_headers_match(self):
method = 'HEAD'
@@ -242,9 +253,10 @@ class TestTempURL(unittest.TestCase):
get_resp = get_req.get_response(self.tempurl)
self.assertEqual(resp.headers, get_resp.headers)
- def test_get_valid_with_filename_and_inline(self):
+ @mock.patch('swift.common.middleware.tempurl.time', return_value=0)
+ def test_get_valid_with_filename_and_inline(self, mock_time):
method = 'GET'
- expires = int(time() + 86400)
+ expires = 1
path = '/v1/a/c/o'
key = 'abc'
hmac_body = '%s\n%s\n%s' % (method, expires, path)
@@ -256,6 +268,9 @@ class TestTempURL(unittest.TestCase):
resp = req.get_response(self.tempurl)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.headers['content-disposition'], 'inline')
+ self.assertIn('expires', resp.headers)
+ self.assertEqual('Thu, 01 Jan 1970 00:00:01 GMT',
+ resp.headers['expires'])
self.assertEqual(req.environ['swift.authorize_override'], True)
self.assertEqual(req.environ['REMOTE_USER'], '.wsgi.tempurl')
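The inline-filename variant follows the same pattern: with time() patched to 0, an expiry of 1 lands one second past the epoch, which is exactly what the literal header value encodes. Illustration only:

from time import gmtime, strftime

print(strftime('%a, %d %b %Y %H:%M:%S GMT', gmtime(1)))
# Thu, 01 Jan 1970 00:00:01 GMT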
@@ -273,6 +288,7 @@ class TestTempURL(unittest.TestCase):
resp = req.get_response(self.tempurl)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.headers['content-disposition'], 'inline')
+ self.assertIn('expires', resp.headers)
self.assertEqual(req.environ['swift.authorize_override'], True)
self.assertEqual(req.environ['REMOTE_USER'], '.wsgi.tempurl')
@@ -292,6 +308,7 @@ class TestTempURL(unittest.TestCase):
self.assertEqual(resp.headers['content-disposition'],
'attachment; filename="a%0D%0Ab"; ' +
"filename*=UTF-8''a%0D%0Ab")
+ self.assertIn('expires', resp.headers)
self.assertEqual(req.environ['swift.authorize_override'], True)
self.assertEqual(req.environ['REMOTE_USER'], '.wsgi.tempurl')
@@ -311,6 +328,7 @@ class TestTempURL(unittest.TestCase):
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.headers['content-disposition'],
'attachment; filename="fu%0Abar"')
+ self.assertIn('expires', resp.headers)
self.assertEqual(req.environ['swift.authorize_override'], True)
self.assertEqual(req.environ['REMOTE_USER'], '.wsgi.tempurl')
@@ -330,6 +348,7 @@ class TestTempURL(unittest.TestCase):
self.assertEqual(resp.headers['content-disposition'],
'attachment; filename="o"; ' +
"filename*=UTF-8''o")
+ self.assertIn('expires', resp.headers)
self.assertEqual(req.environ['swift.authorize_override'], True)
self.assertEqual(req.environ['REMOTE_USER'], '.wsgi.tempurl')
@@ -350,6 +369,7 @@ class TestTempURL(unittest.TestCase):
resp.headers['content-disposition'],
'attachment; filename="/i/want/this/just/as/it/is/"; ' +
"filename*=UTF-8''/i/want/this/just/as/it/is/")
+ self.assertIn('expires', resp.headers)
self.assertEqual(req.environ['swift.authorize_override'], True)
self.assertEqual(req.environ['REMOTE_USER'], '.wsgi.tempurl')
@@ -366,7 +386,8 @@ class TestTempURL(unittest.TestCase):
sig, expires)})
resp = req.get_response(self.tempurl)
self.assertEqual(resp.status_int, 404)
- self.assertFalse('content-disposition' in resp.headers)
+ self.assertNotIn('content-disposition', resp.headers)
+ self.assertNotIn('expires', resp.headers)
self.assertEqual(req.environ['swift.authorize_override'], True)
self.assertEqual(req.environ['REMOTE_USER'], '.wsgi.tempurl')
diff --git a/test/unit/obj/test_server.py b/test/unit/obj/test_server.py
index da7ec0f64..7066d99dd 100755
--- a/test/unit/obj/test_server.py
+++ b/test/unit/obj/test_server.py
@@ -507,21 +507,27 @@ class TestObjectController(unittest.TestCase):
headers_out, objdevice, policy):
calls_made.append((headers_out, policy))
+ body = 'test'
headers = {
'X-Timestamp': t[1].internal,
'Content-Type': 'application/octet-stream;swift_bytes=123456789',
- 'Content-Length': '4',
'X-Backend-Storage-Policy-Index': int(policy)}
if policy.policy_type == EC_POLICY:
+ # EC fragments will typically have a different size from the body, and
+ # for small bodies the fragments may be longer. For this test all that
+ # matters is that the fragment and body lengths differ.
+ body = body + 'ec_overhead'
headers['X-Object-Sysmeta-Container-Update-Override-Etag'] = \
update_etag
+ headers['X-Object-Sysmeta-Container-Update-Override-Size'] = '4'
headers['X-Object-Sysmeta-Ec-Etag'] = update_etag
+ headers['X-Object-Sysmeta-Ec-Content-Length'] = '4'
headers['X-Object-Sysmeta-Ec-Frag-Index'] = 2
+ headers['Content-Length'] = str(len(body))
- req = Request.blank('/sda1/p/a/c/o',
+ req = Request.blank('/sda1/p/a/c/o', body=body,
environ={'REQUEST_METHOD': 'PUT'},
- headers=headers, body='test')
-
+ headers=headers)
with mock.patch('swift.obj.server.ObjectController.container_update',
mock_container_update):
resp = req.get_response(self.object_controller)
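To make the intent of the new headers concrete: under an EC policy the fragment body written to the object server is longer than what the client uploaded, so the PUT carries both the fragment's Content-Length and, via sysmeta, the original 4-byte size that should be reported in the container update. A minimal sketch of the values this test ends up sending (header names copied from the diff; illustration only, not part of the patch):

body = 'test' + 'ec_overhead'   # fragment payload sent to the object server, 15 bytes
headers = {
    'Content-Length': str(len(body)),                        # size of the fragment on disk
    'X-Object-Sysmeta-Ec-Content-Length': '4',               # size of the original client body
    'X-Object-Sysmeta-Container-Update-Override-Size': '4',  # size the container update should report
}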
@@ -725,10 +731,12 @@ class TestObjectController(unittest.TestCase):
# make PUT with given headers and verify correct etag is sent in
# container update
+ body = 'test ec overhead'
headers['X-Timestamp'] = ts_put.internal
+ headers['Content-Length'] = len(body)
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'PUT'},
- headers=headers, body='test')
+ headers=headers, body=body)
with mock.patch(
'swift.obj.server.ObjectController.container_update',
@@ -774,13 +782,13 @@ class TestObjectController(unittest.TestCase):
base_headers = {
'Content-Type': 'application/octet-stream;swift_bytes=123456789',
- 'Content-Length': '4',
'X-Backend-Storage-Policy-Index': int(policy),
'X-Object-Sysmeta-Ec-Frag-Index': 2}
# PUT - old style headers are sufficient
headers = dict(base_headers)
headers['X-Backend-Container-Update-Override-Etag'] = 'expected'
+ headers['X-Object-Sysmeta-Container-Update-Override-Size'] = '4'
headers['X-Object-Sysmeta-Ec-Etag'] = 'expected'
do_test(headers)
@@ -790,6 +798,7 @@ class TestObjectController(unittest.TestCase):
# update with the correct etag).
headers = dict(base_headers)
headers['X-Object-Sysmeta-Container-Update-Override-Etag'] = 'expected'
+ headers['X-Object-Sysmeta-Container-Update-Override-Size'] = '4'
do_test(headers)
# PUT - X-Object-Sysmeta-Container-Update-Override-Etag trumps
@@ -797,6 +806,7 @@ class TestObjectController(unittest.TestCase):
headers = dict(base_headers)
headers['X-Object-Sysmeta-Ec-Etag'] = 'ec etag'
headers['X-Object-Sysmeta-Container-Update-Override-Etag'] = 'expected'
+ headers['X-Object-Sysmeta-Container-Update-Override-Size'] = '4'
do_test(headers)
def _test_PUT_then_POST_async_pendings(self, policy, update_etag=None):