-rw-r--r--  AUTHORS | 1
-rw-r--r--  CHANGELOG | 207
-rw-r--r--  README.rst | 2
-rw-r--r--  bandit.yaml | 3
-rw-r--r--  doc/source/api/container_quotas.rst | 2
-rw-r--r--  doc/source/config/global_memcache_config.rst | 73
-rw-r--r--  doc/source/config/index.rst | 1
-rw-r--r--  doc/source/config/proxy_server_config.rst | 5
-rw-r--r--  doc/source/development_guidelines.rst | 33
-rw-r--r--  doc/source/development_middleware.rst | 2
-rw-r--r--  doc/source/development_saio.rst | 2
-rw-r--r--  doc/source/getting_started.rst | 2
-rw-r--r--  etc/internal-client.conf-sample | 1
-rw-r--r--  etc/proxy-server.conf-sample | 4
-rw-r--r--  releasenotes/notes/2_31_1_release-20ccd07e32b91c1f.yaml | 37
-rw-r--r--  releasenotes/source/2023.1.rst | 6
-rw-r--r--  releasenotes/source/index.rst | 14
-rw-r--r--  swift/cli/ringbuilder.py | 12
-rw-r--r--  swift/common/direct_client.py | 25
-rw-r--r--  swift/common/internal_client.py | 27
-rw-r--r--  swift/common/middleware/account_quotas.py | 149
-rw-r--r--  swift/common/middleware/crypto/decrypter.py | 28
-rw-r--r--  swift/common/ring/builder.py | 3
-rw-r--r--  swift/common/utils/__init__.py (renamed from swift/common/utils.py) | 1371
-rw-r--r--  swift/common/utils/libc.py | 487
-rw-r--r--  swift/common/utils/timestamp.py | 399
-rw-r--r--  swift/common/wsgi.py | 8
-rw-r--r--  swift/container/server.py | 13
-rw-r--r--  swift/container/sharder.py | 555
-rw-r--r--  swift/proxy/controllers/base.py | 15
-rw-r--r--  swift/proxy/controllers/container.py | 2
-rw-r--r--  swift/proxy/controllers/obj.py | 140
-rw-r--r--  test/functional/__init__.py | 5
-rw-r--r--  test/functional/s3api/__init__.py | 10
-rw-r--r--  test/functional/s3api/test_acl.py | 3
-rw-r--r--  test/functional/s3api/test_bucket.py | 9
-rw-r--r--  test/functional/s3api/test_multi_upload.py | 2
-rw-r--r--  test/functional/s3api/test_versioning.py | 4
-rw-r--r--  test/functional/s3api/test_xxe_injection.py | 3
-rw-r--r--  test/functional/test_dlo.py | 2
-rw-r--r--  test/functional/test_object.py | 13
-rw-r--r--  test/functional/test_object_versioning.py | 3
-rw-r--r--  test/functional/test_slo.py | 3
-rw-r--r--  test/functional/test_tempurl.py | 3
-rw-r--r--  test/functional/test_versioned_writes.py | 84
-rw-r--r--  test/probe/test_container_failures.py | 73
-rw-r--r--  test/unit/__init__.py | 6
-rw-r--r--  test/unit/common/middleware/crypto/test_decrypter.py | 14
-rw-r--r--  test/unit/common/middleware/test_account_quotas.py | 371
-rw-r--r--  test/unit/common/ring/test_builder.py | 6
-rw-r--r--  test/unit/common/test_direct_client.py | 16
-rw-r--r--  test/unit/common/test_internal_client.py | 82
-rw-r--r--  test/unit/common/test_utils.py | 2307
-rw-r--r--  test/unit/common/test_wsgi.py | 6
-rw-r--r--  test/unit/common/utils/__init__.py | 0
-rw-r--r--  test/unit/common/utils/test_libc.py | 599
-rw-r--r--  test/unit/common/utils/test_timestamp.py | 882
-rw-r--r--  test/unit/container/test_server.py | 115
-rw-r--r--  test/unit/container/test_sharder.py | 564
-rw-r--r--  test/unit/obj/test_diskfile.py | 3
-rw-r--r--  test/unit/obj/test_server.py | 24
-rw-r--r--  test/unit/proxy/controllers/test_base.py | 28
-rw-r--r--  test/unit/proxy/controllers/test_container.py | 147
-rw-r--r--  test/unit/proxy/test_server.py | 86
64 files changed, 5512 insertions, 3590 deletions
diff --git a/AUTHORS b/AUTHORS
index 89996d5a4..fe932d3c1 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -48,6 +48,7 @@ Alex Holden (alex@alexjonasholden.com)
Alex Pecoraro (alex.pecoraro@emc.com)
Alex Szarka (szarka@inf.u-szeged.hu)
Alex Yang (alex890714@gmail.com)
+Alexander Fadeev (fadeevab.com@gmail.com)
Alexandra Settle (asettle@suse.com)
Alexandre Lécuyer (alexandre.lecuyer@corp.ovh.com)
Alfredo Moralejo (amoralej@redhat.com)
diff --git a/CHANGELOG b/CHANGELOG
index 39c8ac7f1..00b4f53a0 100644
--- a/CHANGELOG
+++ b/CHANGELOG
@@ -1,3 +1,35 @@
+swift (2.31.1, OpenStack Antelope)
+
+ * Sharding fixes
+
+ * Shards no longer report stats to the root database when they are in
+ the CREATED state.
+
+ * Sharding metadata is no longer cleared when databases are deleted.
+ Previously, deleted shards that still had rows could become stuck,
+ never moving those rows to the correct database.
+
+ * Fixed a performance regression in the handling of misplaced objects.
+
+ * Swift path and on-disk path are now included with all sharder logging.
+
+ * `s3token` no longer mangles request paths that include the Access Key ID.
+
+ * User metadata is now exposed via CORS when encryption is enabled,
+ matching the behavior when encryption is not enabled.
+
+ * Fewer backend requests are now required when account or container
+ information is missing from memcache.
+
+ * Fixed logging of IP and port in the proxy-server; in particular,
+ internal clients now correctly log the replication IP/port.
+
+ * Fixed a bug in the object replicator that would cause an under-reporting
+ of failures.
+
+ * Various other minor bug fixes.
+
+
swift (2.31.0)
* S3 API improvements
@@ -93,7 +125,18 @@ swift (2.31.0)
* Various other minor bug fixes and improvements.
-swift (2.30.0)
+swift (2.30.1, zed stable backports)
+
+ * Fixed a security issue in how `s3api` handles XML parsing that allowed
+ authenticated S3 clients to read arbitrary files from proxy servers.
+ Refer to CVE-2022-47950 for more information.
+
+ * Fixed a path-rewriting bug introduced in Python 3.7.14, 3.8.14, 3.9.14,
+ and 3.10.6 that could cause some `domain_remap` requests to be routed to
+ the wrong object.
+
+
+swift (2.30.0, OpenStack Zed)
* Sharding improvements
@@ -230,6 +273,22 @@ swift (2.30.0)
* Various other minor bug fixes and improvements.
+swift (2.29.2, yoga stable backports)
+
+ * Fixed a security issue in how `s3api` handles XML parsing that allowed
+ authenticated S3 clients to read arbitrary files from proxy servers.
+ Refer to CVE-2022-47950 for more information.
+
+ * Constant-time string comparisons are now used when checking S3 API
+ signatures.
+
+ * Fixed a path-rewriting bug introduced in Python 3.7.14, 3.8.14, 3.9.14,
+ and 3.10.6 that could cause some `domain_remap` requests to be routed to
+ the wrong object.
+
+ * Improved compatibility with certain FIPS-mode-enabled systems.
+
+
swift (2.29.1, OpenStack Yoga)
* This is the final stable branch that will support Python 2.7.
@@ -404,6 +463,25 @@ swift (2.29.0)
* Various other minor bug fixes and improvements.
+swift (2.28.1, xena stable backports)
+
+ * Fixed a security issue in how `s3api` handles XML parsing that allowed
+ authenticated S3 clients to read arbitrary files from proxy servers.
+ Refer to CVE-2022-47950 for more information.
+
+ * Constant-time string comparisons are now used when checking S3 API
+ signatures.
+
+ * Fixed a path-rewriting bug introduced in Python 3.7.14, 3.8.14, 3.9.14,
+ and 3.10.6 that could cause some `domain_remap` requests to be routed to
+ the wrong object.
+
+ * Improved compatibility with certain FIPS-mode-enabled systems.
+
+ * Ensure that non-durable data and .meta files are purged from handoffs
+ after syncing.
+
+
swift (2.28.0, OpenStack Xena)
* Sharding improvements:
@@ -943,6 +1021,40 @@ swift (2.26.0, OpenStack Victoria)
* Various other minor bug fixes and improvements.
+swift (2.25.1, ussuri stable backports)
+
+ * Python 3 bug fixes:
+
+ * Fixed an error when reading encrypted data that was written while
+ running Python 2 for a path that includes non-ASCII characters. This
+ was caused by a difference in string types that resulted in
+ ambiguity when decrypting. To prevent the ambiguity for new data, set
+ `meta_version_to_write = 3` in your keymaster configuration after
+ upgrading all proxy servers.
+
+ If upgrading from Swift 2.20.0 or Swift 2.19.1 or earlier, set
+ `meta_version_to_write = 1` in your keymaster configuration prior
+ to upgrading.
+
+ * Object expiration respects the `expiring_objects_container_divisor`
+ config option.
+
+ * `fallocate_reserve` may be specified as a percentage in more places.
+
+ * The ETag-quoting middleware no longer raises TypeErrors.
+
+ * Improved how containers reclaim deleted rows, reducing lock contention
+ and improving object update throughput.
+
+ * Fix a proxy-server error when retrieving erasure coded data when
+ there are durable fragments but not enough to reconstruct.
+
+ * Fixed some SignatureDoesNotMatch errors when using the AWS .NET SDK.
+
+ * Region name config option is now respected when configuring S3 credential
+ caching.
+
+
swift (2.25.0, OpenStack Ussuri)
* WSGI server processes can now notify systemd when they are ready.
@@ -1063,6 +1175,99 @@ swift (2.24.0)
* Various other minor bug fixes and improvements.
+swift (2.23.3, train stable backports)
+
+ * Sharding improvements:
+
+ * Prevent object updates from auto-creating shard containers. This
+ ensures more consistent listings for sharded containers during
+ rebalances.
+
+ * Deleted shard containers are no longer considered root containers.
+ This prevents unnecessary sharding audit failures and allows the
+ deleted shard database to actually be unlinked.
+
+ * The sharder daemon has been enhanced to better support the shrinking
+ of shards that are no longer required. Shard containers will now
+ discover from their root container if they should be shrinking. They
+ will also discover the shards into which they should shrink, which may
+ include the root container itself.
+
+ * Improved performance of sharded container listings when performing
+ prefix listings.
+
+ * Improved container-sharder stat reporting to reduce load on root
+ container databases.
+
+ * The container sharder can now handle containers with special
+ characters in their names.
+
+ * `swift-container-info` now summarizes shard range information.
+ Pass `-v`/`--verbose` if you want to see all of them.
+
+ * Don't inject shard ranges when the user quits.
+
+ * Various other minor bug fixes and improvements.
+
+ * Python 3 bug fixes:
+
+ * Fixed a potential server error when uploading data via a tempurl.
+
+ * Fixed a potential server error when getting symlink details.
+
+ * Added the ability to connect to memcached over TLS. See the
+ `tls_*` options in etc/memcache.conf-sample.
+
+
+swift (2.23.2, train stable backports)
+
+ * Python 3 bug fixes:
+
+ * Fixed an error when reading encrypted data that was written while
+ running Python 2 for a path that includes non-ASCII characters. This
+ was caused by a difference in string types that resulted in
+ ambiguity when decrypting. To prevent the ambiguity for new data, set
+ `meta_version_to_write = 3` in your keymaster configuration after
+ upgrading all proxy servers.
+
+ If upgrading from Swift 2.20.0 or Swift 2.19.1 or earlier, set
+ `meta_version_to_write = 1` in your keymaster configuration prior
+ to upgrading.
+
+ * Fixed an issue when reading or writing objects with a content-type
+ like `message/*`. Previously, Swift would fail to respond.
+
+ * Object expiration respects the `expiring_objects_container_divisor`
+ config option.
+
+ * `fallocate_reserve` may be specified as a percentage in more places.
+
+ * The formpost middleware now works with unicode file names.
+
+ * Certain S3 API headers are now lower case, as they would be coming
+ from AWS.
+
+ * Improved how containers reclaim deleted rows, reducing lock contention
+ and improving object update throughput.
+
+ * Fix a proxy-server error when retrieving erasure coded data when
+ there are durable fragments but not enough to reconstruct.
+
+ * Fixed 500 from cname_lookup middleware. Previously, if the looked-up
+ domain was used by domain_remap to update the request path, the
+ server would respond with an Internal Error.
+
+ * The bulk extract middleware once again allows clients to specify metadata
+ (including expiration timestamps) for all objects in the archive.
+
+ * Errors encountered while validating static symlink targets no longer
+ cause BadResponseLength errors in the proxy-server.
+
+ * Fixed some SignatureDoesNotMatch errors when using the AWS .NET SDK.
+
+ * Various other minor bug fixes and improvements.
+
+
swift (2.23.1, train stable backports)
* On Python 3, the KMS keymaster now works with secrets stored
diff --git a/README.rst b/README.rst
index 460c22e0b..b146402aa 100644
--- a/README.rst
+++ b/README.rst
@@ -141,7 +141,7 @@ For Client Apps
---------------
For client applications, official Python language bindings are provided
-at https://github.com/openstack/python-swiftclient.
+at https://opendev.org/openstack/python-swiftclient.
Complete API documentation at
https://docs.openstack.org/api-ref/object-store/
diff --git a/bandit.yaml b/bandit.yaml
index ed1c2b61e..7e9f58542 100644
--- a/bandit.yaml
+++ b/bandit.yaml
@@ -27,7 +27,6 @@
# B306 : mktemp_q
# B307 : eval
# B308 : mark_safe
-# B309 : httpsconnection
# B310 : urllib_urlopen
# B311 : random
# B312 : telnetlib
@@ -80,7 +79,7 @@
# B703 : django_mark_safe
# (optional) list included test IDs here, eg '[B101, B406]':
-tests: [B102, B103, B302, B303, B304, B305, B306, B308, B309, B310, B401, B501, B502, B506, B601, B602, B609]
+tests: [B102, B103, B302, B303, B304, B305, B306, B308, B310, B401, B501, B502, B506, B601, B602, B609]
# (optional) list skipped test IDs here, eg '[B101, B406]':
skips:
diff --git a/doc/source/api/container_quotas.rst b/doc/source/api/container_quotas.rst
index a41561274..9c58eef27 100644
--- a/doc/source/api/container_quotas.rst
+++ b/doc/source/api/container_quotas.rst
@@ -1,3 +1,5 @@
+.. _container_quotas:
+
================
Container quotas
================
diff --git a/doc/source/config/global_memcache_config.rst b/doc/source/config/global_memcache_config.rst
new file mode 100644
index 000000000..21ef6c43a
--- /dev/null
+++ b/doc/source/config/global_memcache_config.rst
@@ -0,0 +1,73 @@
+.. _memcache-config:
+
+-----------------------------
+Global Memcache Configuration
+-----------------------------
+
+This document describes the configuration options available for the global swift memcache configuration
+which usually lives under /etc/swift/memcache.conf.
+Documentation for other swift configuration options can be found at
+:doc:`index`.
+
+An example memcache.conf configuration can be found at
+etc/memcache.conf-sample in the source code repository.
+
+There is only one configuration section available:
+
+* :ref:`[memcache] <memcache_conf_memcache_section>`
+
+
+.. _memcache_conf_memcache_section:
+
+**********
+[memcache]
+**********
+
+=========================== =============== =============================================
+Option                      Default         Description
+--------------------------- --------------- ---------------------------------------------
+memcache_servers            127.0.0.1:11211 Comma separated list of memcached servers
+                                            ip:port or [ipv6addr]:port
+memcache_max_connections    2               Max number of connections to each memcached
+                                            server per worker
+connect_timeout             0.3             Timeout for connection
+pool_timeout                1.0             Timeout for pooled connection
+tries                       3               Number of servers to retry on failures
+                                            getting a pooled connection
+io_timeout                  2.0             Timeout for reads and writes
+error_suppression_interval  60.0            How long without an error before a server's
+                                            error count is reset. This will also be how
+                                            long before a server is reenabled after
+                                            suppression is triggered.
+                                            Set to 0 to disable error-limiting.
+error_suppression_limit     10              How many errors can accumulate before a
+                                            server is temporarily ignored.
+item_size_warning_threshold -1              If an item size ever gets above
+                                            item_size_warning_threshold then a warning
+                                            will be logged. This can be used to alert
+                                            when memcache item sizes are getting to
+                                            their limit.
+                                            It's an absolute size in bytes. Setting the
+                                            value to 0 will warn on every memcache set.
+                                            A value of -1 disables the warning.
+tls_enabled                 False           (Optional) Global toggle for TLS usage
+                                            when communicating with the caching servers.
+tls_cafile                                  (Optional) Path to a file of concatenated
+                                            CA certificates in PEM format necessary to
+                                            establish the caching server's authenticity.
+                                            If tls_enabled is False, this option is
+                                            ignored.
+tls_certfile                                (Optional) Path to a single file in PEM
+                                            format containing the client's certificate
+                                            as well as any number of CA certificates
+                                            needed to establish the certificate's
+                                            authenticity. This file is only required
+                                            when client side authentication is
+                                            necessary. If tls_enabled is False,
+                                            this option is ignored.
+tls_keyfile                                 (Optional) Path to a single file containing
+                                            the client's private key. Otherwise the
+                                            private key will be taken from the file
+                                            specified in tls_certfile. If tls_enabled
+                                            is False, this option is ignored.
+=========================== =============== =============================================
\ No newline at end of file
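How these options are consumed is easiest to see in code. Below is a minimal, illustrative sketch (standard library only, not Swift's actual loader) that reads such a memcache.conf and derives an SSL context from the tls_* options; the path and fallback values are assumptions for the example:

    import configparser
    import ssl

    cp = configparser.ConfigParser()
    cp.read('/etc/swift/memcache.conf')   # illustrative path
    section = cp['memcache']

    # Comma separated ip:port (or [ipv6addr]:port) entries
    servers = [s.strip() for s in
               section.get('memcache_servers', '127.0.0.1:11211').split(',')]
    io_timeout = section.getfloat('io_timeout', fallback=2.0)

    tls_context = None
    if section.getboolean('tls_enabled', fallback=False):
        # tls_cafile verifies the server; tls_certfile/tls_keyfile are only
        # needed when client side authentication is necessary
        tls_context = ssl.create_default_context(
            cafile=section.get('tls_cafile', fallback=None))
        certfile = section.get('tls_certfile', fallback=None)
        if certfile:
            tls_context.load_cert_chain(
                certfile, keyfile=section.get('tls_keyfile', fallback=None))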
diff --git a/doc/source/config/index.rst b/doc/source/config/index.rst
index bb2752b53..bfec36b78 100644
--- a/doc/source/config/index.rst
+++ b/doc/source/config/index.rst
@@ -10,6 +10,7 @@ Configuration Documentation
account_server_config.rst
container_server_config.rst
object_server_config.rst
+ global_memcache_config.rst
Configuration options for middleware can be found at:
diff --git a/doc/source/config/proxy_server_config.rst b/doc/source/config/proxy_server_config.rst
index 6e87228ca..d8f3e3eef 100644
--- a/doc/source/config/proxy_server_config.rst
+++ b/doc/source/config/proxy_server_config.rst
@@ -211,7 +211,10 @@ client_chunk_size 65536 Chunk size to r
                                         clients
memcache_servers         127.0.0.1:11211 Comma separated list of
                                         memcached servers
-                                        ip:port or [ipv6addr]:port
+                                        ip:port or [ipv6addr]:port.
+                                        If this value is empty, the memcache
+                                        client will look for a
+                                        :ref:`memcache.conf <memcache-config>` file.
memcache_max_connections 2               Max number of connections to
                                         each memcached server per
                                         worker
diff --git a/doc/source/development_guidelines.rst b/doc/source/development_guidelines.rst
index 6fdf7c507..8db586b46 100644
--- a/doc/source/development_guidelines.rst
+++ b/doc/source/development_guidelines.rst
@@ -46,12 +46,39 @@ To execute the tests:
tox
+To run a selected subset of unit tests with ``pytest``:
+
+* Create a virtual environment with ``tox``::
+
+ tox devenv -e py3 .env
+
+.. note::
+ Alternatively, here are the steps to prepare the virtual environment manually::
+
+ virtualenv .env
+ source .env/bin/activate
+ pip3 install -r requirements.txt -r test-requirements.txt -c py36-constraints.txt
+ pip3 install -e .
+ deactivate
+
+* Activate the virtual environment::
+
+ source .env/bin/activate
+
+* Run some unit tests, for example::
+
+ pytest test/unit/common/middleware/crypto
+
+* Run all unit tests::
+
+ pytest test/unit
+
.. note::
If you installed using ``cd ~/swift; sudo python setup.py develop``, you may
need to do ``cd ~/swift; sudo chown -R ${USER}:${USER} swift.egg-info`` prior
to running ``tox``.
-* By default ``tox`` will run all of the unit test and pep8 checks listed in
+* By default ``tox`` will run **all of the unit tests** and pep8 checks listed in
the ``tox.ini`` file ``envlist`` option. A subset of the test environments
can be specified on the ``tox`` command line or by setting the ``TOXENV``
environment variable. For example, to run only the pep8 checks and python2.7
@@ -63,6 +90,10 @@ To execute the tests:
TOXENV=py27,pep8 tox
+ To run unit tests with python3.8::
+
+ tox -e py38
+
.. note::
As of ``tox`` version 2.0.0, most environment variables are not automatically
passed to the test environment. Swift's ``tox.ini`` overrides this default
diff --git a/doc/source/development_middleware.rst b/doc/source/development_middleware.rst
index 1f7e9e369..774dab518 100644
--- a/doc/source/development_middleware.rst
+++ b/doc/source/development_middleware.rst
@@ -231,7 +231,7 @@ client applications. A perfect example use-case for user metadata is
object it uploads to implement its ``--changed`` option which will only
upload files that have changed since the last upload.
-.. _python-swiftclient: https://github.com/openstack/python-swiftclient
+.. _python-swiftclient: https://opendev.org/openstack/python-swiftclient
New middleware should avoid storing metadata within the User Metadata
namespace to avoid potential conflict with existing user metadata when
diff --git a/doc/source/development_saio.rst b/doc/source/development_saio.rst
index a072b329c..f3ea96d17 100644
--- a/doc/source/development_saio.rst
+++ b/doc/source/development_saio.rst
@@ -287,7 +287,7 @@ Getting the code
#. Check out the python-swiftclient repo::
- cd $HOME; git clone https://github.com/openstack/python-swiftclient.git
+ cd $HOME; git clone https://opendev.org/openstack/python-swiftclient.git
#. Build a development installation of python-swiftclient::
diff --git a/doc/source/getting_started.rst b/doc/source/getting_started.rst
index 81d564e8f..284e338ac 100644
--- a/doc/source/getting_started.rst
+++ b/doc/source/getting_started.rst
@@ -37,7 +37,7 @@ CLI client and SDK library
There are many clients in the :ref:`ecosystem <application-bindings>`. The official CLI
and SDK is python-swiftclient.
-* `Source code <https://github.com/openstack/python-swiftclient>`__
+* `Source code <https://opendev.org/openstack/python-swiftclient>`__
* `Python Package Index <https://pypi.org/project/python-swiftclient>`__
----------
diff --git a/etc/internal-client.conf-sample b/etc/internal-client.conf-sample
index 7ded5fd8a..d9ed5e24b 100644
--- a/etc/internal-client.conf-sample
+++ b/etc/internal-client.conf-sample
@@ -26,6 +26,7 @@
# log_statsd_metric_prefix =
[pipeline:main]
+# Note: gatekeeper middleware is not allowed in the internal client pipeline
pipeline = catch_errors proxy-logging cache symlink proxy-server
[app:proxy-server]
diff --git a/etc/proxy-server.conf-sample b/etc/proxy-server.conf-sample
index d2db8d752..d893ff8d7 100644
--- a/etc/proxy-server.conf-sample
+++ b/etc/proxy-server.conf-sample
@@ -1110,11 +1110,11 @@ use = egg:swift#dlo
# Time limit on GET requests (seconds)
# max_get_time = 86400
-# Note: Put after auth in the pipeline.
+# Note: Put after auth and server-side copy in the pipeline.
[filter:container-quotas]
use = egg:swift#container_quotas
-# Note: Put after auth in the pipeline.
+# Note: Put after auth and server-side copy in the pipeline.
[filter:account-quotas]
use = egg:swift#account_quotas
diff --git a/releasenotes/notes/2_31_1_release-20ccd07e32b91c1f.yaml b/releasenotes/notes/2_31_1_release-20ccd07e32b91c1f.yaml
new file mode 100644
index 000000000..9461721d0
--- /dev/null
+++ b/releasenotes/notes/2_31_1_release-20ccd07e32b91c1f.yaml
@@ -0,0 +1,37 @@
+---
+fixes:
+ - |
+ Sharding fixes
+
+ * Shards no longer report stats to the root database when they are in
+ the ``CREATED`` state.
+
+ * Sharding metadata is no longer cleared when databases are deleted.
+ Previously, deleted shards that still had rows could become stuck,
+ never moving those rows to the correct database.
+
+ * Fixed a performance regression in the handling of misplaced objects.
+
+ * Swift path and on-disk path are now included with all sharder logging.
+
+ - |
+ ``s3token`` no longer mangles request paths that include the Access Key ID.
+
+ - |
+ User metadata is now exposed via CORS when encryption is enabled,
+ matching the behavior when encryption is not enabled.
+
+ - |
+ Fewer backend requests are now required when account or container
+ information is missing from memcache.
+
+ - |
+ Fixed logging of IP and port in the proxy-server; in particular,
+ internal clients now correctly log the replication IP/port.
+
+ - |
+ Fixed a bug in the object replicator that would cause an under-reporting
+ of failures.
+
+ - |
+ Various other minor bug fixes.
diff --git a/releasenotes/source/2023.1.rst b/releasenotes/source/2023.1.rst
new file mode 100644
index 000000000..d1238479b
--- /dev/null
+++ b/releasenotes/source/2023.1.rst
@@ -0,0 +1,6 @@
+===========================
+2023.1 Series Release Notes
+===========================
+
+.. release-notes::
+ :branch: stable/2023.1
diff --git a/releasenotes/source/index.rst b/releasenotes/source/index.rst
index 3607a0391..587a30366 100644
--- a/releasenotes/source/index.rst
+++ b/releasenotes/source/index.rst
@@ -6,29 +6,17 @@
:maxdepth: 1
current
-
+ 2023.1
zed
-
yoga
-
xena
-
wallaby
-
victoria
-
ussuri
-
train
-
stein
-
rocky
-
queens
-
pike
-
ocata
-
newton
diff --git a/swift/cli/ringbuilder.py b/swift/cli/ringbuilder.py
index 5ab6a6f3a..62b956023 100644
--- a/swift/cli/ringbuilder.py
+++ b/swift/cli/ringbuilder.py
@@ -194,7 +194,11 @@ def check_devs(devs, input_question, opts, abort_msg):
print('Matched more than one device:')
for dev in devs:
print(' %s' % format_device(dev))
- if not opts.yes and input(input_question) != 'y':
+ try:
+ abort = not opts.yes and input(input_question) != 'y'
+ except (EOFError, KeyboardInterrupt):
+ abort = True
+ if abort:
print(abort_msg)
exit(EXIT_ERROR)
@@ -544,7 +548,11 @@ swift-ring-builder <builder_file> create <part_power> <replicas>
if len(argv) < 6:
print(Commands.create.__doc__.strip())
exit(EXIT_ERROR)
- builder = RingBuilder(int(argv[3]), float(argv[4]), int(argv[5]))
+ try:
+ builder = RingBuilder(int(argv[3]), float(argv[4]), int(argv[5]))
+ except ValueError as e:
+ print(e)
+ exit(EXIT_ERROR)
backup_dir = pathjoin(dirname(builder_file), 'backups')
try:
mkdir(backup_dir)
diff --git a/swift/common/direct_client.py b/swift/common/direct_client.py
index f6234fa38..deb9fbe9e 100644
--- a/swift/common/direct_client.py
+++ b/swift/common/direct_client.py
@@ -378,6 +378,31 @@ def direct_put_container(node, part, account, container, conn_timeout=5,
content_length=content_length, chunk_size=chunk_size)
+def direct_post_container(node, part, account, container, conn_timeout=5,
+ response_timeout=15, headers=None):
+ """
+ Make a POST request to a container server.
+
+ :param node: node dictionary from the ring
+ :param part: partition the container is on
+ :param account: account name
+ :param container: container name
+ :param conn_timeout: timeout in seconds for establishing the connection
+ :param response_timeout: timeout in seconds for getting the response
+ :param headers: additional headers to include in the request
+ :raises ClientException: HTTP POST request failed
+ """
+ if headers is None:
+ headers = {}
+
+ lower_headers = set(k.lower() for k in headers)
+ headers_out = gen_headers(headers,
+ add_ts='x-timestamp' not in lower_headers)
+ path = _make_path(account, container)
+ return _make_req(node, part, 'POST', path, headers_out, 'Container',
+ conn_timeout, response_timeout)
+
+
def direct_put_container_object(node, part, account, container, obj,
conn_timeout=5, response_timeout=15,
headers=None):
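A minimal usage sketch for the new direct_post_container helper; the node dict fields and values below are illustrative stand-ins for a real ring entry:

    from swift.common.direct_client import direct_post_container

    node = {'id': 0, 'ip': '127.0.0.1', 'port': 6201, 'device': 'sdb1',
            'replication_ip': '127.0.0.1', 'replication_port': 6201}
    # POST new metadata directly to one container server replica
    direct_post_container(node, part=1, account='AUTH_test', container='c1',
                          headers={'X-Container-Meta-Color': 'blue'})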
diff --git a/swift/common/internal_client.py b/swift/common/internal_client.py
index 2c1c99cc0..fc5242ae8 100644
--- a/swift/common/internal_client.py
+++ b/swift/common/internal_client.py
@@ -28,6 +28,7 @@ from zlib import compressobj
from swift.common.exceptions import ClientException
from swift.common.http import (HTTP_NOT_FOUND, HTTP_MULTIPLE_CHOICES,
is_client_error, is_server_error)
+from swift.common.middleware.gatekeeper import GatekeeperMiddleware
from swift.common.request_helpers import USE_REPLICATION_NETWORK_HEADER
from swift.common.swob import Request, bytes_to_wsgi
from swift.common.utils import quote, close_if_possible, drain_and_close
@@ -144,6 +145,8 @@ class InternalClient(object):
:param user_agent: User agent to be sent to requests to Swift.
:param request_tries: Number of tries before InternalClient.make_request()
gives up.
+ :param use_replication_network: Force the client to use the replication
+ network when communicating with the cluster.
:param global_conf: a dict of options to update the loaded proxy config.
Options in ``global_conf`` will override those in ``conf_path`` except
where the ``conf_path`` option is preceded by ``set``.
@@ -151,12 +154,17 @@ class InternalClient(object):
"""
def __init__(self, conf_path, user_agent, request_tries,
- allow_modify_pipeline=False, use_replication_network=False,
- global_conf=None, app=None):
+ use_replication_network=False, global_conf=None, app=None,
+ **kwargs):
if request_tries < 1:
raise ValueError('request_tries must be positive')
+ # Internal clients don't use the gatekeeper and the pipeline remains
+ # static so we never allow anything to modify the proxy pipeline.
+ if kwargs.get('allow_modify_pipeline'):
+ raise ValueError("'allow_modify_pipeline' is no longer supported")
self.app = app or loadapp(conf_path, global_conf=global_conf,
- allow_modify_pipeline=allow_modify_pipeline,)
+ allow_modify_pipeline=False,)
+ self.check_gatekeeper_not_loaded(self.app)
self.user_agent = \
self.app._pipeline_final_app.backend_user_agent = user_agent
self.request_tries = request_tries
@@ -167,6 +175,19 @@ class InternalClient(object):
self.auto_create_account_prefix = \
self.app._pipeline_final_app.auto_create_account_prefix
+ @staticmethod
+ def check_gatekeeper_not_loaded(app):
+ # the Gatekeeper middleware would prevent an InternalClient from passing
+ # X-Backend-* headers to the proxy app, so ensure it's not present
+ try:
+ for app in app._pipeline:
+ if isinstance(app, GatekeeperMiddleware):
+ raise ValueError(
+ "Gatekeeper middleware is not allowed in the "
+ "InternalClient proxy pipeline")
+ except AttributeError:
+ pass
+
def make_request(
self, method, path, headers, acceptable_statuses, body_file=None,
params=None):
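The net effect of these changes on callers, as a short sketch (the conf path is illustrative):

    from swift.common.internal_client import InternalClient

    # Fine: the pipeline is loaded with allow_modify_pipeline=False and
    # checked for gatekeeper
    client = InternalClient('/etc/swift/internal-client.conf', 'my-agent', 3)

    # Raises ValueError: the keyword is no longer supported
    InternalClient('/etc/swift/internal-client.conf', 'my-agent', 3,
                   allow_modify_pipeline=True)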
diff --git a/swift/common/middleware/account_quotas.py b/swift/common/middleware/account_quotas.py
index cd41e34da..0baf00635 100644
--- a/swift/common/middleware/account_quotas.py
+++ b/swift/common/middleware/account_quotas.py
@@ -19,9 +19,19 @@ given account quota (in bytes) is exceeded while DELETE requests are still
allowed.
``account_quotas`` uses the ``x-account-meta-quota-bytes`` metadata entry to
-store the quota. Write requests to this metadata entry are only permitted for
-resellers. There is no quota limit if ``x-account-meta-quota-bytes`` is not
-set.
+store the overall account quota. Write requests to this metadata entry are
+only permitted for resellers. There is no overall account quota limit if
+``x-account-meta-quota-bytes`` is not set.
+
+Additionally, account quotas may be set for each storage policy, using metadata
+of the form ``x-account-quota-bytes-policy-<policy name>``. Again, only
+resellers may update these metadata, and there will be no limit for a
+particular policy if the corresponding metadata is not set.
+
+.. note::
+ Per-policy quotas need not sum to the overall account quota, and the sum of
+ all :ref:`container_quotas` for a given policy need not equal the account's
+ policy quota.
The ``account_quotas`` middleware should be added to the pipeline in your
``/etc/swift/proxy-server.conf`` file just after any auth middleware.
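For example, a reseller might set a 10 GiB quota for a policy named gold using python-swiftclient's post_account; the endpoint, token and policy name here are illustrative:

    from swiftclient import client as swift_client

    swift_client.post_account(
        'http://proxy.example.com/v1/AUTH_test',   # illustrative endpoint
        'reseller-token',                          # illustrative token
        headers={'X-Account-Quota-Bytes-Policy-gold': str(10 * 1024 ** 3)})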
@@ -55,7 +65,8 @@ account size has been updated.
from swift.common.swob import HTTPForbidden, HTTPBadRequest, \
HTTPRequestEntityTooLarge, wsgify
from swift.common.registry import register_swift_info
-from swift.proxy.controllers.base import get_account_info
+from swift.common.storage_policy import POLICIES
+from swift.proxy.controllers.base import get_account_info, get_container_info
class AccountQuotaMiddleware(object):
@@ -67,12 +78,50 @@ class AccountQuotaMiddleware(object):
def __init__(self, app, *args, **kwargs):
self.app = app
+ def handle_account(self, request):
+ if request.method in ("POST", "PUT"):
+ # account request, so we pay attention to the quotas
+ new_quotas = {}
+ new_quotas[None] = request.headers.get(
+ 'X-Account-Meta-Quota-Bytes')
+ if request.headers.get(
+ 'X-Remove-Account-Meta-Quota-Bytes'):
+ new_quotas[None] = 0 # X-Remove dominates if both are present
+
+ for policy in POLICIES:
+ tail = 'Account-Quota-Bytes-Policy-%s' % policy.name
+ if request.headers.get('X-Remove-' + tail):
+ new_quotas[policy.idx] = 0
+ else:
+ quota = request.headers.pop('X-' + tail, None)
+ new_quotas[policy.idx] = quota
+
+ if request.environ.get('reseller_request') is True:
+ if any(quota and not quota.isdigit()
+ for quota in new_quotas.values()):
+ return HTTPBadRequest()
+ for idx, quota in new_quotas.items():
+ if idx is None:
+ continue # For legacy reasons, it's in user meta
+ hdr = 'X-Account-Sysmeta-Quota-Bytes-Policy-%d' % idx
+ request.headers[hdr] = quota
+ elif any(quota is not None for quota in new_quotas.values()):
+ # deny quota set for non-reseller
+ return HTTPForbidden()
+
+ resp = request.get_response(self.app)
+ # Non-resellers can't update quotas, but they *can* see them
+ for policy in POLICIES:
+ infix = 'Quota-Bytes-Policy'
+ value = resp.headers.get('X-Account-Sysmeta-%s-%d' % (
+ infix, policy.idx))
+ if value:
+ resp.headers['X-Account-%s-%s' % (infix, policy.name)] = value
+ return resp
+
@wsgify
def __call__(self, request):
- if request.method not in ("POST", "PUT"):
- return self.app
-
try:
ver, account, container, obj = request.split_path(
2, 4, rest_with_last=True)
@@ -80,31 +129,19 @@ class AccountQuotaMiddleware(object):
return self.app
if not container:
- # account request, so we pay attention to the quotas
- new_quota = request.headers.get(
- 'X-Account-Meta-Quota-Bytes')
- remove_quota = request.headers.get(
- 'X-Remove-Account-Meta-Quota-Bytes')
- else:
- # container or object request; even if the quota headers are set
- # in the request, they're meaningless
- new_quota = remove_quota = None
-
- if remove_quota:
- new_quota = 0 # X-Remove dominates if both are present
+ return self.handle_account(request)
+ # container or object request; even if the quota headers are set
+ # in the request, they're meaningless
- if request.environ.get('reseller_request') is True:
- if new_quota and not new_quota.isdigit():
- return HTTPBadRequest()
+ if not (request.method == "PUT" and obj):
return self.app
+ # OK, object PUT
- # deny quota set for non-reseller
- if new_quota is not None:
- return HTTPForbidden()
-
- if request.method == "POST" or not obj:
+ if request.environ.get('reseller_request') is True:
+ # but resellers aren't constrained by quotas :-)
return self.app
+ # Object PUT request
content_length = (request.content_length or 0)
account_info = get_account_info(request.environ, self.app,
@@ -114,24 +151,50 @@ class AccountQuotaMiddleware(object):
try:
quota = int(account_info['meta'].get('quota-bytes', -1))
except ValueError:
- return self.app
- if quota < 0:
- return self.app
-
- new_size = int(account_info['bytes']) + content_length
- if quota < new_size:
- resp = HTTPRequestEntityTooLarge(body='Upload exceeds quota.')
- if 'swift.authorize' in request.environ:
- orig_authorize = request.environ['swift.authorize']
+ quota = -1
+ if quota >= 0:
+ new_size = int(account_info['bytes']) + content_length
+ if quota < new_size:
+ resp = HTTPRequestEntityTooLarge(body='Upload exceeds quota.')
+ if 'swift.authorize' in request.environ:
+ orig_authorize = request.environ['swift.authorize']
+
+ def reject_authorize(*args, **kwargs):
+ aresp = orig_authorize(*args, **kwargs)
+ if aresp:
+ return aresp
+ return resp
+ request.environ['swift.authorize'] = reject_authorize
+ else:
+ return resp
- def reject_authorize(*args, **kwargs):
- aresp = orig_authorize(*args, **kwargs)
- if aresp:
- return aresp
+ container_info = get_container_info(request.environ, self.app,
+ swift_source='AQ')
+ if not container_info:
+ return self.app
+ policy_idx = container_info['storage_policy']
+ sysmeta_key = 'quota-bytes-policy-%s' % policy_idx
+ try:
+ policy_quota = int(account_info['sysmeta'].get(sysmeta_key, -1))
+ except ValueError:
+ policy_quota = -1
+ if policy_quota >= 0:
+ policy_stats = account_info['storage_policies'].get(policy_idx, {})
+ new_size = int(policy_stats.get('bytes', 0)) + content_length
+ if policy_quota < new_size:
+ resp = HTTPRequestEntityTooLarge(
+ body='Upload exceeds policy quota.')
+ if 'swift.authorize' in request.environ:
+ orig_authorize = request.environ['swift.authorize']
+
+ def reject_authorize(*args, **kwargs):
+ aresp = orig_authorize(*args, **kwargs)
+ if aresp:
+ return aresp
+ return resp
+ request.environ['swift.authorize'] = reject_authorize
+ else:
return resp
- request.environ['swift.authorize'] = reject_authorize
- else:
- return resp
return self.app
diff --git a/swift/common/middleware/crypto/decrypter.py b/swift/common/middleware/crypto/decrypter.py
index 34dfef43b..2ca3b2ec7 100644
--- a/swift/common/middleware/crypto/decrypter.py
+++ b/swift/common/middleware/crypto/decrypter.py
@@ -197,7 +197,7 @@ class DecrypterObjContext(BaseDecrypterContext):
result.append((new_prefix + short_name, decrypted_value))
return result
- def decrypt_resp_headers(self, put_keys, post_keys):
+ def decrypt_resp_headers(self, put_keys, post_keys, update_cors_exposed):
"""
Find encrypted headers and replace with the decrypted versions.
@@ -236,11 +236,27 @@ class DecrypterObjContext(BaseDecrypterContext):
# that map to the same x-object-meta- header names i.e. decrypted
# headers win over unexpected, unencrypted headers.
if post_keys:
- mod_hdr_pairs.extend(self.decrypt_user_metadata(post_keys))
+ decrypted_meta = self.decrypt_user_metadata(post_keys)
+ mod_hdr_pairs.extend(decrypted_meta)
+ else:
+ decrypted_meta = []
mod_hdr_names = {h.lower() for h, v in mod_hdr_pairs}
- mod_hdr_pairs.extend([(h, v) for h, v in self._response_headers
- if h.lower() not in mod_hdr_names])
+
+ found_aceh = False
+ for header, value in self._response_headers:
+ lheader = header.lower()
+ if lheader in mod_hdr_names:
+ continue
+ if lheader == 'access-control-expose-headers':
+ found_aceh = True
+ mod_hdr_pairs.append((header, value + ', ' + ', '.join(
+ meta.lower() for meta, _data in decrypted_meta)))
+ else:
+ mod_hdr_pairs.append((header, value))
+ if update_cors_exposed and not found_aceh:
+ mod_hdr_pairs.append(('Access-Control-Expose-Headers', ', '.join(
+ meta.lower() for meta, _data in decrypted_meta)))
return mod_hdr_pairs
def multipart_response_iter(self, resp, boundary, body_key, crypto_meta):
@@ -326,7 +342,9 @@ class DecrypterObjContext(BaseDecrypterContext):
self._response_exc_info)
return app_resp
- mod_resp_headers = self.decrypt_resp_headers(put_keys, post_keys)
+ mod_resp_headers = self.decrypt_resp_headers(
+ put_keys, post_keys,
+ update_cors_exposed=bool(req.headers.get('origin')))
if put_crypto_meta and req.method == 'GET' and \
is_success(self._get_status_int()):
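To make the header handling above concrete, this is roughly how the merge behaves, shown with plain data (the metadata names are illustrative):

    decrypted_meta = [('X-Object-Meta-Color', 'blue'),
                      ('X-Object-Meta-Size', 'xl')]
    exposed = 'Etag'
    # An existing Access-Control-Expose-Headers value is kept and the
    # decrypted user metadata names are appended, lower-cased
    merged = exposed + ', ' + ', '.join(
        name.lower() for name, _value in decrypted_meta)
    assert merged == 'Etag, x-object-meta-color, x-object-meta-size'
    # With no existing header but an Origin on the request, a new
    # Access-Control-Expose-Headers header is added instead.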
diff --git a/swift/common/ring/builder.py b/swift/common/ring/builder.py
index e64fe4089..91845070e 100644
--- a/swift/common/ring/builder.py
+++ b/swift/common/ring/builder.py
@@ -87,6 +87,9 @@ class RingBuilder(object):
if part_power > 32:
raise ValueError("part_power must be at most 32 (was %d)"
% (part_power,))
+ if part_power < 0:
+ raise ValueError("part_power must be at least 0 (was %d)"
+ % (part_power,))
if replicas < 1:
raise ValueError("replicas must be at least 1 (was %.6f)"
% (replicas,))
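Together with the ValueError handling added to swift-ring-builder earlier in this diff, an out-of-range part_power now produces a clean error message instead of a traceback; for example:

    from swift.common.ring.builder import RingBuilder

    try:
        RingBuilder(-1, 3.0, 1)   # part_power, replicas, min_part_hours
    except ValueError as err:
        print(err)                # part_power must be at least 0 (was -1)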
diff --git a/swift/common/utils.py b/swift/common/utils/__init__.py
index f6139b0f4..16dc58807 100644
--- a/swift/common/utils.py
+++ b/swift/common/utils/__init__.py
@@ -26,7 +26,6 @@ import fcntl
import grp
import hashlib
import json
-import math
import operator
import os
import pwd
@@ -37,12 +36,9 @@ import sys
import time
import uuid
import functools
-import platform
import email.parser
from random import random, shuffle
from contextlib import contextmanager, closing
-import ctypes
-import ctypes.util
from optparse import OptionParser
import traceback
import warnings
@@ -96,7 +92,32 @@ from swift.common.header_key_dict import HeaderKeyDict
from swift.common.linkat import linkat
# For backwards compatibility with 3rd party middlewares
-from swift.common.registry import register_swift_info, get_swift_info # noqa
+from swift.common.registry import register_swift_info, get_swift_info # noqa
+from swift.common.utils.libc import ( # noqa
+ F_SETPIPE_SZ,
+ load_libc_function,
+ config_fallocate_value,
+ disable_fallocate,
+ fallocate,
+ punch_hole,
+ drop_buffer_cache,
+ get_md5_socket,
+ modify_priority,
+)
+from swift.common.utils.timestamp import ( # noqa
+ NORMAL_FORMAT,
+ INTERNAL_FORMAT,
+ SHORT_FORMAT,
+ MAX_OFFSET,
+ PRECISION,
+ Timestamp,
+ encode_timestamps,
+ decode_timestamps,
+ normalize_timestamp,
+ EPOCH,
+ last_modified_date_to_timestamp,
+ normalize_delete_at_timestamp,
+)
# logging doesn't import patched as cleanly as one would like
from logging.handlers import SysLogHandler
@@ -109,79 +130,6 @@ NOTICE = 25
logging.addLevelName(NOTICE, 'NOTICE')
SysLogHandler.priority_map['NOTICE'] = 'notice'
-# These are lazily pulled from libc elsewhere
-_sys_fallocate = None
-_posix_fadvise = None
-_libc_socket = None
-_libc_bind = None
-_libc_accept = None
-# see man -s 2 setpriority
-_libc_setpriority = None
-# see man -s 2 syscall
-_posix_syscall = None
-
-# If set to non-zero, fallocate routines will fail based on free space
-# available being at or below this amount, in bytes.
-FALLOCATE_RESERVE = 0
-# Indicates if FALLOCATE_RESERVE is the percentage of free space (True) or
-# the number of bytes (False).
-FALLOCATE_IS_PERCENT = False
-
-# from /usr/include/linux/falloc.h
-FALLOC_FL_KEEP_SIZE = 1
-FALLOC_FL_PUNCH_HOLE = 2
-
-# from /usr/src/linux-headers-*/include/uapi/linux/resource.h
-PRIO_PROCESS = 0
-
-
-# /usr/include/x86_64-linux-gnu/asm/unistd_64.h defines syscalls there
-# are many like it, but this one is mine, see man -s 2 ioprio_set
-def NR_ioprio_set():
- """Give __NR_ioprio_set value for your system."""
- architecture = os.uname()[4]
- arch_bits = platform.architecture()[0]
- # check for a supported system; currently x86_64 and AArch64
- if architecture == 'x86_64' and arch_bits == '64bit':
- return 251
- elif architecture == 'aarch64' and arch_bits == '64bit':
- return 30
- raise OSError("Swift doesn't support ionice priority for %s %s" %
- (architecture, arch_bits))
-
-
-# this syscall integer probably only works on x86_64 linux systems, you
-# can check if it's correct on yours with something like this:
-"""
-#include <stdio.h>
-#include <sys/syscall.h>
-
-int main(int argc, const char* argv[]) {
- printf("%d\n", __NR_ioprio_set);
- return 0;
-}
-"""
-
-# this is the value for "which" that says our who value will be a pid
-# pulled out of /usr/src/linux-headers-*/include/linux/ioprio.h
-IOPRIO_WHO_PROCESS = 1
-
-
-IO_CLASS_ENUM = {
- 'IOPRIO_CLASS_RT': 1,
- 'IOPRIO_CLASS_BE': 2,
- 'IOPRIO_CLASS_IDLE': 3,
-}
-
-# the IOPRIO_PRIO_VALUE "macro" is also pulled from
-# /usr/src/linux-headers-*/include/linux/ioprio.h
-IOPRIO_CLASS_SHIFT = 13
-
-
-def IOPRIO_PRIO_VALUE(class_, data):
- return (((class_) << IOPRIO_CLASS_SHIFT) | data)
-
-
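These IOPRIO constants move into swift/common/utils/libc.py along with modify_priority (see the import block later in this diff). The IOPRIO_PRIO_VALUE packing is plain bit arithmetic; a worked example for the best-effort class with priority data 0:

    # class bits live above the shift; 2 << 13 == 16384
    IOPRIO_CLASS_SHIFT = 13
    IOPRIO_CLASS_BE = 2       # IO_CLASS_ENUM['IOPRIO_CLASS_BE']
    assert (IOPRIO_CLASS_BE << IOPRIO_CLASS_SHIFT) | 0 == 16384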
# Used by hash_path to offer a bit more security when generating hashes for
# paths. It simply appends this value to all paths; guessing the hash a path
# will end up with would also require knowing this suffix.
@@ -190,12 +138,6 @@ HASH_PATH_PREFIX = b''
SWIFT_CONF_FILE = '/etc/swift/swift.conf'
-# These constants are Linux-specific, and Python doesn't seem to know
-# about them. We ask anyway just in case that ever gets fixed.
-#
-# The values were copied from the Linux 3.x kernel headers.
-AF_ALG = getattr(socket, 'AF_ALG', 38)
-F_SETPIPE_SZ = getattr(fcntl, 'F_SETPIPE_SZ', 1031)
O_TMPFILE = getattr(os, 'O_TMPFILE', 0o20000000 | os.O_DIRECTORY)
# Used by the parse_socket_string() function to validate IPv6 addresses
@@ -515,10 +457,6 @@ def eventlet_monkey_patch():
logging.logThreads = 0
-def noop_libc_function(*args):
- return 0
-
-
def validate_configuration():
try:
validate_hash_conf()
@@ -526,39 +464,6 @@ def validate_configuration():
sys.exit("Error: %s" % e)
-def load_libc_function(func_name, log_error=True,
- fail_if_missing=False, errcheck=False):
- """
- Attempt to find the function in libc, otherwise return a no-op func.
-
- :param func_name: name of the function to pull from libc.
- :param log_error: log an error when a function can't be found
- :param fail_if_missing: raise an exception when a function can't be found.
- Default behavior is to return a no-op function.
- :param errcheck: boolean, if true install a wrapper on the function
- to check for a return value of -1 and call
- ctypes.get_errno and raise an OSError
- """
- try:
- libc = ctypes.CDLL(ctypes.util.find_library('c'), use_errno=True)
- func = getattr(libc, func_name)
- except AttributeError:
- if fail_if_missing:
- raise
- if log_error:
- logging.warning(_("Unable to locate %s in libc. Leaving as a "
- "no-op."), func_name)
- return noop_libc_function
- if errcheck:
- def _errcheck(result, f, args):
- if result == -1:
- errcode = ctypes.get_errno()
- raise OSError(errcode, os.strerror(errcode))
- return result
- func.errcheck = _errcheck
- return func
-
-
def generate_trans_id(trans_id_suffix):
return 'tx%s-%010x%s' % (
uuid.uuid4().hex[:21], int(time.time()), quote(trans_id_suffix))
@@ -585,6 +490,7 @@ class _UTC(datetime.tzinfo):
"""
A tzinfo class for datetime objects that returns a 0 timedelta (UTC time)
"""
+
def dst(self, dt):
return datetime.timedelta(0)
utcoffset = dst
@@ -754,25 +660,6 @@ def get_trans_id_time(trans_id):
return None
-def config_fallocate_value(reserve_value):
- """
- Returns fallocate reserve_value as an int or float.
- Returns is_percent as a boolean.
- Raises a ValueError on an invalid fallocate value.
- """
- try:
- if str(reserve_value[-1:]) == '%':
- reserve_value = float(reserve_value[:-1])
- is_percent = True
- else:
- reserve_value = int(reserve_value)
- is_percent = False
- except ValueError:
- raise ValueError('Error: %s is an invalid value for fallocate'
- '_reserve.' % reserve_value)
- return reserve_value, is_percent
-
-
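The behavior of this helper at its new home is easy to summarize; a quick sketch (expected values follow directly from the code above):

    from swift.common.utils.libc import config_fallocate_value

    assert config_fallocate_value('2%') == (2.0, True)
    assert config_fallocate_value('1024') == (1024, False)
    try:
        config_fallocate_value('banana')
    except ValueError as err:
        print(err)  # Error: banana is an invalid value for fallocate_reserve.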
class FileLikeIter(object):
def __init__(self, iterable):
@@ -923,163 +810,6 @@ def fs_has_free_space(fs_path, space_needed, is_percent):
return free_bytes >= space_needed
-class _LibcWrapper(object):
- """
- A callable object that forwards its calls to a C function from libc.
-
- These objects are lazy. libc will not be checked until someone tries to
- either call the function or check its availability.
-
- _LibcWrapper objects have an "available" property; if true, then libc
- has the function of that name. If false, then calls will fail with a
- NotImplementedError.
- """
- def __init__(self, func_name):
- self._func_name = func_name
- self._func_handle = None
- self._loaded = False
-
- def _ensure_loaded(self):
- if not self._loaded:
- func_name = self._func_name
- try:
- # Keep everything in this try-block in local variables so
- # that a typo in self.some_attribute_name doesn't raise a
- # spurious AttributeError.
- func_handle = load_libc_function(
- func_name, fail_if_missing=True)
- self._func_handle = func_handle
- except AttributeError:
- # We pass fail_if_missing=True to load_libc_function and
- # then ignore the error. It's weird, but otherwise we have
- # to check if self._func_handle is noop_libc_function, and
- # that's even weirder.
- pass
- self._loaded = True
-
- @property
- def available(self):
- self._ensure_loaded()
- return bool(self._func_handle)
-
- def __call__(self, *args):
- if self.available:
- return self._func_handle(*args)
- else:
- raise NotImplementedError(
- "No function %r found in libc" % self._func_name)
-
-
-_fallocate_enabled = True
-_fallocate_warned_about_missing = False
-_sys_fallocate = _LibcWrapper('fallocate')
-_sys_posix_fallocate = _LibcWrapper('posix_fallocate')
-
-
-def disable_fallocate():
- global _fallocate_enabled
- _fallocate_enabled = False
-
-
-def fallocate(fd, size, offset=0):
- """
- Pre-allocate disk space for a file.
-
- This function can be disabled by calling disable_fallocate(). If no
- suitable C function is available in libc, this function is a no-op.
-
- :param fd: file descriptor
- :param size: size to allocate (in bytes)
- """
- global _fallocate_enabled
- if not _fallocate_enabled:
- return
-
- if size < 0:
- size = 0 # Done historically; not really sure why
- if size >= (1 << 63):
- raise ValueError('size must be less than 2 ** 63')
- if offset < 0:
- raise ValueError('offset must be non-negative')
- if offset >= (1 << 63):
- raise ValueError('offset must be less than 2 ** 63')
-
- # Make sure there's some (configurable) amount of free space in
- # addition to the number of bytes we're allocating.
- if FALLOCATE_RESERVE:
- st = os.fstatvfs(fd)
- free = st.f_frsize * st.f_bavail - size
- if FALLOCATE_IS_PERCENT:
- free = (float(free) / float(st.f_frsize * st.f_blocks)) * 100
- if float(free) <= float(FALLOCATE_RESERVE):
- raise OSError(
- errno.ENOSPC,
- 'FALLOCATE_RESERVE fail %g <= %g' %
- (free, FALLOCATE_RESERVE))
-
- if _sys_fallocate.available:
- # Parameters are (fd, mode, offset, length).
- #
- # mode=FALLOC_FL_KEEP_SIZE pre-allocates invisibly (without
- # affecting the reported file size).
- ret = _sys_fallocate(
- fd, FALLOC_FL_KEEP_SIZE, ctypes.c_uint64(offset),
- ctypes.c_uint64(size))
- err = ctypes.get_errno()
- elif _sys_posix_fallocate.available:
- # Parameters are (fd, offset, length).
- ret = _sys_posix_fallocate(fd, ctypes.c_uint64(offset),
- ctypes.c_uint64(size))
- err = ctypes.get_errno()
- else:
- # No suitable fallocate-like function is in our libc. Warn about it,
- # but just once per process, and then do nothing.
- global _fallocate_warned_about_missing
- if not _fallocate_warned_about_missing:
- logging.warning(_("Unable to locate fallocate, posix_fallocate in "
- "libc. Leaving as a no-op."))
- _fallocate_warned_about_missing = True
- return
-
- if ret and err not in (0, errno.ENOSYS, errno.EOPNOTSUPP,
- errno.EINVAL):
- raise OSError(err, 'Unable to fallocate(%s)' % size)
-
-
-def punch_hole(fd, offset, length):
- """
- De-allocate disk space in the middle of a file.
-
- :param fd: file descriptor
- :param offset: index of first byte to de-allocate
- :param length: number of bytes to de-allocate
- """
- if offset < 0:
- raise ValueError('offset must be non-negative')
- if offset >= (1 << 63):
- raise ValueError('offset must be less than 2 ** 63')
- if length <= 0:
- raise ValueError('length must be positive')
- if length >= (1 << 63):
- raise ValueError('length must be less than 2 ** 63')
-
- if _sys_fallocate.available:
- # Parameters are (fd, mode, offset, length).
- ret = _sys_fallocate(
- fd,
- FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE,
- ctypes.c_uint64(offset),
- ctypes.c_uint64(length))
- err = ctypes.get_errno()
- if ret and err:
- mode_str = "FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE"
- raise OSError(err, "Unable to fallocate(%d, %s, %d, %d)" % (
- fd, mode_str, offset, length))
- else:
- raise OSError(errno.ENOTSUP,
- 'No suitable C function found for hole punching')
-
-
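A hedged usage sketch for punch_hole at its new location (the temp path is illustrative; on filesystems without hole-punch support this raises OSError):

    import os
    from swift.common.utils.libc import punch_hole

    fd = os.open('/tmp/sparse-demo', os.O_RDWR | os.O_CREAT, 0o644)
    try:
        os.ftruncate(fd, 16384)
        punch_hole(fd, offset=4096, length=4096)  # de-allocate one 4 KiB block
    finally:
        os.close(fd)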
def fsync(fd):
"""
Sync modified file data and metadata to disk.
@@ -1129,402 +859,6 @@ def fsync_dir(dirpath):
os.close(dirfd)
-def drop_buffer_cache(fd, offset, length):
- """
- Drop 'buffer' cache for the given range of the given file.
-
- :param fd: file descriptor
- :param offset: start offset
- :param length: length
- """
- global _posix_fadvise
- if _posix_fadvise is None:
- _posix_fadvise = load_libc_function('posix_fadvise64')
- # 4 means "POSIX_FADV_DONTNEED"
- ret = _posix_fadvise(fd, ctypes.c_uint64(offset),
- ctypes.c_uint64(length), 4)
- if ret != 0:
- logging.warning("posix_fadvise64(%(fd)s, %(offset)s, %(length)s, 4) "
- "-> %(ret)s", {'fd': fd, 'offset': offset,
- 'length': length, 'ret': ret})
-
-
-NORMAL_FORMAT = "%016.05f"
-INTERNAL_FORMAT = NORMAL_FORMAT + '_%016x'
-SHORT_FORMAT = NORMAL_FORMAT + '_%x'
-MAX_OFFSET = (16 ** 16) - 1
-PRECISION = 1e-5
-# Setting this to True will cause the internal format to always display
-# extended digits - even when the value is equivalent to the normalized form.
-# This isn't ideal during an upgrade when some servers might not understand
-# the new time format - but flipping it to True works great for testing.
-FORCE_INTERNAL = False # or True
-
-
-@functools.total_ordering
-class Timestamp(object):
- """
- Internal Representation of Swift Time.
-
- The normalized form of the X-Timestamp header looks like a float
- with a fixed width to ensure stable string sorting - normalized
- timestamps look like "1402464677.04188"
-
- To support overwrites of existing data without modifying the original
- timestamp but still maintain consistency a second internal offset vector
- is appended to the normalized timestamp form, which compares and sorts
- greater than the fixed width float format but less than a newer timestamp.
- The internalized format of timestamps looks like
- "1402464677.04188_0000000000000000" - the portion after the underscore is
- the offset and is a formatted hexadecimal integer.
-
- The internalized form is not exposed to clients in responses from
- Swift. Normal client operations will not create a timestamp with an
- offset.
-
- The Timestamp class in common.utils supports internalized and
- normalized formatting of timestamps and also comparison of timestamp
- values. When the offset value of a Timestamp is 0 - it's considered
- insignificant and need not be represented in the string format; to
- support backwards compatibility during a Swift upgrade the
- internalized and normalized form of a Timestamp with an
- insignificant offset are identical. When a timestamp includes an
- offset it will always be represented in the internalized form, but
- is still excluded from the normalized form. Timestamps with an
- equivalent timestamp portion (the float part) will compare and order
- by their offset. Timestamps with a greater timestamp portion will
- always compare and order greater than a Timestamp with a lesser
- timestamp regardless of its offset. String comparison and ordering
- is guaranteed for the internalized string format, and is backwards
- compatible for normalized timestamps which do not include an offset.
- """
-
- def __init__(self, timestamp, offset=0, delta=0, check_bounds=True):
- """
- Create a new Timestamp.
-
- :param timestamp: time in seconds since the Epoch, may be any of:
-
- * a float or integer
- * normalized/internalized string
- * another instance of this class (offset is preserved)
-
- :param offset: the second internal offset vector, an int
- :param delta: deca-microsecond difference from the base timestamp
- param, an int
- """
- if isinstance(timestamp, bytes):
- timestamp = timestamp.decode('ascii')
- if isinstance(timestamp, six.string_types):
- base, base_offset = timestamp.partition('_')[::2]
- self.timestamp = float(base)
- if '_' in base_offset:
- raise ValueError('invalid literal for int() with base 16: '
- '%r' % base_offset)
- if base_offset:
- self.offset = int(base_offset, 16)
- else:
- self.offset = 0
- else:
- self.timestamp = float(timestamp)
- self.offset = getattr(timestamp, 'offset', 0)
- # increment offset
- if offset >= 0:
- self.offset += offset
- else:
- raise ValueError('offset must be non-negative')
- if self.offset > MAX_OFFSET:
- raise ValueError('offset must be smaller than %d' % MAX_OFFSET)
- self.raw = int(round(self.timestamp / PRECISION))
- # add delta
- if delta:
- self.raw = self.raw + delta
- if self.raw <= 0:
- raise ValueError(
- 'delta must be greater than %d' % (-1 * self.raw))
- self.timestamp = float(self.raw * PRECISION)
- if check_bounds:
- if self.timestamp < 0:
- raise ValueError('timestamp cannot be negative')
- if self.timestamp >= 10000000000:
- raise ValueError('timestamp too large')
-
- @classmethod
- def now(cls, offset=0, delta=0):
- return cls(time.time(), offset=offset, delta=delta)
-
- def __repr__(self):
- return INTERNAL_FORMAT % (self.timestamp, self.offset)
-
- def __str__(self):
- raise TypeError('You must specify which string format is required')
-
- def __float__(self):
- return self.timestamp
-
- def __int__(self):
- return int(self.timestamp)
-
- def __nonzero__(self):
- return bool(self.timestamp or self.offset)
-
- def __bool__(self):
- return self.__nonzero__()
-
- @property
- def normal(self):
- return NORMAL_FORMAT % self.timestamp
-
- @property
- def internal(self):
- if self.offset or FORCE_INTERNAL:
- return INTERNAL_FORMAT % (self.timestamp, self.offset)
- else:
- return self.normal
-
- @property
- def short(self):
- if self.offset or FORCE_INTERNAL:
- return SHORT_FORMAT % (self.timestamp, self.offset)
- else:
- return self.normal
-
- @property
- def isoformat(self):
- """
- Get an isoformat string representation of the 'normal' part of the
- Timestamp with microsecond precision and no trailing timezone, for
- example::
-
- 1970-01-01T00:00:00.000000
-
- :return: an isoformat string
- """
- t = float(self.normal)
- if six.PY3:
- # On Python 3, round manually using ROUND_HALF_EVEN rounding
- # method, to use the same rounding method as Python 2. Python 3
- # used a different rounding method, but Python 3.4.4 and 3.5.1 once
- # again use ROUND_HALF_EVEN, as Python 2 does.
- # See https://bugs.python.org/issue23517
- frac, t = math.modf(t)
- us = round(frac * 1e6)
- if us >= 1000000:
- t += 1
- us -= 1000000
- elif us < 0:
- t -= 1
- us += 1000000
- dt = datetime.datetime.utcfromtimestamp(t)
- dt = dt.replace(microsecond=us)
- else:
- dt = datetime.datetime.utcfromtimestamp(t)
-
- isoformat = dt.isoformat()
- # python isoformat() doesn't include msecs when zero
- if len(isoformat) < len("1970-01-01T00:00:00.000000"):
- isoformat += ".000000"
- return isoformat
-
- @classmethod
- def from_isoformat(cls, date_string):
- """
- Parse an isoformat string representation of time to a Timestamp object.
-
- :param date_string: a string formatted as per a Timestamp.isoformat
- property.
- :return: an instance of this class.
- """
- start = datetime.datetime.strptime(date_string, "%Y-%m-%dT%H:%M:%S.%f")
- delta = start - EPOCH
- # This calculation is based on Python 2.7's Modules/datetimemodule.c,
- # function delta_to_microseconds(), but written in Python.
- return cls(delta.total_seconds())
-
- def ceil(self):
- """
- Return the 'normal' part of the timestamp rounded up to the nearest
- integer number of seconds.
-
- This value should be used whenever the second-precision Last-Modified
- time of a resource is required.
-
- :return: a float value with second precision.
- """
- return math.ceil(float(self))
-
- def __eq__(self, other):
- if other is None:
- return False
- if not isinstance(other, Timestamp):
- try:
- other = Timestamp(other, check_bounds=False)
- except ValueError:
- return False
- return self.internal == other.internal
-
- def __ne__(self, other):
- return not (self == other)
-
- def __lt__(self, other):
- if other is None:
- return False
- if not isinstance(other, Timestamp):
- other = Timestamp(other, check_bounds=False)
- if other.timestamp < 0:
- return False
- if other.timestamp >= 10000000000:
- return True
- return self.internal < other.internal
-
- def __hash__(self):
- return hash(self.internal)
-
- def __invert__(self):
- if self.offset:
- raise ValueError('Cannot invert timestamps with offsets')
- return Timestamp((999999999999999 - self.raw) * PRECISION)
-
-
-def encode_timestamps(t1, t2=None, t3=None, explicit=False):
- """
- Encode up to three timestamps into a string. Unlike a Timestamp object, the
- encoded string does NOT used fixed width fields and consequently no
- relative chronology of the timestamps can be inferred from lexicographic
- sorting of encoded timestamp strings.
-
- The format of the encoded string is:
- <t1>[<+/-><t2 - t1>[<+/-><t3 - t2>]]
-
- i.e. if t1 = t2 = t3 then just the string representation of t1 is returned,
- otherwise the time offsets for t2 and t3 are appended. If explicit is True
- then the offsets for t2 and t3 are always appended even if zero.
-
- Note: any offset value in t1 will be preserved, but offsets on t2 and t3
- are not preserved. In the anticipated use cases for this method (and the
- inverse decode_timestamps method) the timestamps passed as t2 and t3 are
- not expected to have offsets as they will be timestamps associated with a
- POST request. In the case where the encoding is used in a container objects
- table row, t1 could be the PUT or DELETE time but t2 and t3 represent the
- content type and metadata times (if different from the data file) i.e.
- correspond to POST timestamps. In the case where the encoded form is used
- in a .meta file name, t1 and t2 both correspond to POST timestamps.
- """
- form = '{0}'
- values = [t1.short]
- if t2 is not None:
- t2_t1_delta = t2.raw - t1.raw
- explicit = explicit or (t2_t1_delta != 0)
- values.append(t2_t1_delta)
- if t3 is not None:
- t3_t2_delta = t3.raw - t2.raw
- explicit = explicit or (t3_t2_delta != 0)
- values.append(t3_t2_delta)
- if explicit:
- form += '{1:+x}'
- if t3 is not None:
- form += '{2:+x}'
- return form.format(*values)
-
-
-def decode_timestamps(encoded, explicit=False):
- """
- Parses a string of the form generated by encode_timestamps and returns
- a tuple of the three component timestamps. If explicit is False, component
- timestamps that are not explicitly encoded will be assumed to have zero
- delta from the previous component and therefore take the value of the
- previous component. If explicit is True, component timestamps that are
- not explicitly encoded will be returned with value None.
- """
- # TODO: some tests, e.g. in test_replicator, put float timestamps values
- # into container db's, hence this defensive check, but in real world
- # this may never happen.
- if not isinstance(encoded, six.string_types):
- ts = Timestamp(encoded)
- return ts, ts, ts
-
- parts = []
- signs = []
- pos_parts = encoded.split('+')
- for part in pos_parts:
- # parse time components and their signs
- # e.g. x-y+z --> parts = [x, y, z] and signs = [+1, -1, +1]
- neg_parts = part.split('-')
- parts = parts + neg_parts
- signs = signs + [1] + [-1] * (len(neg_parts) - 1)
- t1 = Timestamp(parts[0])
- t2 = t3 = None
- if len(parts) > 1:
- t2 = t1
- delta = signs[1] * int(parts[1], 16)
- # if delta = 0 we want t2 = t3 = t1 in order to
- # preserve any offset in t1 - only construct a distinct
- # timestamp if there is a non-zero delta.
- if delta:
- t2 = Timestamp((t1.raw + delta) * PRECISION)
- elif not explicit:
- t2 = t1
- if len(parts) > 2:
- t3 = t2
- delta = signs[2] * int(parts[2], 16)
- if delta:
- t3 = Timestamp((t2.raw + delta) * PRECISION)
- elif not explicit:
- t3 = t2
- return t1, t2, t3
-
-
-def normalize_timestamp(timestamp):
- """
- Format a timestamp (string or numeric) into a standardized
- xxxxxxxxxx.xxxxx (10.5) format.
-
- Note that timestamps using values greater than or equal to November 20th,
- 2286 at 17:46 UTC will use 11 digits to represent the number of
- seconds.
-
- :param timestamp: unix timestamp
- :returns: normalized timestamp as a string
- """
- return Timestamp(timestamp).normal
-
-
-EPOCH = datetime.datetime(1970, 1, 1)
-
-
-def last_modified_date_to_timestamp(last_modified_date_str):
- """
- Convert a last modified date (like you'd get from a container listing,
- e.g. 2014-02-28T23:22:36.698390) to a float.
- """
- return Timestamp.from_isoformat(last_modified_date_str)
-
-
-def normalize_delete_at_timestamp(timestamp, high_precision=False):
- """
- Format a timestamp (string or numeric) into a standardized
- xxxxxxxxxx (10) or xxxxxxxxxx.xxxxx (10.5) format.
-
- Note that timestamps less than 0000000000 are raised to
- 0000000000 and values greater than November 20th, 2286 at
- 17:46:39 UTC will be capped at that date and time, resulting in
- no return value exceeding 9999999999.99999 (or 9999999999 if
- using low-precision).
-
- This cap is because the expirer is already working through a
- sorted list of strings that were all a length of 10. Adding
- another digit would mess up the sort and cause the expirer to
- break from processing early. By 2286, this problem will need to
- be fixed, probably by creating an additional .expiring_objects
- account to work from with 11 (or more) digit container names.
-
- :param timestamp: unix timestamp
- :returns: normalized timestamp as a string
- """
- fmt = '%016.5f' if high_precision else '%010d'
- return fmt % min(max(0, float(timestamp)), 9999999999.99999)
-
-
def mkdirs(path):
"""
Ensures the path is a directory or makes it if not. Errors if the path
@@ -1715,6 +1049,7 @@ class RateLimitedIterator(object):
this many elements; default is 0 (rate limit
immediately)
"""
+
def __init__(self, iterable, elements_per_second, limit_after=0,
ratelimit_if=lambda _junk: True):
self.iterator = iter(iterable)
@@ -1749,6 +1084,7 @@ class GreenthreadSafeIterator(object):
an error like "ValueError: generator already executing". By wrapping calls
to next() with a mutex, we avoid that error.
"""
+
def __init__(self, unsafe_iterable):
self.unsafe_iter = iter(unsafe_iterable)
self.semaphore = eventlet.semaphore.Semaphore(value=1)
@@ -2068,6 +1404,12 @@ class SwiftLoggerAdapter(logging.LoggerAdapter):
Like logging.LoggerAdapter, you have to subclass this and override the
process() method to accomplish anything useful.
"""
+
+ @property
+ def name(self):
+ # py3 does this for us already; add it for py2
+ return self.logger.name
+
def get_metric_name(self, metric):
# subclasses may override this method to annotate the metric name
return metric
@@ -2110,6 +1452,7 @@ class PrefixLoggerAdapter(SwiftLoggerAdapter):
Adds an optional prefix to all its log messages. When the prefix has not
been set, messages are unchanged.
"""
+
def set_prefix(self, prefix):
self.extra['prefix'] = prefix
@@ -2129,6 +1472,7 @@ class MetricsPrefixLoggerAdapter(SwiftLoggerAdapter):
"""
Adds a prefix to all Statsd metrics' names.
"""
+
def __init__(self, logger, extra, metric_prefix):
"""
:param logger: an instance of logging.Logger
@@ -2382,6 +1726,7 @@ class LogLevelFilter(object):
(DEBUG < INFO < WARN < ERROR < CRITICAL|FATAL)
Default: DEBUG
"""
+
def __init__(self, level=logging.DEBUG):
self.level = level
@@ -3682,6 +3027,7 @@ class GreenAsyncPile(object):
Correlating results with jobs (if necessary) is left to the caller.
"""
+
def __init__(self, size_or_pool):
"""
:param size_or_pool: thread pool size or a pool to use
@@ -3775,6 +3121,7 @@ class StreamingPile(GreenAsyncPile):
When used as a context manager, has the same worker-killing properties as
:class:`ContextPool`.
"""
+
def __init__(self, size):
""":param size: number of worker threads to use"""
self.pool = ContextPool(size)
@@ -4266,6 +3613,7 @@ class Everything(object):
A container that contains everything. If "e" is an instance of
Everything, then "x in e" is true for all x.
"""
+
def __contains__(self, element):
return True
@@ -4297,6 +3645,7 @@ class CloseableChain(object):
Like itertools.chain, but with a close method that will attempt to invoke
its sub-iterators' close methods, if any.
"""
+
def __init__(self, *iterables):
self.iterables = iterables
self.chained_iter = itertools.chain(*self.iterables)
@@ -4340,6 +3689,7 @@ class InputProxy(object):
File-like object that counts bytes read.
To be swapped in for wsgi.input for accounting purposes.
"""
+
def __init__(self, wsgi_input):
"""
:param wsgi_input: file-like object to wrap the functionality of
@@ -4481,6 +3831,7 @@ class Spliterator(object):
"l" # shorter than requested; this can happen with the last iterator
"""
+
def __init__(self, source_iterable):
self.input_iterator = iter(source_iterable)
self.leftovers = None
@@ -5093,87 +4444,6 @@ def parse_content_disposition(header):
return header, attributes
-class sockaddr_alg(ctypes.Structure):
- _fields_ = [("salg_family", ctypes.c_ushort),
- ("salg_type", ctypes.c_ubyte * 14),
- ("salg_feat", ctypes.c_uint),
- ("salg_mask", ctypes.c_uint),
- ("salg_name", ctypes.c_ubyte * 64)]
-
-
-_bound_md5_sockfd = None
-
-
-def get_md5_socket():
- """
- Get an MD5 socket file descriptor. One can MD5 data with it by writing it
- to the socket with os.write, then os.read the 16 bytes of the checksum out
- later.
-
- NOTE: It is the caller's responsibility to ensure that os.close() is
- called on the returned file descriptor. This is a bare file descriptor,
- not a Python object. It doesn't close itself.
- """
-
- # Linux's AF_ALG sockets work like this:
- #
- # First, initialize a socket with socket() and bind(). This tells the
- # socket what algorithm to use, as well as setting up any necessary bits
- # like crypto keys. Of course, MD5 doesn't need any keys, so it's just the
- # algorithm name.
- #
- # Second, to hash some data, get a second socket by calling accept() on
- # the first socket. Write data to the socket, then when finished, read the
- # checksum from the socket and close it. This lets you checksum multiple
- # things without repeating all the setup code each time.
- #
- # Since we only need to bind() one socket, we do that here and save it for
- # future re-use. That way, we only use one file descriptor to get an MD5
- # socket instead of two, and we also get to save some syscalls.
-
- global _bound_md5_sockfd
- global _libc_socket
- global _libc_bind
- global _libc_accept
-
- if _libc_accept is None:
- _libc_accept = load_libc_function('accept', fail_if_missing=True)
- if _libc_socket is None:
- _libc_socket = load_libc_function('socket', fail_if_missing=True)
- if _libc_bind is None:
- _libc_bind = load_libc_function('bind', fail_if_missing=True)
-
- # Do this at first call rather than at import time so that we don't use a
- # file descriptor on systems that aren't using any MD5 sockets.
- if _bound_md5_sockfd is None:
- sockaddr_setup = sockaddr_alg(
- AF_ALG,
- (ord('h'), ord('a'), ord('s'), ord('h'), 0),
- 0, 0,
- (ord('m'), ord('d'), ord('5'), 0))
- hash_sockfd = _libc_socket(ctypes.c_int(AF_ALG),
- ctypes.c_int(socket.SOCK_SEQPACKET),
- ctypes.c_int(0))
- if hash_sockfd < 0:
- raise IOError(ctypes.get_errno(),
- "Failed to initialize MD5 socket")
-
- bind_result = _libc_bind(ctypes.c_int(hash_sockfd),
- ctypes.pointer(sockaddr_setup),
- ctypes.c_int(ctypes.sizeof(sockaddr_alg)))
- if bind_result < 0:
- os.close(hash_sockfd)
- raise IOError(ctypes.get_errno(), "Failed to bind MD5 socket")
-
- _bound_md5_sockfd = hash_sockfd
-
- md5_sockfd = _libc_accept(ctypes.c_int(_bound_md5_sockfd), None, 0)
- if md5_sockfd < 0:
- raise IOError(ctypes.get_errno(), "Failed to accept MD5 socket")
-
- return md5_sockfd
-
-
try:
_test_md5 = hashlib.md5(usedforsecurity=False) # nosec
@@ -5195,19 +4465,19 @@ except TypeError:
return hashlib.md5(string) # nosec
-class ShardRangeOuterBound(object):
+class NamespaceOuterBound(object):
"""
A custom singleton type to be subclassed for the outer bounds of
- ShardRanges.
+ Namespaces.
"""
_singleton = None
def __new__(cls):
- if cls is ShardRangeOuterBound:
- raise TypeError('ShardRangeOuterBound is an abstract class; '
+ if cls is NamespaceOuterBound:
+ raise TypeError('NamespaceOuterBound is an abstract class; '
'only subclasses should be instantiated')
if cls._singleton is None:
- cls._singleton = super(ShardRangeOuterBound, cls).__new__(cls)
+ cls._singleton = super(NamespaceOuterBound, cls).__new__(cls)
return cls._singleton
def __str__(self):
@@ -5222,6 +4492,280 @@ class ShardRangeOuterBound(object):
__nonzero__ = __bool__
+@functools.total_ordering
+class Namespace(object):
+
+ __slots__ = ('_lower', '_upper', 'name')
+
+ @functools.total_ordering
+ class MaxBound(NamespaceOuterBound):
+ # singleton for maximum bound
+ def __ge__(self, other):
+ return True
+
+ @functools.total_ordering
+ class MinBound(NamespaceOuterBound):
+ # singleton for minimum bound
+ def __le__(self, other):
+ return True
+
+ MIN = MinBound()
+ MAX = MaxBound()
+
+ def __init__(self, name, lower, upper):
+ self._lower = Namespace.MIN
+ self._upper = Namespace.MAX
+ self.lower = lower
+ self.upper = upper
+ self.name = name
+
+ def __iter__(self):
+ yield 'name', str(self.name)
+ yield 'lower', self.lower_str
+ yield 'upper', self.upper_str
+
+ def __repr__(self):
+ return '%s(%s)' % (self.__class__.__name__, ', '.join(
+ '%s=%r' % prop for prop in self))
+
+ def __lt__(self, other):
+ # a Namespace is less than other if its entire namespace is less than
+ # other; if other is another Namespace that implies that this
+ # Namespace's upper must be less than or equal to the other
+ # Namespace's lower
+ if self.upper == Namespace.MAX:
+ return False
+ if isinstance(other, Namespace):
+ return self.upper <= other.lower
+ elif other is None:
+ return True
+ else:
+ return self.upper < self._encode(other)
+
+ def __gt__(self, other):
+ # a Namespace is greater than other if its entire namespace is greater
+ # than other; if other is another Namespace that implies that this
+        # Namespace's lower must be greater than or equal to the other
+ # Namespace's upper
+ if self.lower == Namespace.MIN:
+ return False
+ if isinstance(other, Namespace):
+ return self.lower >= other.upper
+ elif other is None:
+ return False
+ else:
+ return self.lower >= self._encode(other)
+
+ def __eq__(self, other):
+ # test for equality of range bounds only
+ if not isinstance(other, Namespace):
+ return False
+ return self.lower == other.lower and self.upper == other.upper
+
+ def __ne__(self, other):
+ return not (self == other)
+
+ def __contains__(self, item):
+ # test if the given item is within the namespace
+ if item == '':
+ return False
+ item = self._encode_bound(item)
+ return self.lower < item <= self.upper
+
+ @classmethod
+ def _encode(cls, value):
+ if six.PY2 and isinstance(value, six.text_type):
+ return value.encode('utf-8')
+ if six.PY3 and isinstance(value, six.binary_type):
+ # This should never fail -- the value should always be coming from
+ # valid swift paths, which means UTF-8
+ return value.decode('utf-8')
+ return value
+
+ def _encode_bound(self, bound):
+ if isinstance(bound, NamespaceOuterBound):
+ return bound
+ if not (isinstance(bound, six.text_type) or
+ isinstance(bound, six.binary_type)):
+ raise TypeError('must be a string type')
+ return self._encode(bound)
+
+ @property
+ def lower(self):
+ return self._lower
+
+ @property
+ def lower_str(self):
+ return str(self.lower)
+
+ @lower.setter
+ def lower(self, value):
+ if value is None or (value == b"" if isinstance(value, bytes) else
+ value == u""):
+ value = Namespace.MIN
+ try:
+ value = self._encode_bound(value)
+ except TypeError as err:
+ raise TypeError('lower %s' % err)
+ if value > self._upper:
+ raise ValueError(
+ 'lower (%r) must be less than or equal to upper (%r)' %
+ (value, self.upper))
+ self._lower = value
+
+ @property
+ def upper(self):
+ return self._upper
+
+ @property
+ def upper_str(self):
+ return str(self.upper)
+
+ @upper.setter
+ def upper(self, value):
+ if value is None or (value == b"" if isinstance(value, bytes) else
+ value == u""):
+ value = Namespace.MAX
+ try:
+ value = self._encode_bound(value)
+ except TypeError as err:
+ raise TypeError('upper %s' % err)
+ if value < self._lower:
+ raise ValueError(
+ 'upper (%r) must be greater than or equal to lower (%r)' %
+ (value, self.lower))
+ self._upper = value
+
+ @property
+ def end_marker(self):
+ return self.upper_str + '\x00' if self.upper else ''
+
+ def entire_namespace(self):
+ """
+ Returns True if this namespace includes the entire namespace, False
+ otherwise.
+ """
+ return (self.lower == Namespace.MIN and
+ self.upper == Namespace.MAX)
+
+ def overlaps(self, other):
+ """
+ Returns True if this namespace overlaps with the other namespace.
+
+ :param other: an instance of :class:`~swift.common.utils.Namespace`
+ """
+ if not isinstance(other, Namespace):
+ return False
+ return max(self.lower, other.lower) < min(self.upper, other.upper)
+
+ def includes(self, other):
+ """
+ Returns True if this namespace includes the whole of the other
+ namespace, False otherwise.
+
+ :param other: an instance of :class:`~swift.common.utils.Namespace`
+ """
+ return (self.lower <= other.lower) and (other.upper <= self.upper)
+
+ def expand(self, donors):
+ """
+ Expands the bounds as necessary to match the minimum and maximum bounds
+ of the given donors.
+
+ :param donors: A list of :class:`~swift.common.utils.Namespace`
+ :return: True if the bounds have been modified, False otherwise.
+ """
+ modified = False
+ new_lower = self.lower
+ new_upper = self.upper
+ for donor in donors:
+ new_lower = min(new_lower, donor.lower)
+ new_upper = max(new_upper, donor.upper)
+ if self.lower > new_lower or self.upper < new_upper:
+ self.lower = new_lower
+ self.upper = new_upper
+ modified = True
+ return modified
+
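+# An illustrative sketch of Namespace comparison semantics (the bounds
+# below are made-up object names, not real Swift paths):
+#
+#   ns1 = Namespace('a/c1', 'apple', 'banana')
+#   ns2 = Namespace('a/c2', 'banana', 'cherry')
+#   'avocado' in ns1    # True: lower < item <= upper
+#   'apple' in ns1      # False: the lower bound itself is excluded
+#   ns1 < ns2           # True: ns1.upper <= ns2.lower
+#   ns1.overlaps(ns2)   # False: adjacent bounds merely touch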
+
+class NamespaceBoundList(object):
+ def __init__(self, bounds):
+ """
+ Encapsulate a compact representation of namespaces. Each item in the
+ list is a list [lower bound, name].
+
+ :param bounds: a list of lists ``[lower bound, name]``. The list
+ should be ordered by ``lower bound``.
+ """
+ self.bounds = [] if bounds is None else bounds
+
+ @classmethod
+ def parse(cls, namespaces):
+ """
+ Create a NamespaceBoundList object by parsing a list of Namespaces or
+ shard ranges and only storing the compact bounds list.
+
+ Each Namespace in the given list of ``namespaces`` provides the next
+ [lower bound, name] list to append to the NamespaceBoundList. The
+ given ``namespaces`` should be contiguous because the
+ NamespaceBoundList only stores lower bounds; if ``namespaces`` has
+ overlaps then at least one of the overlapping namespaces may be
+ ignored; similarly, gaps between namespaces are not represented in the
+ NamespaceBoundList.
+
+ :param namespaces: A list of Namespace instances. The list should be
+ ordered by namespace bounds.
+ :return: a NamespaceBoundList.
+ """
+ if not namespaces:
+ return None
+ bounds = []
+ upper = namespaces[0].lower
+ for ns in namespaces:
+ if ns.lower < upper:
+ # Discard overlapping namespace.
+ # Overlapping namespaces are expected in lists of shard ranges
+ # fetched from the backend. For example, while a parent
+ # container is in the process of sharding, the parent shard
+ # range and its children shard ranges may be returned in the
+ # list of shard ranges. However, the backend sorts the list by
+ # (upper, state, lower, name) such that the children precede
+ # the parent, and it is the children that we prefer to retain
+ # in the NamespaceBoundList. For example, these namespaces:
+ # (a-b, "child1"), (b-c, "child2"), (a-c, "parent")
+ # would result in a NamespaceBoundList:
+ # (a, "child1"), (b, "child2")
+ # Unexpected overlaps or gaps may result in namespaces being
+ # 'extended' because only lower bounds are stored. For example,
+ # these namespaces:
+ # (a-b, "ns1"), (d-e, "ns2")
+ # would result in a NamespaceBoundList:
+ # (a, "ns1"), (d, "ns2")
+ # When used to find a target namespace for an object update
+ # that lies in a gap, the NamespaceBoundList will map the
+ # object name to the preceding namespace. In the example, an
+ # object named "c" would be mapped to "ns1". (In previous
+ # versions, an object update lying in a gap would have been
+ # mapped to the root container.)
+ continue
+ bounds.append([ns.lower_str, str(ns.name)])
+ upper = ns.upper
+ return cls(bounds)
+
+ def get_namespace(self, item):
+ """
+ Get a Namespace instance that contains ``item``.
+
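+        For example, given ``bounds`` of ``[['', 'ns1'], ['d', 'ns2']]``
+        (a hypothetical two-namespace list), ``get_namespace('c')`` returns
+        ``Namespace('ns1', '', 'd')`` and ``get_namespace('x')`` returns
+        ``Namespace('ns2', 'd', '')``.
+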
+        :param item: The item for which a Namespace is to be found.
+ :return: the Namespace that contains ``item``.
+ """
+ pos = bisect.bisect(self.bounds, [item]) - 1
+ lower, name = self.bounds[pos]
+ upper = ('' if pos + 1 == len(self.bounds)
+ else self.bounds[pos + 1][0])
+ return Namespace(name, lower, upper)
+
+
class ShardName(object):
"""
Encapsulates the components of a shard name.
@@ -5238,6 +4782,7 @@ class ShardName(object):
root container's own shard range will have a name format of
<account>/<root_container> which will raise ValueError if passed to parse.
"""
+
def __init__(self, account, root_container,
parent_container_hash,
timestamp,
@@ -5329,7 +4874,7 @@ class ShardName(object):
raise ValueError('invalid name: %s' % name)
-class ShardRange(object):
+class ShardRange(Namespace):
"""
A ShardRange encapsulates sharding state related to a container including
lower and upper bounds that define the object namespace for which the
@@ -5398,41 +4943,25 @@ class ShardRange(object):
SHARDING_STATES = (SHARDING, SHARDED)
CLEAVING_STATES = SHRINKING_STATES + SHARDING_STATES
- @functools.total_ordering
- class MaxBound(ShardRangeOuterBound):
- # singleton for maximum bound
- def __ge__(self, other):
- return True
-
- @functools.total_ordering
- class MinBound(ShardRangeOuterBound):
- # singleton for minimum bound
- def __le__(self, other):
- return True
-
- MIN = MinBound()
- MAX = MaxBound()
__slots__ = (
'account', 'container',
'_timestamp', '_meta_timestamp', '_state_timestamp', '_epoch',
- '_lower', '_upper', '_deleted', '_state', '_count', '_bytes',
+ '_deleted', '_state', '_count', '_bytes',
'_tombstones', '_reported')
- def __init__(self, name, timestamp, lower=MIN, upper=MAX,
+ def __init__(self, name, timestamp,
+ lower=Namespace.MIN, upper=Namespace.MAX,
object_count=0, bytes_used=0, meta_timestamp=None,
deleted=False, state=None, state_timestamp=None, epoch=None,
reported=False, tombstones=-1):
+ super(ShardRange, self).__init__(name=name, lower=lower, upper=upper)
self.account = self.container = self._timestamp = \
self._meta_timestamp = self._state_timestamp = self._epoch = None
- self._lower = ShardRange.MIN
- self._upper = ShardRange.MAX
self._deleted = False
self._state = None
self.name = name
self.timestamp = timestamp
- self.lower = lower
- self.upper = upper
self.deleted = deleted
self.object_count = object_count
self.bytes_used = bytes_used
@@ -5450,24 +4979,6 @@ class ShardRange(object):
# a key assumption for bisect, which is used by utils.find_shard_range
return sr.upper, sr.state, sr.lower, sr.name
- @classmethod
- def _encode(cls, value):
- if six.PY2 and isinstance(value, six.text_type):
- return value.encode('utf-8')
- if six.PY3 and isinstance(value, six.binary_type):
- # This should never fail -- the value should always be coming from
- # valid swift paths, which means UTF-8
- return value.decode('utf-8')
- return value
-
- def _encode_bound(self, bound):
- if isinstance(bound, ShardRangeOuterBound):
- return bound
- if not (isinstance(bound, six.text_type) or
- isinstance(bound, six.binary_type)):
- raise TypeError('must be a string type')
- return self._encode(bound)
-
def is_child_of(self, parent):
"""
Test if this shard range is a child of another shard range. The
@@ -5639,56 +5150,6 @@ class ShardRange(object):
self._meta_timestamp = self._to_timestamp(ts)
@property
- def lower(self):
- return self._lower
-
- @property
- def lower_str(self):
- return str(self.lower)
-
- @lower.setter
- def lower(self, value):
- if value is None or (value == b"" if isinstance(value, bytes) else
- value == u""):
- value = ShardRange.MIN
- try:
- value = self._encode_bound(value)
- except TypeError as err:
- raise TypeError('lower %s' % err)
- if value > self._upper:
- raise ValueError(
- 'lower (%r) must be less than or equal to upper (%r)' %
- (value, self.upper))
- self._lower = value
-
- @property
- def end_marker(self):
- return self.upper_str + '\x00' if self.upper else ''
-
- @property
- def upper(self):
- return self._upper
-
- @property
- def upper_str(self):
- return str(self.upper)
-
- @upper.setter
- def upper(self, value):
- if value is None or (value == b"" if isinstance(value, bytes) else
- value == u""):
- value = ShardRange.MAX
- try:
- value = self._encode_bound(value)
- except TypeError as err:
- raise TypeError('upper %s' % err)
- if value < self._lower:
- raise ValueError(
- 'upper (%r) must be greater than or equal to lower (%r)' %
- (value, self.lower))
- self._upper = value
-
- @property
def object_count(self):
return self._count
@@ -5895,56 +5356,12 @@ class ShardRange(object):
self.timestamp = timestamp or Timestamp.now()
return True
- def __contains__(self, item):
- # test if the given item is within the namespace
- if item == '':
- return False
- item = self._encode_bound(item)
- return self.lower < item <= self.upper
-
- def __lt__(self, other):
- # a ShardRange is less than other if its entire namespace is less than
- # other; if other is another ShardRange that implies that this
- # ShardRange's upper must be less than or equal to the other
- # ShardRange's lower
- if self.upper == ShardRange.MAX:
- return False
- if isinstance(other, ShardRange):
- return self.upper <= other.lower
- elif other is None:
- return True
- else:
- return self.upper < self._encode(other)
-
- def __gt__(self, other):
- # a ShardRange is greater than other if its entire namespace is greater
- # than other; if other is another ShardRange that implies that this
- # ShardRange's lower must be less greater than or equal to the other
- # ShardRange's upper
- if self.lower == ShardRange.MIN:
- return False
- if isinstance(other, ShardRange):
- return self.lower >= other.upper
- elif other is None:
- return False
- else:
- return self.lower >= self._encode(other)
-
- def __eq__(self, other):
- # test for equality of range bounds only
- if not isinstance(other, ShardRange):
- return False
- return self.lower == other.lower and self.upper == other.upper
-
# A by-the-book implementation should probably hash the value, which
# in our case would be account+container+lower+upper (+timestamp ?).
# But we seem to be okay with just the identity.
def __hash__(self):
return id(self)
- def __ne__(self, other):
- return not (self == other)
-
def __repr__(self):
return '%s<%r to %r as of %s, (%d, %d) as of %s, %s as of %s>' % (
self.__class__.__name__, self.lower, self.upper,
@@ -5952,34 +5369,6 @@ class ShardRange(object):
self.meta_timestamp.internal, self.state_text,
self.state_timestamp.internal)
- def entire_namespace(self):
- """
- Returns True if the ShardRange includes the entire namespace, False
- otherwise.
- """
- return (self.lower == ShardRange.MIN and
- self.upper == ShardRange.MAX)
-
- def overlaps(self, other):
- """
- Returns True if the ShardRange namespace overlaps with the other
- ShardRange's namespace.
-
- :param other: an instance of :class:`~swift.common.utils.ShardRange`
- """
- if not isinstance(other, ShardRange):
- return False
- return max(self.lower, other.lower) < min(self.upper, other.upper)
-
- def includes(self, other):
- """
- Returns True if this namespace includes the whole of the other
- namespace, False otherwise.
-
- :param other: an instance of :class:`~swift.common.utils.ShardRange`
- """
- return (self.lower <= other.lower) and (other.upper <= self.upper)
-
def __iter__(self):
yield 'name', self.name
yield 'timestamp', self.timestamp.internal
@@ -6028,26 +5417,6 @@ class ShardRange(object):
params['state_timestamp'], params['epoch'],
params.get('reported', 0), params.get('tombstones', -1))
- def expand(self, donors):
- """
- Expands the bounds as necessary to match the minimum and maximum bounds
- of the given donors.
-
- :param donors: A list of :class:`~swift.common.utils.ShardRange`
- :return: True if the bounds have been modified, False otherwise.
- """
- modified = False
- new_lower = self.lower
- new_upper = self.upper
- for donor in donors:
- new_lower = min(new_lower, donor.lower)
- new_upper = max(new_upper, donor.upper)
- if self.lower > new_lower or self.upper < new_upper:
- self.lower = new_lower
- self.upper = new_upper
- modified = True
- return modified
-
class ShardRangeList(UserList):
"""
@@ -6057,6 +5426,7 @@ class ShardRangeList(UserList):
This class does not enforce ordering or continuity of the list items:
callers should ensure that items are added in order as appropriate.
"""
+
def __getitem__(self, index):
# workaround for py3 - not needed for py2.7,py3.8
result = self.data[index]
@@ -6069,27 +5439,27 @@ class ShardRangeList(UserList):
only be equal to the lowest bound of all items in the list if the list
contents has been sorted.
- :return: lower bound of first item in the list, or ShardRange.MIN
+ :return: lower bound of first item in the list, or Namespace.MIN
if the list is empty.
"""
if not self:
# empty list has range MIN->MIN
- return ShardRange.MIN
+ return Namespace.MIN
return self[0].lower
@property
def upper(self):
"""
- Returns the upper bound of the first item in the list. Note: this will
+ Returns the upper bound of the last item in the list. Note: this will
only be equal to the uppermost bound of all items in the list if the
list has previously been sorted.
- :return: upper bound of first item in the list, or ShardRange.MIN
+ :return: upper bound of last item in the list, or Namespace.MIN
if the list is empty.
"""
if not self:
# empty list has range MIN->MIN
- return ShardRange.MIN
+ return Namespace.MIN
return self[-1].upper
@property
@@ -6231,79 +5601,13 @@ def filter_shard_ranges(shard_ranges, includes, marker, end_marker):
if marker or end_marker:
return list(filter(shard_range_filter, shard_ranges))
- if marker == ShardRange.MAX or end_marker == ShardRange.MIN:
+ if marker == Namespace.MAX or end_marker == Namespace.MIN:
# MIN and MAX are both Falsy so not handled by shard_range_filter
return []
return shard_ranges
-def modify_priority(conf, logger):
- """
- Modify priority by nice and ionice.
- """
-
- global _libc_setpriority
- if _libc_setpriority is None:
- _libc_setpriority = load_libc_function('setpriority',
- errcheck=True)
-
- def _setpriority(nice_priority):
- """
- setpriority for this pid
-
- :param nice_priority: valid values are -19 to 20
- """
- try:
- _libc_setpriority(PRIO_PROCESS, os.getpid(),
- int(nice_priority))
- except (ValueError, OSError):
- print(_("WARNING: Unable to modify scheduling priority of process."
- " Keeping unchanged! Check logs for more info. "))
- logger.exception('Unable to modify nice priority')
- else:
- logger.debug('set nice priority to %s' % nice_priority)
-
- nice_priority = conf.get('nice_priority')
- if nice_priority is not None:
- _setpriority(nice_priority)
-
- global _posix_syscall
- if _posix_syscall is None:
- _posix_syscall = load_libc_function('syscall', errcheck=True)
-
- def _ioprio_set(io_class, io_priority):
- """
- ioprio_set for this process
-
- :param io_class: the I/O class component, can be
- IOPRIO_CLASS_RT, IOPRIO_CLASS_BE,
- or IOPRIO_CLASS_IDLE
- :param io_priority: priority value in the I/O class
- """
- try:
- io_class = IO_CLASS_ENUM[io_class]
- io_priority = int(io_priority)
- _posix_syscall(NR_ioprio_set(),
- IOPRIO_WHO_PROCESS,
- os.getpid(),
- IOPRIO_PRIO_VALUE(io_class, io_priority))
- except (KeyError, ValueError, OSError):
- print(_("WARNING: Unable to modify I/O scheduling class "
- "and priority of process. Keeping unchanged! "
- "Check logs for more info."))
- logger.exception("Unable to modify ionice priority")
- else:
- logger.debug('set ionice class %s priority %s',
- io_class, io_priority)
-
- io_class = conf.get("ionice_class")
- if io_class is None:
- return
- io_priority = conf.get("ionice_priority", 0)
- _ioprio_set(io_class, io_priority)
-
-
def o_tmpfile_in_path_supported(dirpath):
fd = None
try:
@@ -6590,6 +5894,7 @@ class NoopMutex(object):
of which have the message-interleaving trouble you'd expect from TCP or
file handlers.
"""
+
def __init__(self):
# Usually, it's an error to have multiple greenthreads all waiting
# to write to the same file descriptor. It's often a sign of inadequate
@@ -6857,6 +6162,7 @@ class Watchdog(object):
=> the exception is raised, then the greenlet watchdog sleep(3) to
wake up for the 1st timeout expiration
"""
+
def __init__(self):
# key => (timeout, timeout_at, caller_greenthread, exception)
self._timeouts = dict()
@@ -6946,6 +6252,7 @@ class WatchdogTimeout(object):
"""
Context manager to schedule a timeout in a Watchdog instance
"""
+
def __init__(self, watchdog, timeout, exc, timeout_at=None):
"""
Schedule a timeout in a Watchdog instance
diff --git a/swift/common/utils/libc.py b/swift/common/utils/libc.py
new file mode 100644
index 000000000..df2179020
--- /dev/null
+++ b/swift/common/utils/libc.py
@@ -0,0 +1,487 @@
+# Copyright (c) 2010-2023 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Functions Swift uses to interact with libc and other low-level APIs."""
+
+import ctypes
+import ctypes.util
+import errno
+import fcntl
+import logging
+import os
+import platform
+import socket
+
+
+# These are lazily pulled from libc elsewhere
+_sys_fallocate = None
+_posix_fadvise = None
+_libc_socket = None
+_libc_bind = None
+_libc_accept = None
+# see man -s 2 setpriority
+_libc_setpriority = None
+# see man -s 2 syscall
+_posix_syscall = None
+
+# If set to non-zero, fallocate routines will fail based on free space
+# available being at or below this amount, in bytes.
+FALLOCATE_RESERVE = 0
+# Indicates if FALLOCATE_RESERVE is the percentage of free space (True) or
+# the number of bytes (False).
+FALLOCATE_IS_PERCENT = False
+
+# from /usr/include/linux/falloc.h
+FALLOC_FL_KEEP_SIZE = 1
+FALLOC_FL_PUNCH_HOLE = 2
+
+# from /usr/src/linux-headers-*/include/uapi/linux/resource.h
+PRIO_PROCESS = 0
+
+
+# /usr/include/x86_64-linux-gnu/asm/unistd_64.h defines syscalls; there
+# are many like it, but this one is mine. See man -s 2 ioprio_set.
+def NR_ioprio_set():
+ """Give __NR_ioprio_set value for your system."""
+ architecture = os.uname()[4]
+ arch_bits = platform.architecture()[0]
+    # check for a supported system; currently x86_64 and AArch64 (64-bit)
+ if architecture == 'x86_64' and arch_bits == '64bit':
+ return 251
+ elif architecture == 'aarch64' and arch_bits == '64bit':
+ return 30
+ raise OSError("Swift doesn't support ionice priority for %s %s" %
+ (architecture, arch_bits))
+
+
+# this syscall integer probably only works on x86_64 linux systems; you
+# can check if it's correct on yours with something like this:
+"""
+#include <stdio.h>
+#include <sys/syscall.h>
+
+int main(int argc, const char* argv[]) {
+ printf("%d\n", __NR_ioprio_set);
+ return 0;
+}
+"""
+
+# this is the value for "which" that says our who value will be a pid
+# pulled out of /usr/src/linux-headers-*/include/linux/ioprio.h
+IOPRIO_WHO_PROCESS = 1
+
+
+IO_CLASS_ENUM = {
+ 'IOPRIO_CLASS_RT': 1,
+ 'IOPRIO_CLASS_BE': 2,
+ 'IOPRIO_CLASS_IDLE': 3,
+}
+
+# the IOPRIO_PRIO_VALUE "macro" is also pulled from
+# /usr/src/linux-headers-*/include/linux/ioprio.h
+IOPRIO_CLASS_SHIFT = 13
+
+
+def IOPRIO_PRIO_VALUE(class_, data):
+ return (((class_) << IOPRIO_CLASS_SHIFT) | data)
+
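+# For example, a best-effort class with I/O priority 4 encodes as
+# IOPRIO_PRIO_VALUE(IO_CLASS_ENUM['IOPRIO_CLASS_BE'], 4)
+# == (2 << 13) | 4 == 16388.
+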
+
+# These constants are Linux-specific, and Python doesn't seem to know
+# about them. We ask anyway just in case that ever gets fixed.
+#
+# The values were copied from the Linux 3.x kernel headers.
+AF_ALG = getattr(socket, 'AF_ALG', 38)
+F_SETPIPE_SZ = getattr(fcntl, 'F_SETPIPE_SZ', 1031)
+
+
+def noop_libc_function(*args):
+ return 0
+
+
+def load_libc_function(func_name, log_error=True,
+ fail_if_missing=False, errcheck=False):
+ """
+ Attempt to find the function in libc, otherwise return a no-op func.
+
+ :param func_name: name of the function to pull from libc.
+ :param log_error: log an error when a function can't be found
+ :param fail_if_missing: raise an exception when a function can't be found.
+ Default behavior is to return a no-op function.
+    :param errcheck: boolean; if true, install a wrapper on the function
+                     that checks for a return value of -1, calls
+                     ctypes.get_errno and raises an OSError
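+
+    A usage sketch (``getpid`` serves purely as an illustration)::
+
+        _getpid = load_libc_function('getpid')
+        if _getpid is not noop_libc_function:
+            assert _getpid() == os.getpid()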
+ """
+ try:
+ libc = ctypes.CDLL(ctypes.util.find_library('c'), use_errno=True)
+ func = getattr(libc, func_name)
+ except AttributeError:
+ if fail_if_missing:
+ raise
+ if log_error:
+ logging.warning("Unable to locate %s in libc. Leaving as a "
+ "no-op.", func_name)
+ return noop_libc_function
+ if errcheck:
+ def _errcheck(result, f, args):
+ if result == -1:
+ errcode = ctypes.get_errno()
+ raise OSError(errcode, os.strerror(errcode))
+ return result
+ func.errcheck = _errcheck
+ return func
+
+
+class _LibcWrapper(object):
+ """
+ A callable object that forwards its calls to a C function from libc.
+
+ These objects are lazy. libc will not be checked until someone tries to
+ either call the function or check its availability.
+
+ _LibcWrapper objects have an "available" property; if true, then libc
+ has the function of that name. If false, then calls will fail with a
+ NotImplementedError.
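+
+    A minimal sketch of the intended pattern (``fallocate`` is wrapped this
+    way later in this module)::
+
+        wrapper = _LibcWrapper('fallocate')
+        if wrapper.available:
+            wrapper(fd, mode, offset, length)  # forwarded to libc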
+ """
+
+ def __init__(self, func_name):
+ self._func_name = func_name
+ self._func_handle = None
+ self._loaded = False
+
+ def _ensure_loaded(self):
+ if not self._loaded:
+ func_name = self._func_name
+ try:
+ # Keep everything in this try-block in local variables so
+ # that a typo in self.some_attribute_name doesn't raise a
+ # spurious AttributeError.
+ func_handle = load_libc_function(
+ func_name, fail_if_missing=True)
+ self._func_handle = func_handle
+ except AttributeError:
+ # We pass fail_if_missing=True to load_libc_function and
+ # then ignore the error. It's weird, but otherwise we have
+ # to check if self._func_handle is noop_libc_function, and
+ # that's even weirder.
+ pass
+ self._loaded = True
+
+ @property
+ def available(self):
+ self._ensure_loaded()
+ return bool(self._func_handle)
+
+ def __call__(self, *args):
+ if self.available:
+ return self._func_handle(*args)
+ else:
+ raise NotImplementedError(
+ "No function %r found in libc" % self._func_name)
+
+
+def config_fallocate_value(reserve_value):
+ """
+    Returns a tuple: the fallocate reserve_value as an int or float, and
+    is_percent as a boolean.
+    Raises ValueError on an invalid fallocate value.
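+
+    For example::
+
+        >>> config_fallocate_value('10%')
+        (10.0, True)
+        >>> config_fallocate_value('1024')
+        (1024, False)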
+ """
+ try:
+ if str(reserve_value[-1:]) == '%':
+ reserve_value = float(reserve_value[:-1])
+ is_percent = True
+ else:
+ reserve_value = int(reserve_value)
+ is_percent = False
+ except ValueError:
+ raise ValueError('Error: %s is an invalid value for fallocate'
+ '_reserve.' % reserve_value)
+ return reserve_value, is_percent
+
+
+_fallocate_enabled = True
+_fallocate_warned_about_missing = False
+_sys_fallocate = _LibcWrapper('fallocate')
+_sys_posix_fallocate = _LibcWrapper('posix_fallocate')
+
+
+def disable_fallocate():
+ global _fallocate_enabled
+ _fallocate_enabled = False
+
+
+def fallocate(fd, size, offset=0):
+ """
+ Pre-allocate disk space for a file.
+
+ This function can be disabled by calling disable_fallocate(). If no
+ suitable C function is available in libc, this function is a no-op.
+
+ :param fd: file descriptor
+    :param size: size to allocate (in bytes)
+    :param offset: offset within the file at which to start allocating
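+
+    A hedged example (the path is purely illustrative)::
+
+        fd = os.open('/srv/node/d1/tmp/spool', os.O_WRONLY | os.O_CREAT)
+        fallocate(fd, 1024 * 1024)  # invisibly reserve 1 MiB
+        os.close(fd)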
+ """
+ global _fallocate_enabled
+ if not _fallocate_enabled:
+ return
+
+ if size < 0:
+ size = 0 # Done historically; not really sure why
+ if size >= (1 << 63):
+ raise ValueError('size must be less than 2 ** 63')
+ if offset < 0:
+ raise ValueError('offset must be non-negative')
+ if offset >= (1 << 63):
+ raise ValueError('offset must be less than 2 ** 63')
+
+ # Make sure there's some (configurable) amount of free space in
+ # addition to the number of bytes we're allocating.
+ if FALLOCATE_RESERVE:
+ st = os.fstatvfs(fd)
+ free = st.f_frsize * st.f_bavail - size
+ if FALLOCATE_IS_PERCENT:
+ free = (float(free) / float(st.f_frsize * st.f_blocks)) * 100
+ if float(free) <= float(FALLOCATE_RESERVE):
+ raise OSError(
+ errno.ENOSPC,
+ 'FALLOCATE_RESERVE fail %g <= %g' %
+ (free, FALLOCATE_RESERVE))
+
+ if _sys_fallocate.available:
+ # Parameters are (fd, mode, offset, length).
+ #
+ # mode=FALLOC_FL_KEEP_SIZE pre-allocates invisibly (without
+ # affecting the reported file size).
+ ret = _sys_fallocate(
+ fd, FALLOC_FL_KEEP_SIZE, ctypes.c_uint64(offset),
+ ctypes.c_uint64(size))
+ err = ctypes.get_errno()
+ elif _sys_posix_fallocate.available:
+ # Parameters are (fd, offset, length).
+ ret = _sys_posix_fallocate(fd, ctypes.c_uint64(offset),
+ ctypes.c_uint64(size))
+ err = ctypes.get_errno()
+ else:
+ # No suitable fallocate-like function is in our libc. Warn about it,
+ # but just once per process, and then do nothing.
+ global _fallocate_warned_about_missing
+ if not _fallocate_warned_about_missing:
+ logging.warning("Unable to locate fallocate, posix_fallocate in "
+ "libc. Leaving as a no-op.")
+ _fallocate_warned_about_missing = True
+ return
+
+ if ret and err not in (0, errno.ENOSYS, errno.EOPNOTSUPP,
+ errno.EINVAL):
+ raise OSError(err, 'Unable to fallocate(%s)' % size)
+
+
+def punch_hole(fd, offset, length):
+ """
+ De-allocate disk space in the middle of a file.
+
+ :param fd: file descriptor
+ :param offset: index of first byte to de-allocate
+ :param length: number of bytes to de-allocate
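+
+    A hedged example, assuming ``fd`` refers to a file with at least 8 KiB
+    of allocated data::
+
+        punch_hole(fd, 4096, 4096)  # de-allocate the second 4 KiB block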
+ """
+ if offset < 0:
+ raise ValueError('offset must be non-negative')
+ if offset >= (1 << 63):
+ raise ValueError('offset must be less than 2 ** 63')
+ if length <= 0:
+ raise ValueError('length must be positive')
+ if length >= (1 << 63):
+ raise ValueError('length must be less than 2 ** 63')
+
+ if _sys_fallocate.available:
+ # Parameters are (fd, mode, offset, length).
+ ret = _sys_fallocate(
+ fd,
+ FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE,
+ ctypes.c_uint64(offset),
+ ctypes.c_uint64(length))
+ err = ctypes.get_errno()
+ if ret and err:
+ mode_str = "FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE"
+ raise OSError(err, "Unable to fallocate(%d, %s, %d, %d)" % (
+ fd, mode_str, offset, length))
+ else:
+ raise OSError(errno.ENOTSUP,
+ 'No suitable C function found for hole punching')
+
+
+def drop_buffer_cache(fd, offset, length):
+ """
+ Drop 'buffer' cache for the given range of the given file.
+
+ :param fd: file descriptor
+ :param offset: start offset
+ :param length: length
+ """
+ global _posix_fadvise
+ if _posix_fadvise is None:
+ _posix_fadvise = load_libc_function('posix_fadvise64')
+ # 4 means "POSIX_FADV_DONTNEED"
+ ret = _posix_fadvise(fd, ctypes.c_uint64(offset),
+ ctypes.c_uint64(length), 4)
+ if ret != 0:
+ logging.warning("posix_fadvise64(%(fd)s, %(offset)s, %(length)s, 4) "
+ "-> %(ret)s", {'fd': fd, 'offset': offset,
+ 'length': length, 'ret': ret})
+
+
+class sockaddr_alg(ctypes.Structure):
+ _fields_ = [("salg_family", ctypes.c_ushort),
+ ("salg_type", ctypes.c_ubyte * 14),
+ ("salg_feat", ctypes.c_uint),
+ ("salg_mask", ctypes.c_uint),
+ ("salg_name", ctypes.c_ubyte * 64)]
+
+
+_bound_md5_sockfd = None
+
+
+def get_md5_socket():
+ """
+    Get an MD5 socket file descriptor. One can MD5 data with it by writing
+    the data to the socket with os.write, then reading the 16-byte checksum
+    back with os.read.
+
+ NOTE: It is the caller's responsibility to ensure that os.close() is
+ called on the returned file descriptor. This is a bare file descriptor,
+ not a Python object. It doesn't close itself.
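+
+    A usage sketch (Linux-only, per the AF_ALG notes in the source)::
+
+        fd = get_md5_socket()
+        os.write(fd, b'some data')
+        digest = os.read(fd, 16)  # the raw 16-byte MD5 digest
+        os.close(fd)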
+ """
+
+ # Linux's AF_ALG sockets work like this:
+ #
+ # First, initialize a socket with socket() and bind(). This tells the
+ # socket what algorithm to use, as well as setting up any necessary bits
+ # like crypto keys. Of course, MD5 doesn't need any keys, so it's just the
+ # algorithm name.
+ #
+ # Second, to hash some data, get a second socket by calling accept() on
+ # the first socket. Write data to the socket, then when finished, read the
+ # checksum from the socket and close it. This lets you checksum multiple
+ # things without repeating all the setup code each time.
+ #
+ # Since we only need to bind() one socket, we do that here and save it for
+ # future re-use. That way, we only use one file descriptor to get an MD5
+ # socket instead of two, and we also get to save some syscalls.
+
+ global _bound_md5_sockfd
+ global _libc_socket
+ global _libc_bind
+ global _libc_accept
+
+ if _libc_accept is None:
+ _libc_accept = load_libc_function('accept', fail_if_missing=True)
+ if _libc_socket is None:
+ _libc_socket = load_libc_function('socket', fail_if_missing=True)
+ if _libc_bind is None:
+ _libc_bind = load_libc_function('bind', fail_if_missing=True)
+
+ # Do this at first call rather than at import time so that we don't use a
+ # file descriptor on systems that aren't using any MD5 sockets.
+ if _bound_md5_sockfd is None:
+ sockaddr_setup = sockaddr_alg(
+ AF_ALG,
+ (ord('h'), ord('a'), ord('s'), ord('h'), 0),
+ 0, 0,
+ (ord('m'), ord('d'), ord('5'), 0))
+ hash_sockfd = _libc_socket(ctypes.c_int(AF_ALG),
+ ctypes.c_int(socket.SOCK_SEQPACKET),
+ ctypes.c_int(0))
+ if hash_sockfd < 0:
+ raise IOError(ctypes.get_errno(),
+ "Failed to initialize MD5 socket")
+
+ bind_result = _libc_bind(ctypes.c_int(hash_sockfd),
+ ctypes.pointer(sockaddr_setup),
+ ctypes.c_int(ctypes.sizeof(sockaddr_alg)))
+ if bind_result < 0:
+ os.close(hash_sockfd)
+ raise IOError(ctypes.get_errno(), "Failed to bind MD5 socket")
+
+ _bound_md5_sockfd = hash_sockfd
+
+ md5_sockfd = _libc_accept(ctypes.c_int(_bound_md5_sockfd), None, 0)
+ if md5_sockfd < 0:
+ raise IOError(ctypes.get_errno(), "Failed to accept MD5 socket")
+
+ return md5_sockfd
+
+
+def modify_priority(conf, logger):
+ """
+ Modify priority by nice and ionice.
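+
+    A hedged illustration (the conf values shown are arbitrary)::
+
+        modify_priority({'nice_priority': '10',
+                         'ionice_class': 'IOPRIO_CLASS_IDLE'}, logger)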
+ """
+
+ global _libc_setpriority
+ if _libc_setpriority is None:
+ _libc_setpriority = load_libc_function('setpriority',
+ errcheck=True)
+
+ def _setpriority(nice_priority):
+ """
+ setpriority for this pid
+
+ :param nice_priority: valid values are -19 to 20
+ """
+ try:
+ _libc_setpriority(PRIO_PROCESS, os.getpid(),
+ int(nice_priority))
+ except (ValueError, OSError):
+ print("WARNING: Unable to modify scheduling priority of process."
+ " Keeping unchanged! Check logs for more info. ")
+ logger.exception('Unable to modify nice priority')
+ else:
+ logger.debug('set nice priority to %s' % nice_priority)
+
+ nice_priority = conf.get('nice_priority')
+ if nice_priority is not None:
+ _setpriority(nice_priority)
+
+ global _posix_syscall
+ if _posix_syscall is None:
+ _posix_syscall = load_libc_function('syscall', errcheck=True)
+
+ def _ioprio_set(io_class, io_priority):
+ """
+ ioprio_set for this process
+
+ :param io_class: the I/O class component, can be
+ IOPRIO_CLASS_RT, IOPRIO_CLASS_BE,
+ or IOPRIO_CLASS_IDLE
+ :param io_priority: priority value in the I/O class
+ """
+ try:
+ io_class = IO_CLASS_ENUM[io_class]
+ io_priority = int(io_priority)
+ _posix_syscall(NR_ioprio_set(),
+ IOPRIO_WHO_PROCESS,
+ os.getpid(),
+ IOPRIO_PRIO_VALUE(io_class, io_priority))
+ except (KeyError, ValueError, OSError):
+ print("WARNING: Unable to modify I/O scheduling class "
+ "and priority of process. Keeping unchanged! "
+ "Check logs for more info.")
+ logger.exception("Unable to modify ionice priority")
+ else:
+ logger.debug('set ionice class %s priority %s',
+ io_class, io_priority)
+
+ io_class = conf.get("ionice_class")
+ if io_class is None:
+ return
+ io_priority = conf.get("ionice_priority", 0)
+ _ioprio_set(io_class, io_priority)
diff --git a/swift/common/utils/timestamp.py b/swift/common/utils/timestamp.py
new file mode 100644
index 000000000..be83fe512
--- /dev/null
+++ b/swift/common/utils/timestamp.py
@@ -0,0 +1,399 @@
+# Copyright (c) 2010-2023 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Timestamp-related functions for use with Swift."""
+
+import datetime
+import functools
+import math
+import time
+
+import six
+
+
+NORMAL_FORMAT = "%016.05f"
+INTERNAL_FORMAT = NORMAL_FORMAT + '_%016x'
+SHORT_FORMAT = NORMAL_FORMAT + '_%x'
+MAX_OFFSET = (16 ** 16) - 1
+PRECISION = 1e-5
+# Setting this to True will cause the internal format to always display
+# extended digits - even when the value is equivalent to the normalized form.
+# This isn't ideal during an upgrade when some servers might not understand
+# the new time format - but flipping it to True works great for testing.
+FORCE_INTERNAL = False # or True
+
+
+@functools.total_ordering
+class Timestamp(object):
+ """
+ Internal Representation of Swift Time.
+
+ The normalized form of the X-Timestamp header looks like a float
+ with a fixed width to ensure stable string sorting - normalized
+ timestamps look like "1402464677.04188"
+
+    To support overwrites of existing data without modifying the original
+    timestamp but still maintain consistency, a second internal offset vector
+    is appended to the normalized timestamp form, which compares and sorts
+    greater than the fixed-width float format but less than a newer timestamp.
+ The internalized format of timestamps looks like
+ "1402464677.04188_0000000000000000" - the portion after the underscore is
+ the offset and is a formatted hexadecimal integer.
+
+ The internalized form is not exposed to clients in responses from
+ Swift. Normal client operations will not create a timestamp with an
+ offset.
+
+ The Timestamp class in common.utils supports internalized and
+ normalized formatting of timestamps and also comparison of timestamp
+ values. When the offset value of a Timestamp is 0 - it's considered
+ insignificant and need not be represented in the string format; to
+ support backwards compatibility during a Swift upgrade the
+ internalized and normalized form of a Timestamp with an
+ insignificant offset are identical. When a timestamp includes an
+ offset it will always be represented in the internalized form, but
+ is still excluded from the normalized form. Timestamps with an
+ equivalent timestamp portion (the float part) will compare and order
+ by their offset. Timestamps with a greater timestamp portion will
+ always compare and order greater than a Timestamp with a lesser
+    timestamp regardless of its offset. String comparison and ordering
+ is guaranteed for the internalized string format, and is backwards
+ compatible for normalized timestamps which do not include an offset.
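+
+    For example (an illustrative timestamp with an offset of 1)::
+
+        >>> t = Timestamp('1402464677.04188', offset=1)
+        >>> t.normal
+        '1402464677.04188'
+        >>> t.internal
+        '1402464677.04188_0000000000000001'
+        >>> Timestamp(t.internal) == t
+        True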
+ """
+
+ def __init__(self, timestamp, offset=0, delta=0, check_bounds=True):
+ """
+ Create a new Timestamp.
+
+ :param timestamp: time in seconds since the Epoch, may be any of:
+
+ * a float or integer
+ * normalized/internalized string
+ * another instance of this class (offset is preserved)
+
+ :param offset: the second internal offset vector, an int
+ :param delta: deca-microsecond difference from the base timestamp
+ param, an int
+ """
+ if isinstance(timestamp, bytes):
+ timestamp = timestamp.decode('ascii')
+ if isinstance(timestamp, six.string_types):
+ base, base_offset = timestamp.partition('_')[::2]
+ self.timestamp = float(base)
+ if '_' in base_offset:
+ raise ValueError('invalid literal for int() with base 16: '
+ '%r' % base_offset)
+ if base_offset:
+ self.offset = int(base_offset, 16)
+ else:
+ self.offset = 0
+ else:
+ self.timestamp = float(timestamp)
+ self.offset = getattr(timestamp, 'offset', 0)
+ # increment offset
+ if offset >= 0:
+ self.offset += offset
+ else:
+ raise ValueError('offset must be non-negative')
+ if self.offset > MAX_OFFSET:
+ raise ValueError('offset must be smaller than %d' % MAX_OFFSET)
+ self.raw = int(round(self.timestamp / PRECISION))
+ # add delta
+ if delta:
+ self.raw = self.raw + delta
+ if self.raw <= 0:
+ raise ValueError(
+ 'delta must be greater than %d' % (-1 * self.raw))
+ self.timestamp = float(self.raw * PRECISION)
+ if check_bounds:
+ if self.timestamp < 0:
+ raise ValueError('timestamp cannot be negative')
+ if self.timestamp >= 10000000000:
+ raise ValueError('timestamp too large')
+
+ @classmethod
+ def now(cls, offset=0, delta=0):
+ return cls(time.time(), offset=offset, delta=delta)
+
+ def __repr__(self):
+ return INTERNAL_FORMAT % (self.timestamp, self.offset)
+
+ def __str__(self):
+ raise TypeError('You must specify which string format is required')
+
+ def __float__(self):
+ return self.timestamp
+
+ def __int__(self):
+ return int(self.timestamp)
+
+ def __nonzero__(self):
+ return bool(self.timestamp or self.offset)
+
+ def __bool__(self):
+ return self.__nonzero__()
+
+ @property
+ def normal(self):
+ return NORMAL_FORMAT % self.timestamp
+
+ @property
+ def internal(self):
+ if self.offset or FORCE_INTERNAL:
+ return INTERNAL_FORMAT % (self.timestamp, self.offset)
+ else:
+ return self.normal
+
+ @property
+ def short(self):
+ if self.offset or FORCE_INTERNAL:
+ return SHORT_FORMAT % (self.timestamp, self.offset)
+ else:
+ return self.normal
+
+ @property
+ def isoformat(self):
+ """
+ Get an isoformat string representation of the 'normal' part of the
+ Timestamp with microsecond precision and no trailing timezone, for
+ example::
+
+ 1970-01-01T00:00:00.000000
+
+ :return: an isoformat string
+ """
+ t = float(self.normal)
+ if six.PY3:
+            # On Python 3, round manually using the ROUND_HALF_EVEN method,
+            # to match the rounding Python 2 used. Python 3 initially used a
+            # different rounding method, but Python 3.4.4 and 3.5.1 reverted
+            # to ROUND_HALF_EVEN, as in Python 2.
+            # See https://bugs.python.org/issue23517
+ frac, t = math.modf(t)
+ us = round(frac * 1e6)
+ if us >= 1000000:
+ t += 1
+ us -= 1000000
+ elif us < 0:
+ t -= 1
+ us += 1000000
+ dt = datetime.datetime.utcfromtimestamp(t)
+ dt = dt.replace(microsecond=us)
+ else:
+ dt = datetime.datetime.utcfromtimestamp(t)
+
+ isoformat = dt.isoformat()
+ # python isoformat() doesn't include msecs when zero
+ if len(isoformat) < len("1970-01-01T00:00:00.000000"):
+ isoformat += ".000000"
+ return isoformat
+
+ @classmethod
+ def from_isoformat(cls, date_string):
+ """
+ Parse an isoformat string representation of time to a Timestamp object.
+
+        :param date_string: a string formatted as per a Timestamp.isoformat
+ property.
+ :return: an instance of this class.
+ """
+ start = datetime.datetime.strptime(date_string, "%Y-%m-%dT%H:%M:%S.%f")
+ delta = start - EPOCH
+ # This calculation is based on Python 2.7's Modules/datetimemodule.c,
+ # function delta_to_microseconds(), but written in Python.
+ return cls(delta.total_seconds())
+
+ def ceil(self):
+ """
+ Return the 'normal' part of the timestamp rounded up to the nearest
+ integer number of seconds.
+
+ This value should be used whenever the second-precision Last-Modified
+ time of a resource is required.
+
+ :return: a float value with second precision.
+ """
+ return math.ceil(float(self))
+
+ def __eq__(self, other):
+ if other is None:
+ return False
+ if not isinstance(other, Timestamp):
+ try:
+ other = Timestamp(other, check_bounds=False)
+ except ValueError:
+ return False
+ return self.internal == other.internal
+
+ def __ne__(self, other):
+ return not (self == other)
+
+ def __lt__(self, other):
+ if other is None:
+ return False
+ if not isinstance(other, Timestamp):
+ other = Timestamp(other, check_bounds=False)
+ if other.timestamp < 0:
+ return False
+ if other.timestamp >= 10000000000:
+ return True
+ return self.internal < other.internal
+
+ def __hash__(self):
+ return hash(self.internal)
+
+ def __invert__(self):
+ if self.offset:
+ raise ValueError('Cannot invert timestamps with offsets')
+ return Timestamp((999999999999999 - self.raw) * PRECISION)
+
+
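A quick sketch of how an offset surfaces in the representations defined above (values invented; assumes the module is importable as swift.common.utils.timestamp, its new home in this diff):

    from swift.common.utils.timestamp import Timestamp

    t = Timestamp(1234567890.12345, offset=3)
    print(t.normal)    # 1234567890.12345 -- the offset is not visible
    print(t.internal)  # 1234567890.12345_0000000000000003
    print(t == Timestamp(1234567890.12345))  # False: offsets differ
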
+def encode_timestamps(t1, t2=None, t3=None, explicit=False):
+ """
+    Encode up to three timestamps into a string. Unlike a Timestamp object,
+    the encoded string does NOT use fixed-width fields and consequently no
+ relative chronology of the timestamps can be inferred from lexicographic
+ sorting of encoded timestamp strings.
+
+ The format of the encoded string is:
+ <t1>[<+/-><t2 - t1>[<+/-><t3 - t2>]]
+
+ i.e. if t1 = t2 = t3 then just the string representation of t1 is returned,
+ otherwise the time offsets for t2 and t3 are appended. If explicit is True
+ then the offsets for t2 and t3 are always appended even if zero.
+
+ Note: any offset value in t1 will be preserved, but offsets on t2 and t3
+ are not preserved. In the anticipated use cases for this method (and the
+ inverse decode_timestamps method) the timestamps passed as t2 and t3 are
+ not expected to have offsets as they will be timestamps associated with a
+ POST request. In the case where the encoding is used in a container objects
+ table row, t1 could be the PUT or DELETE time but t2 and t3 represent the
+ content type and metadata times (if different from the data file) i.e.
+ correspond to POST timestamps. In the case where the encoded form is used
+ in a .meta file name, t1 and t2 both correspond to POST timestamps.
+ """
+ form = '{0}'
+ values = [t1.short]
+ if t2 is not None:
+ t2_t1_delta = t2.raw - t1.raw
+ explicit = explicit or (t2_t1_delta != 0)
+ values.append(t2_t1_delta)
+ if t3 is not None:
+ t3_t2_delta = t3.raw - t2.raw
+ explicit = explicit or (t3_t2_delta != 0)
+ values.append(t3_t2_delta)
+ if explicit:
+ form += '{1:+x}'
+ if t3 is not None:
+ form += '{2:+x}'
+ return form.format(*values)
+
+
+def decode_timestamps(encoded, explicit=False):
+ """
+ Parses a string of the form generated by encode_timestamps and returns
+ a tuple of the three component timestamps. If explicit is False, component
+ timestamps that are not explicitly encoded will be assumed to have zero
+ delta from the previous component and therefore take the value of the
+ previous component. If explicit is True, component timestamps that are
+ not explicitly encoded will be returned with value None.
+ """
+    # TODO: some tests, e.g. in test_replicator, put float timestamp values
+    # into container dbs, hence this defensive check, but in the real world
+    # this may never happen.
+ if not isinstance(encoded, six.string_types):
+ ts = Timestamp(encoded)
+ return ts, ts, ts
+
+ parts = []
+ signs = []
+ pos_parts = encoded.split('+')
+ for part in pos_parts:
+ # parse time components and their signs
+ # e.g. x-y+z --> parts = [x, y, z] and signs = [+1, -1, +1]
+ neg_parts = part.split('-')
+ parts = parts + neg_parts
+ signs = signs + [1] + [-1] * (len(neg_parts) - 1)
+ t1 = Timestamp(parts[0])
+ t2 = t3 = None
+ if len(parts) > 1:
+ t2 = t1
+ delta = signs[1] * int(parts[1], 16)
+ # if delta = 0 we want t2 = t3 = t1 in order to
+ # preserve any offset in t1 - only construct a distinct
+ # timestamp if there is a non-zero delta.
+ if delta:
+ t2 = Timestamp((t1.raw + delta) * PRECISION)
+ elif not explicit:
+ t2 = t1
+ if len(parts) > 2:
+ t3 = t2
+ delta = signs[2] * int(parts[2], 16)
+ if delta:
+ t3 = Timestamp((t2.raw + delta) * PRECISION)
+ elif not explicit:
+ t3 = t2
+ return t1, t2, t3
+
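A worked example of the encoding and its inverse (timestamps invented, Swift checkout assumed):

    from swift.common.utils.timestamp import (
        Timestamp, encode_timestamps, decode_timestamps)

    t1 = Timestamp(1234567890.12345)
    t2 = Timestamp(1234567890.12355)  # ten ticks of PRECISION later
    encoded = encode_timestamps(t1, t2, t2)
    print(encoded)  # 1234567890.12345+a+0 -- deltas are hex-encoded
    d1, d2, d3 = decode_timestamps(encoded)
    print(d2.normal, d3 == d2)  # 1234567890.12355 True
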
+
+def normalize_timestamp(timestamp):
+ """
+ Format a timestamp (string or numeric) into a standardized
+ xxxxxxxxxx.xxxxx (10.5) format.
+
+ Note that timestamps using values greater than or equal to November 20th,
+ 2286 at 17:46 UTC will use 11 digits to represent the number of
+ seconds.
+
+ :param timestamp: unix timestamp
+ :returns: normalized timestamp as a string
+ """
+ return Timestamp(timestamp).normal
+
+
+EPOCH = datetime.datetime(1970, 1, 1)
+
+
+def last_modified_date_to_timestamp(last_modified_date_str):
+ """
+ Convert a last modified date (like you'd get from a container listing,
+    e.g. 2014-02-28T23:22:36.698390) to a Timestamp.
+ """
+ return Timestamp.from_isoformat(last_modified_date_str)
+
+
+def normalize_delete_at_timestamp(timestamp, high_precision=False):
+ """
+ Format a timestamp (string or numeric) into a standardized
+ xxxxxxxxxx (10) or xxxxxxxxxx.xxxxx (10.5) format.
+
+ Note that timestamps less than 0000000000 are raised to
+ 0000000000 and values greater than November 20th, 2286 at
+ 17:46:39 UTC will be capped at that date and time, resulting in
+ no return value exceeding 9999999999.99999 (or 9999999999 if
+ using low-precision).
+
+ This cap is because the expirer is already working through a
+    sorted list of strings that are all 10 characters long. Adding
+ another digit would mess up the sort and cause the expirer to
+ break from processing early. By 2286, this problem will need to
+ be fixed, probably by creating an additional .expiring_objects
+ account to work from with 11 (or more) digit container names.
+
+ :param timestamp: unix timestamp
+ :returns: normalized timestamp as a string
+ """
+ fmt = '%016.5f' if high_precision else '%010d'
+ return fmt % min(max(0, float(timestamp)), 9999999999.99999)
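To make the clamping concrete, a small sketch (same import assumption as above):

    from swift.common.utils.timestamp import normalize_delete_at_timestamp

    print(normalize_delete_at_timestamp(-5))    # 0000000000 (raised)
    print(normalize_delete_at_timestamp(1e12))  # 9999999999 (capped)
    print(normalize_delete_at_timestamp(1234567890.5, high_precision=True))
    # 1234567890.50000
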
diff --git a/swift/common/wsgi.py b/swift/common/wsgi.py
index 4fa4946dd..7c39a89e2 100644
--- a/swift/common/wsgi.py
+++ b/swift/common/wsgi.py
@@ -361,10 +361,14 @@ def loadapp(conf_file, global_conf=None, allow_modify_pipeline=True):
if func and allow_modify_pipeline:
func(PipelineWrapper(ctx))
filters = [c.create() for c in reversed(ctx.filter_contexts)]
+ pipeline = [ultimate_app]
+ ultimate_app._pipeline = pipeline
+ ultimate_app._pipeline_final_app = ultimate_app
app = ultimate_app
- app._pipeline_final_app = ultimate_app
for filter_app in filters:
- app = filter_app(app)
+ app = filter_app(pipeline[0])
+ pipeline.insert(0, app)
+ app._pipeline = pipeline
app._pipeline_final_app = ultimate_app
return app
return ctx.create()
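The effect of the new wiring is that every app in the pipeline shares a single ``_pipeline`` list, ordered outermost filter first, plus a direct reference to the final proxy app. A minimal stand-alone sketch with stand-in objects rather than real WSGI filters:

    class App(object):
        pass

    class Filter(object):
        def __init__(self, next_app):
            self.next_app = next_app

    ultimate_app = App()
    pipeline = [ultimate_app]
    ultimate_app._pipeline = pipeline
    ultimate_app._pipeline_final_app = ultimate_app
    for _ in range(2):  # filters are applied in reversed pipeline order
        app = Filter(pipeline[0])
        pipeline.insert(0, app)
        app._pipeline = pipeline
        app._pipeline_final_app = ultimate_app
    assert all(a._pipeline is pipeline for a in pipeline)
    assert pipeline[-1] is ultimate_app  # final app is always last
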
diff --git a/swift/container/server.py b/swift/container/server.py
index 8afc04750..080716d68 100644
--- a/swift/container/server.py
+++ b/swift/container/server.py
@@ -860,7 +860,14 @@ class ContainerController(BaseStorageServer):
@public
@timing_stats()
def POST(self, req):
- """Handle HTTP POST request."""
+ """
+ Handle HTTP POST request.
+
+ A POST request will update the container's ``put_timestamp``, unless
+ it has an ``X-Backend-No-Timestamp-Update`` header with a truthy value.
+
+ :param req: an instance of :class:`~swift.common.swob.Request`.
+ """
drive, part, account, container = get_container_name_and_placement(req)
req_timestamp = valid_timestamp(req)
if 'x-container-sync-to' in req.headers:
@@ -878,7 +885,9 @@ class ContainerController(BaseStorageServer):
broker = self._get_container_broker(drive, part, account, container)
if broker.is_deleted():
return HTTPNotFound(request=req)
- broker.update_put_timestamp(req_timestamp.internal)
+ if not config_true_value(
+ req.headers.get('x-backend-no-timestamp-update', False)):
+ broker.update_put_timestamp(req_timestamp.internal)
self._update_metadata(req, broker, req_timestamp, 'POST')
return HTTPNoContent(request=req)
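A hedged sketch of the kind of backend request this enables (host, port, device and partition are invented; this is a backend API, not client-facing):

    from six.moves import http_client

    conn = http_client.HTTPConnection('127.0.0.1', 6201)
    conn.request('POST', '/sda1/0/AUTH_test/c', headers={
        'X-Timestamp': '0000001234.00000',
        'X-Backend-No-Timestamp-Update': 'true',
        'X-Container-Meta-Color': 'blue',
    })
    # expect 204; metadata is updated but put_timestamp is left alone
    print(conn.getresponse().status)
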
diff --git a/swift/container/sharder.py b/swift/container/sharder.py
index 7afa3d840..ee97880cd 100644
--- a/swift/container/sharder.py
+++ b/swift/container/sharder.py
@@ -15,6 +15,7 @@
import collections
import errno
import json
+import logging
import operator
import time
from collections import defaultdict
@@ -37,7 +38,8 @@ from swift.common.swob import str_to_wsgi
from swift.common.utils import get_logger, config_true_value, \
dump_recon_cache, whataremyips, Timestamp, ShardRange, GreenAsyncPile, \
config_positive_int_value, quorum_size, parse_override_options, \
- Everything, config_auto_int_value, ShardRangeList, config_percent_value
+ Everything, config_auto_int_value, ShardRangeList, config_percent_value, \
+ node_to_string
from swift.container.backend import ContainerBroker, \
RECORD_TYPE_SHARD, UNSHARDED, SHARDING, SHARDED, COLLAPSED, \
SHARD_UPDATE_STATES, sift_shard_ranges, SHARD_UPDATE_STAT_STATES
@@ -894,7 +896,6 @@ class ContainerSharder(ContainerSharderConf, ContainerReplicator):
internal_client_conf_path,
'Swift Container Sharder',
request_tries,
- allow_modify_pipeline=False,
use_replication_network=True,
global_conf={'log_name': '%s-ic' % conf.get(
'log_name', self.log_route)})
@@ -908,6 +909,44 @@ class ContainerSharder(ContainerSharderConf, ContainerReplicator):
self.stats_interval = float(conf.get('stats_interval', '3600'))
self.reported = 0
+ def _format_log_msg(self, broker, msg, *args):
+ # make best effort to include broker properties...
+ try:
+ db_file = broker.db_file
+ except Exception: # noqa
+ db_file = ''
+ try:
+ path = broker.path
+ except Exception: # noqa
+ path = ''
+
+ if args:
+ msg = msg % args
+ return '%s, path: %s, db: %s' % (msg, quote(path), db_file)
+
+ def _log(self, level, broker, msg, *args):
+ if not self.logger.isEnabledFor(level):
+ return
+
+ self.logger.log(level, self._format_log_msg(broker, msg, *args))
+
+ def debug(self, broker, msg, *args, **kwargs):
+ self._log(logging.DEBUG, broker, msg, *args, **kwargs)
+
+ def info(self, broker, msg, *args, **kwargs):
+ self._log(logging.INFO, broker, msg, *args, **kwargs)
+
+ def warning(self, broker, msg, *args, **kwargs):
+ self._log(logging.WARNING, broker, msg, *args, **kwargs)
+
+ def error(self, broker, msg, *args, **kwargs):
+ self._log(logging.ERROR, broker, msg, *args, **kwargs)
+
+ def exception(self, broker, msg, *args, **kwargs):
+ if not self.logger.isEnabledFor(logging.ERROR):
+ return
+ self.logger.exception(self._format_log_msg(broker, msg, *args))
+
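For illustration, the message shape these helpers produce, mirrored with a stand-in broker (the real ``_format_log_msg`` also quotes the path and tolerates brokers whose properties raise):

    class FakeBroker(object):
        db_file = '/srv/node/sda1/containers/0/abc/def/def.db'
        path = 'AUTH_test/container'

    def format_log_msg(broker, msg, *args):
        # simplified mirror of ContainerSharder._format_log_msg above
        if args:
            msg = msg % args
        return '%s, path: %s, db: %s' % (msg, broker.path, broker.db_file)

    print(format_log_msg(FakeBroker(), 'No shard ranges found'))
    # No shard ranges found, path: AUTH_test/container, db: /srv/node/...
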
def _zero_stats(self):
"""Zero out the stats."""
super(ContainerSharder, self)._zero_stats()
@@ -1040,13 +1079,12 @@ class ContainerSharder(ContainerSharderConf, ContainerReplicator):
# container DB, which predicates sharding starting. But s-m-s-r and
# auto-sharding do set epoch and then merge, so we use it to tell
# whether sharding has been taking too long or not.
- self.logger.warning(
- 'Cleaving has not completed in %.2f seconds since %s.'
- ' Container DB file and path: %s (%s), DB state: %s,'
- ' own_shard_range state: %s, state count of shard ranges: %s' %
+ self.warning(
+ broker, 'Cleaving has not completed in %.2f seconds since %s. '
+ 'DB state: %s, own_shard_range state: %s, state count of '
+ 'shard ranges: %s' %
(time.time() - float(own_shard_range.epoch),
- own_shard_range.epoch.isoformat, broker.db_file,
- quote(broker.path), db_state,
+ own_shard_range.epoch.isoformat, db_state,
own_shard_range.state_text, str(state_count)))
def _report_stats(self):
@@ -1127,14 +1165,14 @@ class ContainerSharder(ContainerSharderConf, ContainerReplicator):
'GET', path, headers, acceptable_statuses=(2,),
params=params)
except internal_client.UnexpectedResponse as err:
- self.logger.warning("Failed to get shard ranges from %s: %s",
- quote(broker.root_path), err)
+ self.warning(broker, "Failed to get shard ranges from %s: %s",
+ quote(broker.root_path), err)
return None
record_type = resp.headers.get('x-backend-record-type')
if record_type != 'shard':
err = 'unexpected record type %r' % record_type
- self.logger.error("Failed to get shard ranges from %s: %s",
- quote(broker.root_path), err)
+ self.error(broker, "Failed to get shard ranges from %s: %s",
+ quote(broker.root_path), err)
return None
try:
@@ -1144,32 +1182,33 @@ class ContainerSharder(ContainerSharderConf, ContainerReplicator):
return [ShardRange.from_dict(shard_range)
for shard_range in data]
except (ValueError, TypeError, KeyError) as err:
- self.logger.error(
- "Failed to get shard ranges from %s: invalid data: %r",
- quote(broker.root_path), err)
+ self.error(broker,
+ "Failed to get shard ranges from %s: invalid data: %r",
+ quote(broker.root_path), err)
return None
- def _put_container(self, node, part, account, container, headers, body):
+ def _put_container(self, broker, node, part, account, container, headers,
+ body):
try:
direct_put_container(node, part, account, container,
conn_timeout=self.conn_timeout,
response_timeout=self.node_timeout,
headers=headers, contents=body)
except DirectClientException as err:
- self.logger.warning(
- 'Failed to put shard ranges to %s:%s/%s %s/%s: %s',
- node['ip'], node['port'], node['device'],
- quote(account), quote(container), err.http_status)
+ self.warning(broker,
+ 'Failed to put shard ranges to %s %s/%s: %s',
+ node_to_string(node, replication=True),
+ quote(account), quote(container), err.http_status)
except (Exception, Timeout) as err:
- self.logger.exception(
- 'Failed to put shard ranges to %s:%s/%s %s/%s: %s',
- node['ip'], node['port'], node['device'],
- quote(account), quote(container), err)
+ self.exception(broker,
+ 'Failed to put shard ranges to %s %s/%s: %s',
+ node_to_string(node, replication=True),
+ quote(account), quote(container), err)
else:
return True
return False
- def _send_shard_ranges(self, account, container, shard_ranges,
+ def _send_shard_ranges(self, broker, account, container, shard_ranges,
headers=None):
body = json.dumps([dict(sr, reported=0)
for sr in shard_ranges]).encode('ascii')
@@ -1184,7 +1223,7 @@ class ContainerSharder(ContainerSharderConf, ContainerReplicator):
pool = GreenAsyncPile(len(nodes))
for node in nodes:
- pool.spawn(self._put_container, node, part, account,
+ pool.spawn(self._put_container, broker, node, part, account,
container, headers, body)
results = pool.waitall(None)
@@ -1291,9 +1330,8 @@ class ContainerSharder(ContainerSharderConf, ContainerReplicator):
% broker.db_epoch)
if warnings:
- self.logger.warning(
- 'Audit failed for root %s (%s): %s',
- broker.db_file, quote(broker.path), ', '.join(warnings))
+ self.warning(broker, 'Audit failed for root: %s',
+ ', '.join(warnings))
self._increment_stat('audit_root', 'failure', statsd=True)
return False
@@ -1332,15 +1370,16 @@ class ContainerSharder(ContainerSharderConf, ContainerReplicator):
# it and reload own shard range (note: own_range_from_root may
# not necessarily be 'newer' than the own shard range we
# already have, but merging will get us to the 'newest' state)
- self.logger.debug('Updating own shard range from root')
+ self.debug(broker, 'Updating own shard range from root')
own_shard_range_from_root = shard_range
broker.merge_shard_ranges(own_shard_range_from_root)
orig_own_shard_range = own_shard_range
own_shard_range = broker.get_own_shard_range()
if (orig_own_shard_range != own_shard_range or
orig_own_shard_range.state != own_shard_range.state):
- self.logger.info('Updated own shard range from %s to %s',
- orig_own_shard_range, own_shard_range)
+ self.info(broker,
+ 'Updated own shard range from %s to %s',
+ orig_own_shard_range, own_shard_range)
elif shard_range.is_child_of(own_shard_range):
children_shard_ranges.append(shard_range)
else:
@@ -1351,8 +1390,8 @@ class ContainerSharder(ContainerSharderConf, ContainerReplicator):
# DB is fully cleaved and reaches SHARDED DB state, after which it
# is useful for debugging for the set of sub-shards to which a
# shards has sharded to be frozen.
- self.logger.debug('Updating %d children shard ranges from root',
- len(children_shard_ranges))
+ self.debug(broker, 'Updating %d children shard ranges from root',
+ len(children_shard_ranges))
broker.merge_shard_ranges(children_shard_ranges)
if (other_shard_ranges
@@ -1415,9 +1454,9 @@ class ContainerSharder(ContainerSharderConf, ContainerReplicator):
combined_shard_ranges, own_shard_range)
if not (overlaps or paths_with_gaps):
# only merge if shard ranges appear to be *good*
- self.logger.debug(
- 'Updating %s other shard range(s) from root',
- len(filtered_other_shard_ranges))
+ self.debug(broker,
+ 'Updating %s other shard range(s) from root',
+ len(filtered_other_shard_ranges))
broker.merge_shard_ranges(filtered_other_shard_ranges)
return own_shard_range, own_shard_range_from_root
@@ -1439,8 +1478,7 @@ class ContainerSharder(ContainerSharderConf, ContainerReplicator):
own_shard_range.timestamp < delete_age and
broker.empty()):
broker.delete_db(Timestamp.now().internal)
- self.logger.debug('Marked shard container as deleted %s (%s)',
- broker.db_file, quote(broker.path))
+ self.debug(broker, 'Marked shard container as deleted')
def _do_audit_shard_container(self, broker):
warnings = []
@@ -1451,9 +1489,8 @@ class ContainerSharder(ContainerSharderConf, ContainerReplicator):
own_shard_range = broker.get_own_shard_range(no_default=True)
if not own_shard_range:
- self.logger.warning('Audit failed for shard %s (%s) - skipping: '
- 'missing own shard range',
- broker.db_file, quote(broker.path))
+ self.warning(broker, 'Audit failed for shard: missing own shard '
+ 'range (skipping)')
return False, warnings
# Get the root view of the world, at least that part of the world
@@ -1492,9 +1529,8 @@ class ContainerSharder(ContainerSharderConf, ContainerReplicator):
self._increment_stat('audit_shard', 'attempted')
success, warnings = self._do_audit_shard_container(broker)
if warnings:
- self.logger.warning(
- 'Audit warnings for shard %s (%s): %s',
- broker.db_file, quote(broker.path), ', '.join(warnings))
+ self.warning(broker, 'Audit warnings for shard: %s',
+ ', '.join(warnings))
self._increment_stat(
'audit_shard', 'success' if success else 'failure', statsd=True)
return success
@@ -1513,9 +1549,8 @@ class ContainerSharder(ContainerSharderConf, ContainerReplicator):
if broker.is_deleted():
if broker.is_old_enough_to_reclaim(time.time(), self.reclaim_age) \
and not broker.is_empty_enough_to_reclaim():
- self.logger.warning(
- 'Reclaimable db stuck waiting for shrinking: %s (%s)',
- broker.db_file, quote(broker.path))
+ self.warning(broker,
+ 'Reclaimable db stuck waiting for shrinking')
# if the container has been marked as deleted, all metadata will
# have been erased so no point auditing. But we want it to pass, in
# case any objects exist inside it.
@@ -1525,106 +1560,115 @@ class ContainerSharder(ContainerSharderConf, ContainerReplicator):
return self._audit_root_container(broker)
return self._audit_shard_container(broker)
- def yield_objects(self, broker, src_shard_range, since_row=None):
+ def yield_objects(self, broker, src_shard_range, since_row=None,
+ batch_size=None):
"""
- Iterates through all objects in ``src_shard_range`` in name order
- yielding them in lists of up to CONTAINER_LISTING_LIMIT length. Both
- deleted and undeleted objects are included.
+ Iterates through all object rows in ``src_shard_range`` in name order
+        yielding them in lists of up to ``batch_size`` rows. All batches
+ of rows that are not marked deleted are yielded before all batches of
+ rows that are marked deleted.
:param broker: A :class:`~swift.container.backend.ContainerBroker`.
:param src_shard_range: A :class:`~swift.common.utils.ShardRange`
describing the source range.
- :param since_row: include only items whose ROWID is greater than
- the given row id; by default all rows are included.
- :return: a generator of tuples of (list of objects, broker info dict)
+ :param since_row: include only object rows whose ROWID is greater than
+ the given row id; by default all object rows are included.
+ :param batch_size: The maximum number of object rows to include in each
+ yielded batch; defaults to cleave_row_batch_size.
+ :return: a generator of tuples of (list of rows, broker info dict)
"""
- marker = src_shard_range.lower_str
- while True:
- info = broker.get_info()
- info['max_row'] = broker.get_max_row()
- start = time.time()
- objects = broker.get_objects(
- self.cleave_row_batch_size,
- marker=marker,
- end_marker=src_shard_range.end_marker,
- include_deleted=None, # give me everything
- since_row=since_row)
- if objects:
- self.logger.debug('got %s objects from %s in %ss',
- len(objects), broker.db_file,
- time.time() - start)
- yield objects, info
-
- if len(objects) < self.cleave_row_batch_size:
- break
- marker = objects[-1]['name']
+ if (src_shard_range.lower == ShardRange.MAX or
+ src_shard_range.upper == ShardRange.MIN):
+ # this is an unexpected condition but handled with an early return
+ # just in case, because:
+ # lower == ShardRange.MAX -> marker == ''
+ # which could result in rows being erroneously yielded.
+ return
+
+ batch_size = batch_size or self.cleave_row_batch_size
+ for include_deleted in (False, True):
+ marker = src_shard_range.lower_str
+ while True:
+ info = broker.get_info()
+ info['max_row'] = broker.get_max_row()
+ start = time.time()
+ objects = broker.get_objects(
+ limit=batch_size,
+ marker=marker,
+ end_marker=src_shard_range.end_marker,
+ include_deleted=include_deleted,
+ since_row=since_row)
+ self.debug(broker, 'got %s rows (deleted=%s) in %ss',
+ len(objects), include_deleted, time.time() - start)
+ if objects:
+ yield objects, info
+
+ if len(objects) < batch_size:
+ break
+ marker = objects[-1]['name']
def yield_objects_to_shard_range(self, broker, src_shard_range,
dest_shard_ranges):
"""
- Iterates through all objects in ``src_shard_range`` to place them in
- destination shard ranges provided by the ``next_shard_range`` function.
- Yields tuples of (object list, destination shard range in which those
- objects belong). Note that the same destination shard range may be
- referenced in more than one yielded tuple.
+ Iterates through all object rows in ``src_shard_range`` to place them
+ in destination shard ranges provided by the ``dest_shard_ranges``
+ function. Yields tuples of ``(batch of object rows, destination shard
+ range in which those object rows belong, broker info)``.
+
+ If no destination shard range exists for a batch of object rows then
+ tuples are yielded of ``(batch of object rows, None, broker info)``.
+ This indicates to the caller that there are a non-zero number of object
+ rows for which no destination shard range was found.
+
+ Note that the same destination shard range may be referenced in more
+ than one yielded tuple.
:param broker: A :class:`~swift.container.backend.ContainerBroker`.
:param src_shard_range: A :class:`~swift.common.utils.ShardRange`
describing the source range.
:param dest_shard_ranges: A function which should return a list of
- destination shard ranges in name order.
- :return: a generator of tuples of
- (object list, shard range, broker info dict)
+ destination shard ranges sorted in the order defined by
+ :meth:`~swift.common.utils.ShardRange.sort_key`.
+ :return: a generator of tuples of ``(object row list, shard range,
+ broker info dict)`` where ``shard_range`` may be ``None``.
"""
- dest_shard_range_iter = dest_shard_range = None
- for objs, info in self.yield_objects(broker, src_shard_range):
- if not objs:
- return
+ # calling dest_shard_ranges() may result in a request to fetch shard
+ # ranges, so first check that the broker actually has misplaced object
+ # rows in the source namespace
+ for _ in self.yield_objects(broker, src_shard_range, batch_size=1):
+ break
+ else:
+ return
- def next_or_none(it):
- try:
- return next(it)
- except StopIteration:
- return None
-
- if dest_shard_range_iter is None:
- dest_shard_range_iter = iter(dest_shard_ranges())
- dest_shard_range = next_or_none(dest_shard_range_iter)
-
- unplaced = False
- last_index = next_index = 0
- for obj in objs:
- if dest_shard_range is None:
- # no more destinations: yield remainder of batch and bail
- # NB there may be more batches of objects but none of them
- # will be placed so no point fetching them
- yield objs[last_index:], None, info
- return
- if obj['name'] <= dest_shard_range.lower:
- unplaced = True
- elif unplaced:
- # end of run of unplaced objects, yield them
- yield objs[last_index:next_index], None, info
- last_index = next_index
- unplaced = False
- while (dest_shard_range and
- obj['name'] > dest_shard_range.upper):
- if next_index != last_index:
- # yield the objects in current dest_shard_range
- yield (objs[last_index:next_index],
- dest_shard_range,
- info)
- last_index = next_index
- dest_shard_range = next_or_none(dest_shard_range_iter)
- next_index += 1
-
- if next_index != last_index:
- # yield tail of current batch of objects
- # NB there may be more objects for the current
- # dest_shard_range in the next batch from yield_objects
- yield (objs[last_index:next_index],
- None if unplaced else dest_shard_range,
- info)
+ dest_shard_range_iter = iter(dest_shard_ranges())
+ src_shard_range_marker = src_shard_range.lower
+ for dest_shard_range in dest_shard_range_iter:
+ if dest_shard_range.upper <= src_shard_range.lower:
+ continue
+
+ if dest_shard_range.lower > src_shard_range_marker:
+ # no destination for a sub-namespace of the source namespace
+ sub_src_range = src_shard_range.copy(
+ lower=src_shard_range_marker, upper=dest_shard_range.lower)
+ for objs, info in self.yield_objects(broker, sub_src_range):
+ yield objs, None, info
+
+ sub_src_range = src_shard_range.copy(
+ lower=max(dest_shard_range.lower, src_shard_range.lower),
+ upper=min(dest_shard_range.upper, src_shard_range.upper))
+ for objs, info in self.yield_objects(broker, sub_src_range):
+ yield objs, dest_shard_range, info
+
+ src_shard_range_marker = dest_shard_range.upper
+ if dest_shard_range.upper >= src_shard_range.upper:
+ # the entire source namespace has been traversed
+ break
+ else:
+        # dest_shard_range_iter was exhausted before reaching the end of
+ # the source namespace
+ sub_src_range = src_shard_range.copy(lower=src_shard_range_marker)
+ for objs, info in self.yield_objects(broker, sub_src_range):
+ yield objs, None, info
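To illustrate the traversal: if the source range spans ('', 'z') and ``dest_shard_ranges()`` yields ranges covering ('', 'g') and ('m', ''), then rows up to 'g' are yielded with the first destination, rows in the gap ('g', 'm') are yielded with a ``None`` destination, and rows from 'm' onwards are yielded with the second destination (bounds invented for the example).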
def _post_replicate_hook(self, broker, info, responses):
# override superclass behaviour
@@ -1636,9 +1680,9 @@ class ContainerSharder(ContainerSharderConf, ContainerReplicator):
part, dest_broker.db_file, node_id)
quorum = quorum_size(self.ring.replica_count)
if not success and responses.count(True) < quorum:
- self.logger.warning(
- 'Failed to sufficiently replicate misplaced objects: %s in %s '
- '(not removing)', dest_shard_range, quote(broker.path))
+ self.warning(broker, 'Failed to sufficiently replicate misplaced '
+ 'objects to %s (not removing)',
+ dest_shard_range)
return False
if broker.get_info()['id'] != info['id']:
@@ -1656,9 +1700,9 @@ class ContainerSharder(ContainerSharderConf, ContainerReplicator):
success = True
if not success:
- self.logger.warning(
- 'Refused to remove misplaced objects: %s in %s',
- dest_shard_range, quote(broker.path))
+ self.warning(broker,
+ 'Refused to remove misplaced objects for dest %s',
+ dest_shard_range)
return success
def _move_objects(self, src_broker, src_shard_range, policy_index,
@@ -1676,8 +1720,8 @@ class ContainerSharder(ContainerSharderConf, ContainerReplicator):
continue
if dest_shard_range.name == src_broker.path:
- self.logger.debug(
- 'Skipping source as misplaced objects destination')
+ self.debug(src_broker,
+ 'Skipping source as misplaced objects destination')
# in shrinking context, the misplaced objects might actually be
# correctly placed if the root has expanded this shard but this
# broker has not yet been updated
@@ -1702,14 +1746,14 @@ class ContainerSharder(ContainerSharderConf, ContainerReplicator):
placed += len(objs)
if unplaced:
- self.logger.warning(
- 'Failed to find destination for at least %s misplaced objects '
- 'in %s', unplaced, quote(src_broker.path))
+ self.warning(src_broker, 'Failed to find destination for at least '
+ '%s misplaced objects', unplaced)
# TODO: consider executing the replication jobs concurrently
for dest_shard_range, dest_args in dest_brokers.items():
- self.logger.debug('moving misplaced objects found in range %s' %
- dest_shard_range)
+ self.debug(src_broker,
+ 'moving misplaced objects found in range %s',
+ dest_shard_range)
success &= self._replicate_and_delete(
src_broker, dest_shard_range, **dest_args)
@@ -1789,8 +1833,7 @@ class ContainerSharder(ContainerSharderConf, ContainerReplicator):
:return: True if all misplaced objects were sufficiently replicated to
their correct shard containers, False otherwise
"""
- self.logger.debug('Looking for misplaced objects in %s (%s)',
- quote(broker.path), broker.db_file)
+ self.debug(broker, 'Looking for misplaced objects')
self._increment_stat('misplaced', 'attempted')
src_broker = src_broker or broker
if src_bounds is None:
@@ -1798,7 +1841,7 @@ class ContainerSharder(ContainerSharderConf, ContainerReplicator):
# (ab)use ShardRange instances to encapsulate source namespaces
src_ranges = [ShardRange('dont/care', Timestamp.now(), lower, upper)
for lower, upper in src_bounds]
- self.logger.debug('misplaced object source bounds %s' % src_bounds)
+ self.debug(broker, 'misplaced object source bounds %s', src_bounds)
policy_index = broker.storage_policy_index
success = True
num_placed = num_unplaced = 0
@@ -1814,11 +1857,11 @@ class ContainerSharder(ContainerSharderConf, ContainerReplicator):
# the found stat records the number of DBs in which any misplaced
# rows were found, not the total number of misplaced rows
self._increment_stat('misplaced', 'found', statsd=True)
- self.logger.debug('Placed %s misplaced objects (%s unplaced)',
- num_placed, num_unplaced)
+ self.debug(broker, 'Placed %s misplaced objects (%s unplaced)',
+ num_placed, num_unplaced)
self._increment_stat('misplaced', 'success' if success else 'failure',
statsd=True)
- self.logger.debug('Finished handling misplaced objects')
+ self.debug(broker, 'Finished handling misplaced objects')
return success
def _find_shard_ranges(self, broker):
@@ -1834,12 +1877,10 @@ class ContainerSharder(ContainerSharderConf, ContainerReplicator):
own_shard_range = broker.get_own_shard_range()
shard_ranges = broker.get_shard_ranges()
if shard_ranges and shard_ranges[-1].upper >= own_shard_range.upper:
- self.logger.debug('Scan for shard ranges already completed for %s',
- quote(broker.path))
+ self.debug(broker, 'Scan for shard ranges already completed')
return 0
- self.logger.info('Starting scan for shard ranges on %s',
- quote(broker.path))
+ self.info(broker, 'Starting scan for shard ranges')
self._increment_stat('scanned', 'attempted')
start = time.time()
@@ -1851,11 +1892,11 @@ class ContainerSharder(ContainerSharderConf, ContainerReplicator):
if not shard_data:
if last_found:
- self.logger.info("Already found all shard ranges")
+ self.info(broker, "Already found all shard ranges")
self._increment_stat('scanned', 'success', statsd=True)
else:
# we didn't find anything
- self.logger.warning("No shard ranges found")
+ self.warning(broker, "No shard ranges found")
self._increment_stat('scanned', 'failure', statsd=True)
return 0
@@ -1863,14 +1904,14 @@ class ContainerSharder(ContainerSharderConf, ContainerReplicator):
broker, shard_data, self.shards_account_prefix)
broker.merge_shard_ranges(shard_ranges)
num_found = len(shard_ranges)
- self.logger.info(
- "Completed scan for shard ranges: %d found", num_found)
+ self.info(broker, "Completed scan for shard ranges: %d found",
+ num_found)
self._update_stat('scanned', 'found', step=num_found)
self._min_stat('scanned', 'min_time', round(elapsed / num_found, 3))
self._max_stat('scanned', 'max_time', round(elapsed / num_found, 3))
if last_found:
- self.logger.info("Final shard range reached.")
+ self.info(broker, "Final shard range reached.")
self._increment_stat('scanned', 'success', statsd=True)
return num_found
@@ -1897,16 +1938,15 @@ class ContainerSharder(ContainerSharderConf, ContainerReplicator):
# may think they are in fact roots, but it cleans up well enough
# once everyone's upgraded.
success = self._send_shard_ranges(
- shard_range.account, shard_range.container,
+ broker, shard_range.account, shard_range.container,
[shard_range], headers=headers)
if success:
- self.logger.debug('PUT new shard range container for %s',
- shard_range)
+ self.debug(broker, 'PUT new shard range container for %s',
+ shard_range)
self._increment_stat('created', 'success', statsd=True)
else:
- self.logger.error(
- 'PUT of new shard container %r failed for %s.',
- shard_range, quote(broker.path))
+ self.error(broker, 'PUT of new shard container %r failed',
+ shard_range)
self._increment_stat('created', 'failure', statsd=True)
# break, not continue, because elsewhere it is assumed that
# finding and cleaving shard ranges progresses linearly, so we
@@ -1918,12 +1958,10 @@ class ContainerSharder(ContainerSharderConf, ContainerReplicator):
if created_ranges:
broker.merge_shard_ranges(created_ranges)
if not broker.is_root_container():
- self._send_shard_ranges(
- broker.root_account, broker.root_container, created_ranges)
- self.logger.info(
- "Completed creating shard range containers: %d created, "
- "from sharding container %s",
- len(created_ranges), quote(broker.path))
+ self._send_shard_ranges(broker, broker.root_account,
+ broker.root_container, created_ranges)
+ self.info(broker, "Completed creating %d shard range containers",
+ len(created_ranges))
return len(created_ranges)
def _cleave_shard_broker(self, broker, cleaving_context, shard_range,
@@ -1949,8 +1987,8 @@ class ContainerSharder(ContainerSharderConf, ContainerReplicator):
since_row=sync_from_row):
shard_broker.merge_items(objects)
if objects is None:
- self.logger.info("Cleaving '%s': %r - zero objects found",
- quote(broker.path), shard_range)
+ self.info(broker, "Cleaving %r - zero objects found",
+ shard_range)
if shard_broker.get_info()['put_timestamp'] == put_timestamp:
# This was just created; don't need to replicate this
# SR because there was nothing there. So cleanup and
@@ -1973,8 +2011,8 @@ class ContainerSharder(ContainerSharderConf, ContainerReplicator):
[{'sync_point': source_max_row, 'remote_id': source_db_id}] +
source_broker.get_syncs())
else:
- self.logger.debug("Cleaving '%s': %r - shard db already in sync",
- quote(broker.path), shard_range)
+ self.debug(broker, "Cleaving %r - shard db already in sync",
+ shard_range)
replication_quorum = self.existing_shard_replication_quorum
if own_shard_range.state in ShardRange.SHRINKING_STATES:
@@ -2009,9 +2047,8 @@ class ContainerSharder(ContainerSharderConf, ContainerReplicator):
if result == CLEAVE_EMPTY:
self.delete_db(shard_broker)
else: # result == CLEAVE_SUCCESS:
- self.logger.info(
- 'Replicating new shard container %s for %s',
- quote(shard_broker.path), own_shard_range)
+ self.info(broker, 'Replicating new shard container %s for %s',
+ quote(shard_broker.path), own_shard_range)
success, responses = self._replicate_object(
shard_part, shard_broker.db_file, node_id)
@@ -2022,20 +2059,18 @@ class ContainerSharder(ContainerSharderConf, ContainerReplicator):
# insufficient replication or replication not even attempted;
# break because we don't want to progress the cleave cursor
# until each shard range has been successfully cleaved
- self.logger.warning(
- 'Failed to sufficiently replicate cleaved shard %s for %s:'
- ' %s successes, %s required.', shard_range,
- quote(broker.path),
- replication_successes, replication_quorum)
+ self.warning(broker,
+ 'Failed to sufficiently replicate cleaved shard '
+ '%s: %s successes, %s required', shard_range,
+ replication_successes, replication_quorum)
self._increment_stat('cleaved', 'failure', statsd=True)
result = CLEAVE_FAILED
else:
elapsed = round(time.time() - start, 3)
self._min_stat('cleaved', 'min_time', elapsed)
self._max_stat('cleaved', 'max_time', elapsed)
- self.logger.info(
- 'Cleaved %s for shard range %s in %gs.',
- quote(broker.path), shard_range, elapsed)
+ self.info(broker, 'Cleaved %s in %gs', shard_range,
+ elapsed)
self._increment_stat('cleaved', 'success', statsd=True)
if result in (CLEAVE_SUCCESS, CLEAVE_EMPTY):
@@ -2049,10 +2084,9 @@ class ContainerSharder(ContainerSharderConf, ContainerReplicator):
def _cleave_shard_range(self, broker, cleaving_context, shard_range,
own_shard_range):
- self.logger.info("Cleaving '%s' from row %s into %s for %r",
- quote(broker.path),
- cleaving_context.last_cleave_to_row,
- quote(shard_range.name), shard_range)
+ self.info(broker, "Cleaving from row %s into %s for %r",
+ cleaving_context.last_cleave_to_row,
+ quote(shard_range.name), shard_range)
self._increment_stat('cleaved', 'attempted')
policy_index = broker.storage_policy_index
shard_part, shard_broker, node_id, put_timestamp = \
@@ -2068,8 +2102,7 @@ class ContainerSharder(ContainerSharderConf, ContainerReplicator):
# Returns True if misplaced objects have been moved and the entire
# container namespace has been successfully cleaved, False otherwise
if broker.is_sharded():
- self.logger.debug('Passing over already sharded container %s',
- quote(broker.path))
+ self.debug(broker, 'Passing over already sharded container')
return True
cleaving_context = CleavingContext.load(broker)
@@ -2077,9 +2110,8 @@ class ContainerSharder(ContainerSharderConf, ContainerReplicator):
# ensure any misplaced objects in the source broker are moved; note
         # that this invocation of _move_misplaced_objects is targeted at
# the *retiring* db.
- self.logger.debug(
- 'Moving any misplaced objects from sharding container: %s',
- quote(broker.path))
+ self.debug(broker,
+ 'Moving any misplaced objects from sharding container')
bounds = self._make_default_misplaced_object_bounds(broker)
cleaving_context.misplaced_done = self._move_misplaced_objects(
broker, src_broker=broker.get_brokers()[0],
@@ -2087,8 +2119,7 @@ class ContainerSharder(ContainerSharderConf, ContainerReplicator):
cleaving_context.store(broker)
if cleaving_context.cleaving_done:
- self.logger.debug('Cleaving already complete for container %s',
- quote(broker.path))
+ self.debug(broker, 'Cleaving already complete for container')
return cleaving_context.misplaced_done
shard_ranges = broker.get_shard_ranges(marker=cleaving_context.marker)
@@ -2103,25 +2134,23 @@ class ContainerSharder(ContainerSharderConf, ContainerReplicator):
# always update ranges_todo in case shard ranges have changed since
# last visit
cleaving_context.ranges_todo = len(ranges_todo)
- self.logger.debug('Continuing to cleave (%s done, %s todo): %s',
- cleaving_context.ranges_done,
- cleaving_context.ranges_todo,
- quote(broker.path))
+ self.debug(broker, 'Continuing to cleave (%s done, %s todo)',
+ cleaving_context.ranges_done,
+ cleaving_context.ranges_todo)
else:
cleaving_context.start()
own_shard_range = broker.get_own_shard_range()
cleaving_context.cursor = own_shard_range.lower_str
cleaving_context.ranges_todo = len(ranges_todo)
- self.logger.info('Starting to cleave (%s todo): %s',
- cleaving_context.ranges_todo, quote(broker.path))
+ self.info(broker, 'Starting to cleave (%s todo)',
+ cleaving_context.ranges_todo)
own_shard_range = broker.get_own_shard_range(no_default=True)
if own_shard_range is None:
# A default should never be SHRINKING or SHRUNK but because we
# may write own_shard_range back to broker, let's make sure
# it can't be defaulted.
- self.logger.warning('Failed to get own_shard_range for %s',
- quote(broker.path))
+ self.warning(broker, 'Failed to get own_shard_range')
ranges_todo = [] # skip cleaving
ranges_done = []
@@ -2137,14 +2166,14 @@ class ContainerSharder(ContainerSharderConf, ContainerReplicator):
break
if shard_range.lower > cleaving_context.cursor:
- self.logger.info('Stopped cleave at gap: %r - %r' %
- (cleaving_context.cursor, shard_range.lower))
+ self.info(broker, 'Stopped cleave at gap: %r - %r' %
+ (cleaving_context.cursor, shard_range.lower))
break
if shard_range.state not in (ShardRange.CREATED,
ShardRange.CLEAVED,
ShardRange.ACTIVE):
- self.logger.info('Stopped cleave at unready %s', shard_range)
+ self.info(broker, 'Stopped cleave at unready %s', shard_range)
break
cleave_result = self._cleave_shard_range(
@@ -2161,9 +2190,7 @@ class ContainerSharder(ContainerSharderConf, ContainerReplicator):
# that here in case we hit a failure right off the bat or ended loop
# with skipped ranges
cleaving_context.store(broker)
- self.logger.debug(
- 'Cleaved %s shard ranges for %s',
- len(ranges_done), quote(broker.path))
+ self.debug(broker, 'Cleaved %s shard ranges', len(ranges_done))
return (cleaving_context.misplaced_done and
cleaving_context.cleaving_done)
@@ -2178,8 +2205,7 @@ class ContainerSharder(ContainerSharderConf, ContainerReplicator):
# This is more of a belts and braces, not sure we could even
             # get this far without an own_shard_range. But because
# we will be writing own_shard_range back, we need to make sure
- self.logger.warning('Failed to get own_shard_range for %s',
- quote(broker.path))
+ self.warning(broker, 'Failed to get own_shard_range')
return False
own_shard_range.update_meta(0, 0)
if own_shard_range.state in ShardRange.SHRINKING_STATES:
@@ -2200,13 +2226,10 @@ class ContainerSharder(ContainerSharderConf, ContainerReplicator):
if broker.set_sharded_state():
return True
else:
- self.logger.warning(
- 'Failed to remove retiring db file for %s',
- quote(broker.path))
+ self.warning(broker, 'Failed to remove retiring db file')
else:
- self.logger.warning(
- 'Repeat cleaving required for %r with context: %s',
- broker.db_files[0], dict(cleaving_context))
+ self.warning(broker, 'Repeat cleaving required, context: %s',
+ dict(cleaving_context))
cleaving_context.reset()
cleaving_context.store(broker)
@@ -2216,33 +2239,32 @@ class ContainerSharder(ContainerSharderConf, ContainerReplicator):
candidates = find_sharding_candidates(
broker, self.shard_container_threshold, shard_ranges)
if candidates:
- self.logger.debug('Identified %s sharding candidates',
- len(candidates))
+ self.debug(broker, 'Identified %s sharding candidates',
+ len(candidates))
broker.merge_shard_ranges(candidates)
def _find_and_enable_shrinking_candidates(self, broker):
if not broker.is_sharded():
- self.logger.warning('Cannot shrink a not yet sharded container %s',
- quote(broker.path))
+ self.warning(broker, 'Cannot shrink a not yet sharded container')
return
compactible_sequences = find_compactible_shard_sequences(
broker, self.shrink_threshold, self.expansion_limit,
self.max_shrinking, self.max_expanding, include_shrinking=True)
- self.logger.debug('Found %s compactible sequences of length(s) %s' %
- (len(compactible_sequences),
- [len(s) for s in compactible_sequences]))
+ self.debug(broker, 'Found %s compactible sequences of length(s) %s' %
+ (len(compactible_sequences),
+ [len(s) for s in compactible_sequences]))
process_compactible_shard_sequences(broker, compactible_sequences)
own_shard_range = broker.get_own_shard_range()
for sequence in compactible_sequences:
acceptor = sequence[-1]
donors = ShardRangeList(sequence[:-1])
- self.logger.debug(
- 'shrinking %d objects from %d shard ranges into %s in %s' %
- (donors.object_count, len(donors), acceptor, broker.db_file))
+ self.debug(broker,
+ 'shrinking %d objects from %d shard ranges into %s' %
+ (donors.object_count, len(donors), acceptor))
if acceptor.name != own_shard_range.name:
- self._send_shard_ranges(
- acceptor.account, acceptor.container, [acceptor])
+ self._send_shard_ranges(broker, acceptor.account,
+ acceptor.container, [acceptor])
acceptor.increment_meta(donors.object_count, donors.bytes_used)
# Now send a copy of the expanded acceptor, with an updated
# timestamp, to each donor container. This forces each donor to
@@ -2252,8 +2274,8 @@ class ContainerSharder(ContainerSharderConf, ContainerReplicator):
# the acceptor will then update the root to have the deleted donor
# shard range.
for donor in donors:
- self._send_shard_ranges(
- donor.account, donor.container, [donor, acceptor])
+ self._send_shard_ranges(broker, donor.account,
+ donor.container, [donor, acceptor])
def _update_root_container(self, broker):
own_shard_range = broker.get_own_shard_range(no_default=True)
@@ -2266,8 +2288,7 @@ class ContainerSharder(ContainerSharderConf, ContainerReplicator):
# count that is consistent with the current object_count
reclaimer = self._reclaim(broker)
tombstones = reclaimer.get_tombstone_count()
- self.logger.debug('tombstones in %s = %d',
- quote(broker.path), tombstones)
+ self.debug(broker, 'tombstones = %d', tombstones)
# shrinking candidates are found in the root DB so that's the only
# place we need up to date tombstone stats.
own_shard_range.update_tombstones(tombstones)
@@ -2288,25 +2309,23 @@ class ContainerSharder(ContainerSharderConf, ContainerReplicator):
include_own=True,
include_deleted=True)
# send everything
- if self._send_shard_ranges(
- broker.root_account, broker.root_container, shard_ranges,
- {'Referer': quote(broker.path)}):
+ if self._send_shard_ranges(broker, broker.root_account,
+ broker.root_container, shard_ranges,
+ {'Referer': quote(broker.path)}):
# on success, mark ourselves as reported so we don't keep
# hammering the root
own_shard_range.reported = True
broker.merge_shard_ranges(own_shard_range)
- self.logger.debug(
- 'updated root objs=%d, tombstones=%s (%s)',
- own_shard_range.object_count, own_shard_range.tombstones,
- quote(broker.path))
+ self.debug(broker, 'updated root objs=%d, tombstones=%s',
+ own_shard_range.object_count,
+ own_shard_range.tombstones)
def _process_broker(self, broker, node, part):
broker.get_info() # make sure account/container are populated
state = broker.get_db_state()
is_deleted = broker.is_deleted()
- self.logger.debug('Starting processing %s state %s%s',
- quote(broker.path), state,
- ' (deleted)' if is_deleted else '')
+ self.debug(broker, 'Starting processing, state %s%s', state,
+ ' (deleted)' if is_deleted else '')
if not self._audit_container(broker):
return
@@ -2331,18 +2350,17 @@ class ContainerSharder(ContainerSharderConf, ContainerReplicator):
# or manually triggered cleaving.
if broker.set_sharding_state():
state = SHARDING
- self.logger.info('Kick off container cleaving on %s, '
- 'own shard range in state %r',
- quote(broker.path),
- own_shard_range.state_text)
+ self.info(broker, 'Kick off container cleaving, '
+ 'own shard range in state %r',
+ own_shard_range.state_text)
elif is_leader:
if broker.set_sharding_state():
state = SHARDING
else:
- self.logger.debug(
- 'Own shard range in state %r but no shard ranges '
- 'and not leader; remaining unsharded: %s',
- own_shard_range.state_text, quote(broker.path))
+ self.debug(broker,
+ 'Own shard range in state %r but no shard '
+ 'ranges and not leader; remaining unsharded',
+ own_shard_range.state_text)
if state == SHARDING:
if is_leader:
@@ -2364,13 +2382,11 @@ class ContainerSharder(ContainerSharderConf, ContainerReplicator):
if self._complete_sharding(broker):
state = SHARDED
self._increment_stat('visited', 'completed', statsd=True)
- self.logger.info(
- 'Completed cleaving of %s, DB set to sharded state',
- quote(broker.path))
+ self.info(broker, 'Completed cleaving, DB set to sharded '
+ 'state')
else:
- self.logger.info(
- 'Completed cleaving of %s, DB remaining in sharding '
- 'state', quote(broker.path))
+ self.info(broker, 'Completed cleaving, DB remaining in '
+ 'sharding state')
if not broker.is_deleted():
if state == SHARDED and broker.is_root_container():
@@ -2381,9 +2397,9 @@ class ContainerSharder(ContainerSharderConf, ContainerReplicator):
self._find_and_enable_sharding_candidates(broker)
for shard_range in broker.get_shard_ranges(
states=[ShardRange.SHARDING]):
- self._send_shard_ranges(
- shard_range.account, shard_range.container,
- [shard_range])
+ self._send_shard_ranges(broker, shard_range.account,
+ shard_range.container,
+ [shard_range])
if not broker.is_root_container():
# Update the root container with this container's shard range
@@ -2394,9 +2410,9 @@ class ContainerSharder(ContainerSharderConf, ContainerReplicator):
# simultaneously become deleted.
self._update_root_container(broker)
- self.logger.debug('Finished processing %s state %s%s',
- quote(broker.path), broker.get_db_state(),
- ' (deleted)' if is_deleted else '')
+ self.debug(broker,
+ 'Finished processing, state %s%s',
+ broker.get_db_state(), ' (deleted)' if is_deleted else '')
def _one_shard_cycle(self, devices_to_shard, partitions_to_shard):
"""
@@ -2464,15 +2480,14 @@ class ContainerSharder(ContainerSharderConf, ContainerReplicator):
self._increment_stat('visited', 'skipped')
except (Exception, Timeout) as err:
self._increment_stat('visited', 'failure', statsd=True)
- self.logger.exception(
- 'Unhandled exception while processing %s: %s', path, err)
+ self.exception(broker, 'Unhandled exception while processing: '
+ '%s', err)
error = err
try:
self._record_sharding_progress(broker, node, error)
except (Exception, Timeout) as error:
- self.logger.exception(
- 'Unhandled exception while dumping progress for %s: %s',
- path, error)
+ self.exception(broker, 'Unhandled exception while dumping '
+ 'progress: %s', error)
self._periodic_report_stats()
self._report_stats()
diff --git a/swift/proxy/controllers/base.py b/swift/proxy/controllers/base.py
index d7d893b72..758aed72b 100644
--- a/swift/proxy/controllers/base.py
+++ b/swift/proxy/controllers/base.py
@@ -438,6 +438,11 @@ def get_container_info(env, app, swift_source=None):
account = wsgi_to_str(wsgi_account)
container = wsgi_to_str(wsgi_container)
+ # Try to cut through all the layers to the proxy app
+ try:
+ app = app._pipeline_final_app
+ except AttributeError:
+ pass
# Check in environment cache and in memcache (in that order)
info = _get_info_from_caches(app, env, account, container)
@@ -526,6 +531,11 @@ def get_account_info(env, app, swift_source=None):
account = wsgi_to_str(wsgi_account)
+ # Try to cut through all the layers to the proxy app
+ try:
+ app = app._pipeline_final_app
+ except AttributeError:
+ pass
# Check in environment cache and in memcache (in that order)
info = _get_info_from_caches(app, env, account)
@@ -605,7 +615,10 @@ def get_cache_key(account, container=None, obj=None, shard=None):
raise ValueError('Shard cache key requires account and container')
if obj:
raise ValueError('Shard cache key cannot have obj')
- cache_key = 'shard-%s/%s/%s' % (shard, account, container)
+ if shard == 'updating':
+ cache_key = 'shard-%s-v2/%s/%s' % (shard, account, container)
+ else:
+ cache_key = 'shard-%s/%s/%s' % (shard, account, container)
elif obj:
if not (account and container):
raise ValueError('Object cache key requires account and container')
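The versioned key keeps the new namespace-list cache entries from colliding with shard range dicts cached by older proxies. For example (account and container invented):

    from swift.proxy.controllers.base import get_cache_key

    print(get_cache_key('AUTH_test', 'c', shard='updating'))
    # shard-updating-v2/AUTH_test/c -- new, versioned
    print(get_cache_key('AUTH_test', 'c', shard='listing'))
    # shard-listing/AUTH_test/c -- unchanged
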
diff --git a/swift/proxy/controllers/container.py b/swift/proxy/controllers/container.py
index 3803f40d5..4102d652a 100644
--- a/swift/proxy/controllers/container.py
+++ b/swift/proxy/controllers/container.py
@@ -693,7 +693,7 @@ class ContainerController(Controller):
similar to a merge_items REPLICATE request.
Not client facing; internal clients or middlewares must include
- ``X-Backend-Allow-Method: UPDATE`` header to access.
+ ``X-Backend-Allow-Private-Methods: true`` header to access.
"""
container_partition, containers = self.app.container_ring.get_nodes(
self.account_name, self.container_name)
diff --git a/swift/proxy/controllers/obj.py b/swift/proxy/controllers/obj.py
index 974680364..b69631538 100644
--- a/swift/proxy/controllers/obj.py
+++ b/swift/proxy/controllers/obj.py
@@ -48,7 +48,7 @@ from swift.common.utils import (
normalize_delete_at_timestamp, public, get_expirer_container,
document_iters_to_http_response_body, parse_content_range,
quorum_size, reiterate, close_if_possible, safe_json_loads, md5,
- ShardRange, find_shard_range, cache_from_env)
+ ShardRange, find_shard_range, cache_from_env, NamespaceBoundList)
from swift.common.bufferedhttp import http_connect
from swift.common.constraints import check_metadata, check_object_creation
from swift.common import constraints
@@ -278,37 +278,67 @@ class BaseObjectController(Controller):
"""Handler for HTTP HEAD requests."""
return self.GETorHEAD(req)
- def _get_cached_updating_shard_ranges(
+ def _get_cached_updating_namespaces(
self, infocache, memcache, cache_key):
"""
- Fetch cached shard ranges from infocache and memcache.
+ Fetch cached updating namespaces of updating shard ranges from
+ infocache and memcache.
:param infocache: the infocache instance.
:param memcache: an instance of a memcache client,
:class:`swift.common.memcached.MemcacheRing`.
:param cache_key: the cache key for both infocache and memcache.
- :return: a tuple of (list of shard ranges in dict format, cache state)
- """
- cached_ranges = infocache.get(cache_key)
- if cached_ranges:
- cache_state = 'infocache_hit'
+ :return: a tuple of (an instance of NamespaceBoundList, cache state)
+ """
+        # try to get namespaces from infocache first
+ namespace_list = infocache.get(cache_key)
+ if namespace_list:
+ return namespace_list, 'infocache_hit'
+
+        # then try to get them from memcache
+ if not memcache:
+ return None, 'disabled'
+ skip_chance = self.app.container_updating_shard_ranges_skip_cache
+ if skip_chance and random.random() < skip_chance:
+ return None, 'skip'
+ try:
+ namespaces = memcache.get(cache_key, raise_on_error=True)
+ cache_state = 'hit' if namespaces else 'miss'
+ except MemcacheConnectionError:
+ namespaces = None
+ cache_state = 'error'
+
+ if namespaces:
+ if six.PY2:
+                # json.loads() in memcache.get will convert a json 'string'
+                # to 'unicode' on python2; here we cast 'unicode' back to 'str'
+ namespaces = [
+ [lower.encode('utf-8'), name.encode('utf-8')]
+ for lower, name in namespaces]
+ namespace_list = NamespaceBoundList(namespaces)
else:
- if memcache:
- skip_chance = \
- self.app.container_updating_shard_ranges_skip_cache
- if skip_chance and random.random() < skip_chance:
- cache_state = 'skip'
- else:
- try:
- cached_ranges = memcache.get(
- cache_key, raise_on_error=True)
- cache_state = 'hit' if cached_ranges else 'miss'
- except MemcacheConnectionError:
- cache_state = 'error'
- else:
- cache_state = 'disabled'
- cached_ranges = cached_ranges or []
- return cached_ranges, cache_state
+ namespace_list = None
+ return namespace_list, cache_state
+
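A sketch of the compact structure now cached in place of full shard range dicts (names and bounds invented): memcache holds only ``[lower bound, name]`` pairs, from which a ``NamespaceBoundList`` answers point lookups:

    bounds = [
        ['', '.shards_AUTH_test/c-0'],
        ['m', '.shards_AUTH_test/c-1'],
    ]
    # NamespaceBoundList(bounds).get_namespace('apple') -> c-0's namespace
    # NamespaceBoundList(bounds).get_namespace('pear')  -> c-1's namespace
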
+ def _get_update_shard_caching_disabled(self, req, account, container, obj):
+ """
+ Fetch all updating shard ranges for the given root container when
+ all caching is disabled.
+
+ :param req: original Request instance.
+ :param account: account from which shard ranges should be fetched.
+ :param container: container from which shard ranges should be fetched.
+ :param obj: object getting updated.
+ :return: an instance of :class:`swift.common.utils.ShardRange`,
+ or None if the update should go back to the root
+ """
+        # legacy behavior: ask the container server for includes=obj
+ shard_ranges, response = self._get_shard_ranges(
+ req, account, container, states='updating', includes=obj)
+ record_cache_op_metrics(
+ self.logger, 'shard_updating', 'disabled', response)
+ # there will be only one shard range in the list if any
+ return shard_ranges[0] if shard_ranges else None
def _get_update_shard(self, req, account, container, obj):
"""
@@ -327,39 +357,41 @@ class BaseObjectController(Controller):
"""
if not self.app.recheck_updating_shard_ranges:
# caching is disabled
- cache_state = 'disabled'
- # legacy behavior requests container server for includes=obj
- shard_ranges, response = self._get_shard_ranges(
- req, account, container, states='updating', includes=obj)
+ return self._get_update_shard_caching_disabled(
+ req, account, container, obj)
+
+ # caching is enabled, try to get from caches
+ response = None
+ cache_key = get_cache_key(account, container, shard='updating')
+ infocache = req.environ.setdefault('swift.infocache', {})
+ memcache = cache_from_env(req.environ, True)
+ cached_namespaces, cache_state = self._get_cached_updating_namespaces(
+ infocache, memcache, cache_key)
+ if cached_namespaces:
+ # found cached namespaces in either infocache or memcache
+ infocache[cache_key] = cached_namespaces
+ namespace = cached_namespaces.get_namespace(obj)
+ update_shard = ShardRange(
+ name=namespace.name, timestamp=0, lower=namespace.lower,
+ upper=namespace.upper)
else:
- # try to get from cache
- response = None
- cache_key = get_cache_key(account, container, shard='updating')
- infocache = req.environ.setdefault('swift.infocache', {})
- memcache = cache_from_env(req.environ, True)
- (cached_ranges, cache_state
- ) = self._get_cached_updating_shard_ranges(
- infocache, memcache, cache_key)
- if cached_ranges:
- # found cached shard ranges in either infocache or memcache
- infocache[cache_key] = tuple(cached_ranges)
- shard_ranges = [ShardRange.from_dict(shard_range)
- for shard_range in cached_ranges]
- else:
- # pull full set of updating shards from backend
- shard_ranges, response = self._get_shard_ranges(
- req, account, container, states='updating')
- if shard_ranges:
- cached_ranges = [dict(sr) for sr in shard_ranges]
- infocache[cache_key] = tuple(cached_ranges)
- if memcache:
- memcache.set(
- cache_key, cached_ranges,
- time=self.app.recheck_updating_shard_ranges)
-
+ # pull full set of updating shard ranges from backend
+ shard_ranges, response = self._get_shard_ranges(
+ req, account, container, states='updating')
+ if shard_ranges:
+                # store only the list of namespace lower bounds and names
+                # in infocache and memcache
+ cached_namespaces = NamespaceBoundList.parse(
+ shard_ranges)
+ infocache[cache_key] = cached_namespaces
+ if memcache:
+ memcache.set(
+ cache_key, cached_namespaces.bounds,
+ time=self.app.recheck_updating_shard_ranges)
+ update_shard = find_shard_range(obj, shard_ranges or [])
record_cache_op_metrics(
self.logger, 'shard_updating', cache_state, response)
- return find_shard_range(obj, shard_ranges or [])
+ return update_shard
def _get_update_target(self, req, container_info):
# find the sharded container to which we'll send the update
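Rather than caching full shard range dicts, the rewritten path above stores only a NamespaceBoundList, i.e. a sorted list of [lower bound, name] pairs, and rebuilds a minimal ShardRange from the matched namespace. A hedged sketch of the bisection lookup idea, assuming bounds are sorted by lower bound with the first lower bound empty; this illustrates the technique, not Swift's exact implementation:

from bisect import bisect

def find_namespace_name(bounds, obj_name):
    # bounds: [[lower, name], ...] sorted by lower bound; '' sorts first
    # and therefore covers everything below the next bound.
    lowers = [lower for lower, _name in bounds]
    index = bisect(lowers, obj_name) - 1  # rightmost lower <= obj_name
    return bounds[index][1] if index >= 0 else None

bounds = [['', '.shards_a/c-0'], ['m', '.shards_a/c-1']]
assert find_namespace_name(bounds, 'apple') == '.shards_a/c-0'
assert find_namespace_name(bounds, 'zebra') == '.shards_a/c-1'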
diff --git a/test/functional/__init__.py b/test/functional/__init__.py
index a17054cfe..4654dc793 100644
--- a/test/functional/__init__.py
+++ b/test/functional/__init__.py
@@ -33,6 +33,7 @@ from contextlib import closing
from gzip import GzipFile
from shutil import rmtree
from tempfile import mkdtemp
+from unittest import SkipTest
from six.moves.configparser import ConfigParser, NoSectionError
from six.moves import http_client
@@ -47,10 +48,8 @@ from test import get_config, listen_zero
from test.debug_logger import debug_logger
from test.unit import FakeMemcache
# importing skip_if_no_xattrs so that functional tests can grab it from the
-# test.functional namespace. Importing SkipTest so this works under both
-# nose and testr test runners.
+# test.functional namespace.
from test.unit import skip_if_no_xattrs as real_skip_if_no_xattrs
-from test.unit import SkipTest
from swift.common import constraints, utils, ring, storage_policy
from swift.common.ring import Ring
diff --git a/test/functional/s3api/__init__.py b/test/functional/s3api/__init__.py
index 5be7fa118..4993de61d 100644
--- a/test/functional/s3api/__init__.py
+++ b/test/functional/s3api/__init__.py
@@ -17,6 +17,8 @@ import unittest
import traceback
from contextlib import contextmanager
import logging
+from unittest import SkipTest
+
import test.functional as tf
from test.functional.s3api.s3_test_client import (
Connection, get_boto3_conn, tear_down_s3)
@@ -44,8 +46,10 @@ class S3ApiBase(unittest.TestCase):
logging.getLogger('boto').setLevel(logging.DEBUG)
def setUp(self):
+ if not tf.config.get('s3_access_key'):
+ raise SkipTest('no s3api user configured')
if 's3api' not in tf.cluster_info:
- raise tf.SkipTest('s3api middleware is not enabled')
+ raise SkipTest('s3api middleware is not enabled')
if tf.config.get('account'):
user_id = '%s:%s' % (tf.config['account'], tf.config['username'])
else:
@@ -81,8 +85,10 @@ class S3ApiBase(unittest.TestCase):
class S3ApiBaseBoto3(S3ApiBase):
def setUp(self):
+ if not tf.config.get('s3_access_key'):
+ raise SkipTest('no s3api user configured')
if 's3api' not in tf.cluster_info:
- raise tf.SkipTest('s3api middleware is not enabled')
+ raise SkipTest('s3api middleware is not enabled')
try:
self.conn = get_boto3_conn(
tf.config['s3_access_key'], tf.config['s3_secret_key'])
diff --git a/test/functional/s3api/test_acl.py b/test/functional/s3api/test_acl.py
index 7a3d4f98d..46c6298c3 100644
--- a/test/functional/s3api/test_acl.py
+++ b/test/functional/s3api/test_acl.py
@@ -17,6 +17,7 @@ import unittest
import os
import test.functional as tf
from swift.common.middleware.s3api.etree import fromstring
+from unittest import SkipTest
from test.functional.s3api import S3ApiBase
from test.functional.s3api.s3_test_client import Connection
from test.functional.s3api.utils import get_error_code
@@ -37,7 +38,7 @@ class TestS3Acl(S3ApiBase):
self.obj = 'object'
if 's3_access_key3' not in tf.config or \
's3_secret_key3' not in tf.config:
- raise tf.SkipTest(
+ raise SkipTest(
'TestS3Acl requires s3_access_key3 and s3_secret_key3 '
'configured for reduced-access user')
status, headers, body = self.conn.make_request('PUT', self.bucket)
diff --git a/test/functional/s3api/test_bucket.py b/test/functional/s3api/test_bucket.py
index cbc558986..ed0c41111 100644
--- a/test/functional/s3api/test_bucket.py
+++ b/test/functional/s3api/test_bucket.py
@@ -17,6 +17,7 @@ import botocore
import datetime
import unittest
import os
+from unittest import SkipTest
import test.functional as tf
from swift.common.utils import config_true_value
@@ -123,7 +124,7 @@ class TestS3ApiBucket(S3ApiBaseBoto3):
def test_bucket_listing_with_staticweb(self):
if 'staticweb' not in tf.cluster_info:
- raise tf.SkipTest('Staticweb not enabled')
+ raise SkipTest('Staticweb not enabled')
bucket = 'bucket'
resp = self.conn.create_bucket(Bucket=bucket)
@@ -176,7 +177,7 @@ class TestS3ApiBucket(S3ApiBaseBoto3):
if config_true_value(tf.cluster_info['s3api'].get('s3_acl')):
if 's3_access_key2' not in tf.config or \
's3_secret_key2' not in tf.config:
- raise tf.SkipTest(
+ raise SkipTest(
'Cannot test for BucketAlreadyExists with second user; '
'need s3_access_key2 and s3_secret_key2 configured')
@@ -196,8 +197,8 @@ class TestS3ApiBucket(S3ApiBaseBoto3):
def test_put_bucket_error_key3(self):
if 's3_access_key3' not in tf.config or \
's3_secret_key3' not in tf.config:
- raise tf.SkipTest('Cannot test for AccessDenied; need '
- 's3_access_key3 and s3_secret_key3 configured')
+ raise SkipTest('Cannot test for AccessDenied; need '
+ 's3_access_key3 and s3_secret_key3 configured')
self.conn.create_bucket(Bucket='bucket')
# If the user can't create buckets, they shouldn't even know
diff --git a/test/functional/s3api/test_multi_upload.py b/test/functional/s3api/test_multi_upload.py
index 1ff0b5e8b..bdb17689b 100644
--- a/test/functional/s3api/test_multi_upload.py
+++ b/test/functional/s3api/test_multi_upload.py
@@ -52,7 +52,7 @@ class TestS3ApiMultiUpload(S3ApiBase):
def setUp(self):
super(TestS3ApiMultiUpload, self).setUp()
if not tf.cluster_info['s3api'].get('allow_multipart_uploads', False):
- raise tf.SkipTest('multipart upload is not enebled')
+            self.skipTest('multipart upload is not enabled')
self.min_segment_size = int(tf.cluster_info['s3api'].get(
'min_segment_size', 5242880))
diff --git a/test/functional/s3api/test_versioning.py b/test/functional/s3api/test_versioning.py
index bd80a9580..8d783ab61 100644
--- a/test/functional/s3api/test_versioning.py
+++ b/test/functional/s3api/test_versioning.py
@@ -33,8 +33,8 @@ class TestS3ApiVersioning(S3ApiBase):
super(TestS3ApiVersioning, self).setUp()
if 'object_versioning' not in tf.cluster_info:
# Alternatively, maybe we should assert we get 501s...
- raise tf.SkipTest('S3 versioning requires that Swift object '
- 'versioning be enabled')
+ self.skipTest('S3 versioning requires that Swift object '
+ 'versioning be enabled')
status, headers, body = self.conn.make_request('PUT', 'bucket')
self.assertEqual(status, 200)
diff --git a/test/functional/s3api/test_xxe_injection.py b/test/functional/s3api/test_xxe_injection.py
index b046fed75..d26c284a8 100644
--- a/test/functional/s3api/test_xxe_injection.py
+++ b/test/functional/s3api/test_xxe_injection.py
@@ -208,6 +208,9 @@ class TestS3ApiXxeInjection(S3ApiBaseBoto3):
self.assertEqual(204, response_metadata.get('HTTPStatusCode'))
def test_put_bucket_versioning(self):
+ if 'object_versioning' not in tf.cluster_info:
+ raise tf.SkipTest('S3 versioning requires that Swift object '
+ 'versioning be enabled')
self._create_bucket()
url = self._presign_url(
diff --git a/test/functional/test_dlo.py b/test/functional/test_dlo.py
index d12a3654f..1ce3512cd 100644
--- a/test/functional/test_dlo.py
+++ b/test/functional/test_dlo.py
@@ -250,7 +250,7 @@ class TestDlo(Base):
def test_dlo_referer_on_segment_container(self):
if 'username3' not in tf.config:
- raise tf.SkipTest('Requires user 3')
+ self.skipTest('Requires user 3')
# First the account2 (test3) should fail
config2 = tf.config.copy()
config2['username'] = tf.config['username3']
diff --git a/test/functional/test_object.py b/test/functional/test_object.py
index 768de19c0..85b6894d7 100644
--- a/test/functional/test_object.py
+++ b/test/functional/test_object.py
@@ -20,13 +20,14 @@ import json
import unittest
from uuid import uuid4
import time
+from unittest import SkipTest
from xml.dom import minidom
import six
from six.moves import range
from test.functional import check_response, retry, requires_acls, \
- requires_policies, SkipTest, requires_bulk
+ requires_policies, requires_bulk
import test.functional as tf
from swift.common.utils import md5
@@ -941,7 +942,7 @@ class TestObject(unittest.TestCase):
@requires_acls
def test_read_only(self):
if tf.skip3:
- raise tf.SkipTest
+ raise SkipTest
def get_listing(url, token, parsed, conn):
conn.request('GET', '%s/%s' % (parsed.path, self.container), '',
@@ -1542,7 +1543,7 @@ class TestObject(unittest.TestCase):
def put_obj(url, token, parsed, conn, obj):
conn.request(
'PUT', '%s/%s/%s' % (parsed.path, self.container, obj),
- 'test', {'X-Auth-Token': token})
+ 'test', {'X-Auth-Token': token, 'X-Object-Meta-Color': 'red'})
return check_response(conn)
def check_cors(url, token, parsed, conn,
@@ -1576,6 +1577,8 @@ class TestObject(unittest.TestCase):
headers = dict((k.lower(), v) for k, v in resp.getheaders())
self.assertEqual(headers.get('access-control-allow-origin'),
'*')
+ # Just a pre-flight; this doesn't show up yet
+ self.assertNotIn('access-control-expose-headers', headers)
resp = retry(check_cors,
'GET', 'cat', {'Origin': 'http://m.com'})
@@ -1583,6 +1586,8 @@ class TestObject(unittest.TestCase):
headers = dict((k.lower(), v) for k, v in resp.getheaders())
self.assertEqual(headers.get('access-control-allow-origin'),
'*')
+ self.assertIn('x-object-meta-color', headers.get(
+ 'access-control-expose-headers').split(', '))
resp = retry(check_cors,
'GET', 'cat', {'Origin': 'http://m.com',
@@ -1591,6 +1596,8 @@ class TestObject(unittest.TestCase):
headers = dict((k.lower(), v) for k, v in resp.getheaders())
self.assertEqual(headers.get('access-control-allow-origin'),
'*')
+ self.assertIn('x-object-meta-color', headers.get(
+ 'access-control-expose-headers').split(', '))
####################
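The new assertions above hinge on Access-Control-Expose-Headers: it appears only on the actual cross-origin GET (not on the OPTIONS preflight) and is a comma-plus-space separated list naming the extra headers cross-origin JavaScript may read. A tiny illustration of the parsing the test performs; the header value here is made up:

exposed = 'x-object-meta-color, x-trans-id'
assert 'x-object-meta-color' in exposed.split(', ')
assert 'etag' not in exposed.split(', ')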
diff --git a/test/functional/test_object_versioning.py b/test/functional/test_object_versioning.py
index 32eb092c8..4eb91f5f2 100644
--- a/test/functional/test_object_versioning.py
+++ b/test/functional/test_object_versioning.py
@@ -22,6 +22,7 @@ import six
from copy import deepcopy
from six.moves.urllib.parse import quote, unquote
+from unittest import SkipTest
import test.functional as tf
@@ -31,7 +32,7 @@ from swift.common.middleware.versioned_writes.object_versioning import \
DELETE_MARKER_CONTENT_TYPE
from test.functional.tests import Base, Base2, BaseEnv, Utils
-from test.functional import cluster_info, SkipTest
+from test.functional import cluster_info
from test.functional.swift_test_client import Connection, \
ResponseError
from test.functional.test_tempurl import TestContainerTempurlEnv, \
diff --git a/test/functional/test_slo.py b/test/functional/test_slo.py
index 178e12680..7ef6e484c 100644
--- a/test/functional/test_slo.py
+++ b/test/functional/test_slo.py
@@ -19,6 +19,7 @@ import email.parser
import itertools
import json
from copy import deepcopy
+from unittest import SkipTest
import six
@@ -26,7 +27,7 @@ from swift.common.swob import normalize_etag
from swift.common.utils import md5
import test.functional as tf
-from test.functional import cluster_info, SkipTest
+from test.functional import cluster_info
from test.functional.tests import Utils, Base, Base2, BaseEnv
from test.functional.swift_test_client import Connection, ResponseError
diff --git a/test/functional/test_tempurl.py b/test/functional/test_tempurl.py
index 6f442e479..a7b4e8453 100644
--- a/test/functional/test_tempurl.py
+++ b/test/functional/test_tempurl.py
@@ -23,12 +23,13 @@ from copy import deepcopy
import six
from six.moves import urllib
from time import time, strftime, gmtime
+from unittest import SkipTest
import test.functional as tf
from swift.common.middleware import tempurl
from test.functional import cluster_info
from test.functional.tests import Utils, Base, Base2, BaseEnv
-from test.functional import requires_acls, SkipTest
+from test.functional import requires_acls
from test.functional.swift_test_client import Account, Connection, \
ResponseError
diff --git a/test/functional/test_versioned_writes.py b/test/functional/test_versioned_writes.py
index 1dd381af9..1f8023729 100644
--- a/test/functional/test_versioned_writes.py
+++ b/test/functional/test_versioned_writes.py
@@ -20,12 +20,13 @@ import time
import unittest
import six
from six.moves.urllib.parse import quote, unquote
+from unittest import SkipTest
import test.functional as tf
from swift.common.utils import MD5_OF_EMPTY_STRING
from test.functional.tests import Base, Base2, BaseEnv, Utils
-from test.functional import cluster_info, SkipTest
+from test.functional import cluster_info
from test.functional.swift_test_client import Account, Connection, \
ResponseError
@@ -560,44 +561,46 @@ class TestObjectVersioning(Base):
break
self.assertEqual(backup_file.read(), b"never argue with the data")
- # user3 (some random user with no access to any of account1)
- # tries to read from versioned container
- self.assertRaises(ResponseError, backup_file.read,
- cfg={'use_token': self.env.storage_token3})
-
- # create an object user3 can try to copy
- a2_container = self.env.account2.container(Utils.create_name())
- a2_container.create(
- hdrs={'X-Container-Read': self.env.conn3.user_acl},
- cfg={'use_token': self.env.storage_token2})
- a2_obj = a2_container.file(Utils.create_name())
- self.assertTrue(a2_obj.write(b"unused",
- cfg={'use_token': self.env.storage_token2}))
-
- # user3 cannot write, delete, or copy to/from source container either
- number_of_versions = versions_container.info()['object_count']
- self.assertRaises(ResponseError, versioned_obj.write,
- b"some random user trying to write data",
- cfg={'use_token': self.env.storage_token3})
- self.assertEqual(number_of_versions,
- versions_container.info()['object_count'])
- self.assertRaises(ResponseError, versioned_obj.delete,
- cfg={'use_token': self.env.storage_token3})
- self.assertEqual(number_of_versions,
- versions_container.info()['object_count'])
- self.assertRaises(
- ResponseError, versioned_obj.write,
- hdrs={'X-Copy-From': '%s/%s' % (a2_container.name, a2_obj.name),
- 'X-Copy-From-Account': self.env.conn2.account_name},
- cfg={'use_token': self.env.storage_token3})
- self.assertEqual(number_of_versions,
- versions_container.info()['object_count'])
- self.assertRaises(
- ResponseError, a2_obj.copy_account,
- self.env.conn.account_name, container.name, obj_name,
- cfg={'use_token': self.env.storage_token3})
- self.assertEqual(number_of_versions,
- versions_container.info()['object_count'])
+ if not tf.skip3:
+ # user3 (some random user with no access to any of account1)
+ # tries to read from versioned container
+ self.assertRaises(ResponseError, backup_file.read,
+ cfg={'use_token': self.env.storage_token3})
+
+ # create an object user3 can try to copy
+ a2_container = self.env.account2.container(Utils.create_name())
+ a2_container.create(
+ hdrs={'X-Container-Read': self.env.conn3.user_acl},
+ cfg={'use_token': self.env.storage_token2})
+ a2_obj = a2_container.file(Utils.create_name())
+ self.assertTrue(a2_obj.write(b"unused",
+ cfg={'use_token': self.env.storage_token2}))
+
+ # user3 also cannot write, delete, or copy to/from source container
+ number_of_versions = versions_container.info()['object_count']
+ self.assertRaises(ResponseError, versioned_obj.write,
+ b"some random user trying to write data",
+ cfg={'use_token': self.env.storage_token3})
+ self.assertEqual(number_of_versions,
+ versions_container.info()['object_count'])
+ self.assertRaises(ResponseError, versioned_obj.delete,
+ cfg={'use_token': self.env.storage_token3})
+ self.assertEqual(number_of_versions,
+ versions_container.info()['object_count'])
+ self.assertRaises(
+ ResponseError, versioned_obj.write,
+ hdrs={
+ 'X-Copy-From': '%s/%s' % (a2_container.name, a2_obj.name),
+ 'X-Copy-From-Account': self.env.conn2.account_name},
+ cfg={'use_token': self.env.storage_token3})
+ self.assertEqual(number_of_versions,
+ versions_container.info()['object_count'])
+ self.assertRaises(
+ ResponseError, a2_obj.copy_account,
+ self.env.conn.account_name, container.name, obj_name,
+ cfg={'use_token': self.env.storage_token3})
+ self.assertEqual(number_of_versions,
+ versions_container.info()['object_count'])
# user2 can't read or delete from versions-location
self.assertRaises(ResponseError, backup_file.read,
@@ -614,7 +617,8 @@ class TestObjectVersioning(Base):
# tear-down since we create these containers here
# and not in self.env
- a2_container.delete_recursive()
+ if not tf.skip3:
+ a2_container.delete_recursive()
versions_container.delete_recursive()
container.delete_recursive()
diff --git a/test/probe/test_container_failures.py b/test/probe/test_container_failures.py
index 6a3beb104..b965a90d4 100644
--- a/test/probe/test_container_failures.py
+++ b/test/probe/test_container_failures.py
@@ -13,13 +13,15 @@
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-
+import time
from unittest import main
from uuid import uuid4
from eventlet import GreenPool, Timeout
import eventlet
from sqlite3 import connect
+
+from swift.common.manager import Manager
from swiftclient import client
from swift.common import direct_client
@@ -71,6 +73,75 @@ class TestContainerFailures(ReplProbeTest):
self.assertEqual(headers['x-account-object-count'], '1')
self.assertEqual(headers['x-account-bytes-used'], '3')
+ def test_metadata_replicated_with_no_timestamp_update(self):
+ self.maxDiff = None
+ # Create container1
+ container1 = 'container-%s' % uuid4()
+ cpart, cnodes = self.container_ring.get_nodes(self.account, container1)
+ client.put_container(self.url, self.token, container1)
+ Manager(['container-replicator']).once()
+
+ exp_hdrs = None
+ for cnode in cnodes:
+ hdrs = direct_client.direct_head_container(
+ cnode, cpart, self.account, container1)
+ hdrs.pop('Date')
+ if exp_hdrs:
+ self.assertEqual(exp_hdrs, hdrs)
+ exp_hdrs = hdrs
+ self.assertIsNotNone(exp_hdrs)
+ self.assertIn('Last-Modified', exp_hdrs)
+ put_time = float(exp_hdrs['X-Backend-Put-Timestamp'])
+
+ # Post to only one replica of container1 at least 1 second after the
+ # put (to reveal any unexpected change in Last-Modified which is
+ # rounded to seconds)
+ time.sleep(put_time + 1 - time.time())
+ post_hdrs = {'x-container-meta-foo': 'bar',
+ 'x-backend-no-timestamp-update': 'true'}
+ direct_client.direct_post_container(
+ cnodes[1], cpart, self.account, container1, headers=post_hdrs)
+
+ # verify that put_timestamp was not modified
+ exp_hdrs.update({'x-container-meta-foo': 'bar'})
+ hdrs = direct_client.direct_head_container(
+ cnodes[1], cpart, self.account, container1)
+ hdrs.pop('Date')
+ self.assertDictEqual(exp_hdrs, hdrs)
+
+ # Get to a final state
+ Manager(['container-replicator']).once()
+
+ # Assert all container1 servers have consistent metadata
+ for cnode in cnodes:
+ hdrs = direct_client.direct_head_container(
+ cnode, cpart, self.account, container1)
+ hdrs.pop('Date')
+ self.assertDictEqual(exp_hdrs, hdrs)
+
+ # sanity check: verify the put_timestamp is modified without
+ # x-backend-no-timestamp-update
+ post_hdrs = {'x-container-meta-foo': 'baz'}
+ exp_hdrs.update({'x-container-meta-foo': 'baz'})
+ direct_client.direct_post_container(
+ cnodes[1], cpart, self.account, container1, headers=post_hdrs)
+
+ # verify that put_timestamp was modified
+ hdrs = direct_client.direct_head_container(
+ cnodes[1], cpart, self.account, container1)
+ self.assertLess(exp_hdrs['x-backend-put-timestamp'],
+ hdrs['x-backend-put-timestamp'])
+ self.assertNotEqual(exp_hdrs['last-modified'], hdrs['last-modified'])
+ hdrs.pop('Date')
+ for key in ('x-backend-put-timestamp',
+ 'x-put-timestamp',
+ 'last-modified'):
+ self.assertNotEqual(exp_hdrs[key], hdrs[key])
+ exp_hdrs.pop(key)
+ hdrs.pop(key)
+
+ self.assertDictEqual(exp_hdrs, hdrs)
+
def test_two_nodes_fail(self):
# Create container1
container1 = 'container-%s' % uuid4()
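The probe test above drives the new x-backend-no-timestamp-update header: a direct POST carrying it applies container metadata without advancing put_timestamp, so Last-Modified stays stable across replicas. A hedged usage sketch; the node, partition, account and container values are placeholders:

from swift.common import direct_client

node = {'ip': '127.0.0.1', 'port': 6201, 'device': 'sdb1'}
# the metadata is applied, but put_timestamp (and hence Last-Modified)
# is left untouched
direct_client.direct_post_container(
    node, 1, 'AUTH_test', 'container1',
    headers={'x-container-meta-foo': 'bar',
             'x-backend-no-timestamp-update': 'true'})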
diff --git a/test/unit/__init__.py b/test/unit/__init__.py
index 51b3ff952..6f731b70a 100644
--- a/test/unit/__init__.py
+++ b/test/unit/__init__.py
@@ -64,11 +64,7 @@ import functools
from gzip import GzipFile
import mock as mocklib
import inspect
-import unittest
-
-
-class SkipTest(unittest.SkipTest):
- pass
+from unittest import SkipTest
EMPTY_ETAG = md5(usedforsecurity=False).hexdigest()
diff --git a/test/unit/common/middleware/crypto/test_decrypter.py b/test/unit/common/middleware/crypto/test_decrypter.py
index e6d83b78f..16ebf2823 100644
--- a/test/unit/common/middleware/crypto/test_decrypter.py
+++ b/test/unit/common/middleware/crypto/test_decrypter.py
@@ -125,6 +125,7 @@ class TestDecrypterObjectRequests(unittest.TestCase):
resp.headers['X-Object-Sysmeta-Container-Update-Override-Etag'])
self.assertNotIn('X-Object-Sysmeta-Crypto-Body-Meta', resp.headers)
self.assertNotIn('X-Object-Sysmeta-Crypto-Etag', resp.headers)
+ self.assertNotIn('Access-Control-Expose-Headers', resp.headers)
return resp
def test_GET_success(self):
@@ -226,6 +227,7 @@ class TestDecrypterObjectRequests(unittest.TestCase):
self.assertEqual(plaintext_etag, resp.headers['Etag'])
self.assertEqual('text/plain', resp.headers['Content-Type'])
self.assertEqual('encrypt me', resp.headers['x-object-meta-test'])
+ self.assertNotIn('Access-Control-Expose-Headers', resp.headers)
return resp
def test_GET_unencrypted_data_and_encrypted_metadata(self):
@@ -259,6 +261,7 @@ class TestDecrypterObjectRequests(unittest.TestCase):
self.assertEqual(plaintext_etag, resp.headers['Etag'])
self.assertEqual('text/plain', resp.headers['Content-Type'])
self.assertEqual('unencrypted', resp.headers['x-object-meta-test'])
+ self.assertNotIn('Access-Control-Expose-Headers', resp.headers)
return resp
def test_GET_encrypted_data_and_unencrypted_metadata(self):
@@ -271,7 +274,8 @@ class TestDecrypterObjectRequests(unittest.TestCase):
def test_headers_case(self):
body = b'fAkE ApP'
- req = Request.blank('/v1/a/c/o', body='FaKe')
+ req = Request.blank('/v1/a/c/o', body='FaKe', headers={
+ 'Origin': 'http://example.com'})
req.environ[CRYPTO_KEY_CALLBACK] = fetch_crypto_keys
plaintext_etag = md5hex(body)
body_key = os.urandom(32)
@@ -281,7 +285,10 @@ class TestDecrypterObjectRequests(unittest.TestCase):
hdrs.update({
'x-Object-mEta-ignoRes-caSe': 'thIs pArt WilL bE cOol',
+ 'access-control-Expose-Headers': 'x-object-meta-ignores-case',
+ 'access-control-allow-origin': '*',
})
+ self.assertNotIn('x-object-meta-test', [k.lower() for k in hdrs])
self.app.register(
'GET', '/v1/a/c/o', HTTPOk, body=enc_body, headers=hdrs)
@@ -296,6 +303,11 @@ class TestDecrypterObjectRequests(unittest.TestCase):
'X-Object-Meta-Ignores-Case': 'thIs pArt WilL bE cOol',
'X-Object-Sysmeta-Test': 'do not encrypt me',
'Content-Type': 'text/plain',
+ 'Access-Control-Expose-Headers': ', '.join([
+ 'x-object-meta-ignores-case',
+ 'x-object-meta-test',
+ ]),
+ 'Access-Control-Allow-Origin': '*',
}
self.assertEqual(dict(headers), expected)
self.assertEqual(b'fAkE ApP', b''.join(app_iter))
diff --git a/test/unit/common/middleware/test_account_quotas.py b/test/unit/common/middleware/test_account_quotas.py
index 4bf9e6b2d..072bbac40 100644
--- a/test/unit/common/middleware/test_account_quotas.py
+++ b/test/unit/common/middleware/test_account_quotas.py
@@ -13,12 +13,13 @@
import unittest
-from swift.common.swob import Request, wsgify, HTTPForbidden
+from swift.common.swob import Request, wsgify, HTTPForbidden, HTTPOk, \
+ HTTPServiceUnavailable, HTTPNotFound
from swift.common.middleware import account_quotas, copy
-from swift.proxy.controllers.base import get_cache_key, \
- headers_to_account_info, headers_to_object_info
+from test.unit import patch_policies
+from test.unit.common.middleware.helpers import FakeSwift
class FakeCache(object):
@@ -32,46 +33,6 @@ class FakeCache(object):
pass
-class FakeBadApp(object):
- def __init__(self, headers=None):
- if headers is None:
- headers = []
- self.headers = headers
-
- def __call__(self, env, start_response):
- start_response('404 NotFound', self.headers)
- return []
-
-
-class FakeApp(object):
- def __init__(self, headers=None):
- if headers is None:
- headers = []
- self.headers = headers
-
- def __call__(self, env, start_response):
- if 'swift.authorize' in env:
- aresp = env['swift.authorize'](Request(env))
- if aresp:
- return aresp(env, start_response)
- if env['REQUEST_METHOD'] == "HEAD" and \
- env['PATH_INFO'] == '/v1/a/c2/o2':
- cache_key = get_cache_key('a', 'c2', 'o2')
- env.setdefault('swift.infocache', {})[cache_key] = \
- headers_to_object_info(self.headers, 200)
- start_response('200 OK', self.headers)
- elif env['REQUEST_METHOD'] == "HEAD" and \
- env['PATH_INFO'] == '/v1/a/c2/o3':
- start_response('404 Not Found', [])
- else:
- # Cache the account_info (same as a real application)
- cache_key = get_cache_key('a')
- env.setdefault('swift.infocache', {})[cache_key] = \
- headers_to_account_info(self.headers, 200)
- start_response('200 OK', self.headers)
- return []
-
-
class FakeAuthFilter(object):
def __init__(self, app):
@@ -89,9 +50,17 @@ class FakeAuthFilter(object):
class TestAccountQuota(unittest.TestCase):
+ def setUp(self):
+ self.app = FakeSwift()
+ self.app.register('HEAD', '/v1/a', HTTPOk, {
+ 'x-account-bytes-used': '1000'})
+ self.app.register('HEAD', '/v1/a/c', HTTPOk, {
+ 'x-backend-storage-policy-index': '1'})
+ self.app.register('POST', '/v1/a', HTTPOk, {})
+ self.app.register('PUT', '/v1/a/c/o', HTTPOk, {})
+
def test_unauthorized(self):
- headers = [('x-account-bytes-used', '1000'), ]
- app = account_quotas.AccountQuotaMiddleware(FakeApp(headers))
+ app = account_quotas.AccountQuotaMiddleware(self.app)
cache = FakeCache(None)
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT',
@@ -101,8 +70,7 @@ class TestAccountQuota(unittest.TestCase):
self.assertEqual(res.status_int, 200)
def test_no_quotas(self):
- headers = [('x-account-bytes-used', '1000'), ]
- app = account_quotas.AccountQuotaMiddleware(FakeApp(headers))
+ app = account_quotas.AccountQuotaMiddleware(self.app)
cache = FakeCache(None)
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT',
@@ -114,8 +82,7 @@ class TestAccountQuota(unittest.TestCase):
# If you try to set X-Account-Meta-* on an object, it's ignored, so
# the quota middleware shouldn't complain about it even if we're not a
# reseller admin.
- headers = [('x-account-bytes-used', '1000')]
- app = account_quotas.AccountQuotaMiddleware(FakeApp(headers))
+ app = account_quotas.AccountQuotaMiddleware(self.app)
cache = FakeCache(None)
req = Request.blank('/v1/a/c/o',
headers={'X-Account-Meta-Quota-Bytes': '99999'},
@@ -127,8 +94,8 @@ class TestAccountQuota(unittest.TestCase):
def test_container_request_ignores_attempt_to_set_quotas(self):
# As with an object, if you try to set X-Account-Meta-* on a
# container, it's ignored.
- headers = [('x-account-bytes-used', '1000')]
- app = account_quotas.AccountQuotaMiddleware(FakeApp(headers))
+ self.app.register('PUT', '/v1/a/c', HTTPOk, {})
+ app = account_quotas.AccountQuotaMiddleware(self.app)
cache = FakeCache(None)
req = Request.blank('/v1/a/c',
headers={'X-Account-Meta-Quota-Bytes': '99999'},
@@ -140,9 +107,10 @@ class TestAccountQuota(unittest.TestCase):
def test_bogus_quota_is_ignored(self):
# This can happen if the metadata was set by a user prior to the
# activation of the account-quota middleware
- headers = [('x-account-bytes-used', '1000'),
- ('x-account-meta-quota-bytes', 'pasty-plastogene')]
- app = account_quotas.AccountQuotaMiddleware(FakeApp(headers))
+ self.app.register('HEAD', '/v1/a', HTTPOk, {
+ 'x-account-bytes-used': '1000',
+ 'x-account-meta-quota-bytes': 'pasty-plastogene'})
+ app = account_quotas.AccountQuotaMiddleware(self.app)
cache = FakeCache(None)
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT',
@@ -151,9 +119,10 @@ class TestAccountQuota(unittest.TestCase):
self.assertEqual(res.status_int, 200)
def test_exceed_bytes_quota(self):
- headers = [('x-account-bytes-used', '1000'),
- ('x-account-meta-quota-bytes', '0')]
- app = account_quotas.AccountQuotaMiddleware(FakeApp(headers))
+ self.app.register('HEAD', '/v1/a', HTTPOk, {
+ 'x-account-bytes-used': '1000',
+ 'x-account-meta-quota-bytes': '0'})
+ app = account_quotas.AccountQuotaMiddleware(self.app)
cache = FakeCache(None)
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT',
@@ -162,11 +131,53 @@ class TestAccountQuota(unittest.TestCase):
self.assertEqual(res.status_int, 413)
self.assertEqual(res.body, b'Upload exceeds quota.')
+ @patch_policies
+ def test_exceed_per_policy_quota(self):
+ self.app.register('HEAD', '/v1/a', HTTPOk, {
+ 'x-account-bytes-used': '100',
+ 'x-account-storage-policy-unu-bytes-used': '100',
+ 'x-account-sysmeta-quota-bytes-policy-1': '10',
+ 'x-account-meta-quota-bytes': '1000'})
+ app = account_quotas.AccountQuotaMiddleware(self.app)
+ cache = FakeCache(None)
+ req = Request.blank('/v1/a/c/o',
+ environ={'REQUEST_METHOD': 'PUT',
+ 'swift.cache': cache})
+ res = req.get_response(app)
+ self.assertEqual(res.status_int, 413)
+ self.assertEqual(res.body, b'Upload exceeds policy quota.')
+
+ @patch_policies
+ def test_policy_quota_translation(self):
+ def do_test(method):
+ self.app.register(method, '/v1/a', HTTPOk, {
+ 'x-account-bytes-used': '100',
+ 'x-account-storage-policy-unu-bytes-used': '100',
+ 'x-account-sysmeta-quota-bytes-policy-1': '10',
+ 'x-account-meta-quota-bytes': '1000'})
+ app = account_quotas.AccountQuotaMiddleware(self.app)
+ cache = FakeCache(None)
+ req = Request.blank('/v1/a', method=method, environ={
+ 'swift.cache': cache})
+ res = req.get_response(app)
+ self.assertEqual(res.status_int, 200)
+ self.assertEqual(res.headers.get(
+ 'X-Account-Meta-Quota-Bytes'), '1000')
+ self.assertEqual(res.headers.get(
+ 'X-Account-Sysmeta-Quota-Bytes-Policy-1'), '10')
+ self.assertEqual(res.headers.get(
+ 'X-Account-Quota-Bytes-Policy-Unu'), '10')
+ self.assertEqual(res.headers.get(
+ 'X-Account-Storage-Policy-Unu-Bytes-Used'), '100')
+
+ do_test('GET')
+ do_test('HEAD')
+
def test_exceed_quota_not_authorized(self):
- headers = [('x-account-bytes-used', '1000'),
- ('x-account-meta-quota-bytes', '0')]
- app = FakeAuthFilter(
- account_quotas.AccountQuotaMiddleware(FakeApp(headers)))
+ self.app.register('HEAD', '/v1/a', HTTPOk, {
+ 'x-account-bytes-used': '1000',
+ 'x-account-meta-quota-bytes': '0'})
+ app = FakeAuthFilter(account_quotas.AccountQuotaMiddleware(self.app))
cache = FakeCache(None)
req = Request.blank('/v1/a/c/o', method='PUT',
headers={'x-auth-token': 'bad-secret'},
@@ -175,10 +186,10 @@ class TestAccountQuota(unittest.TestCase):
self.assertEqual(res.status_int, 403)
def test_exceed_quota_authorized(self):
- headers = [('x-account-bytes-used', '1000'),
- ('x-account-meta-quota-bytes', '0')]
- app = FakeAuthFilter(
- account_quotas.AccountQuotaMiddleware(FakeApp(headers)))
+ self.app.register('HEAD', '/v1/a', HTTPOk, {
+ 'x-account-bytes-used': '1000',
+ 'x-account-meta-quota-bytes': '0'})
+ app = FakeAuthFilter(account_quotas.AccountQuotaMiddleware(self.app))
cache = FakeCache(None)
req = Request.blank('/v1/a/c/o', method='PUT',
headers={'x-auth-token': 'secret'},
@@ -187,10 +198,10 @@ class TestAccountQuota(unittest.TestCase):
self.assertEqual(res.status_int, 413)
def test_under_quota_not_authorized(self):
- headers = [('x-account-bytes-used', '0'),
- ('x-account-meta-quota-bytes', '1000')]
- app = FakeAuthFilter(
- account_quotas.AccountQuotaMiddleware(FakeApp(headers)))
+ self.app.register('HEAD', '/v1/a', HTTPOk, {
+ 'x-account-bytes-used': '0',
+ 'x-account-meta-quota-bytes': '1000'})
+ app = FakeAuthFilter(account_quotas.AccountQuotaMiddleware(self.app))
cache = FakeCache(None)
req = Request.blank('/v1/a/c/o', method='PUT',
headers={'x-auth-token': 'bad-secret'},
@@ -199,10 +210,10 @@ class TestAccountQuota(unittest.TestCase):
self.assertEqual(res.status_int, 403)
def test_under_quota_authorized(self):
- headers = [('x-account-bytes-used', '0'),
- ('x-account-meta-quota-bytes', '1000')]
- app = FakeAuthFilter(
- account_quotas.AccountQuotaMiddleware(FakeApp(headers)))
+ self.app.register('HEAD', '/v1/a', HTTPOk, {
+ 'x-account-bytes-used': '0',
+ 'x-account-meta-quota-bytes': '1000'})
+ app = FakeAuthFilter(account_quotas.AccountQuotaMiddleware(self.app))
cache = FakeCache(None)
req = Request.blank('/v1/a/c/o', method='PUT',
headers={'x-auth-token': 'secret'},
@@ -211,10 +222,10 @@ class TestAccountQuota(unittest.TestCase):
self.assertEqual(res.status_int, 200)
def test_exceed_quota_bytes_on_empty_account_not_authorized(self):
- headers = [('x-account-bytes-used', '0'),
- ('x-account-meta-quota-bytes', '10')]
- app = FakeAuthFilter(
- account_quotas.AccountQuotaMiddleware(FakeApp(headers)))
+ self.app.register('HEAD', '/v1/a', HTTPOk, {
+ 'x-account-bytes-used': '0',
+ 'x-account-meta-quota-bytes': '10'})
+ app = FakeAuthFilter(account_quotas.AccountQuotaMiddleware(self.app))
cache = FakeCache(None)
req = Request.blank('/v1/a/c/o', method='PUT',
headers={'x-auth-token': 'secret',
@@ -225,10 +236,10 @@ class TestAccountQuota(unittest.TestCase):
self.assertEqual(res.body, b'Upload exceeds quota.')
def test_exceed_quota_bytes_not_authorized(self):
- headers = [('x-account-bytes-used', '100'),
- ('x-account-meta-quota-bytes', '1000')]
- app = FakeAuthFilter(
- account_quotas.AccountQuotaMiddleware(FakeApp(headers)))
+ self.app.register('HEAD', '/v1/a', HTTPOk, {
+ 'x-account-bytes-used': '100',
+ 'x-account-meta-quota-bytes': '1000'})
+ app = FakeAuthFilter(account_quotas.AccountQuotaMiddleware(self.app))
cache = FakeCache(None)
req = Request.blank('/v1/a/c/o', method='PUT',
headers={'x-auth-token': 'secret',
@@ -239,9 +250,11 @@ class TestAccountQuota(unittest.TestCase):
self.assertEqual(res.body, b'Upload exceeds quota.')
def test_over_quota_container_create_still_works(self):
- headers = [('x-account-bytes-used', '1001'),
- ('x-account-meta-quota-bytes', '1000')]
- app = account_quotas.AccountQuotaMiddleware(FakeApp(headers))
+ self.app.register('HEAD', '/v1/a', HTTPOk, {
+ 'x-account-bytes-used': '1001',
+ 'x-account-meta-quota-bytes': '1000'})
+ self.app.register('PUT', '/v1/a/new_container', HTTPOk, {})
+ app = account_quotas.AccountQuotaMiddleware(self.app)
cache = FakeCache(None)
req = Request.blank('/v1/a/new_container',
environ={'REQUEST_METHOD': 'PUT',
@@ -251,9 +264,11 @@ class TestAccountQuota(unittest.TestCase):
self.assertEqual(res.status_int, 200)
def test_over_quota_container_post_still_works(self):
- headers = [('x-account-bytes-used', '1001'),
- ('x-account-meta-quota-bytes', '1000')]
- app = account_quotas.AccountQuotaMiddleware(FakeApp(headers))
+ self.app.register('HEAD', '/v1/a', HTTPOk, {
+ 'x-account-bytes-used': '1001',
+ 'x-account-meta-quota-bytes': '1000'})
+ self.app.register('POST', '/v1/a/new_container', HTTPOk, {})
+ app = account_quotas.AccountQuotaMiddleware(self.app)
cache = FakeCache(None)
req = Request.blank('/v1/a/new_container',
environ={'REQUEST_METHOD': 'POST',
@@ -263,9 +278,11 @@ class TestAccountQuota(unittest.TestCase):
self.assertEqual(res.status_int, 200)
def test_over_quota_obj_post_still_works(self):
- headers = [('x-account-bytes-used', '1001'),
- ('x-account-meta-quota-bytes', '1000')]
- app = account_quotas.AccountQuotaMiddleware(FakeApp(headers))
+ self.app.register('HEAD', '/v1/a', HTTPOk, {
+ 'x-account-bytes-used': '1001',
+ 'x-account-meta-quota-bytes': '1000'})
+ self.app.register('POST', '/v1/a/c/o', HTTPOk, {})
+ app = account_quotas.AccountQuotaMiddleware(self.app)
cache = FakeCache(None)
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'POST',
@@ -275,9 +292,11 @@ class TestAccountQuota(unittest.TestCase):
self.assertEqual(res.status_int, 200)
def test_exceed_bytes_quota_reseller(self):
- headers = [('x-account-bytes-used', '1000'),
- ('x-account-meta-quota-bytes', '0')]
- app = account_quotas.AccountQuotaMiddleware(FakeApp(headers))
+ self.app.register('HEAD', '/v1/a', HTTPOk, {
+ 'x-account-bytes-used': '1000',
+ 'x-account-meta-quota-bytes': '0'})
+ self.app.register('PUT', '/v1/a', HTTPOk, {})
+ app = account_quotas.AccountQuotaMiddleware(self.app)
cache = FakeCache(None)
req = Request.blank('/v1/a',
environ={'REQUEST_METHOD': 'PUT',
@@ -287,10 +306,13 @@ class TestAccountQuota(unittest.TestCase):
self.assertEqual(res.status_int, 200)
def test_exceed_bytes_quota_reseller_copy_from(self):
- headers = [('x-account-bytes-used', '500'),
- ('x-account-meta-quota-bytes', '1000'),
- ('content-length', '1000')]
- app = account_quotas.AccountQuotaMiddleware(FakeApp(headers))
+ self.app.register('HEAD', '/v1/a', HTTPOk, {
+ 'x-account-bytes-used': '500',
+ 'x-account-meta-quota-bytes': '1000'})
+ self.app.register('GET', '/v1/a/c2/o2', HTTPOk, {
+ 'content-length': '1000'}, b'a' * 1000)
+ app = copy.filter_factory({})(
+ account_quotas.AccountQuotaMiddleware(self.app))
cache = FakeCache(None)
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT',
@@ -301,10 +323,13 @@ class TestAccountQuota(unittest.TestCase):
self.assertEqual(res.status_int, 200)
def test_exceed_bytes_quota_reseller_copy_verb(self):
- headers = [('x-account-bytes-used', '500'),
- ('x-account-meta-quota-bytes', '1000'),
- ('content-length', '1000')]
- app = account_quotas.AccountQuotaMiddleware(FakeApp(headers))
+ self.app.register('HEAD', '/v1/a', HTTPOk, {
+ 'x-account-bytes-used': '500',
+ 'x-account-meta-quota-bytes': '1000'})
+ self.app.register('GET', '/v1/a/c2/o2', HTTPOk, {
+ 'content-length': '1000'}, b'a' * 1000)
+ app = copy.filter_factory({})(
+ account_quotas.AccountQuotaMiddleware(self.app))
cache = FakeCache(None)
req = Request.blank('/v1/a/c2/o2',
environ={'REQUEST_METHOD': 'COPY',
@@ -315,8 +340,8 @@ class TestAccountQuota(unittest.TestCase):
self.assertEqual(res.status_int, 200)
def test_bad_application_quota(self):
- headers = []
- app = account_quotas.AccountQuotaMiddleware(FakeBadApp(headers))
+ self.app.register('PUT', '/v1/a/c/o', HTTPNotFound, {})
+ app = account_quotas.AccountQuotaMiddleware(self.app)
cache = FakeCache(None)
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT',
@@ -325,8 +350,7 @@ class TestAccountQuota(unittest.TestCase):
self.assertEqual(res.status_int, 404)
def test_no_info_quota(self):
- headers = []
- app = account_quotas.AccountQuotaMiddleware(FakeApp(headers))
+ app = account_quotas.AccountQuotaMiddleware(self.app)
cache = FakeCache(None)
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT',
@@ -335,9 +359,10 @@ class TestAccountQuota(unittest.TestCase):
self.assertEqual(res.status_int, 200)
def test_not_exceed_bytes_quota(self):
- headers = [('x-account-bytes-used', '1000'),
- ('x-account-meta-quota-bytes', 2000)]
- app = account_quotas.AccountQuotaMiddleware(FakeApp(headers))
+ self.app.register('HEAD', '/v1/a', HTTPOk, {
+ 'x-account-bytes-used': '1000',
+ 'x-account-meta-quota-bytes': '2000'})
+ app = account_quotas.AccountQuotaMiddleware(self.app)
cache = FakeCache(None)
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT',
@@ -346,8 +371,7 @@ class TestAccountQuota(unittest.TestCase):
self.assertEqual(res.status_int, 200)
def test_invalid_quotas(self):
- headers = [('x-account-bytes-used', '0'), ]
- app = account_quotas.AccountQuotaMiddleware(FakeApp(headers))
+ app = account_quotas.AccountQuotaMiddleware(self.app)
cache = FakeCache(None)
req = Request.blank('/v1/a',
environ={'REQUEST_METHOD': 'POST',
@@ -356,10 +380,23 @@ class TestAccountQuota(unittest.TestCase):
'reseller_request': True})
res = req.get_response(app)
self.assertEqual(res.status_int, 400)
+ self.assertEqual(self.app.calls, [])
+
+ @patch_policies
+ def test_invalid_policy_quota(self):
+ app = account_quotas.AccountQuotaMiddleware(self.app)
+ cache = FakeCache(None)
+ req = Request.blank('/v1/a', environ={
+ 'REQUEST_METHOD': 'POST',
+ 'swift.cache': cache,
+ 'HTTP_X_ACCOUNT_QUOTA_BYTES_POLICY_NULO': 'abc',
+ 'reseller_request': True})
+ res = req.get_response(app)
+ self.assertEqual(res.status_int, 400)
+ self.assertEqual(self.app.calls, [])
def test_valid_quotas_admin(self):
- headers = [('x-account-bytes-used', '0'), ]
- app = account_quotas.AccountQuotaMiddleware(FakeApp(headers))
+ app = account_quotas.AccountQuotaMiddleware(self.app)
cache = FakeCache(None)
req = Request.blank('/v1/a',
environ={'REQUEST_METHOD': 'POST',
@@ -367,10 +404,22 @@ class TestAccountQuota(unittest.TestCase):
'HTTP_X_ACCOUNT_META_QUOTA_BYTES': '100'})
res = req.get_response(app)
self.assertEqual(res.status_int, 403)
+ self.assertEqual(self.app.calls, [])
+
+ @patch_policies
+ def test_valid_policy_quota_admin(self):
+ app = account_quotas.AccountQuotaMiddleware(self.app)
+ cache = FakeCache(None)
+ req = Request.blank('/v1/a', environ={
+ 'REQUEST_METHOD': 'POST',
+ 'swift.cache': cache,
+ 'HTTP_X_ACCOUNT_QUOTA_BYTES_POLICY_UNU': '100'})
+ res = req.get_response(app)
+ self.assertEqual(res.status_int, 403)
+ self.assertEqual(self.app.calls, [])
def test_valid_quotas_reseller(self):
- headers = [('x-account-bytes-used', '0'), ]
- app = account_quotas.AccountQuotaMiddleware(FakeApp(headers))
+ app = account_quotas.AccountQuotaMiddleware(self.app)
cache = FakeCache(None)
req = Request.blank('/v1/a',
environ={'REQUEST_METHOD': 'POST',
@@ -379,10 +428,28 @@ class TestAccountQuota(unittest.TestCase):
'reseller_request': True})
res = req.get_response(app)
self.assertEqual(res.status_int, 200)
+ self.assertEqual(self.app.calls_with_headers, [
+ ('POST', '/v1/a', {'Host': 'localhost:80',
+ 'X-Account-Meta-Quota-Bytes': '100'})])
+
+ @patch_policies
+ def test_valid_policy_quota_reseller(self):
+ app = account_quotas.AccountQuotaMiddleware(self.app)
+ cache = FakeCache(None)
+ req = Request.blank('/v1/a', environ={
+ 'REQUEST_METHOD': 'POST',
+ 'swift.cache': cache,
+ 'HTTP_X_ACCOUNT_QUOTA_BYTES_POLICY_NULO': '100',
+ 'reseller_request': True})
+ res = req.get_response(app)
+ self.assertEqual(res.status_int, 200)
+ self.assertEqual(self.app.calls_with_headers, [
+ ('POST', '/v1/a', {
+ 'Host': 'localhost:80',
+ 'X-Account-Sysmeta-Quota-Bytes-Policy-0': '100'})])
def test_delete_quotas(self):
- headers = [('x-account-bytes-used', '0'), ]
- app = account_quotas.AccountQuotaMiddleware(FakeApp(headers))
+ app = account_quotas.AccountQuotaMiddleware(self.app)
cache = FakeCache(None)
req = Request.blank('/v1/a',
environ={'REQUEST_METHOD': 'POST',
@@ -392,8 +459,7 @@ class TestAccountQuota(unittest.TestCase):
self.assertEqual(res.status_int, 403)
def test_delete_quotas_with_remove_header(self):
- headers = [('x-account-bytes-used', '0'), ]
- app = account_quotas.AccountQuotaMiddleware(FakeApp(headers))
+ app = account_quotas.AccountQuotaMiddleware(self.app)
cache = FakeCache(None)
req = Request.blank('/v1/a', environ={
'REQUEST_METHOD': 'POST',
@@ -403,8 +469,7 @@ class TestAccountQuota(unittest.TestCase):
self.assertEqual(res.status_int, 403)
def test_delete_quotas_reseller(self):
- headers = [('x-account-bytes-used', '0'), ]
- app = account_quotas.AccountQuotaMiddleware(FakeApp(headers))
+ app = account_quotas.AccountQuotaMiddleware(self.app)
req = Request.blank('/v1/a',
environ={'REQUEST_METHOD': 'POST',
'HTTP_X_ACCOUNT_META_QUOTA_BYTES': '',
@@ -413,8 +478,7 @@ class TestAccountQuota(unittest.TestCase):
self.assertEqual(res.status_int, 200)
def test_delete_quotas_with_remove_header_reseller(self):
- headers = [('x-account-bytes-used', '0'), ]
- app = account_quotas.AccountQuotaMiddleware(FakeApp(headers))
+ app = account_quotas.AccountQuotaMiddleware(self.app)
cache = FakeCache(None)
req = Request.blank('/v1/a', environ={
'REQUEST_METHOD': 'POST',
@@ -425,29 +489,32 @@ class TestAccountQuota(unittest.TestCase):
self.assertEqual(res.status_int, 200)
def test_invalid_request_exception(self):
- headers = [('x-account-bytes-used', '1000'), ]
- app = account_quotas.AccountQuotaMiddleware(FakeApp(headers))
+ self.app.register('PUT', '/v1', HTTPServiceUnavailable, {})
+ app = account_quotas.AccountQuotaMiddleware(self.app)
cache = FakeCache(None)
req = Request.blank('/v1',
environ={'REQUEST_METHOD': 'PUT',
'swift.cache': cache})
res = req.get_response(app)
- # Response code of 200 because authentication itself is not done here
- self.assertEqual(res.status_int, 200)
+ self.assertEqual(res.status_int, 503)
class AccountQuotaCopyingTestCases(unittest.TestCase):
def setUp(self):
- self.app = FakeApp()
+ self.headers = []
+ self.app = FakeSwift()
+ self.app.register('HEAD', '/v1/a', HTTPOk, self.headers)
+ self.app.register('HEAD', '/v1/a/c', HTTPOk, {
+ 'x-backend-storage-policy-index': '1'})
+ self.app.register('GET', '/v1/a/c2/o2', HTTPOk, {
+ 'content-length': '1000'})
self.aq_filter = account_quotas.filter_factory({})(self.app)
self.copy_filter = copy.filter_factory({})(self.aq_filter)
def test_exceed_bytes_quota_copy_from(self):
- headers = [('x-account-bytes-used', '500'),
- ('x-account-meta-quota-bytes', '1000'),
- ('content-length', '1000')]
- self.app.headers = headers
+ self.headers[:] = [('x-account-bytes-used', '500'),
+ ('x-account-meta-quota-bytes', '1000')]
cache = FakeCache(None)
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT',
@@ -458,10 +525,8 @@ class AccountQuotaCopyingTestCases(unittest.TestCase):
self.assertEqual(res.body, b'Upload exceeds quota.')
def test_exceed_bytes_quota_copy_verb(self):
- headers = [('x-account-bytes-used', '500'),
- ('x-account-meta-quota-bytes', '1000'),
- ('content-length', '1000')]
- self.app.headers = headers
+ self.headers[:] = [('x-account-bytes-used', '500'),
+ ('x-account-meta-quota-bytes', '1000')]
cache = FakeCache(None)
req = Request.blank('/v1/a/c2/o2',
environ={'REQUEST_METHOD': 'COPY',
@@ -472,10 +537,9 @@ class AccountQuotaCopyingTestCases(unittest.TestCase):
self.assertEqual(res.body, b'Upload exceeds quota.')
def test_not_exceed_bytes_quota_copy_from(self):
- headers = [('x-account-bytes-used', '0'),
- ('x-account-meta-quota-bytes', '1000'),
- ('content-length', '1000')]
- self.app.headers = headers
+ self.app.register('PUT', '/v1/a/c/o', HTTPOk, {})
+ self.headers[:] = [('x-account-bytes-used', '0'),
+ ('x-account-meta-quota-bytes', '1000')]
cache = FakeCache(None)
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT',
@@ -485,10 +549,9 @@ class AccountQuotaCopyingTestCases(unittest.TestCase):
self.assertEqual(res.status_int, 200)
def test_not_exceed_bytes_quota_copy_verb(self):
- headers = [('x-account-bytes-used', '0'),
- ('x-account-meta-quota-bytes', '1000'),
- ('content-length', '1000')]
- self.app.headers = headers
+ self.app.register('PUT', '/v1/a/c/o', HTTPOk, {})
+ self.headers[:] = [('x-account-bytes-used', '0'),
+ ('x-account-meta-quota-bytes', '1000')]
cache = FakeCache(None)
req = Request.blank('/v1/a/c2/o2',
environ={'REQUEST_METHOD': 'COPY',
@@ -497,22 +560,9 @@ class AccountQuotaCopyingTestCases(unittest.TestCase):
res = req.get_response(self.copy_filter)
self.assertEqual(res.status_int, 200)
- def test_quota_copy_from_no_src(self):
- headers = [('x-account-bytes-used', '0'),
- ('x-account-meta-quota-bytes', '1000')]
- self.app.headers = headers
- cache = FakeCache(None)
- req = Request.blank('/v1/a/c/o',
- environ={'REQUEST_METHOD': 'PUT',
- 'swift.cache': cache},
- headers={'x-copy-from': '/c2/o3'})
- res = req.get_response(self.copy_filter)
- self.assertEqual(res.status_int, 200)
-
def test_quota_copy_from_bad_src(self):
- headers = [('x-account-bytes-used', '0'),
- ('x-account-meta-quota-bytes', '1000')]
- self.app.headers = headers
+ self.headers[:] = [('x-account-bytes-used', '0'),
+ ('x-account-meta-quota-bytes', '1000')]
cache = FakeCache(None)
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT',
@@ -521,6 +571,11 @@ class AccountQuotaCopyingTestCases(unittest.TestCase):
res = req.get_response(self.copy_filter)
self.assertEqual(res.status_int, 412)
+ self.headers[:] = [('x-account-bytes-used', '1000'),
+ ('x-account-meta-quota-bytes', '0')]
+ res = req.get_response(self.copy_filter)
+ self.assertEqual(res.status_int, 412)
+
if __name__ == '__main__':
unittest.main()
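The per-policy cases above exercise the middleware's header translation: clients set X-Account-Quota-Bytes-Policy-&lt;policy name&gt;, which is persisted as X-Account-Sysmeta-Quota-Bytes-Policy-&lt;policy index&gt; and translated back on GET/HEAD. A hedged sketch of that mapping; the policies argument stands in for Swift's configured policy collection, and get_by_name/idx mirror its interface as assumptions:

def translate_policy_quota(header, value, policies):
    # e.g. 'x-account-quota-bytes-policy-gold' -> sysmeta keyed by index
    prefix = 'x-account-quota-bytes-policy-'
    if not header.lower().startswith(prefix):
        return header, value
    policy = policies.get_by_name(header[len(prefix):])
    sysmeta = 'x-account-sysmeta-quota-bytes-policy-%d' % policy.idx
    return sysmeta, value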
diff --git a/test/unit/common/ring/test_builder.py b/test/unit/common/ring/test_builder.py
index 74e8acaae..e1b57be9e 100644
--- a/test/unit/common/ring/test_builder.py
+++ b/test/unit/common/ring/test_builder.py
@@ -85,6 +85,12 @@ class TestRingBuilder(unittest.TestCase):
ring.RingBuilder(33, 3, 1)
self.assertEqual(str(ctx.exception), expected_msg)
+ def test_oversmall_part_powers(self):
+ expected_msg = 'part_power must be at least 0 (was -1)'
+ with self.assertRaises(ValueError) as ctx:
+ ring.RingBuilder(-1, 3, 1)
+ self.assertEqual(str(ctx.exception), expected_msg)
+
def test_insufficient_replicas(self):
expected_msg = 'replicas must be at least 1 (was 0.999000)'
with self.assertRaises(ValueError) as ctx:
diff --git a/test/unit/common/test_direct_client.py b/test/unit/common/test_direct_client.py
index cfc186d8d..b1d02c610 100644
--- a/test/unit/common/test_direct_client.py
+++ b/test/unit/common/test_direct_client.py
@@ -620,6 +620,22 @@ class TestDirectClient(unittest.TestCase):
self.assertEqual(raised.exception.http_status, 500)
self.assertTrue('PUT' in str(raised.exception))
+ def test_direct_post_container(self):
+ headers = {'x-foo': 'bar', 'User-Agent': 'my UA'}
+
+ with mocked_http_conn(204) as conn:
+ resp = direct_client.direct_post_container(
+ self.node, self.part, self.account, self.container,
+ headers=headers)
+ self.assertEqual(conn.host, self.node['ip'])
+ self.assertEqual(conn.port, self.node['port'])
+ self.assertEqual(conn.method, 'POST')
+ self.assertEqual(conn.path, self.container_path)
+ self.assertEqual(conn.req_headers['User-Agent'], 'my UA')
+ self.assertTrue('x-timestamp' in conn.req_headers)
+ self.assertEqual('bar', conn.req_headers.get('x-foo'))
+ self.assertEqual(204, resp.status)
+
def test_direct_delete_container_object(self):
with mocked_http_conn(204) as conn:
rv = direct_client.direct_delete_container_object(
diff --git a/test/unit/common/test_internal_client.py b/test/unit/common/test_internal_client.py
index d26ef0e2d..65bbed9a9 100644
--- a/test/unit/common/test_internal_client.py
+++ b/test/unit/common/test_internal_client.py
@@ -30,6 +30,7 @@ from swift.common import exceptions, internal_client, request_helpers, swob, \
from swift.common.header_key_dict import HeaderKeyDict
from swift.common.storage_policy import StoragePolicy
from swift.common.middleware.proxy_logging import ProxyLoggingMiddleware
+from swift.common.middleware.gatekeeper import GatekeeperMiddleware
from test.debug_logger import debug_logger
from test.unit import with_tempdir, write_fake_ring, patch_policies
@@ -392,6 +393,21 @@ class TestInternalClient(unittest.TestCase):
conf_path, user_agent, request_tries=0)
mock_loadapp.assert_not_called()
+        # if we load it with the gatekeeper middleware then we also get a
+        # ValueError
+ gate_keeper_app = GatekeeperMiddleware(app, {})
+ gate_keeper_app._pipeline_final_app = app
+ gate_keeper_app._pipeline = [gate_keeper_app, app]
+ with mock.patch.object(
+ internal_client, 'loadapp', return_value=gate_keeper_app) \
+ as mock_loadapp, self.assertRaises(ValueError) as err:
+ internal_client.InternalClient(
+ conf_path, user_agent, request_tries)
+ self.assertEqual(
+ str(err.exception),
+ ('Gatekeeper middleware is not allowed in the InternalClient '
+ 'proxy pipeline'))
+
with mock.patch.object(
internal_client, 'loadapp', return_value=app) as mock_loadapp:
client = internal_client.InternalClient(
@@ -421,6 +437,72 @@ class TestInternalClient(unittest.TestCase):
self.assertEqual(request_tries, client.request_tries)
self.assertTrue(client.use_replication_network)
+ def test_init_allow_modify_pipeline(self):
+ conf_path = 'some_path'
+ app = FakeSwift()
+ user_agent = 'some_user_agent'
+
+ with mock.patch.object(
+ internal_client, 'loadapp', return_value=app) as mock_loadapp,\
+ self.assertRaises(ValueError) as cm:
+ internal_client.InternalClient(
+ conf_path, user_agent, 1, allow_modify_pipeline=True)
+ mock_loadapp.assert_not_called()
+ self.assertIn("'allow_modify_pipeline' is no longer supported",
+ str(cm.exception))
+
+ with mock.patch.object(
+ internal_client, 'loadapp', return_value=app) as mock_loadapp:
+ internal_client.InternalClient(
+ conf_path, user_agent, 1, allow_modify_pipeline=False)
+ mock_loadapp.assert_called_once_with(
+ conf_path, allow_modify_pipeline=False, global_conf=None)
+
+ def test_gatekeeper_not_loaded(self):
+ app = FakeSwift()
+ pipeline = [app]
+
+ class RandomMiddleware(object):
+ def __init__(self, app):
+ self.app = app
+ self._pipeline_final_app = app
+ self._pipeline = pipeline
+ self._pipeline.insert(0, self)
+
+        # if there is no Gatekeeper middleware then the check passes;
+        # here the pipeline is just the final app
+ self.assertFalse(
+ internal_client.InternalClient.check_gatekeeper_not_loaded(app))
+
+ # now with a bunch of middlewares
+ app_no_gatekeeper = app
+ for i in range(5):
+ app_no_gatekeeper = RandomMiddleware(app_no_gatekeeper)
+ self.assertFalse(
+ internal_client.InternalClient.check_gatekeeper_not_loaded(
+ app_no_gatekeeper))
+
+        # But if we add the gatekeeper to the pipeline, it will be found
+ app_with_gatekeeper = GatekeeperMiddleware(app_no_gatekeeper, {})
+ pipeline.insert(0, app_with_gatekeeper)
+ app_with_gatekeeper._pipeline = pipeline
+ with self.assertRaises(ValueError) as err:
+ internal_client.InternalClient.check_gatekeeper_not_loaded(
+ app_with_gatekeeper)
+ self.assertEqual(str(err.exception),
+ ('Gatekeeper middleware is not allowed in the '
+ 'InternalClient proxy pipeline'))
+
+        # even if we bury it deep in the pipeline
+ for i in range(5):
+ app_with_gatekeeper = RandomMiddleware(app_with_gatekeeper)
+ with self.assertRaises(ValueError) as err:
+ internal_client.InternalClient.check_gatekeeper_not_loaded(
+ app_with_gatekeeper)
+ self.assertEqual(str(err.exception),
+ ('Gatekeeper middleware is not allowed in the '
+ 'InternalClient proxy pipeline'))
+
def test_make_request_sets_user_agent(self):
class FakeApp(FakeSwift):
def __init__(self, test):
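The new tests above pin down how InternalClient refuses a pipeline containing gatekeeper: each middleware exposes a _pipeline list, and finding a GatekeeperMiddleware anywhere in it raises ValueError. A simplified sketch of that walk, as an illustration rather than the method's actual body:

from swift.common.middleware.gatekeeper import GatekeeperMiddleware

def check_no_gatekeeper(app):
    # the final app has no _pipeline attribute, so fall back to [app]
    pipeline = getattr(app, '_pipeline', None) or [app]
    for middleware in pipeline:
        if isinstance(middleware, GatekeeperMiddleware):
            raise ValueError('Gatekeeper middleware is not allowed in '
                             'the InternalClient proxy pipeline')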
diff --git a/test/unit/common/test_utils.py b/test/unit/common/test_utils.py
index 018a0804c..e477ee85d 100644
--- a/test/unit/common/test_utils.py
+++ b/test/unit/common/test_utils.py
@@ -23,7 +23,6 @@ from test.debug_logger import debug_logger
from test.unit import temptree, make_timestamp_iter, with_tempdir, \
mock_timestamp_now, FakeIterable
-import ctypes
import contextlib
import errno
import eventlet
@@ -33,7 +32,6 @@ import eventlet.patcher
import functools
import grp
import logging
-import platform
import os
import mock
import posix
@@ -198,855 +196,6 @@ class TestUTC(unittest.TestCase):
self.assertEqual(utils.UTC.tzname(None), 'UTC')
-class TestTimestamp(unittest.TestCase):
- """Tests for swift.common.utils.Timestamp"""
-
- def test_invalid_input(self):
- self.assertRaises(ValueError, utils.Timestamp, time.time(), offset=-1)
- self.assertRaises(ValueError, utils.Timestamp, '123.456_78_90')
-
- def test_invalid_string_conversion(self):
- t = utils.Timestamp.now()
- self.assertRaises(TypeError, str, t)
-
- def test_offset_limit(self):
- t = 1417462430.78693
- # can't have a offset above MAX_OFFSET
- self.assertRaises(ValueError, utils.Timestamp, t,
- offset=utils.MAX_OFFSET + 1)
- # exactly max offset is fine
- ts = utils.Timestamp(t, offset=utils.MAX_OFFSET)
- self.assertEqual(ts.internal, '1417462430.78693_ffffffffffffffff')
- # but you can't offset it further
- self.assertRaises(ValueError, utils.Timestamp, ts.internal, offset=1)
- # unless you start below it
- ts = utils.Timestamp(t, offset=utils.MAX_OFFSET - 1)
- self.assertEqual(utils.Timestamp(ts.internal, offset=1),
- '1417462430.78693_ffffffffffffffff')
-
- def test_normal_format_no_offset(self):
- expected = '1402436408.91203'
- test_values = (
- '1402436408.91203',
- '1402436408.91203_00000000',
- '1402436408.912030000',
- '1402436408.912030000_0000000000000',
- '000001402436408.912030000',
- '000001402436408.912030000_0000000000',
- 1402436408.91203,
- 1402436408.912029,
- 1402436408.9120300000000000,
- 1402436408.91202999999999999,
- utils.Timestamp(1402436408.91203),
- utils.Timestamp(1402436408.91203, offset=0),
- utils.Timestamp(1402436408.912029),
- utils.Timestamp(1402436408.912029, offset=0),
- utils.Timestamp('1402436408.91203'),
- utils.Timestamp('1402436408.91203', offset=0),
- utils.Timestamp('1402436408.91203_00000000'),
- utils.Timestamp('1402436408.91203_00000000', offset=0),
- )
- for value in test_values:
- timestamp = utils.Timestamp(value)
- self.assertEqual(timestamp.normal, expected)
- # timestamp instance can also compare to string or float
- self.assertEqual(timestamp, expected)
- self.assertEqual(timestamp, float(expected))
- self.assertEqual(timestamp, utils.normalize_timestamp(expected))
-
- def test_isoformat(self):
- expected = '2014-06-10T22:47:32.054580'
- test_values = (
- '1402440452.05458',
- '1402440452.054579',
- '1402440452.05458_00000000',
- '1402440452.054579_00000000',
- '1402440452.054580000',
- '1402440452.054579999',
- '1402440452.054580000_0000000000000',
- '1402440452.054579999_0000ff00',
- '000001402440452.054580000',
- '000001402440452.0545799',
- '000001402440452.054580000_0000000000',
- '000001402440452.054579999999_00000fffff',
- 1402440452.05458,
- 1402440452.054579,
- 1402440452.0545800000000000,
- 1402440452.054579999,
- utils.Timestamp(1402440452.05458),
- utils.Timestamp(1402440452.0545799),
- utils.Timestamp(1402440452.05458, offset=0),
- utils.Timestamp(1402440452.05457999999, offset=0),
- utils.Timestamp(1402440452.05458, offset=100),
- utils.Timestamp(1402440452.054579, offset=100),
- utils.Timestamp('1402440452.05458'),
- utils.Timestamp('1402440452.054579999'),
- utils.Timestamp('1402440452.05458', offset=0),
- utils.Timestamp('1402440452.054579', offset=0),
- utils.Timestamp('1402440452.05458', offset=300),
- utils.Timestamp('1402440452.05457999', offset=300),
- utils.Timestamp('1402440452.05458_00000000'),
- utils.Timestamp('1402440452.05457999_00000000'),
- utils.Timestamp('1402440452.05458_00000000', offset=0),
- utils.Timestamp('1402440452.05457999_00000aaa', offset=0),
- utils.Timestamp('1402440452.05458_00000000', offset=400),
- utils.Timestamp('1402440452.054579_0a', offset=400),
- )
- for value in test_values:
- self.assertEqual(utils.Timestamp(value).isoformat, expected)
- expected = '1970-01-01T00:00:00.000000'
- test_values = (
- '0',
- '0000000000.00000',
- '0000000000.00000_ffffffffffff',
- 0,
- 0.0,
- )
- for value in test_values:
- self.assertEqual(utils.Timestamp(value).isoformat, expected)
-
- def test_from_isoformat(self):
- ts = utils.Timestamp.from_isoformat('2014-06-10T22:47:32.054580')
- self.assertIsInstance(ts, utils.Timestamp)
- self.assertEqual(1402440452.05458, float(ts))
- self.assertEqual('2014-06-10T22:47:32.054580', ts.isoformat)
-
- ts = utils.Timestamp.from_isoformat('1970-01-01T00:00:00.000000')
- self.assertIsInstance(ts, utils.Timestamp)
- self.assertEqual(0.0, float(ts))
- self.assertEqual('1970-01-01T00:00:00.000000', ts.isoformat)
-
- ts = utils.Timestamp(1402440452.05458)
- self.assertIsInstance(ts, utils.Timestamp)
- self.assertEqual(ts, utils.Timestamp.from_isoformat(ts.isoformat))
-
- def test_ceil(self):
- self.assertEqual(0.0, utils.Timestamp(0).ceil())
- self.assertEqual(1.0, utils.Timestamp(0.00001).ceil())
- self.assertEqual(1.0, utils.Timestamp(0.000001).ceil())
- self.assertEqual(12345678.0, utils.Timestamp(12345678.0).ceil())
- self.assertEqual(12345679.0, utils.Timestamp(12345678.000001).ceil())
-
- def test_not_equal(self):
- ts = '1402436408.91203_0000000000000001'
- test_values = (
- utils.Timestamp('1402436408.91203_0000000000000002'),
- utils.Timestamp('1402436408.91203'),
- utils.Timestamp(1402436408.91203),
- utils.Timestamp(1402436408.91204),
- utils.Timestamp(1402436408.91203, offset=0),
- utils.Timestamp(1402436408.91203, offset=2),
- )
- for value in test_values:
- self.assertTrue(value != ts)
-
- self.assertIs(True, utils.Timestamp(ts) == ts) # sanity
- self.assertIs(False, utils.Timestamp(ts) != utils.Timestamp(ts))
- self.assertIs(False, utils.Timestamp(ts) != ts)
- self.assertIs(False, utils.Timestamp(ts) is None)
- self.assertIs(True, utils.Timestamp(ts) is not None)
-
- def test_no_force_internal_no_offset(self):
- """Test that internal is the same as normal with no offset"""
- with mock.patch('swift.common.utils.FORCE_INTERNAL', new=False):
- self.assertEqual(utils.Timestamp(0).internal, '0000000000.00000')
- self.assertEqual(utils.Timestamp(1402437380.58186).internal,
- '1402437380.58186')
- self.assertEqual(utils.Timestamp(1402437380.581859).internal,
- '1402437380.58186')
- self.assertEqual(utils.Timestamp(0).internal,
- utils.normalize_timestamp(0))
-
- def test_no_force_internal_with_offset(self):
- """Test that internal always includes the offset if significant"""
- with mock.patch('swift.common.utils.FORCE_INTERNAL', new=False):
- self.assertEqual(utils.Timestamp(0, offset=1).internal,
- '0000000000.00000_0000000000000001')
- self.assertEqual(
- utils.Timestamp(1402437380.58186, offset=16).internal,
- '1402437380.58186_0000000000000010')
- self.assertEqual(
- utils.Timestamp(1402437380.581859, offset=240).internal,
- '1402437380.58186_00000000000000f0')
- self.assertEqual(
- utils.Timestamp('1402437380.581859_00000001',
- offset=240).internal,
- '1402437380.58186_00000000000000f1')
-
- def test_force_internal(self):
- """Test that internal always includes the offset if forced"""
- with mock.patch('swift.common.utils.FORCE_INTERNAL', new=True):
- self.assertEqual(utils.Timestamp(0).internal,
- '0000000000.00000_0000000000000000')
- self.assertEqual(utils.Timestamp(1402437380.58186).internal,
- '1402437380.58186_0000000000000000')
- self.assertEqual(utils.Timestamp(1402437380.581859).internal,
- '1402437380.58186_0000000000000000')
- self.assertEqual(utils.Timestamp(0, offset=1).internal,
- '0000000000.00000_0000000000000001')
- self.assertEqual(
- utils.Timestamp(1402437380.58186, offset=16).internal,
- '1402437380.58186_0000000000000010')
- self.assertEqual(
- utils.Timestamp(1402437380.581859, offset=16).internal,
- '1402437380.58186_0000000000000010')
-
- def test_internal_format_no_offset(self):
- expected = '1402436408.91203_0000000000000000'
- test_values = (
- '1402436408.91203',
- '1402436408.91203_00000000',
- '1402436408.912030000',
- '1402436408.912030000_0000000000000',
- '000001402436408.912030000',
- '000001402436408.912030000_0000000000',
- 1402436408.91203,
- 1402436408.9120300000000000,
- 1402436408.912029,
- 1402436408.912029999999999999,
- utils.Timestamp(1402436408.91203),
- utils.Timestamp(1402436408.91203, offset=0),
- utils.Timestamp(1402436408.912029),
- utils.Timestamp(1402436408.91202999999999999, offset=0),
- utils.Timestamp('1402436408.91203'),
- utils.Timestamp('1402436408.91203', offset=0),
- utils.Timestamp('1402436408.912029'),
- utils.Timestamp('1402436408.912029', offset=0),
- utils.Timestamp('1402436408.912029999999999'),
- utils.Timestamp('1402436408.912029999999999', offset=0),
- )
- for value in test_values:
- # timestamp instance is always equivalent
- self.assertEqual(utils.Timestamp(value), expected)
- if utils.FORCE_INTERNAL:
- # the FORCE_INTERNAL flag makes the internal format always
- # include the offset portion of the timestamp even when it's
- # not significant (doing so would be bad during upgrades)
- self.assertEqual(utils.Timestamp(value).internal, expected)
- else:
- # unless we FORCE_INTERNAL, when there's no offset the
- # internal format is equivalent to the normalized format
- self.assertEqual(utils.Timestamp(value).internal,
- '1402436408.91203')
-
- def test_internal_format_with_offset(self):
- expected = '1402436408.91203_00000000000000f0'
- test_values = (
- '1402436408.91203_000000f0',
- u'1402436408.91203_000000f0',
- b'1402436408.91203_000000f0',
- '1402436408.912030000_0000000000f0',
- '1402436408.912029_000000f0',
- '1402436408.91202999999_0000000000f0',
- '000001402436408.912030000_000000000f0',
- '000001402436408.9120299999_000000000f0',
- utils.Timestamp(1402436408.91203, offset=240),
- utils.Timestamp(1402436408.912029, offset=240),
- utils.Timestamp('1402436408.91203', offset=240),
- utils.Timestamp('1402436408.91203_00000000', offset=240),
- utils.Timestamp('1402436408.91203_0000000f', offset=225),
- utils.Timestamp('1402436408.9120299999', offset=240),
- utils.Timestamp('1402436408.9120299999_00000000', offset=240),
- utils.Timestamp('1402436408.9120299999_00000010', offset=224),
- )
- for value in test_values:
- timestamp = utils.Timestamp(value)
- self.assertEqual(timestamp.internal, expected)
- # can compare with offset if the string is internalized
- self.assertEqual(timestamp, expected)
- # if comparison value only includes the normalized portion and the
- # timestamp includes an offset, it is considered greater
- normal = utils.Timestamp(expected).normal
- self.assertTrue(timestamp > normal,
- '%r is not bigger than %r given %r' % (
- timestamp, normal, value))
- self.assertTrue(timestamp > float(normal),
- '%r is not bigger than %f given %r' % (
- timestamp, float(normal), value))
-
- def test_short_format_with_offset(self):
- expected = '1402436408.91203_f0'
- timestamp = utils.Timestamp(1402436408.91203, 0xf0)
- self.assertEqual(expected, timestamp.short)
-
- expected = '1402436408.91203'
- timestamp = utils.Timestamp(1402436408.91203)
- self.assertEqual(expected, timestamp.short)
-
- def test_raw(self):
- expected = 140243640891203
- timestamp = utils.Timestamp(1402436408.91203)
- self.assertEqual(expected, timestamp.raw)
-
- # 'raw' does not include offset
- timestamp = utils.Timestamp(1402436408.91203, 0xf0)
- self.assertEqual(expected, timestamp.raw)
-
- def test_delta(self):
- def _assertWithinBounds(expected, timestamp):
- tolerance = 0.00001
- minimum = expected - tolerance
- maximum = expected + tolerance
- self.assertTrue(float(timestamp) > minimum)
- self.assertTrue(float(timestamp) < maximum)
-
- timestamp = utils.Timestamp(1402436408.91203, delta=100)
- _assertWithinBounds(1402436408.91303, timestamp)
- self.assertEqual(140243640891303, timestamp.raw)
-
- timestamp = utils.Timestamp(1402436408.91203, delta=-100)
- _assertWithinBounds(1402436408.91103, timestamp)
- self.assertEqual(140243640891103, timestamp.raw)
-
- timestamp = utils.Timestamp(1402436408.91203, delta=0)
- _assertWithinBounds(1402436408.91203, timestamp)
- self.assertEqual(140243640891203, timestamp.raw)
-
- # delta is independent of offset
- timestamp = utils.Timestamp(1402436408.91203, offset=42, delta=100)
- self.assertEqual(140243640891303, timestamp.raw)
- self.assertEqual(42, timestamp.offset)
-
- # cannot go negative
- self.assertRaises(ValueError, utils.Timestamp, 1402436408.91203,
- delta=-140243640891203)
-
- def test_int(self):
- expected = 1402437965
- test_values = (
- '1402437965.91203',
- '1402437965.91203_00000000',
- '1402437965.912030000',
- '1402437965.912030000_0000000000000',
- '000001402437965.912030000',
- '000001402437965.912030000_0000000000',
- 1402437965.91203,
- 1402437965.9120300000000000,
- 1402437965.912029,
- 1402437965.912029999999999999,
- utils.Timestamp(1402437965.91203),
- utils.Timestamp(1402437965.91203, offset=0),
- utils.Timestamp(1402437965.91203, offset=500),
- utils.Timestamp(1402437965.912029),
- utils.Timestamp(1402437965.91202999999999999, offset=0),
- utils.Timestamp(1402437965.91202999999999999, offset=300),
- utils.Timestamp('1402437965.91203'),
- utils.Timestamp('1402437965.91203', offset=0),
- utils.Timestamp('1402437965.91203', offset=400),
- utils.Timestamp('1402437965.912029'),
- utils.Timestamp('1402437965.912029', offset=0),
- utils.Timestamp('1402437965.912029', offset=200),
- utils.Timestamp('1402437965.912029999999999'),
- utils.Timestamp('1402437965.912029999999999', offset=0),
- utils.Timestamp('1402437965.912029999999999', offset=100),
- )
- for value in test_values:
- timestamp = utils.Timestamp(value)
- self.assertEqual(int(timestamp), expected)
- self.assertTrue(timestamp > expected)
-
- def test_float(self):
- expected = 1402438115.91203
- test_values = (
- '1402438115.91203',
- '1402438115.91203_00000000',
- '1402438115.912030000',
- '1402438115.912030000_0000000000000',
- '000001402438115.912030000',
- '000001402438115.912030000_0000000000',
- 1402438115.91203,
- 1402438115.9120300000000000,
- 1402438115.912029,
- 1402438115.912029999999999999,
- utils.Timestamp(1402438115.91203),
- utils.Timestamp(1402438115.91203, offset=0),
- utils.Timestamp(1402438115.91203, offset=500),
- utils.Timestamp(1402438115.912029),
- utils.Timestamp(1402438115.91202999999999999, offset=0),
- utils.Timestamp(1402438115.91202999999999999, offset=300),
- utils.Timestamp('1402438115.91203'),
- utils.Timestamp('1402438115.91203', offset=0),
- utils.Timestamp('1402438115.91203', offset=400),
- utils.Timestamp('1402438115.912029'),
- utils.Timestamp('1402438115.912029', offset=0),
- utils.Timestamp('1402438115.912029', offset=200),
- utils.Timestamp('1402438115.912029999999999'),
- utils.Timestamp('1402438115.912029999999999', offset=0),
- utils.Timestamp('1402438115.912029999999999', offset=100),
- )
- tolerance = 0.00001
- minimum = expected - tolerance
- maximum = expected + tolerance
- for value in test_values:
- timestamp = utils.Timestamp(value)
- self.assertTrue(float(timestamp) > minimum,
- '%f is not bigger than %f given %r' % (
- timestamp, minimum, value))
- self.assertTrue(float(timestamp) < maximum,
- '%f is not smaller than %f given %r' % (
- timestamp, maximum, value))
- # direct comparison of timestamp works too
- self.assertTrue(timestamp > minimum,
- '%s is not bigger than %f given %r' % (
- timestamp.normal, minimum, value))
- self.assertTrue(timestamp < maximum,
- '%s is not smaller than %f given %r' % (
- timestamp.normal, maximum, value))
- # ... even against strings
- self.assertTrue(timestamp > '%f' % minimum,
- '%s is not bigger than %s given %r' % (
- timestamp.normal, minimum, value))
- self.assertTrue(timestamp < '%f' % maximum,
- '%s is not smaller than %s given %r' % (
- timestamp.normal, maximum, value))
-
- def test_false(self):
- self.assertFalse(utils.Timestamp(0))
- self.assertFalse(utils.Timestamp(0, offset=0))
- self.assertFalse(utils.Timestamp('0'))
- self.assertFalse(utils.Timestamp('0', offset=0))
- self.assertFalse(utils.Timestamp(0.0))
- self.assertFalse(utils.Timestamp(0.0, offset=0))
- self.assertFalse(utils.Timestamp('0.0'))
- self.assertFalse(utils.Timestamp('0.0', offset=0))
- self.assertFalse(utils.Timestamp(00000000.00000000))
- self.assertFalse(utils.Timestamp(00000000.00000000, offset=0))
- self.assertFalse(utils.Timestamp('00000000.00000000'))
- self.assertFalse(utils.Timestamp('00000000.00000000', offset=0))
-
- def test_true(self):
- self.assertTrue(utils.Timestamp(1))
- self.assertTrue(utils.Timestamp(1, offset=1))
- self.assertTrue(utils.Timestamp(0, offset=1))
- self.assertTrue(utils.Timestamp('1'))
- self.assertTrue(utils.Timestamp('1', offset=1))
- self.assertTrue(utils.Timestamp('0', offset=1))
- self.assertTrue(utils.Timestamp(1.1))
- self.assertTrue(utils.Timestamp(1.1, offset=1))
- self.assertTrue(utils.Timestamp(0.0, offset=1))
- self.assertTrue(utils.Timestamp('1.1'))
- self.assertTrue(utils.Timestamp('1.1', offset=1))
- self.assertTrue(utils.Timestamp('0.0', offset=1))
- self.assertTrue(utils.Timestamp(11111111.11111111))
- self.assertTrue(utils.Timestamp(11111111.11111111, offset=1))
- self.assertTrue(utils.Timestamp(00000000.00000000, offset=1))
- self.assertTrue(utils.Timestamp('11111111.11111111'))
- self.assertTrue(utils.Timestamp('11111111.11111111', offset=1))
- self.assertTrue(utils.Timestamp('00000000.00000000', offset=1))
-
- def test_greater_no_offset(self):
- now = time.time()
- older = now - 1
- timestamp = utils.Timestamp(now)
- test_values = (
- 0, '0', 0.0, '0.0', '0000.0000', '000.000_000',
- 1, '1', 1.1, '1.1', '1111.1111', '111.111_111',
- 1402443112.213252, '1402443112.213252', '1402443112.213252_ffff',
- older, '%f' % older, '%f_0000ffff' % older,
- )
- for value in test_values:
- other = utils.Timestamp(value)
- self.assertNotEqual(timestamp, other) # sanity
- self.assertTrue(timestamp > value,
- '%r is not greater than %r given %r' % (
- timestamp, value, value))
- self.assertTrue(timestamp > other,
- '%r is not greater than %r given %r' % (
- timestamp, other, value))
- self.assertTrue(timestamp > other.normal,
- '%r is not greater than %r given %r' % (
- timestamp, other.normal, value))
- self.assertTrue(timestamp > other.internal,
- '%r is not greater than %r given %r' % (
- timestamp, other.internal, value))
- self.assertTrue(timestamp > float(other),
- '%r is not greater than %r given %r' % (
- timestamp, float(other), value))
- self.assertTrue(timestamp > int(other),
- '%r is not greater than %r given %r' % (
- timestamp, int(other), value))
-
- def _test_greater_with_offset(self, now, test_values):
- for offset in range(1, 1000, 100):
- timestamp = utils.Timestamp(now, offset=offset)
- for value in test_values:
- other = utils.Timestamp(value)
- self.assertNotEqual(timestamp, other) # sanity
- self.assertTrue(timestamp > value,
- '%r is not greater than %r given %r' % (
- timestamp, value, value))
- self.assertTrue(timestamp > other,
- '%r is not greater than %r given %r' % (
- timestamp, other, value))
- self.assertTrue(timestamp > other.normal,
- '%r is not greater than %r given %r' % (
- timestamp, other.normal, value))
- self.assertTrue(timestamp > other.internal,
- '%r is not greater than %r given %r' % (
- timestamp, other.internal, value))
- self.assertTrue(timestamp > float(other),
- '%r is not greater than %r given %r' % (
- timestamp, float(other), value))
- self.assertTrue(timestamp > int(other),
- '%r is not greater than %r given %r' % (
- timestamp, int(other), value))
-
- def test_greater_with_offset(self):
- # Part 1: use the current system time. This is deliciously
- # unpredictable, but completely legitimate and realistic. Finds bugs!
- now = time.time()
- older = now - 1
- test_values = (
- 0, '0', 0.0, '0.0', '0000.0000', '000.000_000',
- 1, '1', 1.1, '1.1', '1111.1111', '111.111_111',
- 1402443346.935174, '1402443346.93517', '1402443346.935169_ffff',
- older, now,
- )
- self._test_greater_with_offset(now, test_values)
- # Part 2: Same as above, but with fixed time values that reproduce
- # specific corner cases.
- now = 1519830570.6949348
- older = now - 1
- test_values = (
- 0, '0', 0.0, '0.0', '0000.0000', '000.000_000',
- 1, '1', 1.1, '1.1', '1111.1111', '111.111_111',
- 1402443346.935174, '1402443346.93517', '1402443346.935169_ffff',
- older, now,
- )
- self._test_greater_with_offset(now, test_values)
- # Part 3: The '%f' problem. Timestamps cannot be converted to %f
- # strings, then back to timestamps, then compared with originals.
- # You can only "import" a floating point representation once.
- now = 1519830570.6949348
- now = float('%f' % now)
- older = now - 1
- test_values = (
- 0, '0', 0.0, '0.0', '0000.0000', '000.000_000',
- 1, '1', 1.1, '1.1', '1111.1111', '111.111_111',
- older, '%f' % older, '%f_0000ffff' % older,
- now, '%f' % now, '%s_00000000' % now,
- )
- self._test_greater_with_offset(now, test_values)
-
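The '%f' problem in Part 3 is easy to demonstrate with plain floats: '%f' keeps only six decimal places, so a value that has been through a '%f' round trip no longer equals the original:

    now = 1519830570.6949348
    assert '%f' % now == '1519830570.694935'   # rounded to 6 decimals
    assert float('%f' % now) != now            # precision is gone for good
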
- def test_smaller_no_offset(self):
- now = time.time()
- newer = now + 1
- timestamp = utils.Timestamp(now)
- test_values = (
- 9999999999.99999, '9999999999.99999', '9999999999.99999_ffff',
- newer, '%f' % newer, '%f_0000ffff' % newer,
- )
- for value in test_values:
- other = utils.Timestamp(value)
- self.assertNotEqual(timestamp, other) # sanity
- self.assertTrue(timestamp < value,
- '%r is not smaller than %r given %r' % (
- timestamp, value, value))
- self.assertTrue(timestamp < other,
- '%r is not smaller than %r given %r' % (
- timestamp, other, value))
- self.assertTrue(timestamp < other.normal,
- '%r is not smaller than %r given %r' % (
- timestamp, other.normal, value))
- self.assertTrue(timestamp < other.internal,
- '%r is not smaller than %r given %r' % (
- timestamp, other.internal, value))
- self.assertTrue(timestamp < float(other),
- '%r is not smaller than %r given %r' % (
- timestamp, float(other), value))
- self.assertTrue(timestamp < int(other),
- '%r is not smaller than %r given %r' % (
- timestamp, int(other), value))
-
- def test_smaller_with_offset(self):
- now = time.time()
- newer = now + 1
- test_values = (
- 9999999999.99999, '9999999999.99999', '9999999999.99999_ffff',
- newer, '%f' % newer, '%f_0000ffff' % newer,
- )
- for offset in range(1, 1000, 100):
- timestamp = utils.Timestamp(now, offset=offset)
- for value in test_values:
- other = utils.Timestamp(value)
- self.assertNotEqual(timestamp, other) # sanity
- self.assertTrue(timestamp < value,
- '%r is not smaller than %r given %r' % (
- timestamp, value, value))
- self.assertTrue(timestamp < other,
- '%r is not smaller than %r given %r' % (
- timestamp, other, value))
- self.assertTrue(timestamp < other.normal,
- '%r is not smaller than %r given %r' % (
- timestamp, other.normal, value))
- self.assertTrue(timestamp < other.internal,
- '%r is not smaller than %r given %r' % (
- timestamp, other.internal, value))
- self.assertTrue(timestamp < float(other),
- '%r is not smaller than %r given %r' % (
- timestamp, float(other), value))
- self.assertTrue(timestamp < int(other),
- '%r is not smaller than %r given %r' % (
- timestamp, int(other), value))
-
- def test_cmp_with_none(self):
- self.assertGreater(utils.Timestamp(0), None)
- self.assertGreater(utils.Timestamp(1.0), None)
- self.assertGreater(utils.Timestamp(1.0, 42), None)
-
- def test_ordering(self):
- given = [
- '1402444820.62590_000000000000000a',
- '1402444820.62589_0000000000000001',
- '1402444821.52589_0000000000000004',
- '1402444920.62589_0000000000000004',
- '1402444821.62589_000000000000000a',
- '1402444821.72589_000000000000000a',
- '1402444920.62589_0000000000000002',
- '1402444820.62589_0000000000000002',
- '1402444820.62589_000000000000000a',
- '1402444820.62590_0000000000000004',
- '1402444920.62589_000000000000000a',
- '1402444820.62590_0000000000000002',
- '1402444821.52589_0000000000000002',
- '1402444821.52589_0000000000000000',
- '1402444920.62589',
- '1402444821.62589_0000000000000004',
- '1402444821.72589_0000000000000001',
- '1402444820.62590',
- '1402444820.62590_0000000000000001',
- '1402444820.62589_0000000000000004',
- '1402444821.72589_0000000000000000',
- '1402444821.52589_000000000000000a',
- '1402444821.72589_0000000000000004',
- '1402444821.62589',
- '1402444821.52589_0000000000000001',
- '1402444821.62589_0000000000000001',
- '1402444821.62589_0000000000000002',
- '1402444821.72589_0000000000000002',
- '1402444820.62589',
- '1402444920.62589_0000000000000001']
- expected = [
- '1402444820.62589',
- '1402444820.62589_0000000000000001',
- '1402444820.62589_0000000000000002',
- '1402444820.62589_0000000000000004',
- '1402444820.62589_000000000000000a',
- '1402444820.62590',
- '1402444820.62590_0000000000000001',
- '1402444820.62590_0000000000000002',
- '1402444820.62590_0000000000000004',
- '1402444820.62590_000000000000000a',
- '1402444821.52589',
- '1402444821.52589_0000000000000001',
- '1402444821.52589_0000000000000002',
- '1402444821.52589_0000000000000004',
- '1402444821.52589_000000000000000a',
- '1402444821.62589',
- '1402444821.62589_0000000000000001',
- '1402444821.62589_0000000000000002',
- '1402444821.62589_0000000000000004',
- '1402444821.62589_000000000000000a',
- '1402444821.72589',
- '1402444821.72589_0000000000000001',
- '1402444821.72589_0000000000000002',
- '1402444821.72589_0000000000000004',
- '1402444821.72589_000000000000000a',
- '1402444920.62589',
- '1402444920.62589_0000000000000001',
- '1402444920.62589_0000000000000002',
- '1402444920.62589_0000000000000004',
- '1402444920.62589_000000000000000a',
- ]
- # less visual version
- """
- now = time.time()
- given = [
- utils.Timestamp(now + i, offset=offset).internal
- for i in (0, 0.00001, 0.9, 1.0, 1.1, 100.0)
- for offset in (0, 1, 2, 4, 10)
- ]
- expected = [t for t in given]
- random.shuffle(given)
- """
- self.assertEqual(len(given), len(expected)) # sanity
- timestamps = [utils.Timestamp(t) for t in given]
- # our expected values don't include insignificant offsets
- with mock.patch('swift.common.utils.FORCE_INTERNAL', new=False):
- self.assertEqual(
- [t.internal for t in sorted(timestamps)], expected)
- # string sorting works as well
- self.assertEqual(
- sorted([t.internal for t in timestamps]), expected)
-
- def test_hashable(self):
- ts_0 = utils.Timestamp('1402444821.72589')
- ts_0_also = utils.Timestamp('1402444821.72589')
- self.assertEqual(ts_0, ts_0_also) # sanity
- self.assertEqual(hash(ts_0), hash(ts_0_also))
- d = {ts_0: 'whatever'}
- self.assertIn(ts_0, d) # sanity
- self.assertIn(ts_0_also, d)
-
- def test_out_of_range_comparisons(self):
- now = utils.Timestamp.now()
-
- def check_is_later(val):
- self.assertTrue(now != val)
- self.assertFalse(now == val)
- self.assertTrue(now <= val)
- self.assertTrue(now < val)
- self.assertTrue(val > now)
- self.assertTrue(val >= now)
-
- check_is_later(1e30)
- check_is_later(1579753284000) # someone gave us ms instead of s!
- check_is_later('1579753284000')
- check_is_later(b'1e15')
- check_is_later(u'1.e+10_f')
-
- def check_is_earlier(val):
- self.assertTrue(now != val)
- self.assertFalse(now == val)
- self.assertTrue(now >= val)
- self.assertTrue(now > val)
- self.assertTrue(val < now)
- self.assertTrue(val <= now)
-
- check_is_earlier(-1)
- check_is_earlier(-0.1)
- check_is_earlier('-9999999')
- check_is_earlier(b'-9999.999')
- check_is_earlier(u'-1234_5678')
-
- def test_inversion(self):
- ts = utils.Timestamp(0)
- self.assertIsInstance(~ts, utils.Timestamp)
- self.assertEqual((~ts).internal, '9999999999.99999')
-
- ts = utils.Timestamp(123456.789)
- self.assertIsInstance(~ts, utils.Timestamp)
- self.assertEqual(ts.internal, '0000123456.78900')
- self.assertEqual((~ts).internal, '9999876543.21099')
-
- timestamps = sorted(utils.Timestamp(random.random() * 1e10)
- for _ in range(20))
- self.assertEqual([x.internal for x in timestamps],
- sorted(x.internal for x in timestamps))
- self.assertEqual([(~x).internal for x in reversed(timestamps)],
- sorted((~x).internal for x in timestamps))
-
- ts = utils.Timestamp.now()
- self.assertGreater(~ts, ts) # NB: will break around 2128
-
- ts = utils.Timestamp.now(offset=1)
- with self.assertRaises(ValueError) as caught:
- ~ts
- self.assertEqual(caught.exception.args[0],
- 'Cannot invert timestamps with offsets')
-
-
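Taken together, the assertions above pin down the Timestamp representations; a short recap, with values lifted directly from the tests:

    from swift.common.utils import Timestamp

    ts = Timestamp(1402436408.91203, offset=240)
    assert ts.normal == '1402436408.91203'                     # 5 decimals
    assert ts.internal == '1402436408.91203_00000000000000f0'  # hex offset
    assert ts.short == '1402436408.91203_f0'                   # trimmed offset
    assert ts.raw == 140243640891203     # 100-microsecond units, no offset
    # the offset breaks ties: it sorts after the same time without one
    assert ts > Timestamp('1402436408.91203')
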
-class TestTimestampEncoding(unittest.TestCase):
-
- def setUp(self):
- t0 = utils.Timestamp(0.0)
- t1 = utils.Timestamp(997.9996)
- t2 = utils.Timestamp(999)
- t3 = utils.Timestamp(1000, 24)
- t4 = utils.Timestamp(1001)
- t5 = utils.Timestamp(1002.00040)
-
- # encodings that are expected when explicit = False
- self.non_explicit_encodings = (
- ('0000001000.00000_18', (t3, t3, t3)),
- ('0000001000.00000_18', (t3, t3, None)),
- )
-
- # mappings that are expected when explicit = True
- self.explicit_encodings = (
- ('0000001000.00000_18+0+0', (t3, t3, t3)),
- ('0000001000.00000_18+0', (t3, t3, None)),
- )
-
- # mappings that are expected when explicit = True or False
- self.encodings = (
- ('0000001000.00000_18+0+186a0', (t3, t3, t4)),
- ('0000001000.00000_18+186a0+186c8', (t3, t4, t5)),
- ('0000001000.00000_18-186a0+0', (t3, t2, t2)),
- ('0000001000.00000_18+0-186a0', (t3, t3, t2)),
- ('0000001000.00000_18-186a0-186c8', (t3, t2, t1)),
- ('0000001000.00000_18', (t3, None, None)),
- ('0000001000.00000_18+186a0', (t3, t4, None)),
- ('0000001000.00000_18-186a0', (t3, t2, None)),
- ('0000001000.00000_18', (t3, None, t1)),
- ('0000001000.00000_18-5f5e100', (t3, t0, None)),
- ('0000001000.00000_18+0-5f5e100', (t3, t3, t0)),
- ('0000001000.00000_18-5f5e100+5f45a60', (t3, t0, t2)),
- )
-
- # decodings that are expected when explicit = False
- self.non_explicit_decodings = (
- ('0000001000.00000_18', (t3, t3, t3)),
- ('0000001000.00000_18+186a0', (t3, t4, t4)),
- ('0000001000.00000_18-186a0', (t3, t2, t2)),
- ('0000001000.00000_18+186a0', (t3, t4, t4)),
- ('0000001000.00000_18-186a0', (t3, t2, t2)),
- ('0000001000.00000_18-5f5e100', (t3, t0, t0)),
- )
-
- # decodings that are expected when explicit = True
- self.explicit_decodings = (
- ('0000001000.00000_18+0+0', (t3, t3, t3)),
- ('0000001000.00000_18+0', (t3, t3, None)),
- ('0000001000.00000_18', (t3, None, None)),
- ('0000001000.00000_18+186a0', (t3, t4, None)),
- ('0000001000.00000_18-186a0', (t3, t2, None)),
- ('0000001000.00000_18-5f5e100', (t3, t0, None)),
- )
-
- # decodings that are expected when explicit = True or False
- self.decodings = (
- ('0000001000.00000_18+0+186a0', (t3, t3, t4)),
- ('0000001000.00000_18+186a0+186c8', (t3, t4, t5)),
- ('0000001000.00000_18-186a0+0', (t3, t2, t2)),
- ('0000001000.00000_18+0-186a0', (t3, t3, t2)),
- ('0000001000.00000_18-186a0-186c8', (t3, t2, t1)),
- ('0000001000.00000_18-5f5e100+5f45a60', (t3, t0, t2)),
- )
-
- def _assertEqual(self, expected, actual, test):
- self.assertEqual(expected, actual,
- 'Got %s but expected %s for parameters %s'
- % (actual, expected, test))
-
- def test_encoding(self):
- for test in self.explicit_encodings:
- actual = utils.encode_timestamps(test[1][0], test[1][1],
- test[1][2], True)
- self._assertEqual(test[0], actual, test[1])
- for test in self.non_explicit_encodings:
- actual = utils.encode_timestamps(test[1][0], test[1][1],
- test[1][2], False)
- self._assertEqual(test[0], actual, test[1])
- for explicit in (True, False):
- for test in self.encodings:
- actual = utils.encode_timestamps(test[1][0], test[1][1],
- test[1][2], explicit)
- self._assertEqual(test[0], actual, test[1])
-
- def test_decoding(self):
- for test in self.explicit_decodings:
- actual = utils.decode_timestamps(test[0], True)
- self._assertEqual(test[1], actual, test[0])
- for test in self.non_explicit_decodings:
- actual = utils.decode_timestamps(test[0], False)
- self._assertEqual(test[1], actual, test[0])
- for explicit in (True, False):
- for test in self.decodings:
- actual = utils.decode_timestamps(test[0], explicit)
- self._assertEqual(test[1], actual, test[0])
-
-
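The encoding tables boil down to a delta scheme: the first timestamp is stored in internal form and each later one as a signed hex delta, in 100-microsecond raw units, from its predecessor. Reusing the fixtures from setUp:

    from swift.common.utils import (
        Timestamp, encode_timestamps, decode_timestamps)

    t3 = Timestamp(1000, 24)      # offset 24 == 0x18
    t4 = Timestamp(1001)          # 1.0s after t3    -> +0x186a0 raw units
    t5 = Timestamp(1002.00040)    # 1.0004s after t4 -> +0x186c8 raw units

    enc = encode_timestamps(t3, t4, t5)
    assert enc == '0000001000.00000_18+186a0+186c8'
    assert decode_timestamps(enc) == (t3, t4, t5)
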
class TestUtils(unittest.TestCase):
"""Tests for swift.common.utils """
@@ -2204,6 +1353,36 @@ class TestUtils(unittest.TestCase):
finally:
base_logger.logger.removeHandler(handler)
+ @reset_logger_state
+ def test_nested_prefixlogger(self):
+ # setup stream logging
+ sio = StringIO()
+ base_logger = utils.get_logger(None)
+ handler = logging.StreamHandler(sio)
+ base_logger.logger.addHandler(handler)
+ inner_logger = utils.PrefixLoggerAdapter(base_logger, {})
+ inner_logger.set_prefix('one: ')
+ outer_logger = utils.PrefixLoggerAdapter(inner_logger, {})
+ outer_logger.set_prefix('two: ')
+
+ def strip_value(sio):
+ sio.seek(0)
+ v = sio.getvalue()
+ sio.truncate(0)
+ return v
+
+ try:
+ # establish base case
+ self.assertEqual(strip_value(sio), '')
+ inner_logger.info('test')
+ self.assertEqual(strip_value(sio), 'one: test\n')
+
+ outer_logger.info('test')
+ self.assertEqual(strip_value(sio), 'one: two: test\n')
+ self.assertEqual(strip_value(sio), '')
+ finally:
+ base_logger.logger.removeHandler(handler)
+
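Note the resulting order: the outer adapter applies its prefix first and then delegates to the inner adapter, so the inner prefix ends up leftmost ('one: two: test'). The same composition can be reproduced with a bare stdlib LoggerAdapter; a standalone sketch, not Swift's implementation:

    import logging

    class Prefixed(logging.LoggerAdapter):
        def __init__(self, logger, prefix):
            super(Prefixed, self).__init__(logger, {})
            self.prefix = prefix

        def process(self, msg, kwargs):
            # prepend our prefix, then hand off to whatever we wrap
            return self.prefix + msg, kwargs

    logging.basicConfig(format='%(message)s', level=logging.INFO)
    base = logging.getLogger('demo')
    outer = Prefixed(Prefixed(base, 'one: '), 'two: ')
    outer.info('test')   # prints 'one: two: test'
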
def test_storage_directory(self):
self.assertEqual(utils.storage_directory('objects', '1', 'ABCDEF'),
'objects/1/DEF/ABCDEF')
@@ -3498,44 +2677,6 @@ cluster_dfw1 = http://dfw1.host/v1/
ts = utils.get_trans_id_time('tx1df4ff4f55ea45f7b2ec2-almostright')
self.assertIsNone(ts)
- def test_config_fallocate_value(self):
- fallocate_value, is_percent = utils.config_fallocate_value('10%')
- self.assertEqual(fallocate_value, 10)
- self.assertTrue(is_percent)
- fallocate_value, is_percent = utils.config_fallocate_value('10')
- self.assertEqual(fallocate_value, 10)
- self.assertFalse(is_percent)
- try:
- fallocate_value, is_percent = utils.config_fallocate_value('ab%')
- except ValueError as err:
- exc = err
- self.assertEqual(str(exc), 'Error: ab% is an invalid value for '
- 'fallocate_reserve.')
- try:
- fallocate_value, is_percent = utils.config_fallocate_value('ab')
- except ValueError as err:
- exc = err
- self.assertEqual(str(exc), 'Error: ab is an invalid value for '
- 'fallocate_reserve.')
- try:
- fallocate_value, is_percent = utils.config_fallocate_value('1%%')
- except ValueError as err:
- exc = err
- self.assertEqual(str(exc), 'Error: 1%% is an invalid value for '
- 'fallocate_reserve.')
- try:
- fallocate_value, is_percent = utils.config_fallocate_value('10.0')
- except ValueError as err:
- exc = err
- self.assertEqual(str(exc), 'Error: 10.0 is an invalid value for '
- 'fallocate_reserve.')
- fallocate_value, is_percent = utils.config_fallocate_value('10.5%')
- self.assertEqual(fallocate_value, 10.5)
- self.assertTrue(is_percent)
- fallocate_value, is_percent = utils.config_fallocate_value('10.000%')
- self.assertEqual(fallocate_value, 10.000)
- self.assertTrue(is_percent)
-
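For reference, the relocated test pins down the parser's contract: a trailing '%' marks the reserve as a percentage (decimals are allowed there), a plain value must be a whole number, and anything else raises ValueError. Assuming the helper remains importable from swift.common.utils after the reorganisation:

    from swift.common.utils import config_fallocate_value

    assert config_fallocate_value('10%') == (10, True)    # percentage
    assert config_fallocate_value('10') == (10, False)    # absolute bytes
    assert config_fallocate_value('10.5%') == (10.5, True)
    try:
        config_fallocate_value('10.0')   # bare decimals are rejected
    except ValueError as err:
        assert 'invalid value for fallocate_reserve' in str(err)
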
def test_lock_file(self):
flags = os.O_CREAT | os.O_RDWR
with NamedTemporaryFile(delete=False) as nt:
@@ -4363,110 +3504,6 @@ cluster_dfw1 = http://dfw1.host/v1/
self.assertRaises(ValueError, utils.make_db_file_path,
'/path/to/hash.db', 'bad epoch')
- def test_modify_priority(self):
- pid = os.getpid()
- logger = debug_logger()
- called = {}
-
- def _fake_setpriority(*args):
- called['setpriority'] = args
-
- def _fake_syscall(*args):
- called['syscall'] = args
-
- # Test if current architecture supports changing of priority
- try:
- utils.NR_ioprio_set()
- except OSError as e:
- raise unittest.SkipTest(e)
-
- with patch('swift.common.utils._libc_setpriority',
- _fake_setpriority), \
- patch('swift.common.utils._posix_syscall', _fake_syscall):
- called = {}
- # not set / default
- utils.modify_priority({}, logger)
- self.assertEqual(called, {})
- called = {}
- # just nice
- utils.modify_priority({'nice_priority': '1'}, logger)
- self.assertEqual(called, {'setpriority': (0, pid, 1)})
- called = {}
- # just ionice class uses default priority 0
- utils.modify_priority({'ionice_class': 'IOPRIO_CLASS_RT'}, logger)
- architecture = os.uname()[4]
- arch_bits = platform.architecture()[0]
- if architecture == 'x86_64' and arch_bits == '64bit':
- self.assertEqual(called, {'syscall': (251, 1, pid, 1 << 13)})
- elif architecture == 'aarch64' and arch_bits == '64bit':
- self.assertEqual(called, {'syscall': (30, 1, pid, 1 << 13)})
- else:
- self.fail("Unexpected call: %r" % called)
- called = {}
- # just ionice priority is ignored
- utils.modify_priority({'ionice_priority': '4'}, logger)
- self.assertEqual(called, {})
- called = {}
- # bad ionice class
- utils.modify_priority({'ionice_class': 'class_foo'}, logger)
- self.assertEqual(called, {})
- called = {}
- # ionice class & priority
- utils.modify_priority({
- 'ionice_class': 'IOPRIO_CLASS_BE',
- 'ionice_priority': '4',
- }, logger)
- if architecture == 'x86_64' and arch_bits == '64bit':
- self.assertEqual(called, {
- 'syscall': (251, 1, pid, 2 << 13 | 4)
- })
- elif architecture == 'aarch64' and arch_bits == '64bit':
- self.assertEqual(called, {
- 'syscall': (30, 1, pid, 2 << 13 | 4)
- })
- else:
- self.fail("Unexpected call: %r" % called)
- called = {}
- # all
- utils.modify_priority({
- 'nice_priority': '-15',
- 'ionice_class': 'IOPRIO_CLASS_IDLE',
- 'ionice_priority': '6',
- }, logger)
- if architecture == 'x86_64' and arch_bits == '64bit':
- self.assertEqual(called, {
- 'setpriority': (0, pid, -15),
- 'syscall': (251, 1, pid, 3 << 13 | 6),
- })
- elif architecture == 'aarch64' and arch_bits == '64bit':
- self.assertEqual(called, {
- 'setpriority': (0, pid, -15),
- 'syscall': (30, 1, pid, 3 << 13 | 6),
- })
- else:
- self.fail("Unexpected call: %r" % called)
-
- def test__NR_ioprio_set(self):
- with patch('os.uname', return_value=('', '', '', '', 'x86_64')), \
- patch('platform.architecture', return_value=('64bit', '')):
- self.assertEqual(251, utils.NR_ioprio_set())
-
- with patch('os.uname', return_value=('', '', '', '', 'x86_64')), \
- patch('platform.architecture', return_value=('32bit', '')):
- self.assertRaises(OSError, utils.NR_ioprio_set)
-
- with patch('os.uname', return_value=('', '', '', '', 'aarch64')), \
- patch('platform.architecture', return_value=('64bit', '')):
- self.assertEqual(30, utils.NR_ioprio_set())
-
- with patch('os.uname', return_value=('', '', '', '', 'aarch64')), \
- patch('platform.architecture', return_value=('32bit', '')):
- self.assertRaises(OSError, utils.NR_ioprio_set)
-
- with patch('os.uname', return_value=('', '', '', '', 'alpha')), \
- patch('platform.architecture', return_value=('64bit', '')):
- self.assertRaises(OSError, utils.NR_ioprio_set)
-
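The magic numbers in the expected calls decode as follows: 251 and 30 are the ioprio_set syscall numbers on x86_64 and aarch64, and the final argument packs the I/O scheduling class above a 13-bit shift with the within-class priority in the low bits. A sketch of that packing, with names assumed from the kernel's ioprio interface rather than taken from this patch:

    # scheduling classes as used by the kernel's ioprio_set interface
    IOPRIO_CLASS_RT, IOPRIO_CLASS_BE, IOPRIO_CLASS_IDLE = 1, 2, 3
    IOPRIO_CLASS_SHIFT = 13

    def ioprio_value(io_class, priority=0):
        # pack (class, priority) into the single int ioprio_set expects
        return io_class << IOPRIO_CLASS_SHIFT | priority

    assert ioprio_value(IOPRIO_CLASS_RT) == 1 << 13           # 8192
    assert ioprio_value(IOPRIO_CLASS_BE, 4) == 2 << 13 | 4    # 16388
    assert ioprio_value(IOPRIO_CLASS_IDLE, 6) == 3 << 13 | 6  # 24582
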
@requires_o_tmpfile_support_in_tmp
def test_link_fd_to_path_linkat_success(self):
tempdir = mkdtemp()
@@ -5910,6 +4947,7 @@ class UnsafeXrange(object):
"""
Like range(limit), but with extra context switching to screw things up.
"""
+
def __init__(self, upper_bound):
self.current = 0
self.concurrent_calls = 0
@@ -6656,6 +5694,24 @@ class TestMetricsPrefixLoggerAdapter(unittest.TestCase):
mock.call('test3'), mock.call('test')],
mock_increment.call_args_list)
+ def test_wrapped_prefixing(self):
+ logger = utils.get_logger({}, 'logger_name')
+ adapter1 = utils.MetricsPrefixLoggerAdapter(logger, {}, 'one')
+ adapter2 = utils.MetricsPrefixLoggerAdapter(adapter1, {}, 'two')
+ self.assertEqual('logger_name', logger.name)
+ self.assertEqual('logger_name', adapter1.logger.name)
+ self.assertEqual('logger_name', adapter2.logger.name)
+
+ with mock.patch.object(logger, 'increment') as mock_increment:
+ adapter1.increment('test1')
+ adapter2.increment('test2')
+ logger.increment('test')
+ self.assertEqual(
+ [mock.call('one.test1'),
+ mock.call('one.two.test2'),
+ mock.call('test')],
+ mock_increment.call_args_list)
+
class TestAuditLocationGenerator(unittest.TestCase):
@@ -8211,6 +7267,470 @@ class TestShardName(unittest.TestCase):
utils.ShardName.create('a', 'root', None, '1235678', 'bad')
+class TestNamespace(unittest.TestCase):
+ def test_lower_setter(self):
+ ns = utils.Namespace('a/c', 'b', '')
+ # sanity checks
+ self.assertEqual('b', ns.lower_str)
+ self.assertEqual(ns.MAX, ns.upper)
+
+ def do_test(good_value, expected):
+ ns.lower = good_value
+ self.assertEqual(expected, ns.lower)
+ self.assertEqual(ns.MAX, ns.upper)
+
+ do_test(utils.Namespace.MIN, utils.Namespace.MIN)
+ do_test(utils.Namespace.MAX, utils.Namespace.MAX)
+ do_test(b'', utils.Namespace.MIN)
+ do_test(u'', utils.Namespace.MIN)
+ do_test(None, utils.Namespace.MIN)
+ do_test(b'a', 'a')
+ do_test(b'y', 'y')
+ do_test(u'a', 'a')
+ do_test(u'y', 'y')
+
+ expected = u'\N{SNOWMAN}'
+ if six.PY2:
+ expected = expected.encode('utf-8')
+ with warnings.catch_warnings(record=True) as captured_warnings:
+ do_test(u'\N{SNOWMAN}', expected)
+ do_test(u'\N{SNOWMAN}'.encode('utf-8'), expected)
+ self.assertFalse(captured_warnings)
+
+ ns = utils.Namespace('a/c', 'b', 'y')
+ ns.lower = ''
+ self.assertEqual(ns.MIN, ns.lower)
+
+ ns = utils.Namespace('a/c', 'b', 'y')
+ with self.assertRaises(ValueError) as cm:
+ ns.lower = 'z'
+ self.assertIn("must be less than or equal to upper", str(cm.exception))
+ self.assertEqual('b', ns.lower_str)
+ self.assertEqual('y', ns.upper_str)
+
+ def do_test(bad_value):
+ with self.assertRaises(TypeError) as cm:
+ ns.lower = bad_value
+ self.assertIn("lower must be a string", str(cm.exception))
+ self.assertEqual('b', ns.lower_str)
+ self.assertEqual('y', ns.upper_str)
+
+ do_test(1)
+ do_test(1.234)
+
+ def test_upper_setter(self):
+ ns = utils.Namespace('a/c', '', 'y')
+ # sanity checks
+ self.assertEqual(ns.MIN, ns.lower)
+ self.assertEqual('y', ns.upper_str)
+
+ def do_test(good_value, expected):
+ ns.upper = good_value
+ self.assertEqual(expected, ns.upper)
+ self.assertEqual(ns.MIN, ns.lower)
+
+ do_test(utils.Namespace.MIN, utils.Namespace.MIN)
+ do_test(utils.Namespace.MAX, utils.Namespace.MAX)
+ do_test(b'', utils.Namespace.MAX)
+ do_test(u'', utils.Namespace.MAX)
+ do_test(None, utils.Namespace.MAX)
+ do_test(b'z', 'z')
+ do_test(b'b', 'b')
+ do_test(u'z', 'z')
+ do_test(u'b', 'b')
+
+ expected = u'\N{SNOWMAN}'
+ if six.PY2:
+ expected = expected.encode('utf-8')
+ with warnings.catch_warnings(record=True) as captured_warnings:
+ do_test(u'\N{SNOWMAN}', expected)
+ do_test(u'\N{SNOWMAN}'.encode('utf-8'), expected)
+ self.assertFalse(captured_warnings)
+
+ ns = utils.Namespace('a/c', 'b', 'y')
+ ns.upper = ''
+ self.assertEqual(ns.MAX, ns.upper)
+
+ ns = utils.Namespace('a/c', 'b', 'y')
+ with self.assertRaises(ValueError) as cm:
+ ns.upper = 'a'
+ self.assertIn(
+ "must be greater than or equal to lower",
+ str(cm.exception))
+ self.assertEqual('b', ns.lower_str)
+ self.assertEqual('y', ns.upper_str)
+
+ def do_test(bad_value):
+ with self.assertRaises(TypeError) as cm:
+ ns.upper = bad_value
+ self.assertIn("upper must be a string", str(cm.exception))
+ self.assertEqual('b', ns.lower_str)
+ self.assertEqual('y', ns.upper_str)
+
+ do_test(1)
+ do_test(1.234)
+
+ def test_end_marker(self):
+ ns = utils.Namespace('a/c', '', 'y')
+ self.assertEqual('y\x00', ns.end_marker)
+ ns = utils.Namespace('a/c', '', '')
+ self.assertEqual('', ns.end_marker)
+
+ def test_bounds_serialization(self):
+ ns = utils.Namespace('a/c', None, None)
+ self.assertEqual('a/c', ns.name)
+ self.assertEqual(utils.Namespace.MIN, ns.lower)
+ self.assertEqual('', ns.lower_str)
+ self.assertEqual(utils.Namespace.MAX, ns.upper)
+ self.assertEqual('', ns.upper_str)
+ self.assertEqual('', ns.end_marker)
+
+ lower = u'\u00e4'
+ upper = u'\u00fb'
+ ns = utils.Namespace('a/%s-%s' % (lower, upper), lower, upper)
+ exp_lower = lower
+ exp_upper = upper
+ if six.PY2:
+ exp_lower = exp_lower.encode('utf-8')
+ exp_upper = exp_upper.encode('utf-8')
+ self.assertEqual(exp_lower, ns.lower)
+ self.assertEqual(exp_lower, ns.lower_str)
+ self.assertEqual(exp_upper, ns.upper)
+ self.assertEqual(exp_upper, ns.upper_str)
+ self.assertEqual(exp_upper + '\x00', ns.end_marker)
+
+ def test_entire_namespace(self):
+ # test entire range (no boundaries)
+ entire = utils.Namespace('a/test', None, None)
+ self.assertEqual(utils.Namespace.MAX, entire.upper)
+ self.assertEqual(utils.Namespace.MIN, entire.lower)
+ self.assertIs(True, entire.entire_namespace())
+
+ for x in range(100):
+ self.assertTrue(str(x) in entire)
+ self.assertTrue(chr(x) in entire)
+
+ for x in ('a', 'z', 'zzzz', '124fsdf', u'\u00e4'):
+ self.assertTrue(x in entire, '%r should be in %r' % (x, entire))
+
+ entire.lower = 'a'
+ self.assertIs(False, entire.entire_namespace())
+
+ def test_comparisons(self):
+ # upper (if provided) *must* be greater than lower
+ with self.assertRaises(ValueError):
+ utils.Namespace('f-a', 'f', 'a')
+
+ # test basic boundaries
+ btoc = utils.Namespace('a/b-c', 'b', 'c')
+ atof = utils.Namespace('a/a-f', 'a', 'f')
+ ftol = utils.Namespace('a/f-l', 'f', 'l')
+ ltor = utils.Namespace('a/l-r', 'l', 'r')
+ rtoz = utils.Namespace('a/r-z', 'r', 'z')
+ lower = utils.Namespace('a/lower', '', 'mid')
+ upper = utils.Namespace('a/upper', 'mid', '')
+ entire = utils.Namespace('a/test', None, None)
+
+ # overlapping ranges
+ dtof = utils.Namespace('a/d-f', 'd', 'f')
+ dtom = utils.Namespace('a/d-m', 'd', 'm')
+
+ # test range > and <
+ # non-adjacent
+ self.assertFalse(rtoz < atof)
+ self.assertTrue(atof < ltor)
+ self.assertTrue(ltor > atof)
+ self.assertFalse(ftol > rtoz)
+
+ # adjacent
+ self.assertFalse(rtoz < ltor)
+ self.assertTrue(ltor < rtoz)
+ self.assertFalse(ltor > rtoz)
+ self.assertTrue(rtoz > ltor)
+
+ # wholly within
+ self.assertFalse(btoc < atof)
+ self.assertFalse(btoc > atof)
+ self.assertFalse(atof < btoc)
+ self.assertFalse(atof > btoc)
+
+ self.assertFalse(atof < dtof)
+ self.assertFalse(dtof > atof)
+ self.assertFalse(atof > dtof)
+ self.assertFalse(dtof < atof)
+
+ self.assertFalse(dtof < dtom)
+ self.assertFalse(dtof > dtom)
+ self.assertFalse(dtom > dtof)
+ self.assertFalse(dtom < dtof)
+
+ # overlaps
+ self.assertFalse(atof < dtom)
+ self.assertFalse(atof > dtom)
+ self.assertFalse(ltor > dtom)
+
+ # ranges including min/max bounds
+ self.assertTrue(upper > lower)
+ self.assertTrue(lower < upper)
+ self.assertFalse(upper < lower)
+ self.assertFalse(lower > upper)
+
+ self.assertFalse(lower < entire)
+ self.assertFalse(entire > lower)
+ self.assertFalse(lower > entire)
+ self.assertFalse(entire < lower)
+
+ self.assertFalse(upper < entire)
+ self.assertFalse(entire > upper)
+ self.assertFalse(upper > entire)
+ self.assertFalse(entire < upper)
+
+ self.assertFalse(entire < entire)
+ self.assertFalse(entire > entire)
+
+ # test range < and > to an item
+ # a range is > lower and <= upper, so the lower boundary isn't
+ # actually included
+ self.assertTrue(ftol > 'f')
+ self.assertFalse(atof < 'f')
+ self.assertTrue(ltor < 'y')
+
+ self.assertFalse(ftol < 'f')
+ self.assertFalse(atof > 'f')
+ self.assertFalse(ltor > 'y')
+
+ self.assertTrue('f' < ftol)
+ self.assertFalse('f' > atof)
+ self.assertTrue('y' > ltor)
+
+ self.assertFalse('f' > ftol)
+ self.assertFalse('f' < atof)
+ self.assertFalse('y' < ltor)
+
+ # Now test ranges with only 1 boundary
+ start_to_l = utils.Namespace('a/None-l', '', 'l')
+ l_to_end = utils.Namespace('a/l-None', 'l', '')
+
+ for x in ('l', 'm', 'z', 'zzz1231sd'):
+ if x == 'l':
+ self.assertFalse(x in l_to_end)
+ self.assertFalse(start_to_l < x)
+ self.assertFalse(x > start_to_l)
+ else:
+ self.assertTrue(x in l_to_end)
+ self.assertTrue(start_to_l < x)
+ self.assertTrue(x > start_to_l)
+
+ # Now test some of the range to range checks with missing boundaries
+ self.assertFalse(atof < start_to_l)
+ self.assertFalse(start_to_l < entire)
+
+ # Now test Namespace.overlaps(other)
+ self.assertTrue(atof.overlaps(atof))
+ self.assertFalse(atof.overlaps(ftol))
+ self.assertFalse(ftol.overlaps(atof))
+ self.assertTrue(atof.overlaps(dtof))
+ self.assertTrue(dtof.overlaps(atof))
+ self.assertFalse(dtof.overlaps(ftol))
+ self.assertTrue(dtom.overlaps(ftol))
+ self.assertTrue(ftol.overlaps(dtom))
+ self.assertFalse(start_to_l.overlaps(l_to_end))
+
+ def test_contains(self):
+ lower = utils.Namespace('a/-h', '', 'h')
+ mid = utils.Namespace('a/h-p', 'h', 'p')
+ upper = utils.Namespace('a/p-', 'p', '')
+ entire = utils.Namespace('a/all', '', '')
+
+ self.assertTrue('a' in entire)
+ self.assertTrue('x' in entire)
+
+ # the empty string is not a valid object name, so it cannot be in any
+ # range
+ self.assertFalse('' in lower)
+ self.assertFalse('' in upper)
+ self.assertFalse('' in entire)
+
+ self.assertTrue('a' in lower)
+ self.assertTrue('h' in lower)
+ self.assertFalse('i' in lower)
+
+ self.assertFalse('h' in mid)
+ self.assertTrue('p' in mid)
+
+ self.assertFalse('p' in upper)
+ self.assertTrue('x' in upper)
+
+ self.assertIn(utils.Namespace.MAX, entire)
+ self.assertNotIn(utils.Namespace.MAX, lower)
+ self.assertIn(utils.Namespace.MAX, upper)
+
+ # lower bound is excluded so MIN cannot be in any range.
+ self.assertNotIn(utils.Namespace.MIN, entire)
+ self.assertNotIn(utils.Namespace.MIN, upper)
+ self.assertNotIn(utils.Namespace.MIN, lower)
+
+ def test_includes(self):
+ _to_h = utils.Namespace('a/-h', '', 'h')
+ d_to_t = utils.Namespace('a/d-t', 'd', 't')
+ d_to_k = utils.Namespace('a/d-k', 'd', 'k')
+ e_to_l = utils.Namespace('a/e-l', 'e', 'l')
+ k_to_t = utils.Namespace('a/k-t', 'k', 't')
+ p_to_ = utils.Namespace('a/p-', 'p', '')
+ t_to_ = utils.Namespace('a/t-', 't', '')
+ entire = utils.Namespace('a/all', '', '')
+
+ self.assertTrue(entire.includes(entire))
+ self.assertTrue(d_to_t.includes(d_to_t))
+ self.assertTrue(_to_h.includes(_to_h))
+ self.assertTrue(p_to_.includes(p_to_))
+
+ self.assertTrue(entire.includes(_to_h))
+ self.assertTrue(entire.includes(d_to_t))
+ self.assertTrue(entire.includes(p_to_))
+
+ self.assertTrue(d_to_t.includes(d_to_k))
+ self.assertTrue(d_to_t.includes(e_to_l))
+ self.assertTrue(d_to_t.includes(k_to_t))
+ self.assertTrue(p_to_.includes(t_to_))
+
+ self.assertFalse(_to_h.includes(d_to_t))
+ self.assertFalse(p_to_.includes(d_to_t))
+ self.assertFalse(k_to_t.includes(d_to_k))
+ self.assertFalse(d_to_k.includes(e_to_l))
+ self.assertFalse(k_to_t.includes(e_to_l))
+ self.assertFalse(t_to_.includes(p_to_))
+
+ self.assertFalse(_to_h.includes(entire))
+ self.assertFalse(p_to_.includes(entire))
+ self.assertFalse(d_to_t.includes(entire))
+
+ def test_expand(self):
+ bounds = (('', 'd'), ('d', 'k'), ('k', 't'), ('t', ''))
+ donors = [
+ utils.Namespace('a/c-%d' % i, b[0], b[1])
+ for i, b in enumerate(bounds)
+ ]
+ acceptor = utils.Namespace('a/c-acc', 'f', 's')
+ self.assertTrue(acceptor.expand(donors[:1]))
+ self.assertEqual((utils.Namespace.MIN, 's'),
+ (acceptor.lower, acceptor.upper))
+
+ acceptor = utils.Namespace('a/c-acc', 'f', 's')
+ self.assertTrue(acceptor.expand(donors[:2]))
+ self.assertEqual((utils.Namespace.MIN, 's'),
+ (acceptor.lower, acceptor.upper))
+
+ acceptor = utils.Namespace('a/c-acc', 'f', 's')
+ self.assertTrue(acceptor.expand(donors[1:3]))
+ self.assertEqual(('d', 't'),
+ (acceptor.lower, acceptor.upper))
+
+ acceptor = utils.Namespace('a/c-acc', 'f', 's')
+ self.assertTrue(acceptor.expand(donors))
+ self.assertEqual((utils.Namespace.MIN, utils.Namespace.MAX),
+ (acceptor.lower, acceptor.upper))
+
+ acceptor = utils.Namespace('a/c-acc', 'f', 's')
+ self.assertTrue(acceptor.expand(donors[1:2] + donors[3:]))
+ self.assertEqual(('d', utils.Namespace.MAX),
+ (acceptor.lower, acceptor.upper))
+
+ acceptor = utils.Namespace('a/c-acc', '', 'd')
+ self.assertFalse(acceptor.expand(donors[:1]))
+ self.assertEqual((utils.Namespace.MIN, 'd'),
+ (acceptor.lower, acceptor.upper))
+
+ acceptor = utils.Namespace('a/c-acc', 'b', 'v')
+ self.assertFalse(acceptor.expand(donors[1:3]))
+ self.assertEqual(('b', 'v'),
+ (acceptor.lower, acceptor.upper))
+
+ def test_total_ordering(self):
+ a_start_ns = utils.Namespace('a/-a', '', 'a')
+ a_atob_ns = utils.Namespace('a/a-b', 'a', 'b')
+ a_atof_ns = utils.Namespace('a/a-f', 'a', 'f')
+ a_ftol_ns = utils.Namespace('a/f-l', 'f', 'l')
+ a_ltor_ns = utils.Namespace('a/l-r', 'l', 'r')
+ a_rtoz_ns = utils.Namespace('a/r-z', 'r', 'z')
+ a_end_ns = utils.Namespace('a/z-', 'z', '')
+ b_start_ns = utils.Namespace('b/-a', '', 'a')
+ self.assertEqual(a_start_ns, b_start_ns)
+ self.assertNotEqual(a_start_ns, a_atob_ns)
+ self.assertLess(a_start_ns, a_atob_ns)
+ self.assertLess(a_atof_ns, a_ftol_ns)
+ self.assertLess(a_ftol_ns, a_ltor_ns)
+ self.assertLess(a_ltor_ns, a_rtoz_ns)
+ self.assertLess(a_rtoz_ns, a_end_ns)
+ self.assertLessEqual(a_start_ns, a_atof_ns)
+ self.assertLessEqual(a_atof_ns, a_rtoz_ns)
+ self.assertLessEqual(a_atof_ns, a_atof_ns)
+ self.assertGreater(a_end_ns, a_atof_ns)
+ self.assertGreater(a_rtoz_ns, a_ftol_ns)
+ self.assertGreater(a_end_ns, a_start_ns)
+ self.assertGreaterEqual(a_atof_ns, a_atof_ns)
+ self.assertGreaterEqual(a_end_ns, a_atof_ns)
+ self.assertGreaterEqual(a_rtoz_ns, a_start_ns)
+
+
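In short, a Namespace is a half-open interval over object names: the lower bound is excluded, the upper bound is included, and empty-string bounds stand in for MIN and MAX. A quick illustration consistent with the assertions above:

    from swift.common.utils import Namespace

    ns = Namespace('a/f-l', 'f', 'l')
    assert 'f' not in ns             # lower bound is excluded
    assert 'g' in ns
    assert 'l' in ns                 # upper bound is included
    assert ns.end_marker == 'l\x00'

    entire = Namespace('a/all', '', '')
    assert entire.entire_namespace()
    assert 'anything' in entire
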
+class TestNamespaceBoundList(unittest.TestCase):
+ def setUp(self):
+ start = ['', 'a/-a']
+ self.start_ns = utils.Namespace('a/-a', '', 'a')
+ atof = ['a', 'a/a-f']
+ self.atof_ns = utils.Namespace('a/a-f', 'a', 'f')
+ ftol = ['f', 'a/f-l']
+ self.ftol_ns = utils.Namespace('a/f-l', 'f', 'l')
+ ltor = ['l', 'a/l-r']
+ self.ltor_ns = utils.Namespace('a/l-r', 'l', 'r')
+ rtoz = ['r', 'a/r-z']
+ self.rtoz_ns = utils.Namespace('a/r-z', 'r', 'z')
+ end = ['z', 'a/z-']
+ self.end_ns = utils.Namespace('a/z-', 'z', '')
+ self.lowerbounds = [start, atof, ftol, ltor, rtoz, end]
+
+ def test_get_namespace(self):
+ namespace_list = utils.NamespaceBoundList(self.lowerbounds)
+ self.assertEqual(namespace_list.bounds, self.lowerbounds)
+ self.assertEqual(namespace_list.get_namespace('1'), self.start_ns)
+ self.assertEqual(namespace_list.get_namespace('a'), self.start_ns)
+ self.assertEqual(namespace_list.get_namespace('b'), self.atof_ns)
+ self.assertEqual(namespace_list.get_namespace('f'), self.atof_ns)
+ self.assertEqual(namespace_list.get_namespace('f\x00'), self.ftol_ns)
+ self.assertEqual(namespace_list.get_namespace('l'), self.ftol_ns)
+ self.assertEqual(namespace_list.get_namespace('x'), self.rtoz_ns)
+ self.assertEqual(namespace_list.get_namespace('r'), self.ltor_ns)
+ self.assertEqual(namespace_list.get_namespace('}'), self.end_ns)
+
+ def test_parse(self):
+ namespaces_list = utils.NamespaceBoundList.parse(None)
+ self.assertIsNone(namespaces_list)
+ namespaces = [self.start_ns, self.atof_ns, self.ftol_ns,
+ self.ltor_ns, self.rtoz_ns, self.end_ns]
+ namespace_list = utils.NamespaceBoundList.parse(namespaces)
+ self.assertEqual(namespace_list.bounds, self.lowerbounds)
+ self.assertEqual(namespace_list.get_namespace('1'), self.start_ns)
+ self.assertEqual(namespace_list.get_namespace('l'), self.ftol_ns)
+ self.assertEqual(namespace_list.get_namespace('x'), self.rtoz_ns)
+ self.assertEqual(namespace_list.get_namespace('r'), self.ltor_ns)
+ self.assertEqual(namespace_list.get_namespace('}'), self.end_ns)
+ self.assertEqual(namespace_list.bounds, self.lowerbounds)
+ overlap_f_ns = utils.Namespace('a/-f', '', 'f')
+ overlapping_namespaces = [self.start_ns, self.atof_ns, overlap_f_ns,
+ self.ftol_ns, self.ltor_ns, self.rtoz_ns,
+ self.end_ns]
+ namespace_list = utils.NamespaceBoundList.parse(
+ overlapping_namespaces)
+ self.assertEqual(namespace_list.bounds, self.lowerbounds)
+ overlap_l_ns = utils.Namespace('a/a-l', 'a', 'l')
+ overlapping_namespaces = [self.start_ns, self.atof_ns, self.ftol_ns,
+ overlap_l_ns, self.ltor_ns, self.rtoz_ns,
+ self.end_ns]
+ namespace_list = utils.NamespaceBoundList.parse(
+ overlapping_namespaces)
+ self.assertEqual(namespace_list.bounds, self.lowerbounds)
+
+
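So a NamespaceBoundList is just a sorted list of [lower_bound, name] pairs that lookups can bisect by object name, and parse() rebuilds it from Namespace objects while dropping redundant overlapping entries. A small illustration in the spirit of the fixtures above:

    from swift.common.utils import Namespace, NamespaceBoundList

    namespaces = [Namespace('a/-f', '', 'f'), Namespace('a/f-', 'f', '')]
    bound_list = NamespaceBoundList.parse(namespaces)
    assert bound_list.bounds == [['', 'a/-f'], ['f', 'a/f-']]
    # lookups bisect the lower bounds; 'f' itself still belongs to the
    # first range because lower bounds are exclusive
    assert bound_list.get_namespace('f') == namespaces[0]
    assert bound_list.get_namespace('f\x00') == namespaces[1]
    assert NamespaceBoundList.parse(None) is None
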
class TestShardRange(unittest.TestCase):
def setUp(self):
self.ts_iter = make_timestamp_iter()
@@ -8230,7 +7750,7 @@ class TestShardRange(unittest.TestCase):
def test_min_max_bounds(self):
with self.assertRaises(TypeError):
- utils.ShardRangeOuterBound()
+ utils.NamespaceOuterBound()
# max
self.assertEqual(utils.ShardRange.MAX, utils.ShardRange.MAX)
@@ -8750,348 +8270,6 @@ class TestShardRange(unittest.TestCase):
self.assertEqual(now, sr.timestamp)
self.assertIs(True, sr.deleted)
- def test_lower_setter(self):
- sr = utils.ShardRange('a/c', utils.Timestamp.now(), 'b', '')
- # sanity checks
- self.assertEqual('b', sr.lower_str)
- self.assertEqual(sr.MAX, sr.upper)
-
- def do_test(good_value, expected):
- sr.lower = good_value
- self.assertEqual(expected, sr.lower)
- self.assertEqual(sr.MAX, sr.upper)
-
- do_test(utils.ShardRange.MIN, utils.ShardRange.MIN)
- do_test(utils.ShardRange.MAX, utils.ShardRange.MAX)
- do_test(b'', utils.ShardRange.MIN)
- do_test(u'', utils.ShardRange.MIN)
- do_test(None, utils.ShardRange.MIN)
- do_test(b'a', 'a')
- do_test(b'y', 'y')
- do_test(u'a', 'a')
- do_test(u'y', 'y')
-
- expected = u'\N{SNOWMAN}'
- if six.PY2:
- expected = expected.encode('utf-8')
- with warnings.catch_warnings(record=True) as captured_warnings:
- do_test(u'\N{SNOWMAN}', expected)
- do_test(u'\N{SNOWMAN}'.encode('utf-8'), expected)
- self.assertFalse(captured_warnings)
-
- sr = utils.ShardRange('a/c', utils.Timestamp.now(), 'b', 'y')
- sr.lower = ''
- self.assertEqual(sr.MIN, sr.lower)
-
- sr = utils.ShardRange('a/c', utils.Timestamp.now(), 'b', 'y')
- with self.assertRaises(ValueError) as cm:
- sr.lower = 'z'
- self.assertIn("must be less than or equal to upper", str(cm.exception))
- self.assertEqual('b', sr.lower_str)
- self.assertEqual('y', sr.upper_str)
-
- def do_test(bad_value):
- with self.assertRaises(TypeError) as cm:
- sr.lower = bad_value
- self.assertIn("lower must be a string", str(cm.exception))
- self.assertEqual('b', sr.lower_str)
- self.assertEqual('y', sr.upper_str)
-
- do_test(1)
- do_test(1.234)
-
- def test_upper_setter(self):
- sr = utils.ShardRange('a/c', utils.Timestamp.now(), '', 'y')
- # sanity checks
- self.assertEqual(sr.MIN, sr.lower)
- self.assertEqual('y', sr.upper_str)
-
- def do_test(good_value, expected):
- sr.upper = good_value
- self.assertEqual(expected, sr.upper)
- self.assertEqual(sr.MIN, sr.lower)
-
- do_test(utils.ShardRange.MIN, utils.ShardRange.MIN)
- do_test(utils.ShardRange.MAX, utils.ShardRange.MAX)
- do_test(b'', utils.ShardRange.MAX)
- do_test(u'', utils.ShardRange.MAX)
- do_test(None, utils.ShardRange.MAX)
- do_test(b'z', 'z')
- do_test(b'b', 'b')
- do_test(u'z', 'z')
- do_test(u'b', 'b')
-
- expected = u'\N{SNOWMAN}'
- if six.PY2:
- expected = expected.encode('utf-8')
- with warnings.catch_warnings(record=True) as captured_warnings:
- do_test(u'\N{SNOWMAN}', expected)
- do_test(u'\N{SNOWMAN}'.encode('utf-8'), expected)
- self.assertFalse(captured_warnings)
-
- sr = utils.ShardRange('a/c', utils.Timestamp.now(), 'b', 'y')
- sr.upper = ''
- self.assertEqual(sr.MAX, sr.upper)
-
- sr = utils.ShardRange('a/c', utils.Timestamp.now(), 'b', 'y')
- with self.assertRaises(ValueError) as cm:
- sr.upper = 'a'
- self.assertIn(
- "must be greater than or equal to lower",
- str(cm.exception))
- self.assertEqual('b', sr.lower_str)
- self.assertEqual('y', sr.upper_str)
-
- def do_test(bad_value):
- with self.assertRaises(TypeError) as cm:
- sr.upper = bad_value
- self.assertIn("upper must be a string", str(cm.exception))
- self.assertEqual('b', sr.lower_str)
- self.assertEqual('y', sr.upper_str)
-
- do_test(1)
- do_test(1.234)
-
- def test_end_marker(self):
- sr = utils.ShardRange('a/c', utils.Timestamp.now(), '', 'y')
- self.assertEqual('y\x00', sr.end_marker)
- sr = utils.ShardRange('a/c', utils.Timestamp.now(), '', '')
- self.assertEqual('', sr.end_marker)
-
- def test_bounds_serialization(self):
- sr = utils.ShardRange('a/c', utils.Timestamp.now())
- self.assertEqual('a/c', sr.name)
- self.assertEqual(utils.ShardRange.MIN, sr.lower)
- self.assertEqual('', sr.lower_str)
- self.assertEqual(utils.ShardRange.MAX, sr.upper)
- self.assertEqual('', sr.upper_str)
- self.assertEqual('', sr.end_marker)
-
- lower = u'\u00e4'
- upper = u'\u00fb'
- sr = utils.ShardRange('a/%s-%s' % (lower, upper),
- utils.Timestamp.now(), lower, upper)
- exp_lower = lower
- exp_upper = upper
- if six.PY2:
- exp_lower = exp_lower.encode('utf-8')
- exp_upper = exp_upper.encode('utf-8')
- self.assertEqual(exp_lower, sr.lower)
- self.assertEqual(exp_lower, sr.lower_str)
- self.assertEqual(exp_upper, sr.upper)
- self.assertEqual(exp_upper, sr.upper_str)
- self.assertEqual(exp_upper + '\x00', sr.end_marker)
-
- def test_entire_namespace(self):
- # test entire range (no boundaries)
- entire = utils.ShardRange('a/test', utils.Timestamp.now())
- self.assertEqual(utils.ShardRange.MAX, entire.upper)
- self.assertEqual(utils.ShardRange.MIN, entire.lower)
- self.assertIs(True, entire.entire_namespace())
-
- for x in range(100):
- self.assertTrue(str(x) in entire)
- self.assertTrue(chr(x) in entire)
-
- for x in ('a', 'z', 'zzzz', '124fsdf', u'\u00e4'):
- self.assertTrue(x in entire, '%r should be in %r' % (x, entire))
-
- entire.lower = 'a'
- self.assertIs(False, entire.entire_namespace())
-
- def test_comparisons(self):
- ts = utils.Timestamp.now().internal
-
- # upper (if provided) *must* be greater than lower
- with self.assertRaises(ValueError):
- utils.ShardRange('f-a', ts, 'f', 'a')
-
- # test basic boundaries
- btoc = utils.ShardRange('a/b-c', ts, 'b', 'c')
- atof = utils.ShardRange('a/a-f', ts, 'a', 'f')
- ftol = utils.ShardRange('a/f-l', ts, 'f', 'l')
- ltor = utils.ShardRange('a/l-r', ts, 'l', 'r')
- rtoz = utils.ShardRange('a/r-z', ts, 'r', 'z')
- lower = utils.ShardRange('a/lower', ts, '', 'mid')
- upper = utils.ShardRange('a/upper', ts, 'mid', '')
- entire = utils.ShardRange('a/test', utils.Timestamp.now())
-
- # overlapping ranges
- dtof = utils.ShardRange('a/d-f', ts, 'd', 'f')
- dtom = utils.ShardRange('a/d-m', ts, 'd', 'm')
-
- # test range > and <
- # non-adjacent
- self.assertFalse(rtoz < atof)
- self.assertTrue(atof < ltor)
- self.assertTrue(ltor > atof)
- self.assertFalse(ftol > rtoz)
-
- # adjacent
- self.assertFalse(rtoz < ltor)
- self.assertTrue(ltor < rtoz)
- self.assertFalse(ltor > rtoz)
- self.assertTrue(rtoz > ltor)
-
- # wholly within
- self.assertFalse(btoc < atof)
- self.assertFalse(btoc > atof)
- self.assertFalse(atof < btoc)
- self.assertFalse(atof > btoc)
-
- self.assertFalse(atof < dtof)
- self.assertFalse(dtof > atof)
- self.assertFalse(atof > dtof)
- self.assertFalse(dtof < atof)
-
- self.assertFalse(dtof < dtom)
- self.assertFalse(dtof > dtom)
- self.assertFalse(dtom > dtof)
- self.assertFalse(dtom < dtof)
-
- # overlaps
- self.assertFalse(atof < dtom)
- self.assertFalse(atof > dtom)
- self.assertFalse(ltor > dtom)
-
- # ranges including min/max bounds
- self.assertTrue(upper > lower)
- self.assertTrue(lower < upper)
- self.assertFalse(upper < lower)
- self.assertFalse(lower > upper)
-
- self.assertFalse(lower < entire)
- self.assertFalse(entire > lower)
- self.assertFalse(lower > entire)
- self.assertFalse(entire < lower)
-
- self.assertFalse(upper < entire)
- self.assertFalse(entire > upper)
- self.assertFalse(upper > entire)
- self.assertFalse(entire < upper)
-
- self.assertFalse(entire < entire)
- self.assertFalse(entire > entire)
-
- # test range < and > to an item
- # range is > lower and <= upper, so the lower boundary isn't
- # actually included
- self.assertTrue(ftol > 'f')
- self.assertFalse(atof < 'f')
- self.assertTrue(ltor < 'y')
-
- self.assertFalse(ftol < 'f')
- self.assertFalse(atof > 'f')
- self.assertFalse(ltor > 'y')
-
- self.assertTrue('f' < ftol)
- self.assertFalse('f' > atof)
- self.assertTrue('y' > ltor)
-
- self.assertFalse('f' > ftol)
- self.assertFalse('f' < atof)
- self.assertFalse('y' < ltor)
-
- # Now test ranges with only 1 boundary
- start_to_l = utils.ShardRange('a/None-l', ts, '', 'l')
- l_to_end = utils.ShardRange('a/l-None', ts, 'l', '')
-
- for x in ('l', 'm', 'z', 'zzz1231sd'):
- if x == 'l':
- self.assertFalse(x in l_to_end)
- self.assertFalse(start_to_l < x)
- self.assertFalse(x > start_to_l)
- else:
- self.assertTrue(x in l_to_end)
- self.assertTrue(start_to_l < x)
- self.assertTrue(x > start_to_l)
-
- # Now test some of the range to range checks with missing boundaries
- self.assertFalse(atof < start_to_l)
- self.assertFalse(start_to_l < entire)
-
- # Now test ShardRange.overlaps(other)
- self.assertTrue(atof.overlaps(atof))
- self.assertFalse(atof.overlaps(ftol))
- self.assertFalse(ftol.overlaps(atof))
- self.assertTrue(atof.overlaps(dtof))
- self.assertTrue(dtof.overlaps(atof))
- self.assertFalse(dtof.overlaps(ftol))
- self.assertTrue(dtom.overlaps(ftol))
- self.assertTrue(ftol.overlaps(dtom))
- self.assertFalse(start_to_l.overlaps(l_to_end))
-
- def test_contains(self):
- ts = utils.Timestamp.now().internal
- lower = utils.ShardRange('a/-h', ts, '', 'h')
- mid = utils.ShardRange('a/h-p', ts, 'h', 'p')
- upper = utils.ShardRange('a/p-', ts, 'p', '')
- entire = utils.ShardRange('a/all', ts, '', '')
-
- self.assertTrue('a' in entire)
- self.assertTrue('x' in entire)
-
- # the empty string is not a valid object name, so it cannot be in any
- # range
- self.assertFalse('' in lower)
- self.assertFalse('' in upper)
- self.assertFalse('' in entire)
-
- self.assertTrue('a' in lower)
- self.assertTrue('h' in lower)
- self.assertFalse('i' in lower)
-
- self.assertFalse('h' in mid)
- self.assertTrue('p' in mid)
-
- self.assertFalse('p' in upper)
- self.assertTrue('x' in upper)
-
- self.assertIn(utils.ShardRange.MAX, entire)
- self.assertNotIn(utils.ShardRange.MAX, lower)
- self.assertIn(utils.ShardRange.MAX, upper)
-
- # lower bound is excluded so MIN cannot be in any range.
- self.assertNotIn(utils.ShardRange.MIN, entire)
- self.assertNotIn(utils.ShardRange.MIN, upper)
- self.assertNotIn(utils.ShardRange.MIN, lower)
-
- def test_includes(self):
- ts = utils.Timestamp.now().internal
- _to_h = utils.ShardRange('a/-h', ts, '', 'h')
- d_to_t = utils.ShardRange('a/d-t', ts, 'd', 't')
- d_to_k = utils.ShardRange('a/d-k', ts, 'd', 'k')
- e_to_l = utils.ShardRange('a/e-l', ts, 'e', 'l')
- k_to_t = utils.ShardRange('a/k-t', ts, 'k', 't')
- p_to_ = utils.ShardRange('a/p-', ts, 'p', '')
- t_to_ = utils.ShardRange('a/t-', ts, 't', '')
- entire = utils.ShardRange('a/all', ts, '', '')
-
- self.assertTrue(entire.includes(entire))
- self.assertTrue(d_to_t.includes(d_to_t))
- self.assertTrue(_to_h.includes(_to_h))
- self.assertTrue(p_to_.includes(p_to_))
-
- self.assertTrue(entire.includes(_to_h))
- self.assertTrue(entire.includes(d_to_t))
- self.assertTrue(entire.includes(p_to_))
-
- self.assertTrue(d_to_t.includes(d_to_k))
- self.assertTrue(d_to_t.includes(e_to_l))
- self.assertTrue(d_to_t.includes(k_to_t))
- self.assertTrue(p_to_.includes(t_to_))
-
- self.assertFalse(_to_h.includes(d_to_t))
- self.assertFalse(p_to_.includes(d_to_t))
- self.assertFalse(k_to_t.includes(d_to_k))
- self.assertFalse(d_to_k.includes(e_to_l))
- self.assertFalse(k_to_t.includes(e_to_l))
- self.assertFalse(t_to_.includes(p_to_))
-
- self.assertFalse(_to_h.includes(entire))
- self.assertFalse(p_to_.includes(entire))
- self.assertFalse(d_to_t.includes(entire))
-
def test_repr(self):
ts = next(self.ts_iter)
ts.offset = 1234
@@ -9380,47 +8558,6 @@ class TestShardRange(unittest.TestCase):
self.assertEqual([a1_r1_gp1_p1, a1_r1],
a1_r1_gp1_p1_c1.find_ancestors(all_shard_ranges))
- def test_expand(self):
- bounds = (('', 'd'), ('d', 'k'), ('k', 't'), ('t', ''))
- donors = [
- utils.ShardRange('a/c-%d' % i, utils.Timestamp.now(), b[0], b[1])
- for i, b in enumerate(bounds)
- ]
- acceptor = utils.ShardRange('a/c-acc', utils.Timestamp.now(), 'f', 's')
- self.assertTrue(acceptor.expand(donors[:1]))
- self.assertEqual((utils.ShardRange.MIN, 's'),
- (acceptor.lower, acceptor.upper))
-
- acceptor = utils.ShardRange('a/c-acc', utils.Timestamp.now(), 'f', 's')
- self.assertTrue(acceptor.expand(donors[:2]))
- self.assertEqual((utils.ShardRange.MIN, 's'),
- (acceptor.lower, acceptor.upper))
-
- acceptor = utils.ShardRange('a/c-acc', utils.Timestamp.now(), 'f', 's')
- self.assertTrue(acceptor.expand(donors[1:3]))
- self.assertEqual(('d', 't'),
- (acceptor.lower, acceptor.upper))
-
- acceptor = utils.ShardRange('a/c-acc', utils.Timestamp.now(), 'f', 's')
- self.assertTrue(acceptor.expand(donors))
- self.assertEqual((utils.ShardRange.MIN, utils.ShardRange.MAX),
- (acceptor.lower, acceptor.upper))
-
- acceptor = utils.ShardRange('a/c-acc', utils.Timestamp.now(), 'f', 's')
- self.assertTrue(acceptor.expand(donors[1:2] + donors[3:]))
- self.assertEqual(('d', utils.ShardRange.MAX),
- (acceptor.lower, acceptor.upper))
-
- acceptor = utils.ShardRange('a/c-acc', utils.Timestamp.now(), '', 'd')
- self.assertFalse(acceptor.expand(donors[:1]))
- self.assertEqual((utils.ShardRange.MIN, 'd'),
- (acceptor.lower, acceptor.upper))
-
- acceptor = utils.ShardRange('a/c-acc', utils.Timestamp.now(), 'b', 'v')
- self.assertFalse(acceptor.expand(donors[1:3]))
- self.assertEqual(('b', 'v'),
- (acceptor.lower, acceptor.upper))
-
class TestShardRangeList(unittest.TestCase):
def setUp(self):
@@ -9601,422 +8738,6 @@ class TestShardRangeList(unittest.TestCase):
do_test([utils.ShardRange.ACTIVE]))
-@patch('ctypes.get_errno')
-@patch.object(utils, '_sys_posix_fallocate')
-@patch.object(utils, '_sys_fallocate')
-@patch.object(utils, 'FALLOCATE_RESERVE', 0)
-class TestFallocate(unittest.TestCase):
- def test_fallocate(self, sys_fallocate_mock,
- sys_posix_fallocate_mock, get_errno_mock):
- sys_fallocate_mock.available = True
- sys_fallocate_mock.return_value = 0
-
- utils.fallocate(1234, 5000 * 2 ** 20)
-
- # We can't use sys_fallocate_mock.assert_called_once_with because no
- # two ctypes.c_uint64 objects are equal even if their values are
- # equal. Yes, ctypes.c_uint64(123) != ctypes.c_uint64(123).
- calls = sys_fallocate_mock.mock_calls
- self.assertEqual(len(calls), 1)
- args = calls[0][1]
- self.assertEqual(len(args), 4)
- self.assertEqual(args[0], 1234)
- self.assertEqual(args[1], utils.FALLOC_FL_KEEP_SIZE)
- self.assertEqual(args[2].value, 0)
- self.assertEqual(args[3].value, 5000 * 2 ** 20)
-
- sys_posix_fallocate_mock.assert_not_called()
-
- def test_fallocate_offset(self, sys_fallocate_mock,
- sys_posix_fallocate_mock, get_errno_mock):
- sys_fallocate_mock.available = True
- sys_fallocate_mock.return_value = 0
-
- utils.fallocate(1234, 5000 * 2 ** 20, offset=3 * 2 ** 30)
- calls = sys_fallocate_mock.mock_calls
- self.assertEqual(len(calls), 1)
- args = calls[0][1]
- self.assertEqual(len(args), 4)
- self.assertEqual(args[0], 1234)
- self.assertEqual(args[1], utils.FALLOC_FL_KEEP_SIZE)
- self.assertEqual(args[2].value, 3 * 2 ** 30)
- self.assertEqual(args[3].value, 5000 * 2 ** 20)
-
- sys_posix_fallocate_mock.assert_not_called()
-
- def test_fallocate_fatal_error(self, sys_fallocate_mock,
- sys_posix_fallocate_mock, get_errno_mock):
- sys_fallocate_mock.available = True
- sys_fallocate_mock.return_value = -1
- get_errno_mock.return_value = errno.EIO
-
- with self.assertRaises(OSError) as cm:
- utils.fallocate(1234, 5000 * 2 ** 20)
- self.assertEqual(cm.exception.errno, errno.EIO)
-
- def test_fallocate_silent_errors(self, sys_fallocate_mock,
- sys_posix_fallocate_mock, get_errno_mock):
- sys_fallocate_mock.available = True
- sys_fallocate_mock.return_value = -1
-
- for silent_error in (0, errno.ENOSYS, errno.EOPNOTSUPP, errno.EINVAL):
- get_errno_mock.return_value = silent_error
- try:
- utils.fallocate(1234, 5678)
- except OSError:
- self.fail("fallocate() raised an error on %d" % silent_error)
-
- def test_posix_fallocate_fallback(self, sys_fallocate_mock,
- sys_posix_fallocate_mock,
- get_errno_mock):
- sys_fallocate_mock.available = False
- sys_fallocate_mock.side_effect = NotImplementedError
-
- sys_posix_fallocate_mock.available = True
- sys_posix_fallocate_mock.return_value = 0
-
- utils.fallocate(1234, 567890)
- sys_fallocate_mock.assert_not_called()
-
- calls = sys_posix_fallocate_mock.mock_calls
- self.assertEqual(len(calls), 1)
- args = calls[0][1]
- self.assertEqual(len(args), 3)
- self.assertEqual(args[0], 1234)
- self.assertEqual(args[1].value, 0)
- self.assertEqual(args[2].value, 567890)
-
- def test_posix_fallocate_offset(self, sys_fallocate_mock,
- sys_posix_fallocate_mock, get_errno_mock):
- sys_fallocate_mock.available = False
- sys_fallocate_mock.side_effect = NotImplementedError
-
- sys_posix_fallocate_mock.available = True
- sys_posix_fallocate_mock.return_value = 0
-
- utils.fallocate(1234, 5000 * 2 ** 20, offset=3 * 2 ** 30)
- calls = sys_posix_fallocate_mock.mock_calls
- self.assertEqual(len(calls), 1)
- args = calls[0][1]
- self.assertEqual(len(args), 3)
- self.assertEqual(args[0], 1234)
- self.assertEqual(args[1].value, 3 * 2 ** 30)
- self.assertEqual(args[2].value, 5000 * 2 ** 20)
-
- sys_fallocate_mock.assert_not_called()
-
- def test_no_fallocates_available(self, sys_fallocate_mock,
- sys_posix_fallocate_mock, get_errno_mock):
- sys_fallocate_mock.available = False
- sys_posix_fallocate_mock.available = False
-
- with mock.patch("logging.warning") as warning_mock, \
- mock.patch.object(utils, "_fallocate_warned_about_missing",
- False):
- utils.fallocate(321, 654)
- utils.fallocate(321, 654)
-
- sys_fallocate_mock.assert_not_called()
- sys_posix_fallocate_mock.assert_not_called()
- get_errno_mock.assert_not_called()
-
- self.assertEqual(len(warning_mock.mock_calls), 1)
-
- def test_arg_bounds(self, sys_fallocate_mock,
- sys_posix_fallocate_mock, get_errno_mock):
- sys_fallocate_mock.available = True
- sys_fallocate_mock.return_value = 0
- with self.assertRaises(ValueError):
- utils.fallocate(0, 1 << 64, 0)
- with self.assertRaises(ValueError):
- utils.fallocate(0, 0, -1)
- with self.assertRaises(ValueError):
- utils.fallocate(0, 0, 1 << 64)
- self.assertEqual([], sys_fallocate_mock.mock_calls)
- # sanity check
- utils.fallocate(0, 0, 0)
- self.assertEqual(
- [mock.call(0, utils.FALLOC_FL_KEEP_SIZE, mock.ANY, mock.ANY)],
- sys_fallocate_mock.mock_calls)
- # Go confirm the ctypes values separately; apparently == doesn't
- # work the way you'd expect with ctypes :-/
- self.assertEqual(sys_fallocate_mock.mock_calls[0][1][2].value, 0)
- self.assertEqual(sys_fallocate_mock.mock_calls[0][1][3].value, 0)
- sys_fallocate_mock.reset_mock()
-
- # a negative size is coerced to 0
- utils.fallocate(0, -1, 0)
- self.assertEqual(
- [mock.call(0, utils.FALLOC_FL_KEEP_SIZE, mock.ANY, mock.ANY)],
- sys_fallocate_mock.mock_calls)
- self.assertEqual(sys_fallocate_mock.mock_calls[0][1][2].value, 0)
- self.assertEqual(sys_fallocate_mock.mock_calls[0][1][3].value, 0)
-
-
-@patch.object(os, 'fstatvfs')
-@patch.object(utils, '_sys_fallocate', available=True, return_value=0)
-@patch.object(utils, 'FALLOCATE_RESERVE', 0)
-@patch.object(utils, 'FALLOCATE_IS_PERCENT', False)
-@patch.object(utils, '_fallocate_enabled', True)
-class TestFallocateReserve(unittest.TestCase):
- def _statvfs_result(self, f_frsize, f_bavail):
- # Only 3 values are relevant to us, so use zeros for the rest
- f_blocks = 100
- return posix.statvfs_result((0, f_frsize, f_blocks, 0, f_bavail,
- 0, 0, 0, 0, 0))
-
- def test_disabled(self, sys_fallocate_mock, fstatvfs_mock):
- utils.disable_fallocate()
- utils.fallocate(123, 456)
-
- sys_fallocate_mock.assert_not_called()
- fstatvfs_mock.assert_not_called()
-
- def test_zero_reserve(self, sys_fallocate_mock, fstatvfs_mock):
- utils.fallocate(123, 456)
-
- fstatvfs_mock.assert_not_called()
- self.assertEqual(len(sys_fallocate_mock.mock_calls), 1)
-
- def test_enough_space(self, sys_fallocate_mock, fstatvfs_mock):
- # Want 1024 bytes in reserve plus 1023 allocated, and have 2 blocks
- # of size 1024 free, so succeed
- utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
- utils.config_fallocate_value('1024')
-
- fstatvfs_mock.return_value = self._statvfs_result(1024, 2)
- utils.fallocate(88, 1023)
-
- def test_not_enough_space(self, sys_fallocate_mock, fstatvfs_mock):
- # Want 1024 bytes in reserve plus 1024 allocated, and have 2 blocks
- # of size 1024 free, so fail
- utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
- utils.config_fallocate_value('1024')
-
- fstatvfs_mock.return_value = self._statvfs_result(1024, 2)
- with self.assertRaises(OSError) as catcher:
- utils.fallocate(88, 1024)
- self.assertEqual(
- str(catcher.exception),
- '[Errno %d] FALLOCATE_RESERVE fail 1024 <= 1024'
- % errno.ENOSPC)
- sys_fallocate_mock.assert_not_called()
-
- def test_not_enough_space_large(self, sys_fallocate_mock, fstatvfs_mock):
- # Want 1024 bytes in reserve plus 1GB allocated, and have 2 blocks
- # of size 1024 free, so fail
- utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
- utils.config_fallocate_value('1024')
-
- fstatvfs_mock.return_value = self._statvfs_result(1024, 2)
- with self.assertRaises(OSError) as catcher:
- utils.fallocate(88, 1 << 30)
- self.assertEqual(
- str(catcher.exception),
- '[Errno %d] FALLOCATE_RESERVE fail %g <= 1024'
- % (errno.ENOSPC, ((2 * 1024) - (1 << 30))))
- sys_fallocate_mock.assert_not_called()
-
- def test_enough_space_small_blocks(self, sys_fallocate_mock,
- fstatvfs_mock):
- # Want 1024 bytes in reserve plus 1023 allocated, and have 4 blocks
- # of size 512 free, so succeed
- utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
- utils.config_fallocate_value('1024')
-
- fstatvfs_mock.return_value = self._statvfs_result(512, 4)
- utils.fallocate(88, 1023)
-
- def test_not_enough_space_small_blocks(self, sys_fallocate_mock,
- fstatvfs_mock):
- # Want 1024 bytes in reserve plus 1024 allocated, and have 4 blocks
- # of size 512 free, so fail
- utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
- utils.config_fallocate_value('1024')
-
- fstatvfs_mock.return_value = self._statvfs_result(512, 4)
- with self.assertRaises(OSError) as catcher:
- utils.fallocate(88, 1024)
- self.assertEqual(
- str(catcher.exception),
- '[Errno %d] FALLOCATE_RESERVE fail 1024 <= 1024'
- % errno.ENOSPC)
- sys_fallocate_mock.assert_not_called()
-
- def test_free_space_under_reserve(self, sys_fallocate_mock, fstatvfs_mock):
- # Want 2048 bytes in reserve but have only 3 blocks of size 512, so
- # allocating even 0 bytes fails
- utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
- utils.config_fallocate_value('2048')
-
- fstatvfs_mock.return_value = self._statvfs_result(512, 3)
- with self.assertRaises(OSError) as catcher:
- utils.fallocate(88, 0)
- self.assertEqual(
- str(catcher.exception),
- '[Errno %d] FALLOCATE_RESERVE fail 1536 <= 2048'
- % errno.ENOSPC)
- sys_fallocate_mock.assert_not_called()
-
- def test_all_reserved(self, sys_fallocate_mock, fstatvfs_mock):
- # Filesystem is empty, but our reserve is bigger than the
- # filesystem, so any allocation will fail
- utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
- utils.config_fallocate_value('9999999999999')
-
- fstatvfs_mock.return_value = self._statvfs_result(1024, 100)
- self.assertRaises(OSError, utils.fallocate, 88, 0)
- sys_fallocate_mock.assert_not_called()
-
- def test_enough_space_pct(self, sys_fallocate_mock, fstatvfs_mock):
- # Want 1% reserved, filesystem has 3/100 blocks of size 1024 free
- # and file size is 2047, so succeed
- utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
- utils.config_fallocate_value('1%')
-
- fstatvfs_mock.return_value = self._statvfs_result(1024, 3)
- utils.fallocate(88, 2047)
-
- def test_not_enough_space_pct(self, sys_fallocate_mock, fstatvfs_mock):
- # Want 1% reserved, filesystem has 3/100 blocks of size 1024 free
- # and file size is 2048, so fail
- utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
- utils.config_fallocate_value('1%')
-
- fstatvfs_mock.return_value = self._statvfs_result(1024, 3)
- with self.assertRaises(OSError) as catcher:
- utils.fallocate(88, 2048)
- self.assertEqual(
- str(catcher.exception),
- '[Errno %d] FALLOCATE_RESERVE fail 1 <= 1'
- % errno.ENOSPC)
- sys_fallocate_mock.assert_not_called()
-
- def test_all_space_reserved_pct(self, sys_fallocate_mock, fstatvfs_mock):
- # Filesystem is empty, but our reserve is the whole filesystem, so
- # any allocation will fail
- utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
- utils.config_fallocate_value('100%')
-
- fstatvfs_mock.return_value = self._statvfs_result(1024, 100)
- with self.assertRaises(OSError) as catcher:
- utils.fallocate(88, 0)
- self.assertEqual(
- str(catcher.exception),
- '[Errno %d] FALLOCATE_RESERVE fail 100 <= 100'
- % errno.ENOSPC)
- sys_fallocate_mock.assert_not_called()
-
-
-@patch('ctypes.get_errno')
-@patch.object(utils, '_sys_fallocate')
-class TestPunchHole(unittest.TestCase):
- def test_punch_hole(self, sys_fallocate_mock, get_errno_mock):
- sys_fallocate_mock.available = True
- sys_fallocate_mock.return_value = 0
-
- utils.punch_hole(123, 456, 789)
-
- calls = sys_fallocate_mock.mock_calls
- self.assertEqual(len(calls), 1)
- args = calls[0][1]
- self.assertEqual(len(args), 4)
- self.assertEqual(args[0], 123)
- self.assertEqual(
- args[1], utils.FALLOC_FL_PUNCH_HOLE | utils.FALLOC_FL_KEEP_SIZE)
- self.assertEqual(args[2].value, 456)
- self.assertEqual(args[3].value, 789)
-
- def test_error(self, sys_fallocate_mock, get_errno_mock):
- sys_fallocate_mock.available = True
- sys_fallocate_mock.return_value = -1
- get_errno_mock.return_value = errno.EISDIR
-
- with self.assertRaises(OSError) as cm:
- utils.punch_hole(123, 456, 789)
- self.assertEqual(cm.exception.errno, errno.EISDIR)
-
- def test_arg_bounds(self, sys_fallocate_mock, get_errno_mock):
- sys_fallocate_mock.available = True
- sys_fallocate_mock.return_value = 0
-
- with self.assertRaises(ValueError):
- utils.punch_hole(0, 1, -1)
- with self.assertRaises(ValueError):
- utils.punch_hole(0, 1 << 64, 1)
- with self.assertRaises(ValueError):
- utils.punch_hole(0, -1, 1)
- with self.assertRaises(ValueError):
- utils.punch_hole(0, 1, 0)
- with self.assertRaises(ValueError):
- utils.punch_hole(0, 1, 1 << 64)
- self.assertEqual([], sys_fallocate_mock.mock_calls)
-
- # sanity check
- utils.punch_hole(0, 0, 1)
- self.assertEqual(
- [mock.call(
- 0, utils.FALLOC_FL_PUNCH_HOLE | utils.FALLOC_FL_KEEP_SIZE,
- mock.ANY, mock.ANY)],
- sys_fallocate_mock.mock_calls)
- # Go confirm the ctypes values separately; apparently == doesn't
- # work the way you'd expect with ctypes :-/
- self.assertEqual(sys_fallocate_mock.mock_calls[0][1][2].value, 0)
- self.assertEqual(sys_fallocate_mock.mock_calls[0][1][3].value, 1)
-
- def test_no_fallocate(self, sys_fallocate_mock, get_errno_mock):
- sys_fallocate_mock.available = False
-
- with self.assertRaises(OSError) as cm:
- utils.punch_hole(123, 456, 789)
- self.assertEqual(cm.exception.errno, errno.ENOTSUP)
-
-
-class TestPunchHoleReally(unittest.TestCase):
- def setUp(self):
- if not utils._sys_fallocate.available:
- raise unittest.SkipTest("utils._sys_fallocate not available")
-
- def test_punch_a_hole(self):
- with TemporaryFile() as tf:
- tf.write(b"x" * 64 + b"y" * 64 + b"z" * 64)
- tf.flush()
-
- # knock out the first half of the "y"s
- utils.punch_hole(tf.fileno(), 64, 32)
-
- tf.seek(0)
- contents = tf.read(4096)
- self.assertEqual(
- contents,
- b"x" * 64 + b"\0" * 32 + b"y" * 32 + b"z" * 64)
-
-
-class Test_LibcWrapper(unittest.TestCase):
- def test_available_function(self):
- # This should pretty much always exist
- getpid_wrapper = utils._LibcWrapper('getpid')
- self.assertTrue(getpid_wrapper.available)
- self.assertEqual(getpid_wrapper(), os.getpid())
-
- def test_unavailable_function(self):
- # This won't exist
- no_func_wrapper = utils._LibcWrapper('diffractively_protectorship')
- self.assertFalse(no_func_wrapper.available)
- self.assertRaises(NotImplementedError, no_func_wrapper)
-
- def test_argument_plumbing(self):
- lseek_wrapper = utils._LibcWrapper('lseek')
- with TemporaryFile() as tf:
- tf.write(b"abcdefgh")
- tf.flush()
- lseek_wrapper(tf.fileno(),
- ctypes.c_uint64(3),
- # 0 is SEEK_SET
- 0)
- self.assertEqual(tf.read(100), b"defgh")
-
-
class TestWatchdog(unittest.TestCase):
def test_start_stop(self):
w = utils.Watchdog()
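The ShardRange assertions removed above all follow from one containment rule: a range covers names strictly greater than its lower bound and up to and including its upper bound, with '' meaning unbounded on that side. A minimal sketch of that rule, using a hypothetical in_range() helper rather than Swift's real ShardRange.__contains__:

    # Illustrative only -- a made-up helper, not ShardRange itself.
    # lower is exclusive, upper is inclusive, '' means unbounded.
    def in_range(name, lower, upper):
        if name == '':  # the empty string is not a valid object name
            return False
        above = lower == '' or name > lower
        below = upper == '' or name <= upper
        return above and below

    assert in_range('h', '', 'h')       # upper bound is included
    assert not in_range('h', 'h', 'p')  # lower bound is excluded

This matches, for example, the test_contains expectations that 'h' is in 'a/-h' but not in 'a/h-p'.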
diff --git a/test/unit/common/test_wsgi.py b/test/unit/common/test_wsgi.py
index d43f6730b..5cddc7164 100644
--- a/test/unit/common/test_wsgi.py
+++ b/test/unit/common/test_wsgi.py
@@ -1642,6 +1642,12 @@ class TestPipelineModification(unittest.TestCase):
self.assertIs(app.app.app, app._pipeline_final_app)
self.assertIs(app.app.app, app.app._pipeline_final_app)
self.assertIs(app.app.app, app.app.app._pipeline_final_app)
+ exp_pipeline = [app, app.app, app.app.app]
+ self.assertEqual(exp_pipeline, app._pipeline)
+ self.assertEqual(exp_pipeline, app.app._pipeline)
+ self.assertEqual(exp_pipeline, app.app.app._pipeline)
+ self.assertIs(app._pipeline, app.app._pipeline)
+ self.assertIs(app._pipeline, app.app.app._pipeline)
# make sure you can turn off the pipeline modification if you want
def blow_up(*_, **__):
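The assertions added above check object identity, not mere equality: every element of a proxy pipeline must share the very same _pipeline list and _pipeline_final_app reference. A toy sketch of that invariant, with a made-up Step class standing in for real middleware:

    class Step(object):  # hypothetical stand-in for a middleware/app
        def __init__(self, app=None):
            self.app = app

    final = Step()
    mid = Step(final)
    outer = Step(mid)
    pipeline = [outer, mid, final]
    for element in pipeline:
        element._pipeline = pipeline        # one shared list, no copies
        element._pipeline_final_app = final

    # assertIs-style identity, which is what the added lines verify
    assert outer._pipeline is mid._pipeline is final._pipeline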
diff --git a/test/unit/common/utils/__init__.py b/test/unit/common/utils/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/test/unit/common/utils/__init__.py
diff --git a/test/unit/common/utils/test_libc.py b/test/unit/common/utils/test_libc.py
new file mode 100644
index 000000000..5357ce34d
--- /dev/null
+++ b/test/unit/common/utils/test_libc.py
@@ -0,0 +1,599 @@
+# Copyright (c) 2010-2023 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Tests for swift.common.utils.libc"""
+
+import ctypes
+import errno
+import os
+import platform
+import posix
+import tempfile
+import unittest
+
+import mock
+
+from swift.common.utils import libc
+
+from test.debug_logger import debug_logger
+
+
+@mock.patch('ctypes.get_errno')
+@mock.patch.object(libc, '_sys_posix_fallocate')
+@mock.patch.object(libc, '_sys_fallocate')
+@mock.patch.object(libc, 'FALLOCATE_RESERVE', 0)
+class TestFallocate(unittest.TestCase):
+ def test_config_fallocate_value(self, sys_fallocate_mock,
+ sys_posix_fallocate_mock, get_errno_mock):
+ fallocate_value, is_percent = libc.config_fallocate_value('10%')
+ self.assertEqual(fallocate_value, 10)
+ self.assertTrue(is_percent)
+ fallocate_value, is_percent = libc.config_fallocate_value('10')
+ self.assertEqual(fallocate_value, 10)
+ self.assertFalse(is_percent)
+ try:
+ fallocate_value, is_percent = libc.config_fallocate_value('ab%')
+ except ValueError as err:
+ exc = err
+ self.assertEqual(str(exc), 'Error: ab% is an invalid value for '
+ 'fallocate_reserve.')
+ try:
+ fallocate_value, is_percent = libc.config_fallocate_value('ab')
+ except ValueError as err:
+ exc = err
+ self.assertEqual(str(exc), 'Error: ab is an invalid value for '
+ 'fallocate_reserve.')
+ try:
+ fallocate_value, is_percent = libc.config_fallocate_value('1%%')
+ except ValueError as err:
+ exc = err
+ self.assertEqual(str(exc), 'Error: 1%% is an invalid value for '
+ 'fallocate_reserve.')
+ try:
+ fallocate_value, is_percent = libc.config_fallocate_value('10.0')
+ except ValueError as err:
+ exc = err
+ self.assertEqual(str(exc), 'Error: 10.0 is an invalid value for '
+ 'fallocate_reserve.')
+ fallocate_value, is_percent = libc.config_fallocate_value('10.5%')
+ self.assertEqual(fallocate_value, 10.5)
+ self.assertTrue(is_percent)
+ fallocate_value, is_percent = libc.config_fallocate_value('10.000%')
+ self.assertEqual(fallocate_value, 10.000)
+ self.assertTrue(is_percent)
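+ # Summary of the forms exercised above: a plain integer byte count
+ # ('10') or a percentage ('10%', '10.5%') is accepted; a non-integer
+ # byte count ('10.0') or malformed input ('ab', '1%%') raises
+ # ValueError.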
+
+ def test_fallocate(self, sys_fallocate_mock,
+ sys_posix_fallocate_mock, get_errno_mock):
+ sys_fallocate_mock.available = True
+ sys_fallocate_mock.return_value = 0
+
+ libc.fallocate(1234, 5000 * 2 ** 20)
+
+ # We can't use sys_fallocate_mock.assert_called_once_with because no
+ # two ctypes.c_uint64 objects are equal even if their values are
+ # equal. Yes, ctypes.c_uint64(123) != ctypes.c_uint64(123).
+ calls = sys_fallocate_mock.mock_calls
+ self.assertEqual(len(calls), 1)
+ args = calls[0][1]
+ self.assertEqual(len(args), 4)
+ self.assertEqual(args[0], 1234)
+ self.assertEqual(args[1], libc.FALLOC_FL_KEEP_SIZE)
+ self.assertEqual(args[2].value, 0)
+ self.assertEqual(args[3].value, 5000 * 2 ** 20)
+
+ sys_posix_fallocate_mock.assert_not_called()
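+ # Illustration of the gotcha described above: ctypes scalars define
+ # no __eq__, so == falls back to object identity --
+ # ctypes.c_uint64(123) == ctypes.c_uint64(123) evaluates to False --
+ # hence the argument-by-argument .value checks here.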
+
+ def test_fallocate_offset(self, sys_fallocate_mock,
+ sys_posix_fallocate_mock, get_errno_mock):
+ sys_fallocate_mock.available = True
+ sys_fallocate_mock.return_value = 0
+
+ libc.fallocate(1234, 5000 * 2 ** 20, offset=3 * 2 ** 30)
+ calls = sys_fallocate_mock.mock_calls
+ self.assertEqual(len(calls), 1)
+ args = calls[0][1]
+ self.assertEqual(len(args), 4)
+ self.assertEqual(args[0], 1234)
+ self.assertEqual(args[1], libc.FALLOC_FL_KEEP_SIZE)
+ self.assertEqual(args[2].value, 3 * 2 ** 30)
+ self.assertEqual(args[3].value, 5000 * 2 ** 20)
+
+ sys_posix_fallocate_mock.assert_not_called()
+
+ def test_fallocate_fatal_error(self, sys_fallocate_mock,
+ sys_posix_fallocate_mock, get_errno_mock):
+ sys_fallocate_mock.available = True
+ sys_fallocate_mock.return_value = -1
+ get_errno_mock.return_value = errno.EIO
+
+ with self.assertRaises(OSError) as cm:
+ libc.fallocate(1234, 5000 * 2 ** 20)
+ self.assertEqual(cm.exception.errno, errno.EIO)
+
+ def test_fallocate_silent_errors(self, sys_fallocate_mock,
+ sys_posix_fallocate_mock, get_errno_mock):
+ sys_fallocate_mock.available = True
+ sys_fallocate_mock.return_value = -1
+
+ for silent_error in (0, errno.ENOSYS, errno.EOPNOTSUPP, errno.EINVAL):
+ get_errno_mock.return_value = silent_error
+ try:
+ libc.fallocate(1234, 5678)
+ except OSError:
+ self.fail("fallocate() raised an error on %d" % silent_error)
+
+ def test_posix_fallocate_fallback(self, sys_fallocate_mock,
+ sys_posix_fallocate_mock,
+ get_errno_mock):
+ sys_fallocate_mock.available = False
+ sys_fallocate_mock.side_effect = NotImplementedError
+
+ sys_posix_fallocate_mock.available = True
+ sys_posix_fallocate_mock.return_value = 0
+
+ libc.fallocate(1234, 567890)
+ sys_fallocate_mock.assert_not_called()
+
+ calls = sys_posix_fallocate_mock.mock_calls
+ self.assertEqual(len(calls), 1)
+ args = calls[0][1]
+ self.assertEqual(len(args), 3)
+ self.assertEqual(args[0], 1234)
+ self.assertEqual(args[1].value, 0)
+ self.assertEqual(args[2].value, 567890)
+
+ def test_posix_fallocate_offset(self, sys_fallocate_mock,
+ sys_posix_fallocate_mock, get_errno_mock):
+ sys_fallocate_mock.available = False
+ sys_fallocate_mock.side_effect = NotImplementedError
+
+ sys_posix_fallocate_mock.available = True
+ sys_posix_fallocate_mock.return_value = 0
+
+ libc.fallocate(1234, 5000 * 2 ** 20, offset=3 * 2 ** 30)
+ calls = sys_posix_fallocate_mock.mock_calls
+ self.assertEqual(len(calls), 1)
+ args = calls[0][1]
+ self.assertEqual(len(args), 3)
+ self.assertEqual(args[0], 1234)
+ self.assertEqual(args[1].value, 3 * 2 ** 30)
+ self.assertEqual(args[2].value, 5000 * 2 ** 20)
+
+ sys_fallocate_mock.assert_not_called()
+
+ def test_no_fallocates_available(self, sys_fallocate_mock,
+ sys_posix_fallocate_mock, get_errno_mock):
+ sys_fallocate_mock.available = False
+ sys_posix_fallocate_mock.available = False
+
+ with mock.patch("logging.warning") as warning_mock, \
+ mock.patch.object(libc, "_fallocate_warned_about_missing",
+ False):
+ libc.fallocate(321, 654)
+ libc.fallocate(321, 654)
+
+ sys_fallocate_mock.assert_not_called()
+ sys_posix_fallocate_mock.assert_not_called()
+ get_errno_mock.assert_not_called()
+
+ self.assertEqual(len(warning_mock.mock_calls), 1)
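+ # That is: with neither syscall available, fallocate() logs a single
+ # warning (guarded by the _fallocate_warned_about_missing flag
+ # patched above) and silently no-ops on subsequent calls.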
+
+ def test_arg_bounds(self, sys_fallocate_mock,
+ sys_posix_fallocate_mock, get_errno_mock):
+ sys_fallocate_mock.available = True
+ sys_fallocate_mock.return_value = 0
+ with self.assertRaises(ValueError):
+ libc.fallocate(0, 1 << 64, 0)
+ with self.assertRaises(ValueError):
+ libc.fallocate(0, 0, -1)
+ with self.assertRaises(ValueError):
+ libc.fallocate(0, 0, 1 << 64)
+ self.assertEqual([], sys_fallocate_mock.mock_calls)
+ # sanity check
+ libc.fallocate(0, 0, 0)
+ self.assertEqual(
+ [mock.call(0, libc.FALLOC_FL_KEEP_SIZE, mock.ANY, mock.ANY)],
+ sys_fallocate_mock.mock_calls)
+ # Go confirm the ctypes values separately; apparently == doesn't
+ # work the way you'd expect with ctypes :-/
+ self.assertEqual(sys_fallocate_mock.mock_calls[0][1][2].value, 0)
+ self.assertEqual(sys_fallocate_mock.mock_calls[0][1][3].value, 0)
+ sys_fallocate_mock.reset_mock()
+
+ # a negative size is coerced to 0
+ libc.fallocate(0, -1, 0)
+ self.assertEqual(
+ [mock.call(0, libc.FALLOC_FL_KEEP_SIZE, mock.ANY, mock.ANY)],
+ sys_fallocate_mock.mock_calls)
+ self.assertEqual(sys_fallocate_mock.mock_calls[0][1][2].value, 0)
+ self.assertEqual(sys_fallocate_mock.mock_calls[0][1][3].value, 0)
+
+
+@mock.patch.object(os, 'fstatvfs')
+@mock.patch.object(libc, '_sys_fallocate', available=True, return_value=0)
+@mock.patch.object(libc, 'FALLOCATE_RESERVE', 0)
+@mock.patch.object(libc, 'FALLOCATE_IS_PERCENT', False)
+@mock.patch.object(libc, '_fallocate_enabled', True)
+class TestFallocateReserve(unittest.TestCase):
+ def _statvfs_result(self, f_frsize, f_bavail):
+ # Only 3 values are relevant to us, so use zeros for the rest
+ f_blocks = 100
+ return posix.statvfs_result((0, f_frsize, f_blocks, 0, f_bavail,
+ 0, 0, 0, 0, 0))
+
+ def test_disabled(self, sys_fallocate_mock, fstatvfs_mock):
+ libc.disable_fallocate()
+ libc.fallocate(123, 456)
+
+ sys_fallocate_mock.assert_not_called()
+ fstatvfs_mock.assert_not_called()
+
+ def test_zero_reserve(self, sys_fallocate_mock, fstatvfs_mock):
+ libc.fallocate(123, 456)
+
+ fstatvfs_mock.assert_not_called()
+ self.assertEqual(len(sys_fallocate_mock.mock_calls), 1)
+
+ def test_enough_space(self, sys_fallocate_mock, fstatvfs_mock):
+ # Want 1024 bytes in reserve plus 1023 allocated, and have 2 blocks
+ # of size 1024 free, so succeed
+ libc.FALLOCATE_RESERVE, libc.FALLOCATE_IS_PERCENT = \
+ libc.config_fallocate_value('1024')
+
+ fstatvfs_mock.return_value = self._statvfs_result(1024, 2)
+ libc.fallocate(88, 1023)
+
+ def test_not_enough_space(self, sys_fallocate_mock, fstatvfs_mock):
+ # Want 1024 bytes in reserve plus 1024 allocated, and have 2 blocks
+ # of size 1024 free, so fail
+ libc.FALLOCATE_RESERVE, libc.FALLOCATE_IS_PERCENT = \
+ libc.config_fallocate_value('1024')
+
+ fstatvfs_mock.return_value = self._statvfs_result(1024, 2)
+ with self.assertRaises(OSError) as catcher:
+ libc.fallocate(88, 1024)
+ self.assertEqual(
+ str(catcher.exception),
+ '[Errno %d] FALLOCATE_RESERVE fail 1024 <= 1024'
+ % errno.ENOSPC)
+ sys_fallocate_mock.assert_not_called()
+
+ def test_not_enough_space_large(self, sys_fallocate_mock, fstatvfs_mock):
+ # Want 1024 bytes in reserve plus 1GB allocated, and have 2 blocks
+ # of size 1024 free, so fail
+ libc.FALLOCATE_RESERVE, libc.FALLOCATE_IS_PERCENT = \
+ libc.config_fallocate_value('1024')
+
+ fstatvfs_mock.return_value = self._statvfs_result(1024, 2)
+ with self.assertRaises(OSError) as catcher:
+ libc.fallocate(88, 1 << 30)
+ self.assertEqual(
+ str(catcher.exception),
+ '[Errno %d] FALLOCATE_RESERVE fail %g <= 1024'
+ % (errno.ENOSPC, ((2 * 1024) - (1 << 30))))
+ sys_fallocate_mock.assert_not_called()
+
+ def test_enough_space_small_blocks(self, sys_fallocate_mock,
+ fstatvfs_mock):
+ # Want 1024 bytes in reserve plus 1023 allocated, and have 4 blocks
+ # of size 512 free, so succeed
+ libc.FALLOCATE_RESERVE, libc.FALLOCATE_IS_PERCENT = \
+ libc.config_fallocate_value('1024')
+
+ fstatvfs_mock.return_value = self._statvfs_result(512, 4)
+ libc.fallocate(88, 1023)
+
+ def test_not_enough_space_small_blocks(self, sys_fallocate_mock,
+ fstatvfs_mock):
+ # Want 1024 bytes in reserve plus 1024 allocated, and have 4 blocks
+ # of size 512 free, so fail
+ libc.FALLOCATE_RESERVE, libc.FALLOCATE_IS_PERCENT = \
+ libc.config_fallocate_value('1024')
+
+ fstatvfs_mock.return_value = self._statvfs_result(512, 4)
+ with self.assertRaises(OSError) as catcher:
+ libc.fallocate(88, 1024)
+ self.assertEqual(
+ str(catcher.exception),
+ '[Errno %d] FALLOCATE_RESERVE fail 1024 <= 1024'
+ % errno.ENOSPC)
+ sys_fallocate_mock.assert_not_called()
+
+ def test_free_space_under_reserve(self, sys_fallocate_mock, fstatvfs_mock):
+ # Want 2048 bytes in reserve but have only 3 blocks of size 512, so
+ # allocating even 0 bytes fails
+ libc.FALLOCATE_RESERVE, libc.FALLOCATE_IS_PERCENT = \
+ libc.config_fallocate_value('2048')
+
+ fstatvfs_mock.return_value = self._statvfs_result(512, 3)
+ with self.assertRaises(OSError) as catcher:
+ libc.fallocate(88, 0)
+ self.assertEqual(
+ str(catcher.exception),
+ '[Errno %d] FALLOCATE_RESERVE fail 1536 <= 2048'
+ % errno.ENOSPC)
+ sys_fallocate_mock.assert_not_called()
+
+ def test_all_reserved(self, sys_fallocate_mock, fstatvfs_mock):
+ # Filesystem is empty, but our reserve is bigger than the
+ # filesystem, so any allocation will fail
+ libc.FALLOCATE_RESERVE, libc.FALLOCATE_IS_PERCENT = \
+ libc.config_fallocate_value('9999999999999')
+
+ fstatvfs_mock.return_value = self._statvfs_result(1024, 100)
+ self.assertRaises(OSError, libc.fallocate, 88, 0)
+ sys_fallocate_mock.assert_not_called()
+
+ def test_enough_space_pct(self, sys_fallocate_mock, fstatvfs_mock):
+ # Want 1% reserved, filesystem has 3/100 blocks of size 1024 free
+ # and file size is 2047, so succeed
+ libc.FALLOCATE_RESERVE, libc.FALLOCATE_IS_PERCENT = \
+ libc.config_fallocate_value('1%')
+
+ fstatvfs_mock.return_value = self._statvfs_result(1024, 3)
+ libc.fallocate(88, 2047)
+
+ def test_not_enough_space_pct(self, sys_fallocate_mock, fstatvfs_mock):
+ # Want 1% reserved, filesystem has 3/100 blocks of size 1024 free
+ # and file size is 2048, so fail
+ libc.FALLOCATE_RESERVE, libc.FALLOCATE_IS_PERCENT = \
+ libc.config_fallocate_value('1%')
+
+ fstatvfs_mock.return_value = self._statvfs_result(1024, 3)
+ with self.assertRaises(OSError) as catcher:
+ libc.fallocate(88, 2048)
+ self.assertEqual(
+ str(catcher.exception),
+ '[Errno %d] FALLOCATE_RESERVE fail 1 <= 1'
+ % errno.ENOSPC)
+ sys_fallocate_mock.assert_not_called()
+
+ def test_all_space_reserved_pct(self, sys_fallocate_mock, fstatvfs_mock):
+ # Filesystem is empty, but our reserve is the whole filesystem, so
+ # any allocation will fail
+ libc.FALLOCATE_RESERVE, libc.FALLOCATE_IS_PERCENT = \
+ libc.config_fallocate_value('100%')
+
+ fstatvfs_mock.return_value = self._statvfs_result(1024, 100)
+ with self.assertRaises(OSError) as catcher:
+ libc.fallocate(88, 0)
+ self.assertEqual(
+ str(catcher.exception),
+ '[Errno %d] FALLOCATE_RESERVE fail 100 <= 100'
+ % errno.ENOSPC)
+ sys_fallocate_mock.assert_not_called()
+
+
+@mock.patch('ctypes.get_errno')
+@mock.patch.object(libc, '_sys_fallocate')
+class TestPunchHole(unittest.TestCase):
+ def test_punch_hole(self, sys_fallocate_mock, get_errno_mock):
+ sys_fallocate_mock.available = True
+ sys_fallocate_mock.return_value = 0
+
+ libc.punch_hole(123, 456, 789)
+
+ calls = sys_fallocate_mock.mock_calls
+ self.assertEqual(len(calls), 1)
+ args = calls[0][1]
+ self.assertEqual(len(args), 4)
+ self.assertEqual(args[0], 123)
+ self.assertEqual(
+ args[1], libc.FALLOC_FL_PUNCH_HOLE | libc.FALLOC_FL_KEEP_SIZE)
+ self.assertEqual(args[2].value, 456)
+ self.assertEqual(args[3].value, 789)
+
+ def test_error(self, sys_fallocate_mock, get_errno_mock):
+ sys_fallocate_mock.available = True
+ sys_fallocate_mock.return_value = -1
+ get_errno_mock.return_value = errno.EISDIR
+
+ with self.assertRaises(OSError) as cm:
+ libc.punch_hole(123, 456, 789)
+ self.assertEqual(cm.exception.errno, errno.EISDIR)
+
+ def test_arg_bounds(self, sys_fallocate_mock, get_errno_mock):
+ sys_fallocate_mock.available = True
+ sys_fallocate_mock.return_value = 0
+
+ with self.assertRaises(ValueError):
+ libc.punch_hole(0, 1, -1)
+ with self.assertRaises(ValueError):
+ libc.punch_hole(0, 1 << 64, 1)
+ with self.assertRaises(ValueError):
+ libc.punch_hole(0, -1, 1)
+ with self.assertRaises(ValueError):
+ libc.punch_hole(0, 1, 0)
+ with self.assertRaises(ValueError):
+ libc.punch_hole(0, 1, 1 << 64)
+ self.assertEqual([], sys_fallocate_mock.mock_calls)
+
+ # sanity check
+ libc.punch_hole(0, 0, 1)
+ self.assertEqual(
+ [mock.call(
+ 0, libc.FALLOC_FL_PUNCH_HOLE | libc.FALLOC_FL_KEEP_SIZE,
+ mock.ANY, mock.ANY)],
+ sys_fallocate_mock.mock_calls)
+ # Go confirm the ctypes values separately; apparently == doesn't
+ # work the way you'd expect with ctypes :-/
+ self.assertEqual(sys_fallocate_mock.mock_calls[0][1][2].value, 0)
+ self.assertEqual(sys_fallocate_mock.mock_calls[0][1][3].value, 1)
+
+ def test_no_fallocate(self, sys_fallocate_mock, get_errno_mock):
+ sys_fallocate_mock.available = False
+
+ with self.assertRaises(OSError) as cm:
+ libc.punch_hole(123, 456, 789)
+ self.assertEqual(cm.exception.errno, errno.ENOTSUP)
+
+
+class TestPunchHoleReally(unittest.TestCase):
+ def setUp(self):
+ if not libc._sys_fallocate.available:
+ raise unittest.SkipTest("libc._sys_fallocate not available")
+
+ def test_punch_a_hole(self):
+ with tempfile.TemporaryFile() as tf:
+ tf.write(b"x" * 64 + b"y" * 64 + b"z" * 64)
+ tf.flush()
+
+ # knock out the first half of the "y"s
+ libc.punch_hole(tf.fileno(), 64, 32)
+
+ tf.seek(0)
+ contents = tf.read(4096)
+ self.assertEqual(
+ contents,
+ b"x" * 64 + b"\0" * 32 + b"y" * 32 + b"z" * 64)
+
+
+class Test_LibcWrapper(unittest.TestCase):
+ def test_available_function(self):
+ # This should pretty much always exist
+ getpid_wrapper = libc._LibcWrapper('getpid')
+ self.assertTrue(getpid_wrapper.available)
+ self.assertEqual(getpid_wrapper(), os.getpid())
+
+ def test_unavailable_function(self):
+ # This won't exist
+ no_func_wrapper = libc._LibcWrapper('diffractively_protectorship')
+ self.assertFalse(no_func_wrapper.available)
+ self.assertRaises(NotImplementedError, no_func_wrapper)
+
+ def test_argument_plumbing(self):
+ lseek_wrapper = libc._LibcWrapper('lseek')
+ with tempfile.TemporaryFile() as tf:
+ tf.write(b"abcdefgh")
+ tf.flush()
+ lseek_wrapper(tf.fileno(),
+ ctypes.c_uint64(3),
+ # 0 is SEEK_SET
+ 0)
+ self.assertEqual(tf.read(100), b"defgh")
+
+
+class TestModifyPriority(unittest.TestCase):
+ def test_modify_priority(self):
+ pid = os.getpid()
+ logger = debug_logger()
+ called = {}
+
+ def _fake_setpriority(*args):
+ called['setpriority'] = args
+
+ def _fake_syscall(*args):
+ called['syscall'] = args
+
+ # Skip unless the current architecture supports changing I/O priority
+ try:
+ libc.NR_ioprio_set()
+ except OSError as e:
+ raise unittest.SkipTest(e)
+
+ with mock.patch('swift.common.utils.libc._libc_setpriority',
+ _fake_setpriority), \
+ mock.patch('swift.common.utils.libc._posix_syscall',
+ _fake_syscall):
+ called = {}
+ # not set / default
+ libc.modify_priority({}, logger)
+ self.assertEqual(called, {})
+ called = {}
+ # just nice
+ libc.modify_priority({'nice_priority': '1'}, logger)
+ self.assertEqual(called, {'setpriority': (0, pid, 1)})
+ called = {}
+ # just ionice class uses default priority 0
+ libc.modify_priority({'ionice_class': 'IOPRIO_CLASS_RT'}, logger)
+ architecture = os.uname()[4]
+ arch_bits = platform.architecture()[0]
+ if architecture == 'x86_64' and arch_bits == '64bit':
+ self.assertEqual(called, {'syscall': (251, 1, pid, 1 << 13)})
+ elif architecture == 'aarch64' and arch_bits == '64bit':
+ self.assertEqual(called, {'syscall': (30, 1, pid, 1 << 13)})
+ else:
+ self.fail("Unexpected call: %r" % called)
+ called = {}
+ # ionice priority alone (without a class) is ignored
+ libc.modify_priority({'ionice_priority': '4'}, logger)
+ self.assertEqual(called, {})
+ called = {}
+ # bad ionice class
+ libc.modify_priority({'ionice_class': 'class_foo'}, logger)
+ self.assertEqual(called, {})
+ called = {}
+ # ionice class & priority
+ libc.modify_priority({
+ 'ionice_class': 'IOPRIO_CLASS_BE',
+ 'ionice_priority': '4',
+ }, logger)
+ if architecture == 'x86_64' and arch_bits == '64bit':
+ self.assertEqual(called, {
+ 'syscall': (251, 1, pid, 2 << 13 | 4)
+ })
+ elif architecture == 'aarch64' and arch_bits == '64bit':
+ self.assertEqual(called, {
+ 'syscall': (30, 1, pid, 2 << 13 | 4)
+ })
+ else:
+ self.fail("Unexpected call: %r" % called)
+ called = {}
+ # all
+ libc.modify_priority({
+ 'nice_priority': '-15',
+ 'ionice_class': 'IOPRIO_CLASS_IDLE',
+ 'ionice_priority': '6',
+ }, logger)
+ if architecture == 'x86_64' and arch_bits == '64bit':
+ self.assertEqual(called, {
+ 'setpriority': (0, pid, -15),
+ 'syscall': (251, 1, pid, 3 << 13 | 6),
+ })
+ elif architecture == 'aarch64' and arch_bits == '64bit':
+ self.assertEqual(called, {
+ 'setpriority': (0, pid, -15),
+ 'syscall': (30, 1, pid, 3 << 13 | 6),
+ })
+ else:
+ self.fail("Unexpected call: %r" % called)
+
+ def test__NR_ioprio_set(self):
+ with mock.patch('os.uname', return_value=('', '', '', '', 'x86_64')), \
+ mock.patch('platform.architecture',
+ return_value=('64bit', '')):
+ self.assertEqual(251, libc.NR_ioprio_set())
+
+ with mock.patch('os.uname', return_value=('', '', '', '', 'x86_64')), \
+ mock.patch('platform.architecture',
+ return_value=('32bit', '')):
+ self.assertRaises(OSError, libc.NR_ioprio_set)
+
+ with mock.patch('os.uname',
+ return_value=('', '', '', '', 'aarch64')), \
+ mock.patch('platform.architecture',
+ return_value=('64bit', '')):
+ self.assertEqual(30, libc.NR_ioprio_set())
+
+ with mock.patch('os.uname',
+ return_value=('', '', '', '', 'aarch64')), \
+ mock.patch('platform.architecture',
+ return_value=('32bit', '')):
+ self.assertRaises(OSError, libc.NR_ioprio_set)
+
+ with mock.patch('os.uname', return_value=('', '', '', '', 'alpha')), \
+ mock.patch('platform.architecture',
+ return_value=('64bit', '')):
+ self.assertRaises(OSError, libc.NR_ioprio_set)
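All of the FALLOCATE_RESERVE failures asserted above reduce to one comparison, visible in the expected error strings: the free space left after the allocation must stay strictly above the configured reserve. A rough sketch of that arithmetic, inferred from the tests rather than copied from swift/common/utils/libc.py:

    import errno
    import os

    def check_reserve(fd, size, reserve, is_percent):
        st = os.fstatvfs(fd)
        free = st.f_frsize * st.f_bavail - size  # bytes left afterwards
        if is_percent:
            free = free * 100.0 / (st.f_frsize * st.f_blocks)
        if free <= reserve:
            raise OSError(errno.ENOSPC,
                          'FALLOCATE_RESERVE fail %g <= %g'
                          % (free, reserve))

For example, with a 2048-byte reserve and three free 512-byte blocks, free is 1536 <= 2048, which is exactly the message test_free_space_under_reserve expects.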
diff --git a/test/unit/common/utils/test_timestamp.py b/test/unit/common/utils/test_timestamp.py
new file mode 100644
index 000000000..23f2535e4
--- /dev/null
+++ b/test/unit/common/utils/test_timestamp.py
@@ -0,0 +1,882 @@
+# Copyright (c) 2010-2023 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Tests for swift.common.utils.timestamp"""
+import random
+import time
+import unittest
+
+import mock
+
+from swift.common.utils import timestamp
+
+
+class TestTimestamp(unittest.TestCase):
+ """Tests for swift.common.utils.timestamp.Timestamp"""
+
+ def test_invalid_input(self):
+ with self.assertRaises(ValueError):
+ timestamp.Timestamp(time.time(), offset=-1)
+ with self.assertRaises(ValueError):
+ timestamp.Timestamp('123.456_78_90')
+
+ def test_invalid_string_conversion(self):
+ t = timestamp.Timestamp.now()
+ self.assertRaises(TypeError, str, t)
+
+ def test_offset_limit(self):
+ t = 1417462430.78693
+ # can't have an offset above MAX_OFFSET
+ with self.assertRaises(ValueError):
+ timestamp.Timestamp(t, offset=timestamp.MAX_OFFSET + 1)
+ # exactly max offset is fine
+ ts = timestamp.Timestamp(t, offset=timestamp.MAX_OFFSET)
+ self.assertEqual(ts.internal, '1417462430.78693_ffffffffffffffff')
+ # but you can't offset it further
+ with self.assertRaises(ValueError):
+ timestamp.Timestamp(ts.internal, offset=1)
+ # unless you start below it
+ ts = timestamp.Timestamp(t, offset=timestamp.MAX_OFFSET - 1)
+ self.assertEqual(timestamp.Timestamp(ts.internal, offset=1),
+ '1417462430.78693_ffffffffffffffff')
+
+ def test_normal_format_no_offset(self):
+ expected = '1402436408.91203'
+ test_values = (
+ '1402436408.91203',
+ '1402436408.91203_00000000',
+ '1402436408.912030000',
+ '1402436408.912030000_0000000000000',
+ '000001402436408.912030000',
+ '000001402436408.912030000_0000000000',
+ 1402436408.91203,
+ 1402436408.912029,
+ 1402436408.9120300000000000,
+ 1402436408.91202999999999999,
+ timestamp.Timestamp(1402436408.91203),
+ timestamp.Timestamp(1402436408.91203, offset=0),
+ timestamp.Timestamp(1402436408.912029),
+ timestamp.Timestamp(1402436408.912029, offset=0),
+ timestamp.Timestamp('1402436408.91203'),
+ timestamp.Timestamp('1402436408.91203', offset=0),
+ timestamp.Timestamp('1402436408.91203_00000000'),
+ timestamp.Timestamp('1402436408.91203_00000000', offset=0),
+ )
+ for value in test_values:
+ ts = timestamp.Timestamp(value)
+ self.assertEqual(ts.normal, expected)
+ # timestamp instance can also compare to string or float
+ self.assertEqual(ts, expected)
+ self.assertEqual(ts, float(expected))
+ self.assertEqual(ts, timestamp.normalize_timestamp(expected))
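+ # For illustration: every variant above collapses to the same
+ # zero-padded, five-decimal normal form, roughly
+ # '%016.5f' % 1402436408.912029 -> '1402436408.91203'
+ # (padding only becomes visible for small values, e.g. 0 ->
+ # '0000000000.00000').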
+
+ def test_isoformat(self):
+ expected = '2014-06-10T22:47:32.054580'
+ test_values = (
+ '1402440452.05458',
+ '1402440452.054579',
+ '1402440452.05458_00000000',
+ '1402440452.054579_00000000',
+ '1402440452.054580000',
+ '1402440452.054579999',
+ '1402440452.054580000_0000000000000',
+ '1402440452.054579999_0000ff00',
+ '000001402440452.054580000',
+ '000001402440452.0545799',
+ '000001402440452.054580000_0000000000',
+ '000001402440452.054579999999_00000fffff',
+ 1402440452.05458,
+ 1402440452.054579,
+ 1402440452.0545800000000000,
+ 1402440452.054579999,
+ timestamp.Timestamp(1402440452.05458),
+ timestamp.Timestamp(1402440452.0545799),
+ timestamp.Timestamp(1402440452.05458, offset=0),
+ timestamp.Timestamp(1402440452.05457999999, offset=0),
+ timestamp.Timestamp(1402440452.05458, offset=100),
+ timestamp.Timestamp(1402440452.054579, offset=100),
+ timestamp.Timestamp('1402440452.05458'),
+ timestamp.Timestamp('1402440452.054579999'),
+ timestamp.Timestamp('1402440452.05458', offset=0),
+ timestamp.Timestamp('1402440452.054579', offset=0),
+ timestamp.Timestamp('1402440452.05458', offset=300),
+ timestamp.Timestamp('1402440452.05457999', offset=300),
+ timestamp.Timestamp('1402440452.05458_00000000'),
+ timestamp.Timestamp('1402440452.05457999_00000000'),
+ timestamp.Timestamp('1402440452.05458_00000000', offset=0),
+ timestamp.Timestamp('1402440452.05457999_00000aaa', offset=0),
+ timestamp.Timestamp('1402440452.05458_00000000', offset=400),
+ timestamp.Timestamp('1402440452.054579_0a', offset=400),
+ )
+ for value in test_values:
+ self.assertEqual(timestamp.Timestamp(value).isoformat, expected)
+ expected = '1970-01-01T00:00:00.000000'
+ test_values = (
+ '0',
+ '0000000000.00000',
+ '0000000000.00000_ffffffffffff',
+ 0,
+ 0.0,
+ )
+ for value in test_values:
+ self.assertEqual(timestamp.Timestamp(value).isoformat, expected)
+
+ def test_from_isoformat(self):
+ ts = timestamp.Timestamp.from_isoformat('2014-06-10T22:47:32.054580')
+ self.assertIsInstance(ts, timestamp.Timestamp)
+ self.assertEqual(1402440452.05458, float(ts))
+ self.assertEqual('2014-06-10T22:47:32.054580', ts.isoformat)
+
+ ts = timestamp.Timestamp.from_isoformat('1970-01-01T00:00:00.000000')
+ self.assertIsInstance(ts, timestamp.Timestamp)
+ self.assertEqual(0.0, float(ts))
+ self.assertEqual('1970-01-01T00:00:00.000000', ts.isoformat)
+
+ ts = timestamp.Timestamp(1402440452.05458)
+ self.assertIsInstance(ts, timestamp.Timestamp)
+ self.assertEqual(ts, timestamp.Timestamp.from_isoformat(ts.isoformat))
+
+ def test_ceil(self):
+ self.assertEqual(0.0, timestamp.Timestamp(0).ceil())
+ self.assertEqual(1.0, timestamp.Timestamp(0.00001).ceil())
+ self.assertEqual(1.0, timestamp.Timestamp(0.000001).ceil())
+ self.assertEqual(12345678.0, timestamp.Timestamp(12345678.0).ceil())
+ self.assertEqual(12345679.0,
+ timestamp.Timestamp(12345678.000001).ceil())
+
+ def test_not_equal(self):
+ ts = '1402436408.91203_0000000000000001'
+ test_values = (
+ timestamp.Timestamp('1402436408.91203_0000000000000002'),
+ timestamp.Timestamp('1402436408.91203'),
+ timestamp.Timestamp(1402436408.91203),
+ timestamp.Timestamp(1402436408.91204),
+ timestamp.Timestamp(1402436408.91203, offset=0),
+ timestamp.Timestamp(1402436408.91203, offset=2),
+ )
+ for value in test_values:
+ self.assertTrue(value != ts)
+
+ self.assertIs(True, timestamp.Timestamp(ts) == ts) # sanity
+ self.assertIs(False,
+ timestamp.Timestamp(ts) != timestamp.Timestamp(ts))
+ self.assertIs(False, timestamp.Timestamp(ts) != ts)
+ self.assertIs(False, timestamp.Timestamp(ts) is None)
+ self.assertIs(True, timestamp.Timestamp(ts) is not None)
+
+ def test_no_force_internal_no_offset(self):
+ """Test that internal is the same as normal with no offset"""
+ with mock.patch('swift.common.utils.timestamp.FORCE_INTERNAL',
+ new=False):
+ self.assertEqual(timestamp.Timestamp(0).internal,
+ '0000000000.00000')
+ self.assertEqual(timestamp.Timestamp(1402437380.58186).internal,
+ '1402437380.58186')
+ self.assertEqual(timestamp.Timestamp(1402437380.581859).internal,
+ '1402437380.58186')
+ self.assertEqual(timestamp.Timestamp(0).internal,
+ timestamp.normalize_timestamp(0))
+
+ def test_no_force_internal_with_offset(self):
+ """Test that internal always includes the offset if significant"""
+ with mock.patch('swift.common.utils.timestamp.FORCE_INTERNAL',
+ new=False):
+ self.assertEqual(timestamp.Timestamp(0, offset=1).internal,
+ '0000000000.00000_0000000000000001')
+ self.assertEqual(
+ timestamp.Timestamp(1402437380.58186, offset=16).internal,
+ '1402437380.58186_0000000000000010')
+ self.assertEqual(
+ timestamp.Timestamp(1402437380.581859, offset=240).internal,
+ '1402437380.58186_00000000000000f0')
+ self.assertEqual(
+ timestamp.Timestamp('1402437380.581859_00000001',
+ offset=240).internal,
+ '1402437380.58186_00000000000000f1')
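+ # (For reference: the suffix is the offset rendered as sixteen
+ # lowercase hex digits -- 16 -> '_0000000000000010',
+ # 240 -> '_00000000000000f0' -- so offsets order correctly even when
+ # compared as strings.)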
+
+ def test_force_internal(self):
+ """Test that internal always includes the offset if forced"""
+ with mock.patch('swift.common.utils.timestamp.FORCE_INTERNAL',
+ new=True):
+ self.assertEqual(timestamp.Timestamp(0).internal,
+ '0000000000.00000_0000000000000000')
+ self.assertEqual(timestamp.Timestamp(1402437380.58186).internal,
+ '1402437380.58186_0000000000000000')
+ self.assertEqual(timestamp.Timestamp(1402437380.581859).internal,
+ '1402437380.58186_0000000000000000')
+ self.assertEqual(timestamp.Timestamp(0, offset=1).internal,
+ '0000000000.00000_0000000000000001')
+ self.assertEqual(
+ timestamp.Timestamp(1402437380.58186, offset=16).internal,
+ '1402437380.58186_0000000000000010')
+ self.assertEqual(
+ timestamp.Timestamp(1402437380.581859, offset=16).internal,
+ '1402437380.58186_0000000000000010')
+
+ def test_internal_format_no_offset(self):
+ expected = '1402436408.91203_0000000000000000'
+ test_values = (
+ '1402436408.91203',
+ '1402436408.91203_00000000',
+ '1402436408.912030000',
+ '1402436408.912030000_0000000000000',
+ '000001402436408.912030000',
+ '000001402436408.912030000_0000000000',
+ 1402436408.91203,
+ 1402436408.9120300000000000,
+ 1402436408.912029,
+ 1402436408.912029999999999999,
+ timestamp.Timestamp(1402436408.91203),
+ timestamp.Timestamp(1402436408.91203, offset=0),
+ timestamp.Timestamp(1402436408.912029),
+ timestamp.Timestamp(1402436408.91202999999999999, offset=0),
+ timestamp.Timestamp('1402436408.91203'),
+ timestamp.Timestamp('1402436408.91203', offset=0),
+ timestamp.Timestamp('1402436408.912029'),
+ timestamp.Timestamp('1402436408.912029', offset=0),
+ timestamp.Timestamp('1402436408.912029999999999'),
+ timestamp.Timestamp('1402436408.912029999999999', offset=0),
+ )
+ for value in test_values:
+ # timestamp instance is always equivalent
+ self.assertEqual(timestamp.Timestamp(value), expected)
+ if timestamp.FORCE_INTERNAL:
+ # the FORCE_INTERNAL flag makes the internal format always
+ # include the offset portion of the timestamp even when it's
+ # not significant and would be bad during upgrades
+ self.assertEqual(timestamp.Timestamp(value).internal, expected)
+ else:
+ # unless we FORCE_INTERNAL, when there's no offset the
+ # internal format is equivalent to the normalized format
+ self.assertEqual(timestamp.Timestamp(value).internal,
+ '1402436408.91203')
+
+ def test_internal_format_with_offset(self):
+ expected = '1402436408.91203_00000000000000f0'
+ test_values = (
+ '1402436408.91203_000000f0',
+ u'1402436408.91203_000000f0',
+ b'1402436408.91203_000000f0',
+ '1402436408.912030000_0000000000f0',
+ '1402436408.912029_000000f0',
+ '1402436408.91202999999_0000000000f0',
+ '000001402436408.912030000_000000000f0',
+ '000001402436408.9120299999_000000000f0',
+ timestamp.Timestamp(1402436408.91203, offset=240),
+ timestamp.Timestamp(1402436408.912029, offset=240),
+ timestamp.Timestamp('1402436408.91203', offset=240),
+ timestamp.Timestamp('1402436408.91203_00000000', offset=240),
+ timestamp.Timestamp('1402436408.91203_0000000f', offset=225),
+ timestamp.Timestamp('1402436408.9120299999', offset=240),
+ timestamp.Timestamp('1402436408.9120299999_00000000', offset=240),
+ timestamp.Timestamp('1402436408.9120299999_00000010', offset=224),
+ )
+ for value in test_values:
+ ts = timestamp.Timestamp(value)
+ self.assertEqual(ts.internal, expected)
+ # can compare with offset if the string is internalized
+ self.assertEqual(ts, expected)
+ # if comparison value only includes the normalized portion and the
+ # timestamp includes an offset, it is considered greater
+ normal = timestamp.Timestamp(expected).normal
+ self.assertTrue(ts > normal,
+ '%r is not bigger than %r given %r' % (
+ ts, normal, value))
+ self.assertTrue(ts > float(normal),
+ '%r is not bigger than %f given %r' % (
+ ts, float(normal), value))
+
+ def test_short_format_with_offset(self):
+ expected = '1402436408.91203_f0'
+ ts = timestamp.Timestamp(1402436408.91203, 0xf0)
+ self.assertEqual(expected, ts.short)
+
+ expected = '1402436408.91203'
+ ts = timestamp.Timestamp(1402436408.91203)
+ self.assertEqual(expected, ts.short)
+
+ def test_raw(self):
+ expected = 140243640891203
+ ts = timestamp.Timestamp(1402436408.91203)
+ self.assertEqual(expected, ts.raw)
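+        # (raw is the time scaled by 100000, i.e. 10-microsecond units)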
+
+ # 'raw' does not include offset
+ ts = timestamp.Timestamp(1402436408.91203, 0xf0)
+ self.assertEqual(expected, ts.raw)
+
+ def test_delta(self):
+        def _assertWithinBounds(expected, ts):
+            tolerance = 0.00001
+            minimum = expected - tolerance
+            maximum = expected + tolerance
+            self.assertTrue(float(ts) > minimum)
+            self.assertTrue(float(ts) < maximum)
+
+ ts = timestamp.Timestamp(1402436408.91203, delta=100)
+ _assertWithinBounds(1402436408.91303, ts)
+ self.assertEqual(140243640891303, ts.raw)
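+        # (a delta of 100 raw units is exactly one millisecond)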
+
+ ts = timestamp.Timestamp(1402436408.91203, delta=-100)
+ _assertWithinBounds(1402436408.91103, ts)
+ self.assertEqual(140243640891103, ts.raw)
+
+ ts = timestamp.Timestamp(1402436408.91203, delta=0)
+ _assertWithinBounds(1402436408.91203, ts)
+ self.assertEqual(140243640891203, ts.raw)
+
+ # delta is independent of offset
+ ts = timestamp.Timestamp(1402436408.91203, offset=42, delta=100)
+ self.assertEqual(140243640891303, ts.raw)
+ self.assertEqual(42, ts.offset)
+
+ # cannot go negative
+ self.assertRaises(ValueError, timestamp.Timestamp, 1402436408.91203,
+ delta=-140243640891203)
+
+ def test_int(self):
+ expected = 1402437965
+ test_values = (
+ '1402437965.91203',
+ '1402437965.91203_00000000',
+ '1402437965.912030000',
+ '1402437965.912030000_0000000000000',
+ '000001402437965.912030000',
+ '000001402437965.912030000_0000000000',
+ 1402437965.91203,
+ 1402437965.9120300000000000,
+ 1402437965.912029,
+ 1402437965.912029999999999999,
+ timestamp.Timestamp(1402437965.91203),
+ timestamp.Timestamp(1402437965.91203, offset=0),
+ timestamp.Timestamp(1402437965.91203, offset=500),
+ timestamp.Timestamp(1402437965.912029),
+ timestamp.Timestamp(1402437965.91202999999999999, offset=0),
+ timestamp.Timestamp(1402437965.91202999999999999, offset=300),
+ timestamp.Timestamp('1402437965.91203'),
+ timestamp.Timestamp('1402437965.91203', offset=0),
+ timestamp.Timestamp('1402437965.91203', offset=400),
+ timestamp.Timestamp('1402437965.912029'),
+ timestamp.Timestamp('1402437965.912029', offset=0),
+ timestamp.Timestamp('1402437965.912029', offset=200),
+ timestamp.Timestamp('1402437965.912029999999999'),
+ timestamp.Timestamp('1402437965.912029999999999', offset=0),
+ timestamp.Timestamp('1402437965.912029999999999', offset=100),
+ )
+ for value in test_values:
+ ts = timestamp.Timestamp(value)
+ self.assertEqual(int(ts), expected)
+ self.assertTrue(ts > expected)
+
+ def test_float(self):
+ expected = 1402438115.91203
+ test_values = (
+ '1402438115.91203',
+ '1402438115.91203_00000000',
+ '1402438115.912030000',
+ '1402438115.912030000_0000000000000',
+ '000001402438115.912030000',
+ '000001402438115.912030000_0000000000',
+ 1402438115.91203,
+ 1402438115.9120300000000000,
+ 1402438115.912029,
+ 1402438115.912029999999999999,
+ timestamp.Timestamp(1402438115.91203),
+ timestamp.Timestamp(1402438115.91203, offset=0),
+ timestamp.Timestamp(1402438115.91203, offset=500),
+ timestamp.Timestamp(1402438115.912029),
+ timestamp.Timestamp(1402438115.91202999999999999, offset=0),
+ timestamp.Timestamp(1402438115.91202999999999999, offset=300),
+ timestamp.Timestamp('1402438115.91203'),
+ timestamp.Timestamp('1402438115.91203', offset=0),
+ timestamp.Timestamp('1402438115.91203', offset=400),
+ timestamp.Timestamp('1402438115.912029'),
+ timestamp.Timestamp('1402438115.912029', offset=0),
+ timestamp.Timestamp('1402438115.912029', offset=200),
+ timestamp.Timestamp('1402438115.912029999999999'),
+ timestamp.Timestamp('1402438115.912029999999999', offset=0),
+ timestamp.Timestamp('1402438115.912029999999999', offset=100),
+ )
+ tolerance = 0.00001
+ minimum = expected - tolerance
+ maximum = expected + tolerance
+ for value in test_values:
+ ts = timestamp.Timestamp(value)
+ self.assertTrue(float(ts) > minimum,
+ '%f is not bigger than %f given %r' % (
+ ts, minimum, value))
+ self.assertTrue(float(ts) < maximum,
+ '%f is not smaller than %f given %r' % (
+ ts, maximum, value))
+ # direct comparison of timestamp works too
+ self.assertTrue(ts > minimum,
+ '%s is not bigger than %f given %r' % (
+ ts.normal, minimum, value))
+ self.assertTrue(ts < maximum,
+ '%s is not smaller than %f given %r' % (
+ ts.normal, maximum, value))
+ # ... even against strings
+ self.assertTrue(ts > '%f' % minimum,
+ '%s is not bigger than %s given %r' % (
+ ts.normal, minimum, value))
+ self.assertTrue(ts < '%f' % maximum,
+ '%s is not smaller than %s given %r' % (
+ ts.normal, maximum, value))
+
+ def test_false(self):
+ self.assertFalse(timestamp.Timestamp(0))
+ self.assertFalse(timestamp.Timestamp(0, offset=0))
+ self.assertFalse(timestamp.Timestamp('0'))
+ self.assertFalse(timestamp.Timestamp('0', offset=0))
+ self.assertFalse(timestamp.Timestamp(0.0))
+ self.assertFalse(timestamp.Timestamp(0.0, offset=0))
+ self.assertFalse(timestamp.Timestamp('0.0'))
+ self.assertFalse(timestamp.Timestamp('0.0', offset=0))
+ self.assertFalse(timestamp.Timestamp(00000000.00000000))
+ self.assertFalse(timestamp.Timestamp(00000000.00000000, offset=0))
+ self.assertFalse(timestamp.Timestamp('00000000.00000000'))
+ self.assertFalse(timestamp.Timestamp('00000000.00000000', offset=0))
+
+ def test_true(self):
+ self.assertTrue(timestamp.Timestamp(1))
+ self.assertTrue(timestamp.Timestamp(1, offset=1))
+ self.assertTrue(timestamp.Timestamp(0, offset=1))
+ self.assertTrue(timestamp.Timestamp('1'))
+ self.assertTrue(timestamp.Timestamp('1', offset=1))
+ self.assertTrue(timestamp.Timestamp('0', offset=1))
+ self.assertTrue(timestamp.Timestamp(1.1))
+ self.assertTrue(timestamp.Timestamp(1.1, offset=1))
+ self.assertTrue(timestamp.Timestamp(0.0, offset=1))
+ self.assertTrue(timestamp.Timestamp('1.1'))
+ self.assertTrue(timestamp.Timestamp('1.1', offset=1))
+ self.assertTrue(timestamp.Timestamp('0.0', offset=1))
+ self.assertTrue(timestamp.Timestamp(11111111.11111111))
+ self.assertTrue(timestamp.Timestamp(11111111.11111111, offset=1))
+ self.assertTrue(timestamp.Timestamp(00000000.00000000, offset=1))
+ self.assertTrue(timestamp.Timestamp('11111111.11111111'))
+ self.assertTrue(timestamp.Timestamp('11111111.11111111', offset=1))
+ self.assertTrue(timestamp.Timestamp('00000000.00000000', offset=1))
+
+ def test_greater_no_offset(self):
+ now = time.time()
+ older = now - 1
+ ts = timestamp.Timestamp(now)
+ test_values = (
+ 0, '0', 0.0, '0.0', '0000.0000', '000.000_000',
+ 1, '1', 1.1, '1.1', '1111.1111', '111.111_111',
+ 1402443112.213252, '1402443112.213252', '1402443112.213252_ffff',
+ older, '%f' % older, '%f_0000ffff' % older,
+ )
+ for value in test_values:
+ other = timestamp.Timestamp(value)
+ self.assertNotEqual(ts, other) # sanity
+ self.assertTrue(ts > value,
+ '%r is not greater than %r given %r' % (
+ ts, value, value))
+ self.assertTrue(ts > other,
+ '%r is not greater than %r given %r' % (
+ ts, other, value))
+ self.assertTrue(ts > other.normal,
+ '%r is not greater than %r given %r' % (
+ ts, other.normal, value))
+ self.assertTrue(ts > other.internal,
+ '%r is not greater than %r given %r' % (
+ ts, other.internal, value))
+ self.assertTrue(ts > float(other),
+ '%r is not greater than %r given %r' % (
+ ts, float(other), value))
+ self.assertTrue(ts > int(other),
+ '%r is not greater than %r given %r' % (
+ ts, int(other), value))
+
+ def _test_greater_with_offset(self, now, test_values):
+ for offset in range(1, 1000, 100):
+ ts = timestamp.Timestamp(now, offset=offset)
+ for value in test_values:
+ other = timestamp.Timestamp(value)
+ self.assertNotEqual(ts, other) # sanity
+ self.assertTrue(ts > value,
+ '%r is not greater than %r given %r' % (
+ ts, value, value))
+ self.assertTrue(ts > other,
+ '%r is not greater than %r given %r' % (
+ ts, other, value))
+ self.assertTrue(ts > other.normal,
+ '%r is not greater than %r given %r' % (
+ ts, other.normal, value))
+ self.assertTrue(ts > other.internal,
+ '%r is not greater than %r given %r' % (
+ ts, other.internal, value))
+ self.assertTrue(ts > float(other),
+ '%r is not greater than %r given %r' % (
+ ts, float(other), value))
+ self.assertTrue(ts > int(other),
+ '%r is not greater than %r given %r' % (
+ ts, int(other), value))
+
+ def test_greater_with_offset(self):
+        # Part 1: use the natural current time from Python. This is
+        # deliciously unpredictable, but completely legitimate and
+        # realistic. Finds bugs!
+ now = time.time()
+ older = now - 1
+ test_values = (
+ 0, '0', 0.0, '0.0', '0000.0000', '000.000_000',
+ 1, '1', 1.1, '1.1', '1111.1111', '111.111_111',
+ 1402443346.935174, '1402443346.93517', '1402443346.935169_ffff',
+ older, now,
+ )
+ self._test_greater_with_offset(now, test_values)
+ # Part 2: Same as above, but with fixed time values that reproduce
+ # specific corner cases.
+ now = 1519830570.6949348
+ older = now - 1
+ test_values = (
+ 0, '0', 0.0, '0.0', '0000.0000', '000.000_000',
+ 1, '1', 1.1, '1.1', '1111.1111', '111.111_111',
+ 1402443346.935174, '1402443346.93517', '1402443346.935169_ffff',
+ older, now,
+ )
+ self._test_greater_with_offset(now, test_values)
+        # Part 3: The '%f' problem. Timestamps cannot be converted to %f
+        # strings, then back to timestamps, then compared with the
+        # originals: the '%f' round trip loses precision. A floating
+        # point representation can only be "imported" once.
+ now = 1519830570.6949348
+ now = float('%f' % now)
+ older = now - 1
+ test_values = (
+ 0, '0', 0.0, '0.0', '0000.0000', '000.000_000',
+ 1, '1', 1.1, '1.1', '1111.1111', '111.111_111',
+ older, '%f' % older, '%f_0000ffff' % older,
+ now, '%f' % now, '%s_00000000' % now,
+ )
+ self._test_greater_with_offset(now, test_values)
+
+ def test_smaller_no_offset(self):
+ now = time.time()
+ newer = now + 1
+ ts = timestamp.Timestamp(now)
+ test_values = (
+ 9999999999.99999, '9999999999.99999', '9999999999.99999_ffff',
+ newer, '%f' % newer, '%f_0000ffff' % newer,
+ )
+ for value in test_values:
+ other = timestamp.Timestamp(value)
+ self.assertNotEqual(ts, other) # sanity
+ self.assertTrue(ts < value,
+ '%r is not smaller than %r given %r' % (
+ ts, value, value))
+ self.assertTrue(ts < other,
+ '%r is not smaller than %r given %r' % (
+ ts, other, value))
+ self.assertTrue(ts < other.normal,
+ '%r is not smaller than %r given %r' % (
+ ts, other.normal, value))
+ self.assertTrue(ts < other.internal,
+ '%r is not smaller than %r given %r' % (
+ ts, other.internal, value))
+ self.assertTrue(ts < float(other),
+ '%r is not smaller than %r given %r' % (
+ ts, float(other), value))
+ self.assertTrue(ts < int(other),
+ '%r is not smaller than %r given %r' % (
+ ts, int(other), value))
+
+ def test_smaller_with_offset(self):
+ now = time.time()
+ newer = now + 1
+ test_values = (
+ 9999999999.99999, '9999999999.99999', '9999999999.99999_ffff',
+ newer, '%f' % newer, '%f_0000ffff' % newer,
+ )
+ for offset in range(1, 1000, 100):
+ ts = timestamp.Timestamp(now, offset=offset)
+ for value in test_values:
+ other = timestamp.Timestamp(value)
+ self.assertNotEqual(ts, other) # sanity
+ self.assertTrue(ts < value,
+ '%r is not smaller than %r given %r' % (
+ ts, value, value))
+ self.assertTrue(ts < other,
+ '%r is not smaller than %r given %r' % (
+ ts, other, value))
+ self.assertTrue(ts < other.normal,
+ '%r is not smaller than %r given %r' % (
+ ts, other.normal, value))
+ self.assertTrue(ts < other.internal,
+ '%r is not smaller than %r given %r' % (
+ ts, other.internal, value))
+ self.assertTrue(ts < float(other),
+ '%r is not smaller than %r given %r' % (
+ ts, float(other), value))
+ self.assertTrue(ts < int(other),
+ '%r is not smaller than %r given %r' % (
+ ts, int(other), value))
+
+ def test_cmp_with_none(self):
+ self.assertGreater(timestamp.Timestamp(0), None)
+ self.assertGreater(timestamp.Timestamp(1.0), None)
+ self.assertGreater(timestamp.Timestamp(1.0, 42), None)
+
+ def test_ordering(self):
+ given = [
+ '1402444820.62590_000000000000000a',
+ '1402444820.62589_0000000000000001',
+ '1402444821.52589_0000000000000004',
+ '1402444920.62589_0000000000000004',
+ '1402444821.62589_000000000000000a',
+ '1402444821.72589_000000000000000a',
+ '1402444920.62589_0000000000000002',
+ '1402444820.62589_0000000000000002',
+ '1402444820.62589_000000000000000a',
+ '1402444820.62590_0000000000000004',
+ '1402444920.62589_000000000000000a',
+ '1402444820.62590_0000000000000002',
+ '1402444821.52589_0000000000000002',
+ '1402444821.52589_0000000000000000',
+ '1402444920.62589',
+ '1402444821.62589_0000000000000004',
+ '1402444821.72589_0000000000000001',
+ '1402444820.62590',
+ '1402444820.62590_0000000000000001',
+ '1402444820.62589_0000000000000004',
+ '1402444821.72589_0000000000000000',
+ '1402444821.52589_000000000000000a',
+ '1402444821.72589_0000000000000004',
+ '1402444821.62589',
+ '1402444821.52589_0000000000000001',
+ '1402444821.62589_0000000000000001',
+ '1402444821.62589_0000000000000002',
+ '1402444821.72589_0000000000000002',
+ '1402444820.62589',
+ '1402444920.62589_0000000000000001']
+ expected = [
+ '1402444820.62589',
+ '1402444820.62589_0000000000000001',
+ '1402444820.62589_0000000000000002',
+ '1402444820.62589_0000000000000004',
+ '1402444820.62589_000000000000000a',
+ '1402444820.62590',
+ '1402444820.62590_0000000000000001',
+ '1402444820.62590_0000000000000002',
+ '1402444820.62590_0000000000000004',
+ '1402444820.62590_000000000000000a',
+ '1402444821.52589',
+ '1402444821.52589_0000000000000001',
+ '1402444821.52589_0000000000000002',
+ '1402444821.52589_0000000000000004',
+ '1402444821.52589_000000000000000a',
+ '1402444821.62589',
+ '1402444821.62589_0000000000000001',
+ '1402444821.62589_0000000000000002',
+ '1402444821.62589_0000000000000004',
+ '1402444821.62589_000000000000000a',
+ '1402444821.72589',
+ '1402444821.72589_0000000000000001',
+ '1402444821.72589_0000000000000002',
+ '1402444821.72589_0000000000000004',
+ '1402444821.72589_000000000000000a',
+ '1402444920.62589',
+ '1402444920.62589_0000000000000001',
+ '1402444920.62589_0000000000000002',
+ '1402444920.62589_0000000000000004',
+ '1402444920.62589_000000000000000a',
+ ]
+        # a less visual, but equivalent, way to generate the above:
+ """
+ now = time.time()
+ given = [
+ timestamp.Timestamp(now + i, offset=offset).internal
+ for i in (0, 0.00001, 0.9, 1.0, 1.1, 100.0)
+ for offset in (0, 1, 2, 4, 10)
+ ]
+ expected = [t for t in given]
+ random.shuffle(given)
+ """
+ self.assertEqual(len(given), len(expected)) # sanity
+ timestamps = [timestamp.Timestamp(t) for t in given]
+ # our expected values don't include insignificant offsets
+ with mock.patch('swift.common.utils.timestamp.FORCE_INTERNAL',
+ new=False):
+ self.assertEqual(
+ [t.internal for t in sorted(timestamps)], expected)
+ # string sorting works as well
+ self.assertEqual(
+ sorted([t.internal for t in timestamps]), expected)
+
+ def test_hashable(self):
+ ts_0 = timestamp.Timestamp('1402444821.72589')
+ ts_0_also = timestamp.Timestamp('1402444821.72589')
+ self.assertEqual(ts_0, ts_0_also) # sanity
+ self.assertEqual(hash(ts_0), hash(ts_0_also))
+ d = {ts_0: 'whatever'}
+ self.assertIn(ts_0, d) # sanity
+ self.assertIn(ts_0_also, d)
+
+ def test_out_of_range_comparisons(self):
+ now = timestamp.Timestamp.now()
+
+ def check_is_later(val):
+ self.assertTrue(now != val)
+ self.assertFalse(now == val)
+ self.assertTrue(now <= val)
+ self.assertTrue(now < val)
+ self.assertTrue(val > now)
+ self.assertTrue(val >= now)
+
+ check_is_later(1e30)
+ check_is_later(1579753284000) # someone gave us ms instead of s!
+ check_is_later('1579753284000')
+ check_is_later(b'1e15')
+ check_is_later(u'1.e+10_f')
+
+ def check_is_earlier(val):
+ self.assertTrue(now != val)
+ self.assertFalse(now == val)
+ self.assertTrue(now >= val)
+ self.assertTrue(now > val)
+ self.assertTrue(val < now)
+ self.assertTrue(val <= now)
+
+ check_is_earlier(-1)
+ check_is_earlier(-0.1)
+ check_is_earlier('-9999999')
+ check_is_earlier(b'-9999.999')
+ check_is_earlier(u'-1234_5678')
+
+ def test_inversion(self):
+ ts = timestamp.Timestamp(0)
+ self.assertIsInstance(~ts, timestamp.Timestamp)
+ self.assertEqual((~ts).internal, '9999999999.99999')
+
+ ts = timestamp.Timestamp(123456.789)
+ self.assertIsInstance(~ts, timestamp.Timestamp)
+ self.assertEqual(ts.internal, '0000123456.78900')
+ self.assertEqual((~ts).internal, '9999876543.21099')
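+        # (inversion maps t to 9999999999.99999 - t, to 5 decimal places)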
+
+ timestamps = sorted(timestamp.Timestamp(random.random() * 1e10)
+ for _ in range(20))
+ self.assertEqual([x.internal for x in timestamps],
+ sorted(x.internal for x in timestamps))
+ self.assertEqual([(~x).internal for x in reversed(timestamps)],
+ sorted((~x).internal for x in timestamps))
+
+ ts = timestamp.Timestamp.now()
+ self.assertGreater(~ts, ts) # NB: will break around 2128
+
+ ts = timestamp.Timestamp.now(offset=1)
+ with self.assertRaises(ValueError) as caught:
+ ~ts
+ self.assertEqual(caught.exception.args[0],
+ 'Cannot invert timestamps with offsets')
+
+
+class TestTimestampEncoding(unittest.TestCase):
+
+ def setUp(self):
+ t0 = timestamp.Timestamp(0.0)
+ t1 = timestamp.Timestamp(997.9996)
+ t2 = timestamp.Timestamp(999)
+ t3 = timestamp.Timestamp(1000, 24)
+ t4 = timestamp.Timestamp(1001)
+ t5 = timestamp.Timestamp(1002.00040)
+
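+        # encode_timestamps packs up to three timestamps into one string:
+        # the first timestamp's internal form followed by the second and
+        # third as signed hex deltas in raw 10-microsecond units, e.g.
+        # '+186a0' is +100000 raw, i.e. one second
+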
+ # encodings that are expected when explicit = False
+ self.non_explicit_encodings = (
+ ('0000001000.00000_18', (t3, t3, t3)),
+ ('0000001000.00000_18', (t3, t3, None)),
+ )
+
+ # mappings that are expected when explicit = True
+ self.explicit_encodings = (
+ ('0000001000.00000_18+0+0', (t3, t3, t3)),
+ ('0000001000.00000_18+0', (t3, t3, None)),
+ )
+
+ # mappings that are expected when explicit = True or False
+ self.encodings = (
+ ('0000001000.00000_18+0+186a0', (t3, t3, t4)),
+ ('0000001000.00000_18+186a0+186c8', (t3, t4, t5)),
+ ('0000001000.00000_18-186a0+0', (t3, t2, t2)),
+ ('0000001000.00000_18+0-186a0', (t3, t3, t2)),
+ ('0000001000.00000_18-186a0-186c8', (t3, t2, t1)),
+ ('0000001000.00000_18', (t3, None, None)),
+ ('0000001000.00000_18+186a0', (t3, t4, None)),
+ ('0000001000.00000_18-186a0', (t3, t2, None)),
+ ('0000001000.00000_18', (t3, None, t1)),
+ ('0000001000.00000_18-5f5e100', (t3, t0, None)),
+ ('0000001000.00000_18+0-5f5e100', (t3, t3, t0)),
+ ('0000001000.00000_18-5f5e100+5f45a60', (t3, t0, t2)),
+ )
+
+ # decodings that are expected when explicit = False
+ self.non_explicit_decodings = (
+ ('0000001000.00000_18', (t3, t3, t3)),
+ ('0000001000.00000_18+186a0', (t3, t4, t4)),
+ ('0000001000.00000_18-186a0', (t3, t2, t2)),
+ ('0000001000.00000_18+186a0', (t3, t4, t4)),
+ ('0000001000.00000_18-186a0', (t3, t2, t2)),
+ ('0000001000.00000_18-5f5e100', (t3, t0, t0)),
+ )
+
+ # decodings that are expected when explicit = True
+ self.explicit_decodings = (
+ ('0000001000.00000_18+0+0', (t3, t3, t3)),
+ ('0000001000.00000_18+0', (t3, t3, None)),
+ ('0000001000.00000_18', (t3, None, None)),
+ ('0000001000.00000_18+186a0', (t3, t4, None)),
+ ('0000001000.00000_18-186a0', (t3, t2, None)),
+ ('0000001000.00000_18-5f5e100', (t3, t0, None)),
+ )
+
+ # decodings that are expected when explicit = True or False
+ self.decodings = (
+ ('0000001000.00000_18+0+186a0', (t3, t3, t4)),
+ ('0000001000.00000_18+186a0+186c8', (t3, t4, t5)),
+ ('0000001000.00000_18-186a0+0', (t3, t2, t2)),
+ ('0000001000.00000_18+0-186a0', (t3, t3, t2)),
+ ('0000001000.00000_18-186a0-186c8', (t3, t2, t1)),
+ ('0000001000.00000_18-5f5e100+5f45a60', (t3, t0, t2)),
+ )
+
+ def _assertEqual(self, expected, actual, test):
+ self.assertEqual(expected, actual,
+ 'Got %s but expected %s for parameters %s'
+ % (actual, expected, test))
+
+ def test_encoding(self):
+ for test in self.explicit_encodings:
+ actual = timestamp.encode_timestamps(test[1][0], test[1][1],
+ test[1][2], True)
+ self._assertEqual(test[0], actual, test[1])
+ for test in self.non_explicit_encodings:
+ actual = timestamp.encode_timestamps(test[1][0], test[1][1],
+ test[1][2], False)
+ self._assertEqual(test[0], actual, test[1])
+ for explicit in (True, False):
+ for test in self.encodings:
+ actual = timestamp.encode_timestamps(test[1][0], test[1][1],
+ test[1][2], explicit)
+ self._assertEqual(test[0], actual, test[1])
+
+ def test_decoding(self):
+ for test in self.explicit_decodings:
+ actual = timestamp.decode_timestamps(test[0], True)
+ self._assertEqual(test[1], actual, test[0])
+ for test in self.non_explicit_decodings:
+ actual = timestamp.decode_timestamps(test[0], False)
+ self._assertEqual(test[1], actual, test[0])
+ for explicit in (True, False):
+ for test in self.decodings:
+ actual = timestamp.decode_timestamps(test[0], explicit)
+ self._assertEqual(test[1], actual, test[0])
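
As a quick reference alongside these new tests, here is a minimal sketch of
the Timestamp behaviours they pin down (assuming the
swift.common.utils.timestamp module introduced by this change is importable;
every asserted value below is taken from the tests above):

    from swift.common.utils import timestamp

    ts = timestamp.Timestamp(1402436408.91203, offset=240)
    # the internal format appends the offset as 16 hex digits
    assert ts.internal == '1402436408.91203_00000000000000f0'
    # the normal and short forms drop or abbreviate the offset
    assert ts.normal == '1402436408.91203'
    assert ts.short == '1402436408.91203_f0'
    # raw scales the time by 100000 and ignores the offset
    assert ts.raw == 140243640891203
    # an offset makes a timestamp sort after the same time without one
    assert ts > timestamp.Timestamp(1402436408.91203)
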
diff --git a/test/unit/container/test_server.py b/test/unit/container/test_server.py
index aca6d38b6..c09a8d997 100644
--- a/test/unit/container/test_server.py
+++ b/test/unit/container/test_server.py
@@ -435,6 +435,69 @@ class TestContainerController(unittest.TestCase):
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 202)
+ def test_PUT_HEAD_put_timestamp_updates(self):
+ put_ts = Timestamp(1)
+ req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
+ headers={'X-Timestamp': put_ts.internal})
+ resp = req.get_response(self.controller)
+ self.assertEqual(resp.status_int, 201)
+
+ def do_put_head(put_ts, meta_value, extra_hdrs, body='', path='a/c'):
+ # Set metadata header
+ req = Request.blank('/sda1/p/' + path,
+ environ={'REQUEST_METHOD': 'PUT'},
+ headers={'X-Timestamp': put_ts.internal,
+ 'X-Container-Meta-Test': meta_value},
+ body=body)
+ req.headers.update(extra_hdrs)
+ resp = req.get_response(self.controller)
+ self.assertTrue(resp.is_success)
+ req = Request.blank('/sda1/p/a/c',
+ environ={'REQUEST_METHOD': 'HEAD'})
+ resp = req.get_response(self.controller)
+ self.assertEqual(resp.status_int, 204)
+ return resp.headers
+
+ # put timestamp is advanced on PUT with container path
+ put_ts = Timestamp(2)
+ resp_hdrs = do_put_head(put_ts, 'val1',
+ {'x-backend-no-timestamp-update': 'false'})
+ self.assertEqual(resp_hdrs.get('x-container-meta-test'), 'val1')
+ self.assertEqual(resp_hdrs.get('x-backend-put-timestamp'),
+ put_ts.internal)
+ self.assertEqual(resp_hdrs.get('x-put-timestamp'), put_ts.internal)
+
+ put_ts = Timestamp(3)
+ resp_hdrs = do_put_head(put_ts, 'val2',
+ {'x-backend-no-timestamp-update': 'true'})
+ self.assertEqual(resp_hdrs.get('x-container-meta-test'), 'val2')
+ self.assertEqual(resp_hdrs.get('x-backend-put-timestamp'),
+ put_ts.internal)
+ self.assertEqual(resp_hdrs.get('x-put-timestamp'), put_ts.internal)
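+        # (x-backend-no-timestamp-update is ignored on PUT: the put
+        # timestamp is still advanced)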
+
+ # put timestamp is NOT updated if record type is shard
+ put_ts = Timestamp(4)
+ resp_hdrs = do_put_head(
+ put_ts, 'val3', {'x-backend-record-type': 'shard'},
+ body=json.dumps([dict(ShardRange('x/y', 123.4))]))
+ self.assertEqual(resp_hdrs.get('x-container-meta-test'), 'val3')
+ self.assertEqual(resp_hdrs.get('x-backend-put-timestamp'),
+ Timestamp(3).internal)
+ self.assertEqual(resp_hdrs.get('x-put-timestamp'),
+ Timestamp(3).internal)
+
+ # put timestamp and metadata are NOT updated for request with obj path
+ put_ts = Timestamp(5)
+ resp_hdrs = do_put_head(
+ put_ts, 'val4',
+ {'x-content-type': 'plain/text', 'x-size': 0, 'x-etag': 'an-etag'},
+ path='a/c/o')
+ self.assertEqual(resp_hdrs.get('x-container-meta-test'), 'val3')
+ self.assertEqual(resp_hdrs.get('x-backend-put-timestamp'),
+ Timestamp(3).internal)
+ self.assertEqual(resp_hdrs.get('x-put-timestamp'),
+ Timestamp(3).internal)
+
def test_PUT_insufficient_space(self):
conf = {'devices': self.testdir,
'mount_check': 'false',
@@ -1058,6 +1121,58 @@ class TestContainerController(unittest.TestCase):
self.assertEqual(resp.status_int, 204)
self.assertNotIn(key.lower(), resp.headers)
+ def test_POST_HEAD_no_timestamp_update(self):
+ put_ts = Timestamp(1)
+ req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
+ headers={'X-Timestamp': put_ts.internal})
+ resp = req.get_response(self.controller)
+ self.assertEqual(resp.status_int, 201)
+
+ def do_post_head(post_ts, value, extra_hdrs):
+ # Set metadata header
+ req = Request.blank('/sda1/p/a/c',
+ environ={'REQUEST_METHOD': 'POST'},
+ headers={'X-Timestamp': post_ts.internal,
+ 'X-Container-Meta-Test': value})
+ req.headers.update(extra_hdrs)
+ resp = req.get_response(self.controller)
+ self.assertEqual(resp.status_int, 204)
+ req = Request.blank('/sda1/p/a/c',
+ environ={'REQUEST_METHOD': 'HEAD'})
+ resp = req.get_response(self.controller)
+ self.assertEqual(resp.status_int, 204)
+ return resp.headers
+
+ # verify timestamp IS advanced
+ post_ts = Timestamp(2)
+ resp_hdrs = do_post_head(post_ts, 'val1', {})
+ self.assertEqual(resp_hdrs.get('x-container-meta-test'), 'val1')
+ self.assertEqual(resp_hdrs.get('x-backend-put-timestamp'),
+ post_ts.internal)
+
+ post_ts = Timestamp(3)
+ resp_hdrs = do_post_head(post_ts, 'val2',
+ {'x-backend-no-timestamp-update': 'false'})
+ self.assertEqual(resp_hdrs.get('x-container-meta-test'), 'val2')
+ self.assertEqual(resp_hdrs.get('x-backend-put-timestamp'),
+ post_ts.internal)
+
+ # verify timestamp IS NOT advanced, but metadata still updated
+ post_ts = Timestamp(4)
+ resp_hdrs = do_post_head(post_ts, 'val3',
+ {'x-backend-No-timeStamp-update': 'true'})
+ self.assertEqual(resp_hdrs.get('x-container-meta-test'), 'val3')
+ self.assertEqual(resp_hdrs.get('x-backend-put-timestamp'),
+ Timestamp(3).internal)
+
+ # verify timestamp will not go backwards
+ post_ts = Timestamp(2)
+ resp_hdrs = do_post_head(post_ts, 'val4',
+ {'x-backend-no-timestamp-update': 'true'})
+ self.assertEqual(resp_hdrs.get('x-container-meta-test'), 'val3')
+ self.assertEqual(resp_hdrs.get('x-backend-put-timestamp'),
+ Timestamp(3).internal)
+
def test_POST_invalid_partition(self):
req = Request.blank('/sda1/./a/c', environ={'REQUEST_METHOD': 'POST',
'HTTP_X_TIMESTAMP': '1'})
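
For reference, a minimal sketch of how a backend request opts out of the
put-timestamp bump, mirroring the POST test above ('app' is a hypothetical
stand-in for a container-server application configured as in these tests):

    from swift.common.swob import Request
    from swift.common.utils import Timestamp

    post_ts = Timestamp(4)
    req = Request.blank(
        '/sda1/p/a/c', environ={'REQUEST_METHOD': 'POST'},
        headers={'X-Timestamp': post_ts.internal,
                 'X-Container-Meta-Test': 'val3',
                 'X-Backend-No-Timestamp-Update': 'true'})
    req.get_response(app)
    # the metadata lands, but a subsequent HEAD reports the previous
    # x-backend-put-timestamp rather than post_ts
    head = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'HEAD'})
    resp = head.get_response(app)
    assert resp.headers['x-container-meta-test'] == 'val3'
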
diff --git a/test/unit/container/test_sharder.py b/test/unit/container/test_sharder.py
index 5e77c9071..463ca1461 100644
--- a/test/unit/container/test_sharder.py
+++ b/test/unit/container/test_sharder.py
@@ -203,7 +203,6 @@ class TestSharder(BaseTestSharder):
'container-sharder', sharder.logger.logger.name)
mock_ic.assert_called_once_with(
'/etc/swift/internal-client.conf', 'Swift Container Sharder', 3,
- allow_modify_pipeline=False,
use_replication_network=True,
global_conf={'log_name': 'container-sharder-ic'})
@@ -218,7 +217,6 @@ class TestSharder(BaseTestSharder):
sharder, mock_ic = self._do_test_init(conf, expected)
mock_ic.assert_called_once_with(
'/etc/swift/internal-client.conf', 'Swift Container Sharder', 3,
- allow_modify_pipeline=False,
use_replication_network=True,
global_conf={'log_name': 'container-sharder-ic'})
@@ -280,7 +278,6 @@ class TestSharder(BaseTestSharder):
sharder, mock_ic = self._do_test_init(conf, expected)
mock_ic.assert_called_once_with(
'/etc/swift/my-sharder-ic.conf', 'Swift Container Sharder', 2,
- allow_modify_pipeline=False,
use_replication_network=True,
global_conf={'log_name': 'container-sharder-ic'})
self.assertEqual(self.logger.get_lines_for_level('warning'), [
@@ -418,7 +415,6 @@ class TestSharder(BaseTestSharder):
mock_ic.assert_called_once_with(
'/etc/swift/internal-client.conf',
'Swift Container Sharder', 3,
- allow_modify_pipeline=False,
global_conf={'log_name': exp_internal_client_log_name},
use_replication_network=True)
@@ -426,14 +422,109 @@ class TestSharder(BaseTestSharder):
_do_test_init_ic_log_name({'log_name': 'container-sharder-6021'},
'container-sharder-6021-ic')
+ def test_log_broker(self):
+ broker = self._make_broker(container='c@d')
+
+ def do_test(level):
+ with self._mock_sharder() as sharder:
+ func = getattr(sharder, level)
+ func(broker, 'bonjour %s %s', 'mes', 'amis')
+ func(broker, 'hello my %s', 'friend%04ds')
+ func(broker, 'greetings friend%04ds')
+
+ self.assertEqual(
+ ['bonjour mes amis, path: a/c%40d, db: ' + broker.db_file,
+ 'hello my friend%04ds, path: a/c%40d, db: ' + broker.db_file,
+ 'greetings friend%04ds, path: a/c%40d, db: ' + broker.db_file
+ ], sharder.logger.get_lines_for_level(level))
+
+ for log_level, lines in sharder.logger.all_log_lines().items():
+ if log_level == level:
+ continue
+ else:
+ self.assertFalse(lines)
+
+ do_test('debug')
+ do_test('info')
+ do_test('warning')
+ do_test('error')
+
+ def test_log_broker_exception(self):
+ broker = self._make_broker()
+
+ with self._mock_sharder() as sharder:
+ try:
+ raise ValueError('test')
+ except ValueError as err:
+ sharder.exception(broker, 'exception: %s', err)
+
+ self.assertEqual(
+ ['exception: test, path: a/c, db: %s: ' % broker.db_file],
+ sharder.logger.get_lines_for_level('error'))
+
+ for log_level, lines in sharder.logger.all_log_lines().items():
+ if log_level == 'error':
+ continue
+ else:
+ self.assertFalse(lines)
+
+ def test_log_broker_levels(self):
+ # verify that the broker is not queried if the log level is not enabled
+ broker = self._make_broker()
+ # erase cached properties...
+ broker.account = broker.container = None
+
+ with self._mock_sharder() as sharder:
+ with mock.patch.object(sharder.logger, 'isEnabledFor',
+ return_value=False):
+ sharder.debug(broker, 'test')
+ sharder.info(broker, 'test')
+ sharder.warning(broker, 'test')
+ sharder.error(broker, 'test')
+
+ # cached properties have not been set...
+ self.assertIsNone(broker.account)
+ self.assertIsNone(broker.container)
+ self.assertFalse(sharder.logger.all_log_lines())
+
+ def test_log_broker_exception_while_logging(self):
+ broker = self._make_broker()
+
+ def do_test(level):
+ with self._mock_sharder() as sharder:
+ func = getattr(sharder, level)
+ with mock.patch.object(broker, '_populate_instance_cache',
+ side_effect=Exception()):
+ func(broker, 'bonjour %s %s', 'mes', 'amis')
+ broker._db_files = None
+ with mock.patch.object(broker, 'reload_db_files',
+ side_effect=Exception()):
+ func(broker, 'bonjour %s %s', 'mes', 'amis')
+
+ self.assertEqual(
+ ['bonjour mes amis, path: , db: %s' % broker.db_file,
+ 'bonjour mes amis, path: a/c, db: '],
+ sharder.logger.get_lines_for_level(level))
+
+ for log_level, lines in sharder.logger.all_log_lines().items():
+ if log_level == level:
+ continue
+ else:
+ self.assertFalse(lines)
+
+ do_test('debug')
+ do_test('info')
+ do_test('warning')
+ do_test('error')
+
def _assert_stats(self, expected, sharder, category):
- # assertEqual doesn't work with a defaultdict
+ # assertEqual doesn't work with a stats defaultdict so copy to a dict
+ # before comparing
stats = sharder.stats['sharding'][category]
+ actual = {}
for k, v in expected.items():
- actual = stats[k]
- self.assertEqual(
- v, actual, 'Expected %s but got %s for %s in %s' %
- (v, actual, k, stats))
+ actual[k] = stats[k]
+ self.assertEqual(expected, actual)
return stats
def _assert_recon_stats(self, expected, sharder, category):
@@ -587,6 +678,7 @@ class TestSharder(BaseTestSharder):
lines = sharder.logger.get_lines_for_level('error')
self.assertIn(
'Unhandled exception while dumping progress', lines[0])
+ self.assertIn('path: a/c', lines[0]) # match one of the brokers
self.assertIn('Test over', lines[0])
def check_recon(data, time, last, expected_stats):
@@ -782,6 +874,7 @@ class TestSharder(BaseTestSharder):
self.assertEqual({'a/c0', 'a/c1', 'a/c2'}, set(processed_paths))
lines = sharder.logger.get_lines_for_level('error')
self.assertIn('Unhandled exception while processing', lines[0])
+ self.assertIn('path: a/c', lines[0]) # match one of the brokers
self.assertFalse(lines[1:])
sharder.logger.clear()
expected_stats = {'attempted': 3, 'success': 2, 'failure': 1,
@@ -1181,6 +1274,204 @@ class TestSharder(BaseTestSharder):
'GET', '/v1/a/c', expected_headers, acceptable_statuses=(2,),
params=params)
+ def test_yield_objects(self):
+ broker = self._make_broker()
+ objects = [
+ ('o%02d' % i, self.ts_encoded(), 10, 'text/plain', 'etag_a',
+ i % 2, 0) for i in range(30)]
+ for obj in objects:
+ broker.put_object(*obj)
+
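+        # yield_objects yields (batch, info) pairs: undeleted rows are
+        # yielded before deleted rows, in batches of up to batch_size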
+ src_range = ShardRange('dont/care', Timestamp.now())
+ with self._mock_sharder(conf={}) as sharder:
+ batches = [b for b, _ in
+ sharder.yield_objects(broker, src_range)]
+ self.assertEqual([15, 15], [len(b) for b in batches])
+ self.assertEqual([[0] * 15, [1] * 15],
+ [[o['deleted'] for o in b] for b in batches])
+
+ # custom batch size
+ with self._mock_sharder(conf={}) as sharder:
+ batches = [b for b, _ in
+ sharder.yield_objects(broker, src_range, batch_size=10)]
+ self.assertEqual([10, 5, 10, 5], [len(b) for b in batches])
+ self.assertEqual([[0] * 10, [0] * 5, [1] * 10, [1] * 5],
+ [[o['deleted'] for o in b] for b in batches])
+
+ # restricted source range
+ src_range = ShardRange('dont/care', Timestamp.now(),
+ lower='o10', upper='o20')
+ with self._mock_sharder(conf={}) as sharder:
+ batches = [b for b, _ in
+ sharder.yield_objects(broker, src_range)]
+ self.assertEqual([5, 5], [len(b) for b in batches])
+ self.assertEqual([[0] * 5, [1] * 5],
+ [[o['deleted'] for o in b] for b in batches])
+
+ # null source range
+ src_range = ShardRange('dont/care', Timestamp.now(),
+ lower=ShardRange.MAX)
+ with self._mock_sharder(conf={}) as sharder:
+ batches = [b for b, _ in
+ sharder.yield_objects(broker, src_range)]
+ self.assertEqual([], batches)
+ src_range = ShardRange('dont/care', Timestamp.now(),
+ upper=ShardRange.MIN)
+ with self._mock_sharder(conf={}) as sharder:
+ batches = [b for b, _ in
+ sharder.yield_objects(broker, src_range)]
+ self.assertEqual([], batches)
+
+ def test_yield_objects_to_shard_range_no_objects(self):
+ # verify that dest_shard_ranges func is not called if the source
+ # broker has no objects
+ broker = self._make_broker()
+ dest_shard_ranges = mock.MagicMock()
+ src_range = ShardRange('dont/care', Timestamp.now())
+ with self._mock_sharder(conf={}) as sharder:
+ batches = [b for b, _ in
+ sharder.yield_objects_to_shard_range(
+ broker, src_range, dest_shard_ranges)]
+ self.assertEqual([], batches)
+ dest_shard_ranges.assert_not_called()
+
+ def test_yield_objects_to_shard_range(self):
+ broker = self._make_broker()
+ objects = [
+ ('o%02d' % i, self.ts_encoded(), 10, 'text/plain', 'etag_a',
+ i % 2, 0) for i in range(30)]
+ for obj in objects:
+ broker.put_object(*obj)
+ orig_info = broker.get_info()
+ # yield_objects annotates the info dict...
+ orig_info['max_row'] = 30
+ dest_ranges = [
+ ShardRange('shard/0', Timestamp.now(), upper='o09'),
+ ShardRange('shard/1', Timestamp.now(), lower='o09', upper='o19'),
+ ShardRange('shard/2', Timestamp.now(), lower='o19'),
+ ]
+
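+        # yield_objects_to_shard_range yields (objects, dest, info)
+        # triples; dest is None for objects that fall outside every
+        # destination shard range
+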
+ # complete overlap of src and dest, multiple batches per dest shard
+ # range per deleted/not deleted
+ src_range = ShardRange('dont/care', Timestamp.now())
+ dest_shard_ranges = mock.MagicMock(return_value=dest_ranges)
+ with self._mock_sharder(conf={'cleave_row_batch_size': 4}) as sharder:
+ yielded = [y for y in
+ sharder.yield_objects_to_shard_range(
+ broker, src_range, dest_shard_ranges)]
+ self.assertEqual([dest_ranges[0], dest_ranges[0],
+ dest_ranges[0], dest_ranges[0],
+ dest_ranges[1], dest_ranges[1],
+ dest_ranges[1], dest_ranges[1],
+ dest_ranges[2], dest_ranges[2],
+ dest_ranges[2], dest_ranges[2]],
+ [dest for _, dest, _ in yielded])
+ self.assertEqual([[o[0] for o in objects[0:8:2]],
+ [o[0] for o in objects[8:10:2]],
+ [o[0] for o in objects[1:8:2]],
+ [o[0] for o in objects[9:10:2]],
+ [o[0] for o in objects[10:18:2]],
+ [o[0] for o in objects[18:20:2]],
+ [o[0] for o in objects[11:18:2]],
+ [o[0] for o in objects[19:20:2]],
+ [o[0] for o in objects[20:28:2]],
+ [o[0] for o in objects[28:30:2]],
+ [o[0] for o in objects[21:28:2]],
+ [o[0] for o in objects[29:30:2]]],
+ [[o['name'] for o in objs] for objs, _, _ in yielded])
+ self.assertEqual([orig_info] * 12, [info for _, _, info in yielded])
+
+ # src narrower than dest
+ src_range = ShardRange('dont/care', Timestamp.now(),
+ lower='o15', upper='o25')
+ dest_shard_ranges = mock.MagicMock(return_value=dest_ranges)
+ with self._mock_sharder(conf={}) as sharder:
+ yielded = [y for y in
+ sharder.yield_objects_to_shard_range(
+ broker, src_range, dest_shard_ranges)]
+ self.assertEqual([dest_ranges[1], dest_ranges[1],
+ dest_ranges[2], dest_ranges[2]],
+ [dest for _, dest, _ in yielded])
+ self.assertEqual([[o[0] for o in objects[16:20:2]],
+ [o[0] for o in objects[17:20:2]],
+ [o[0] for o in objects[20:26:2]],
+ [o[0] for o in objects[21:26:2]]],
+ [[o['name'] for o in objs] for objs, _, _ in yielded])
+ self.assertEqual([orig_info] * 4, [info for _, _, info in yielded])
+
+ # src much narrower than dest
+ src_range = ShardRange('dont/care', Timestamp.now(),
+ lower='o15', upper='o18')
+ dest_shard_ranges = mock.MagicMock(return_value=dest_ranges)
+ with self._mock_sharder(conf={}) as sharder:
+ yielded = [y for y in
+ sharder.yield_objects_to_shard_range(
+ broker, src_range, dest_shard_ranges)]
+ self.assertEqual([dest_ranges[1], dest_ranges[1]],
+ [dest for _, dest, _ in yielded])
+ self.assertEqual([[o[0] for o in objects[16:19:2]],
+ [o[0] for o in objects[17:19:2]]],
+ [[o['name'] for o in objs] for objs, _, _ in yielded])
+ self.assertEqual([orig_info] * 2, [info for _, _, info in yielded])
+
+ # dest narrower than src
+ src_range = ShardRange('dont/care', Timestamp.now(),
+ lower='o05', upper='o25')
+ dest_shard_ranges = mock.MagicMock(return_value=dest_ranges[1:])
+ with self._mock_sharder(conf={}) as sharder:
+ yielded = [y for y in
+ sharder.yield_objects_to_shard_range(
+ broker, src_range, dest_shard_ranges)]
+ self.assertEqual([None, None,
+ dest_ranges[1], dest_ranges[1],
+ dest_ranges[2], dest_ranges[2]],
+ [dest for _, dest, _ in yielded])
+ self.assertEqual([[o[0] for o in objects[6:10:2]],
+ [o[0] for o in objects[7:10:2]],
+ [o[0] for o in objects[10:20:2]],
+ [o[0] for o in objects[11:20:2]],
+ [o[0] for o in objects[20:26:2]],
+ [o[0] for o in objects[21:26:2]]],
+ [[o['name'] for o in objs] for objs, _, _ in yielded])
+ self.assertEqual([orig_info] * 6, [info for _, _, info in yielded])
+
+ # dest much narrower than src
+ src_range = ShardRange('dont/care', Timestamp.now(),
+ lower='o05', upper='o25')
+ dest_shard_ranges = mock.MagicMock(return_value=dest_ranges[1:2])
+ with self._mock_sharder(conf={}) as sharder:
+ yielded = [y for y in
+ sharder.yield_objects_to_shard_range(
+ broker, src_range, dest_shard_ranges)]
+ self.assertEqual([None, None,
+ dest_ranges[1], dest_ranges[1],
+ None, None],
+ [dest for _, dest, _ in yielded])
+ self.assertEqual([[o[0] for o in objects[6:10:2]],
+ [o[0] for o in objects[7:10:2]],
+ [o[0] for o in objects[10:20:2]],
+ [o[0] for o in objects[11:20:2]],
+ [o[0] for o in objects[20:26:2]],
+ [o[0] for o in objects[21:26:2]]],
+ [[o['name'] for o in objs] for objs, _, _ in yielded])
+ self.assertEqual([orig_info] * 6, [info for _, _, info in yielded])
+
+ # no dest, source is entire namespace, multiple batches
+ src_range = ShardRange('dont/care', Timestamp.now())
+ dest_shard_ranges = mock.MagicMock(return_value=[])
+ with self._mock_sharder(conf={'cleave_row_batch_size': 10}) as sharder:
+ yielded = [y for y in
+ sharder.yield_objects_to_shard_range(
+ broker, src_range, dest_shard_ranges)]
+ self.assertEqual([None] * 4,
+ [dest for _, dest, _ in yielded])
+ self.assertEqual([[o[0] for o in objects[:20:2]],
+ [o[0] for o in objects[20::2]],
+ [o[0] for o in objects[1:20:2]],
+ [o[0] for o in objects[21::2]]],
+ [[o['name'] for o in objs] for objs, _, _ in yielded])
+ self.assertEqual([orig_info] * 4, [info for _, _, info in yielded])
+
def _check_cleave_root(self, conf=None):
broker = self._make_broker()
objects = [
@@ -1785,7 +2076,8 @@ class TestSharder(BaseTestSharder):
self.assertEqual(UNSHARDED, broker.get_db_state())
warning_lines = sharder.logger.get_lines_for_level('warning')
self.assertEqual(warning_lines[0],
- 'Failed to get own_shard_range for a/c')
+ 'Failed to get own_shard_range, path: a/c, db: %s'
+ % broker.db_file)
sharder._replicate_object.assert_not_called()
context = CleavingContext.load(broker)
self.assertTrue(context.misplaced_done)
@@ -2176,10 +2468,13 @@ class TestSharder(BaseTestSharder):
self.assertEqual(12, context.max_row) # note that max row increased
lines = sharder.logger.get_lines_for_level('info')
self.assertEqual(
- ["Kick off container cleaving on a/c, own shard range in state "
- "'sharding'", "Starting to cleave (2 todo): a/c"], lines[:2])
- self.assertIn('Completed cleaving of a/c, DB remaining in '
- 'sharding state', lines[1:])
+ ["Kick off container cleaving, own shard range in state "
+ "'sharding', path: a/c, db: %s" % broker.db_file,
+ "Starting to cleave (2 todo), path: a/c, db: %s"
+ % broker.db_file], lines[:2])
+ self.assertIn('Completed cleaving, DB remaining in sharding state, '
+ 'path: a/c, db: %s'
+ % broker.db_file, lines[1:])
lines = sharder.logger.get_lines_for_level('warning')
self.assertIn('Repeat cleaving required', lines[0])
self.assertFalse(lines[1:])
@@ -2209,9 +2504,12 @@ class TestSharder(BaseTestSharder):
self._check_shard_range(shard_ranges[1], updated_shard_ranges[1])
self._check_objects(new_objects[1:], expected_shard_dbs[1])
lines = sharder.logger.get_lines_for_level('info')
- self.assertEqual('Starting to cleave (2 todo): a/c', lines[0])
- self.assertIn('Completed cleaving of a/c, DB set to sharded state',
- lines[1:])
+ self.assertEqual(
+ 'Starting to cleave (2 todo), path: a/c, db: %s'
+ % broker.db_file, lines[0])
+ self.assertIn(
+ 'Completed cleaving, DB set to sharded state, path: a/c, db: %s'
+ % broker.db_file, lines[1:])
self.assertFalse(sharder.logger.get_lines_for_level('warning'))
def test_cleave_multiple_storage_policies(self):
@@ -2905,9 +3203,7 @@ class TestSharder(BaseTestSharder):
with self._mock_sharder() as sharder:
self.assertFalse(sharder._complete_sharding(broker))
warning_lines = sharder.logger.get_lines_for_level('warning')
- self.assertIn(
- 'Repeat cleaving required for %r' % broker.db_files[0],
- warning_lines[0])
+ self.assertIn('Repeat cleaving required', warning_lines[0])
self.assertFalse(warning_lines[1:])
sharder.logger.clear()
context = CleavingContext.load(broker)
@@ -3016,7 +3312,8 @@ class TestSharder(BaseTestSharder):
self.assertEqual(SHARDING, broker.get_db_state())
warning_lines = sharder.logger.get_lines_for_level('warning')
self.assertEqual(warning_lines[0],
- 'Failed to get own_shard_range for a/c')
+ 'Failed to get own_shard_range, path: a/c, db: %s'
+ % broker.db_file)
def test_sharded_record_sharding_progress_missing_contexts(self):
broker = self._check_complete_sharding(
@@ -3072,9 +3369,9 @@ class TestSharder(BaseTestSharder):
sharder._record_sharding_progress(broker, {}, None)
warning_lines = sharder.logger.get_lines_for_level('warning')
self.assertIn(
- 'Cleaving has not completed in %.2f seconds since %s.' %
- (future_time - float(own_shard_range.epoch),
- own_shard_range.epoch.isoformat),
+ 'Cleaving has not completed in %.2f seconds since %s. DB state: '
+ 'sharding' % (future_time - float(own_shard_range.epoch),
+ own_shard_range.epoch.isoformat),
warning_lines[0])
def test_incomplete_shrinking_progress_warning_log(self):
@@ -3783,8 +4080,11 @@ class TestSharder(BaseTestSharder):
self._assert_stats(expected_stats, sharder, 'misplaced')
lines = sharder.logger.get_lines_for_level('warning')
- self.assertIn('Refused to remove misplaced objects', lines[0])
- self.assertIn('Refused to remove misplaced objects', lines[1])
+ shard_ranges = broker.get_shard_ranges()
+ self.assertIn('Refused to remove misplaced objects for dest %s'
+ % shard_ranges[2], lines[0])
+ self.assertIn('Refused to remove misplaced objects for dest %s'
+ % shard_ranges[3], lines[1])
self.assertFalse(lines[2:])
# they will be moved again on next cycle
@@ -4844,6 +5144,7 @@ class TestSharder(BaseTestSharder):
self.assertTrue(sharding_enabled(broker))
def test_send_shard_ranges(self):
+ broker = self._make_broker()
shard_ranges = self._make_shard_ranges((('', 'h'), ('h', '')))
def do_test(replicas, *resp_codes):
@@ -4856,7 +5157,7 @@ class TestSharder(BaseTestSharder):
with mocked_http_conn(*resp_codes, give_send=on_send) as conn:
with mock_timestamp_now() as now:
res = sharder._send_shard_ranges(
- 'a', 'c', shard_ranges)
+ broker, 'a', 'c', shard_ranges)
self.assertEqual(sharder.ring.replica_count, len(conn.requests))
expected_body = json.dumps([dict(sr) for sr in shard_ranges])
@@ -4882,129 +5183,182 @@ class TestSharder(BaseTestSharder):
self.assertTrue(
req_headers['User-Agent'].startswith('container-sharder'))
self.assertEqual(sharder.ring.replica_count, len(hosts))
- return res, sharder
+ return res, sharder, hosts
replicas = 3
- res, sharder = do_test(replicas, 202, 202, 202)
+ res, sharder, _ = do_test(replicas, 202, 202, 202)
self.assertTrue(res)
self.assertFalse(sharder.logger.get_lines_for_level('warning'))
self.assertFalse(sharder.logger.get_lines_for_level('error'))
- res, sharder = do_test(replicas, 202, 202, 404)
+ res, sharder, _ = do_test(replicas, 202, 202, 404)
self.assertTrue(res)
self.assertEqual([True], [
'Failed to put shard ranges' in line for line in
sharder.logger.get_lines_for_level('warning')])
+ self.assertEqual([True], [
+ 'path: a/c, db: %s' % broker.db_file in line for line in
+ sharder.logger.get_lines_for_level('warning')])
self.assertFalse(sharder.logger.get_lines_for_level('error'))
- res, sharder = do_test(replicas, 202, 202, Exception)
+ res, sharder, _ = do_test(replicas, 202, 202, Exception)
self.assertTrue(res)
self.assertFalse(sharder.logger.get_lines_for_level('warning'))
self.assertEqual([True], [
'Failed to put shard ranges' in line for line in
sharder.logger.get_lines_for_level('error')])
- res, sharder = do_test(replicas, 202, 404, 404)
+ self.assertEqual([True], [
+ 'path: a/c, db: %s' % broker.db_file in line for line in
+ sharder.logger.get_lines_for_level('error')])
+ res, sharder, _ = do_test(replicas, 202, 404, 404)
self.assertFalse(res)
self.assertEqual([True, True], [
'Failed to put shard ranges' in line for line in
sharder.logger.get_lines_for_level('warning')])
+ self.assertEqual([True, True], [
+ 'path: a/c, db: %s' % broker.db_file in line for line in
+ sharder.logger.get_lines_for_level('warning')])
self.assertFalse(sharder.logger.get_lines_for_level('error'))
- res, sharder = do_test(replicas, 500, 500, 500)
+ res, sharder, hosts = do_test(replicas, 500, 500, 500)
self.assertFalse(res)
- self.assertEqual([True, True, True], [
- 'Failed to put shard ranges' in line for line in
- sharder.logger.get_lines_for_level('warning')])
+ self.assertEqual(set(
+ 'Failed to put shard ranges to %s a/c: 500, path: a/c, db: %s' %
+ (host, broker.db_file) for host in hosts),
+ set(sharder.logger.get_lines_for_level('warning')))
self.assertFalse(sharder.logger.get_lines_for_level('error'))
- res, sharder = do_test(replicas, Exception, Exception, 202)
+ res, sharder, _ = do_test(replicas, Exception, Exception, 202)
self.assertEqual([True, True], [
'Failed to put shard ranges' in line for line in
sharder.logger.get_lines_for_level('error')])
- res, sharder = do_test(replicas, Exception, eventlet.Timeout(), 202)
+ self.assertEqual([True, True], [
+ 'path: a/c, db: %s' % broker.db_file in line for line in
+ sharder.logger.get_lines_for_level('error')])
+ res, sharder, _ = do_test(replicas, Exception, eventlet.Timeout(), 202)
self.assertFalse(sharder.logger.get_lines_for_level('warning'))
self.assertEqual([True, True], [
'Failed to put shard ranges' in line for line in
sharder.logger.get_lines_for_level('error')])
+ self.assertEqual([True, True], [
+ 'path: a/c, db: %s' % broker.db_file in line for line in
+ sharder.logger.get_lines_for_level('error')])
replicas = 2
- res, sharder = do_test(replicas, 202, 202)
+ res, sharder, _ = do_test(replicas, 202, 202)
self.assertTrue(res)
self.assertFalse(sharder.logger.get_lines_for_level('warning'))
self.assertFalse(sharder.logger.get_lines_for_level('error'))
- res, sharder = do_test(replicas, 202, 404)
+ res, sharder, _ = do_test(replicas, 202, 404)
self.assertTrue(res)
self.assertEqual([True], [
'Failed to put shard ranges' in line for line in
sharder.logger.get_lines_for_level('warning')])
+ self.assertEqual([True], [
+ 'path: a/c, db: %s' % broker.db_file in line for line in
+ sharder.logger.get_lines_for_level('warning')])
self.assertFalse(sharder.logger.get_lines_for_level('error'))
- res, sharder = do_test(replicas, 202, Exception)
+ res, sharder, _ = do_test(replicas, 202, Exception)
self.assertTrue(res)
self.assertFalse(sharder.logger.get_lines_for_level('warning'))
self.assertEqual([True], [
'Failed to put shard ranges' in line for line in
sharder.logger.get_lines_for_level('error')])
- res, sharder = do_test(replicas, 404, 404)
+ self.assertEqual([True], [
+ 'path: a/c, db: %s' % broker.db_file in line for line in
+ sharder.logger.get_lines_for_level('error')])
+ res, sharder, _ = do_test(replicas, 404, 404)
self.assertFalse(res)
self.assertEqual([True, True], [
'Failed to put shard ranges' in line for line in
sharder.logger.get_lines_for_level('warning')])
+ self.assertEqual([True, True], [
+ 'path: a/c, db: %s' % broker.db_file in line for line in
+ sharder.logger.get_lines_for_level('warning')])
self.assertFalse(sharder.logger.get_lines_for_level('error'))
- res, sharder = do_test(replicas, Exception, Exception)
+ res, sharder, hosts = do_test(replicas, Exception, Exception)
self.assertFalse(res)
self.assertFalse(sharder.logger.get_lines_for_level('warning'))
- self.assertEqual([True, True], [
- 'Failed to put shard ranges' in line for line in
- sharder.logger.get_lines_for_level('error')])
- res, sharder = do_test(replicas, eventlet.Timeout(), Exception)
+ self.assertEqual(set(
+ 'Failed to put shard ranges to %s a/c: FakeStatus Error, '
+ 'path: a/c, db: %s: ' % (host, broker.db_file) for host in hosts),
+ set(sharder.logger.get_lines_for_level('error')))
+ res, sharder, _ = do_test(replicas, eventlet.Timeout(), Exception)
self.assertFalse(res)
self.assertFalse(sharder.logger.get_lines_for_level('warning'))
self.assertEqual([True, True], [
'Failed to put shard ranges' in line for line in
sharder.logger.get_lines_for_level('error')])
+ self.assertEqual([True, True], [
+ 'path: a/c, db: %s' % broker.db_file in line for line in
+ sharder.logger.get_lines_for_level('error')])
replicas = 4
- res, sharder = do_test(replicas, 202, 202, 202, 202)
+ res, sharder, _ = do_test(replicas, 202, 202, 202, 202)
self.assertFalse(sharder.logger.get_lines_for_level('warning'))
self.assertFalse(sharder.logger.get_lines_for_level('error'))
self.assertTrue(res)
- res, sharder = do_test(replicas, 202, 202, 404, 404)
+ res, sharder, _ = do_test(replicas, 202, 202, 404, 404)
self.assertTrue(res)
self.assertEqual([True, True], [
'Failed to put shard ranges' in line for line in
sharder.logger.get_lines_for_level('warning')])
+ self.assertEqual([True, True], [
+ 'path: a/c, db: %s' % broker.db_file in line for line in
+ sharder.logger.get_lines_for_level('warning')])
self.assertFalse(sharder.logger.get_lines_for_level('error'))
- res, sharder = do_test(replicas, 202, 202, Exception, Exception)
+ res, sharder, _ = do_test(replicas, 202, 202, Exception, Exception)
self.assertTrue(res)
self.assertFalse(sharder.logger.get_lines_for_level('warning'))
self.assertEqual([True, True], [
'Failed to put shard ranges' in line for line in
sharder.logger.get_lines_for_level('error')])
- res, sharder = do_test(replicas, 202, 404, 404, 404)
+ self.assertEqual([True, True], [
+ 'path: a/c, db: %s' % broker.db_file in line for line in
+ sharder.logger.get_lines_for_level('error')])
+ res, sharder, _ = do_test(replicas, 202, 404, 404, 404)
self.assertFalse(res)
self.assertEqual([True, True, True], [
'Failed to put shard ranges' in line for line in
sharder.logger.get_lines_for_level('warning')])
+ self.assertEqual([True, True, True], [
+ 'path: a/c, db: %s' % broker.db_file in line for line in
+ sharder.logger.get_lines_for_level('warning')])
self.assertFalse(sharder.logger.get_lines_for_level('error'))
- res, sharder = do_test(replicas, 500, 500, 500, 202)
+ res, sharder, _ = do_test(replicas, 500, 500, 500, 202)
self.assertFalse(res)
self.assertEqual([True, True, True], [
'Failed to put shard ranges' in line for line in
sharder.logger.get_lines_for_level('warning')])
+ self.assertEqual([True, True, True], [
+ 'path: a/c, db: %s' % broker.db_file in line for line in
+ sharder.logger.get_lines_for_level('warning')])
self.assertFalse(sharder.logger.get_lines_for_level('error'))
- res, sharder = do_test(replicas, Exception, Exception, 202, 404)
+ res, sharder, _ = do_test(replicas, Exception, Exception, 202, 404)
self.assertFalse(res)
self.assertEqual([True], [
all(msg in line for msg in ('Failed to put shard ranges', '404'))
for line in sharder.logger.get_lines_for_level('warning')])
+ self.assertEqual([True], [
+ 'path: a/c, db: %s' % broker.db_file in line for line in
+ sharder.logger.get_lines_for_level('warning')])
self.assertEqual([True, True], [
'Failed to put shard ranges' in line for line in
sharder.logger.get_lines_for_level('error')])
- res, sharder = do_test(
+ self.assertEqual([True, True], [
+ 'path: a/c, db: %s' % broker.db_file in line for line in
+ sharder.logger.get_lines_for_level('error')])
+ res, sharder, _ = do_test(
replicas, eventlet.Timeout(), eventlet.Timeout(), 202, 404)
self.assertFalse(res)
self.assertEqual([True], [
all(msg in line for msg in ('Failed to put shard ranges', '404'))
for line in sharder.logger.get_lines_for_level('warning')])
+ self.assertEqual([True], [
+ 'path: a/c, db: %s' % broker.db_file in line for line in
+ sharder.logger.get_lines_for_level('warning')])
self.assertEqual([True, True], [
'Failed to put shard ranges' in line for line in
sharder.logger.get_lines_for_level('error')])
+ self.assertEqual([True, True], [
+ 'path: a/c, db: %s' % broker.db_file in line for line in
+ sharder.logger.get_lines_for_level('error')])
def test_process_broker_not_sharding_no_others(self):
# verify that sharding process will not start when own shard range is
@@ -5072,8 +5426,8 @@ class TestSharder(BaseTestSharder):
self.assertEqual(SHARDED, broker.get_db_state())
self.assertEqual(epoch.normal, parse_db_filename(broker.db_file)[1])
lines = broker.logger.get_lines_for_level('info')
- self.assertIn('Completed creating shard range containers: 2 created, '
- 'from sharding container a/c', lines)
+ self.assertIn('Completed creating 2 shard range containers, '
+ 'path: a/c, db: %s' % broker.db_file, lines)
self.assertFalse(broker.logger.get_lines_for_level('warning'))
self.assertFalse(broker.logger.get_lines_for_level('error'))
self.assertEqual(deleted, broker.is_deleted())
@@ -5513,8 +5867,9 @@ class TestSharder(BaseTestSharder):
mocked.assert_not_called()
def assert_overlap_warning(line, state_text):
- self.assertIn(
- 'Audit failed for root %s' % broker.db_file, line)
+ self.assertIn('Audit failed for root', line)
+ self.assertIn(broker.db_file, line)
+ self.assertIn(broker.path, line)
self.assertIn(
'overlapping ranges in state %r: k-t s-y, y-z y-z'
% state_text, line)
@@ -5583,9 +5938,10 @@ class TestSharder(BaseTestSharder):
broker.merge_shard_ranges(shard_ranges)
def assert_missing_warning(line):
- self.assertIn(
- 'Audit failed for root %s' % broker.db_file, line)
+ self.assertIn('Audit failed for root', line)
self.assertIn('missing range(s): -a j-k z-', line)
+ self.assertIn('path: %s, db: %s' % (broker.path, broker.db_file),
+ line)
def check_missing():
own_shard_range = broker.get_own_shard_range()
@@ -5698,9 +6054,10 @@ class TestSharder(BaseTestSharder):
'swift.container.sharder.time.time',
return_value=future_time), self._mock_sharder() as sharder:
sharder._audit_container(broker)
- message = 'Reclaimable db stuck waiting for shrinking: %s (%s)' % (
- broker.db_file, broker.path)
- self.assertEqual([message], self.logger.get_lines_for_level('warning'))
+ self.assertEqual(
+ ['Reclaimable db stuck waiting for shrinking, path: %s, db: %s'
+ % (broker.path, broker.db_file)],
+ self.logger.get_lines_for_level('warning'))
# delete all shard ranges
for sr in shard_ranges:
@@ -5772,10 +6129,14 @@ class TestSharder(BaseTestSharder):
sharder, mock_swift = self.call_audit_container(broker, shard_ranges)
lines = sharder.logger.get_lines_for_level('warning')
self._assert_stats(expected_stats, sharder, 'audit_shard')
- self.assertIn('Audit failed for shard %s' % broker.db_file, lines[0])
+ self.assertIn('Audit failed for shard', lines[0])
self.assertIn('missing own shard range', lines[0])
- self.assertIn('Audit warnings for shard %s' % broker.db_file, lines[1])
+ self.assertIn('path: %s, db: %s' % (broker.path, broker.db_file),
+ lines[0])
+ self.assertIn('Audit warnings for shard', lines[1])
self.assertIn('account not in shards namespace', lines[1])
+ self.assertIn('path: %s, db: %s' % (broker.path, broker.db_file),
+ lines[1])
self.assertNotIn('root has no matching shard range', lines[1])
self.assertNotIn('unable to get shard ranges from root', lines[1])
self.assertFalse(lines[2:])
@@ -5786,8 +6147,10 @@ class TestSharder(BaseTestSharder):
sharder, mock_swift = self.call_audit_container(broker, shard_ranges)
lines = sharder.logger.get_lines_for_level('warning')
self._assert_stats(expected_stats, sharder, 'audit_shard')
- self.assertIn('Audit failed for shard %s' % broker.db_file, lines[0])
+ self.assertIn('Audit failed for shard', lines[0])
self.assertIn('missing own shard range', lines[0])
+ self.assertIn('path: %s, db: %s' % (broker.path, broker.db_file),
+ lines[0])
self.assertNotIn('unable to get shard ranges from root', lines[0])
self.assertFalse(lines[1:])
self.assertFalse(sharder.logger.get_lines_for_level('error'))
@@ -5811,12 +6174,14 @@ class TestSharder(BaseTestSharder):
sharder, mock_swift = self.call_audit_container(
broker, shard_ranges)
self._assert_stats(expected_stats, sharder, 'audit_shard')
- self.assertEqual(['Updating own shard range from root'],
+ self.assertEqual(['Updating own shard range from root, path: '
+ '.shards_a/shard_c, db: %s' % broker.db_file],
sharder.logger.get_lines_for_level('debug'))
expected = shard_ranges[1].copy()
- self.assertEqual(['Updated own shard range from %s to %s'
- % (own_shard_range, expected)],
- sharder.logger.get_lines_for_level('info'))
+ self.assertEqual(
+ ['Updated own shard range from %s to %s, path: .shards_a/shard_c, '
+ 'db: %s' % (own_shard_range, expected, broker.db_file)],
+ sharder.logger.get_lines_for_level('info'))
self.assertFalse(sharder.logger.get_lines_for_level('warning'))
self.assertFalse(sharder.logger.get_lines_for_level('error'))
self.assertFalse(broker.is_deleted())
@@ -5851,7 +6216,8 @@ class TestSharder(BaseTestSharder):
sharder, mock_swift = self.call_audit_container(broker, shard_ranges)
self._assert_stats(expected_stats, sharder, 'audit_shard')
- self.assertEqual(['Updating own shard range from root'],
+ self.assertEqual(['Updating own shard range from root, path: '
+ '.shards_a/shard_c, db: %s' % broker.db_file],
sharder.logger.get_lines_for_level('debug'))
self.assertFalse(sharder.logger.get_lines_for_level('warning'))
self.assertFalse(sharder.logger.get_lines_for_level('error'))
@@ -5885,7 +6251,9 @@ class TestSharder(BaseTestSharder):
exc=internal_client.UnexpectedResponse('bad', 'resp'))
lines = sharder.logger.get_lines_for_level('warning')
self.assertIn('Failed to get shard ranges', lines[0])
- self.assertIn('Audit warnings for shard %s' % broker.db_file, lines[1])
+ self.assertIn('Audit warnings for shard', lines[1])
+ self.assertIn('path: %s, db: %s' % (broker.path, broker.db_file),
+ lines[1])
self.assertNotIn('account not in shards namespace', lines[1])
self.assertNotIn('missing own shard range', lines[1])
self.assertNotIn('root has no matching shard range', lines[1])
@@ -5913,12 +6281,14 @@ class TestSharder(BaseTestSharder):
broker, shard_ranges)
self.assert_no_audit_messages(sharder, mock_swift)
self.assertFalse(broker.is_deleted())
- self.assertEqual(['Updating own shard range from root'],
+ self.assertEqual(['Updating own shard range from root, path: '
+ '.shards_a/shard_c, db: %s' % broker.db_file],
sharder.logger.get_lines_for_level('debug'))
expected = shard_ranges[1].copy()
- self.assertEqual(['Updated own shard range from %s to %s'
- % (own_shard_range, expected)],
- sharder.logger.get_lines_for_level('info'))
+ self.assertEqual(
+ ['Updated own shard range from %s to %s, path: .shards_a/shard_c, '
+ 'db: %s' % (own_shard_range, expected, broker.db_file)],
+ sharder.logger.get_lines_for_level('info'))
# own shard range state is updated from root version
own_shard_range = broker.get_own_shard_range()
self.assertEqual(ShardRange.SHARDING, own_shard_range.state)
@@ -5959,9 +6329,9 @@ class TestSharder(BaseTestSharder):
shard_ranges = self._make_shard_ranges(shard_bounds, shard_states)

        def check_audit(own_state, root_state):
- broker = self._make_broker(
- account='.shards_a',
- container='shard_c_%s' % root_ts.normal)
+ shard_container = 'shard_c_%s' % root_ts.normal
+ broker = self._make_broker(account='.shards_a',
+ container=shard_container)
broker.set_sharding_sysmeta(*args)
shard_ranges[1].name = broker.path
@@ -5982,8 +6352,10 @@ class TestSharder(BaseTestSharder):
self._assert_stats(expected_stats, sharder, 'audit_shard')
debug_lines = sharder.logger.get_lines_for_level('debug')
self.assertGreater(len(debug_lines), 0)
- self.assertEqual('Updating own shard range from root',
- debug_lines[0])
+ self.assertEqual(
+ 'Updating own shard range from root, path: .shards_a/%s, '
+ 'db: %s' % (shard_container, broker.db_file),
+ sharder.logger.get_lines_for_level('debug')[0])
self.assertFalse(sharder.logger.get_lines_for_level('warning'))
self.assertFalse(sharder.logger.get_lines_for_level('error'))
self.assertFalse(broker.is_deleted())
@@ -7056,8 +7428,10 @@ class TestSharder(BaseTestSharder):
self._assert_shard_ranges_equal([donor, acceptor, shard_ranges[2]],
broker.get_shard_ranges())
sharder._send_shard_ranges.assert_has_calls(
- [mock.call(acceptor.account, acceptor.container, [acceptor]),
- mock.call(donor.account, donor.container, [donor, acceptor])]
+ [mock.call(broker, acceptor.account, acceptor.container,
+ [acceptor]),
+ mock.call(broker, donor.account, donor.container,
+ [donor, acceptor])]
)

        # check idempotency
@@ -7068,8 +7442,10 @@ class TestSharder(BaseTestSharder):
self._assert_shard_ranges_equal([donor, acceptor, shard_ranges[2]],
broker.get_shard_ranges())
sharder._send_shard_ranges.assert_has_calls(
- [mock.call(acceptor.account, acceptor.container, [acceptor]),
- mock.call(donor.account, donor.container, [donor, acceptor])]
+ [mock.call(broker, acceptor.account, acceptor.container,
+ [acceptor]),
+ mock.call(broker, donor.account, donor.container,
+ [donor, acceptor])]
)

        # acceptor falls below threshold - not a candidate
@@ -7082,8 +7458,10 @@ class TestSharder(BaseTestSharder):
self._assert_shard_ranges_equal([donor, acceptor, shard_ranges[2]],
broker.get_shard_ranges())
sharder._send_shard_ranges.assert_has_calls(
- [mock.call(acceptor.account, acceptor.container, [acceptor]),
- mock.call(donor.account, donor.container, [donor, acceptor])]
+ [mock.call(broker, acceptor.account, acceptor.container,
+ [acceptor]),
+ mock.call(broker, donor.account, donor.container,
+ [donor, acceptor])]
)

        # ...until donor has shrunk
@@ -7102,9 +7480,9 @@ class TestSharder(BaseTestSharder):
[donor, new_donor, new_acceptor],
broker.get_shard_ranges(include_deleted=True))
sharder._send_shard_ranges.assert_has_calls(
- [mock.call(new_acceptor.account, new_acceptor.container,
+ [mock.call(broker, new_acceptor.account, new_acceptor.container,
[new_acceptor]),
- mock.call(new_donor.account, new_donor.container,
+ mock.call(broker, new_donor.account, new_donor.container,
[new_donor, new_acceptor])]
)
@@ -7123,7 +7501,7 @@ class TestSharder(BaseTestSharder):
[donor, new_donor, final_donor],
broker.get_shard_ranges(include_deleted=True))
sharder._send_shard_ranges.assert_has_calls(
- [mock.call(final_donor.account, final_donor.container,
+ [mock.call(broker, final_donor.account, final_donor.container,
[final_donor, broker.get_own_shard_range()])]
)
@@ -7169,8 +7547,10 @@ class TestSharder(BaseTestSharder):
broker.get_shard_ranges())
for donor, acceptor in (shard_ranges[:2], shard_ranges[3:5]):
sharder._send_shard_ranges.assert_has_calls(
- [mock.call(acceptor.account, acceptor.container, [acceptor]),
- mock.call(donor.account, donor.container, [donor, acceptor])]
+ [mock.call(broker, acceptor.account, acceptor.container,
+ [acceptor]),
+ mock.call(broker, donor.account, donor.container,
+ [donor, acceptor])]
)

    def test_partition_and_device_filters(self):
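The sharder changes above all converge on one logging convention: warning, error, info and debug lines now end with the broker's path and database file. A minimal sketch of that suffix (helper name hypothetical; the real sharder wires this through its own logging methods):

    # Hypothetical helper showing the 'path: ..., db: ...' suffix the
    # updated assertions expect; not the sharder's actual API.
    def annotate(broker, msg):
        return '%s, path: %s, db: %s' % (msg, broker.path, broker.db_file)

    # annotate(broker, 'Failed to put shard ranges')
    # -> 'Failed to put shard ranges, path: a/c, db: /srv/.../hash.db'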
diff --git a/test/unit/obj/test_diskfile.py b/test/unit/obj/test_diskfile.py
index 59baa38ab..8d3a484b7 100644
--- a/test/unit/obj/test_diskfile.py
+++ b/test/unit/obj/test_diskfile.py
@@ -47,6 +47,7 @@ from test.unit import (mock as unit_mock, temptree, mock_check_drive,
encode_frag_archive_bodies, skip_if_no_xattrs)
from swift.obj import diskfile
from swift.common import utils
+from swift.common.utils import libc
from swift.common.utils import hash_path, mkdirs, Timestamp, \
encode_timestamps, O_TMPFILE, md5 as _md5
from swift.common import ring
@@ -4748,7 +4749,7 @@ class DiskFileMixin(BaseDiskFileTestMixin):
# This is a horrible hack so you can run this test in isolation.
# Some of the ctypes machinery calls os.close(), and that runs afoul
# of our mock.
- with mock.patch.object(utils, '_sys_fallocate', None):
+ with mock.patch.object(libc, '_sys_fallocate', None):
utils.disable_fallocate()
df = self.df_mgr.get_diskfile(self.existing_device, '0', 'abc',
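The one-line change above is the classic mock pitfall: patch a name where it is looked up, not where it used to be defined. With _sys_fallocate moved into the new swift.common.utils.libc module, the patch target has to move with it. A minimal sketch, assuming the module layout introduced in this diff:

    from unittest import mock
    from swift.common.utils import libc

    # Patching swift.common.utils._sys_fallocate would no longer take
    # effect: after the split, the fallocate machinery resolves the name
    # from the libc module's globals.
    with mock.patch.object(libc, '_sys_fallocate', None):
        pass  # code under test sees _sys_fallocate as None here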
diff --git a/test/unit/obj/test_server.py b/test/unit/obj/test_server.py
index 5f022e3c5..ff76e9146 100644
--- a/test/unit/obj/test_server.py
+++ b/test/unit/obj/test_server.py
@@ -6637,6 +6637,30 @@ class TestObjectController(BaseTestCase):
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 404)
+        # ...unless sending an x-backend-replication header, which lets you
+        # modify x-delete-at
+ now += 2
+ recreate_test_object(now)
+ the_time = delete_at_timestamp + 2
+ req = Request.blank(
+ '/sda1/p/a/c/o',
+ environ={'REQUEST_METHOD': 'POST'},
+ headers={'X-Timestamp': normalize_timestamp(the_time),
+ 'x-backend-replication': 'true',
+ 'x-delete-at': str(delete_at_timestamp + 100)})
+ resp = req.get_response(self.object_controller)
+ self.assertEqual(resp.status_int, 202)
+ # ...so the object becomes accessible again even without an
+ # x-backend-replication header
+ the_time = delete_at_timestamp + 3
+ req = Request.blank(
+ '/sda1/p/a/c/o',
+ environ={'REQUEST_METHOD': 'POST'},
+ headers={'X-Timestamp': normalize_timestamp(the_time),
+ 'x-delete-at': str(delete_at_timestamp + 101)})
+ resp = req.get_response(self.object_controller)
+ self.assertEqual(resp.status_int, 202)
+

    def test_DELETE_can_skip_updating_expirer_queue(self):
policy = POLICIES.get_by_index(0)
test_time = time()
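The added test pins down an escape hatch in the expired-object 404 handling: a POST carrying x-backend-replication is allowed through and may rewrite x-delete-at, after which ordinary requests succeed again (the 202s asserted above). A sketch of the kind of request involved, with hypothetical timestamp values:

    import time
    from swift.common.swob import Request
    from swift.common.utils import normalize_timestamp

    now = time.time()  # hypothetical: fresher than the expired object
    req = Request.blank(
        '/sda1/p/a/c/o',
        environ={'REQUEST_METHOD': 'POST'},
        headers={'X-Timestamp': normalize_timestamp(now),
                 'x-backend-replication': 'true',
                 'x-delete-at': str(int(now) + 100)})  # new, future expiry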
diff --git a/test/unit/proxy/controllers/test_base.py b/test/unit/proxy/controllers/test_base.py
index 8ed352842..73d61c6ef 100644
--- a/test/unit/proxy/controllers/test_base.py
+++ b/test/unit/proxy/controllers/test_base.py
@@ -152,7 +152,10 @@ class ZeroCacheDynamicResponseFactory(DynamicResponseFactory):
class FakeApp(object):
recheck_container_existence = 30
+ container_existence_skip_cache = 0
recheck_account_existence = 30
+ account_existence_skip_cache = 0
+ logger = None

    def __init__(self, response_factory=None, statuses=None):
self.responses = response_factory or \
@@ -352,6 +355,29 @@ class TestFuncs(BaseTest):
self.assertEqual([e['swift.source'] for e in app.captured_envs],
['MC', 'MC'])

+    def test_get_container_info_in_pipeline(self):
+ final_app = FakeApp()
+
+ def factory(app):
+ def wsgi_filter(env, start_response):
+ # lots of middlewares get info...
+ if env['PATH_INFO'].count('/') > 2:
+ get_container_info(env, app)
+ else:
+ get_account_info(env, app)
+ # ...then decide to no-op based on the result
+ return app(env, start_response)
+
+ wsgi_filter._pipeline_final_app = final_app
+ return wsgi_filter
+
+ # build up a pipeline
+ filtered_app = factory(factory(factory(final_app)))
+ req = Request.blank("/v1/a/c/o", environ={'swift.cache': FakeCache()})
+ req.get_response(filtered_app)
+ self.assertEqual([e['PATH_INFO'] for e in final_app.captured_envs],
+ ['/v1/a', '/v1/a/c', '/v1/a/c/o'])
+

    def test_get_object_info_swift_source(self):
app = FakeApp()
req = Request.blank("/v1/a/c/o",
@@ -475,7 +501,7 @@ class TestFuncs(BaseTest):
self.assertEqual(get_cache_key("account", "cont", shard="listing"),
'shard-listing/account/cont')
self.assertEqual(get_cache_key("account", "cont", shard="updating"),
- 'shard-updating/account/cont')
+ 'shard-updating-v2/account/cont')
self.assertRaises(ValueError,
get_cache_key, "account", shard="listing")
self.assertRaises(ValueError,
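The new FakeApp attributes and test_get_container_info_in_pipeline both exercise the same convention: every filter in the proxy pipeline exposes the right-most WSGI app as _pipeline_final_app, so that get_account_info / get_container_info called from any middleware consult that app's *_existence_skip_cache settings. A minimal conforming filter factory, mirroring the test rather than any real middleware:

    def make_filter(app, final_app):
        def wsgi_filter(env, start_response):
            # a real filter might call get_account_info or
            # get_container_info here; those helpers follow
            # _pipeline_final_app to read the skip-cache settings
            return app(env, start_response)
        wsgi_filter._pipeline_final_app = final_app
        return wsgi_filter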
diff --git a/test/unit/proxy/controllers/test_container.py b/test/unit/proxy/controllers/test_container.py
index b2cfb6f93..c010c7227 100644
--- a/test/unit/proxy/controllers/test_container.py
+++ b/test/unit/proxy/controllers/test_container.py
@@ -546,7 +546,8 @@ class TestContainerController(TestRingBase):
req['headers'])
return resp
- def check_response(self, resp, root_resp_hdrs, expected_objects=None):
+ def check_response(self, resp, root_resp_hdrs, expected_objects=None,
+ exp_sharding_state='sharded'):
info_hdrs = dict(root_resp_hdrs)
if expected_objects is None:
# default is to expect whatever the root container sent
@@ -561,7 +562,8 @@ class TestContainerController(TestRingBase):
int(resp.headers['X-Container-Bytes-Used']))
self.assertEqual(expected_obj_count,
int(resp.headers['X-Container-Object-Count']))
- self.assertEqual('sharded', resp.headers['X-Backend-Sharding-State'])
+ self.assertEqual(exp_sharding_state,
+ resp.headers['X-Backend-Sharding-State'])
for k, v in root_resp_hdrs.items():
if k.lower().startswith('x-container-meta'):
self.assertEqual(v, resp.headers[k])
@@ -1864,7 +1866,7 @@ class TestContainerController(TestRingBase):
self.check_response(resp, root_resp_hdrs,
expected_objects=expected_objects)
- def test_GET_sharded_container_gap_in_shards(self):
+ def test_GET_sharded_container_gap_in_shards_no_memcache(self):
# verify ordered listing even if unexpected gap between shard ranges
shard_bounds = (('', 'ham'), ('onion', 'pie'), ('rhubarb', ''))
shard_ranges = [
@@ -1888,6 +1890,7 @@ class TestContainerController(TestRingBase):
num_all_objects = len(all_objects)
limit = CONTAINER_LISTING_LIMIT
root_resp_hdrs = {'X-Backend-Sharding-State': 'sharded',
+ 'X-Backend-Override-Shard-Name-Filter': 'true',
'X-Backend-Timestamp': '99',
'X-Container-Object-Count': num_all_objects,
'X-Container-Bytes-Used': size_all_objects,
@@ -1920,9 +1923,143 @@ class TestContainerController(TestRingBase):
]
resp = self._check_GET_shard_listing(
- mock_responses, all_objects, expected_requests)
- # root object count will overridden by actual length of listing
+ mock_responses, all_objects, expected_requests, memcache=False)
+ # root object count will be overridden by actual length of listing
self.check_response(resp, root_resp_hdrs)
+ self.assertNotIn('swift.cache', resp.request.environ)
+
+ def test_GET_sharding_container_gap_in_shards_memcache(self):
+ # verify ordered listing even if unexpected gap between shard ranges;
+ # root is sharding so shard ranges are not cached
+ shard_bounds = (('', 'ham'), ('onion', 'pie'), ('rhubarb', ''))
+ shard_ranges = [
+ ShardRange('.shards_a/c_' + upper, Timestamp.now(), lower, upper)
+ for lower, upper in shard_bounds]
+ sr_dicts = [dict(sr) for sr in shard_ranges]
+ sr_objs = [self._make_shard_objects(sr) for sr in shard_ranges]
+ shard_resp_hdrs = [
+ {'X-Backend-Sharding-State': 'unsharded',
+ 'X-Container-Object-Count': len(sr_objs[i]),
+ 'X-Container-Bytes-Used':
+ sum([obj['bytes'] for obj in sr_objs[i]]),
+ 'X-Container-Meta-Flavour': 'flavour%d' % i,
+ 'X-Backend-Storage-Policy-Index': 0}
+ for i in range(3)]
+
+ all_objects = []
+ for objects in sr_objs:
+ all_objects.extend(objects)
+ size_all_objects = sum([obj['bytes'] for obj in all_objects])
+ num_all_objects = len(all_objects)
+ limit = CONTAINER_LISTING_LIMIT
+ root_resp_hdrs = {'X-Backend-Sharding-State': 'sharding',
+ 'X-Backend-Override-Shard-Name-Filter': 'true',
+ 'X-Backend-Timestamp': '99',
+ 'X-Container-Object-Count': num_all_objects,
+ 'X-Container-Bytes-Used': size_all_objects,
+ 'X-Container-Meta-Flavour': 'peach',
+ 'X-Backend-Storage-Policy-Index': 0}
+ root_shard_resp_hdrs = dict(root_resp_hdrs)
+ root_shard_resp_hdrs['X-Backend-Record-Type'] = 'shard'
+
+ mock_responses = [
+ # status, body, headers
+ (200, sr_dicts, root_shard_resp_hdrs),
+ (200, sr_objs[0], shard_resp_hdrs[0]),
+ (200, sr_objs[1], shard_resp_hdrs[1]),
+ (200, sr_objs[2], shard_resp_hdrs[2])
+ ]
+ # NB marker always advances to last object name
+ expected_requests = [
+ # path, headers, params
+ ('a/c', {'X-Backend-Record-Type': 'auto'},
+ dict(states='listing')), # 200
+ (shard_ranges[0].name, {'X-Backend-Record-Type': 'auto'},
+ dict(marker='', end_marker='ham\x00', states='listing',
+ limit=str(limit))), # 200
+ (shard_ranges[1].name, {'X-Backend-Record-Type': 'auto'},
+ dict(marker='h', end_marker='pie\x00', states='listing',
+ limit=str(limit - len(sr_objs[0])))), # 200
+ (shard_ranges[2].name, {'X-Backend-Record-Type': 'auto'},
+ dict(marker='p', end_marker='', states='listing',
+ limit=str(limit - len(sr_objs[0] + sr_objs[1])))) # 200
+ ]
+
+ resp = self._check_GET_shard_listing(
+ mock_responses, all_objects, expected_requests, memcache=True)
+ # root object count will be overridden by actual length of listing
+ self.check_response(resp, root_resp_hdrs,
+ exp_sharding_state='sharding')
+ self.assertIn('swift.cache', resp.request.environ)
+ self.assertNotIn('shard-listing/a/c',
+ resp.request.environ['swift.cache'].store)
+
+ def test_GET_sharded_container_gap_in_shards_memcache(self):
+ # verify ordered listing even if unexpected gap between shard ranges
+ shard_bounds = (('', 'ham'), ('onion', 'pie'), ('rhubarb', ''))
+ shard_ranges = [
+ ShardRange('.shards_a/c_' + upper, Timestamp.now(), lower, upper)
+ for lower, upper in shard_bounds]
+ sr_dicts = [dict(sr) for sr in shard_ranges]
+ sr_objs = [self._make_shard_objects(sr) for sr in shard_ranges]
+ shard_resp_hdrs = [
+ {'X-Backend-Sharding-State': 'unsharded',
+ 'X-Container-Object-Count': len(sr_objs[i]),
+ 'X-Container-Bytes-Used':
+ sum([obj['bytes'] for obj in sr_objs[i]]),
+ 'X-Container-Meta-Flavour': 'flavour%d' % i,
+ 'X-Backend-Storage-Policy-Index': 0}
+ for i in range(3)]
+
+ all_objects = []
+ for objects in sr_objs:
+ all_objects.extend(objects)
+ size_all_objects = sum([obj['bytes'] for obj in all_objects])
+ num_all_objects = len(all_objects)
+ limit = CONTAINER_LISTING_LIMIT
+ root_resp_hdrs = {'X-Backend-Sharding-State': 'sharded',
+ 'X-Backend-Override-Shard-Name-Filter': 'true',
+ 'X-Backend-Timestamp': '99',
+ 'X-Container-Object-Count': num_all_objects,
+ 'X-Container-Bytes-Used': size_all_objects,
+ 'X-Container-Meta-Flavour': 'peach',
+ 'X-Backend-Storage-Policy-Index': 0}
+ root_shard_resp_hdrs = dict(root_resp_hdrs)
+ root_shard_resp_hdrs['X-Backend-Record-Type'] = 'shard'
+
+ mock_responses = [
+ # status, body, headers
+ (200, sr_dicts, root_shard_resp_hdrs),
+ (200, sr_objs[0], shard_resp_hdrs[0]),
+ (200, sr_objs[1], shard_resp_hdrs[1]),
+ (200, sr_objs[2], shard_resp_hdrs[2])
+ ]
+ # NB marker always advances to last object name
+ expected_requests = [
+ # path, headers, params
+ ('a/c', {'X-Backend-Record-Type': 'auto'},
+ dict(states='listing')), # 200
+ (shard_ranges[0].name, {'X-Backend-Record-Type': 'auto'},
+ dict(marker='', end_marker='ham\x00', states='listing',
+ limit=str(limit))), # 200
+ (shard_ranges[1].name, {'X-Backend-Record-Type': 'auto'},
+ dict(marker='h', end_marker='pie\x00', states='listing',
+ limit=str(limit - len(sr_objs[0])))), # 200
+ (shard_ranges[2].name, {'X-Backend-Record-Type': 'auto'},
+ dict(marker='p', end_marker='', states='listing',
+ limit=str(limit - len(sr_objs[0] + sr_objs[1])))) # 200
+ ]
+
+ resp = self._check_GET_shard_listing(
+ mock_responses, all_objects, expected_requests, memcache=True)
+ # root object count will be overridden by actual length of listing
+ self.check_response(resp, root_resp_hdrs)
+ self.assertIn('swift.cache', resp.request.environ)
+ self.assertIn('shard-listing/a/c',
+ resp.request.environ['swift.cache'].store)
+ self.assertEqual(
+ sr_dicts,
+ resp.request.environ['swift.cache'].store['shard-listing/a/c'])

    def test_GET_sharded_container_empty_shard(self):
# verify ordered listing when a shard is empty
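Taken together, the no_memcache / memcache variants above pin down when listing shard ranges get cached: only when memcache is available, the backend permits it (X-Backend-Override-Shard-Name-Filter: true), and the root has finished sharding; a root still in the 'sharding' state keeps its ranges out of cache because they are still changing. Roughly, as an illustration of the rule the tests assert rather than the proxy's actual code:

    def maybe_cache_listing_shards(memcache, account, container,
                                   sharding_state, sr_dicts):
        # a 'sharding' root is still mutating its shard ranges, so only
        # a 'sharded' root's listing ranges are worth caching
        if memcache is not None and sharding_state == 'sharded':
            memcache.set('shard-listing/%s/%s' % (account, container),
                         sr_dicts)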
diff --git a/test/unit/proxy/test_server.py b/test/unit/proxy/test_server.py
index 5ac90171d..50b1fcd1d 100644
--- a/test/unit/proxy/test_server.py
+++ b/test/unit/proxy/test_server.py
@@ -71,7 +71,7 @@ from swift.common import utils, constraints, registry
from swift.common.utils import hash_path, storage_directory, \
parse_content_type, parse_mime_headers, StatsdClient, \
iter_multipart_mime_documents, public, mkdirs, NullLogger, md5, \
- node_to_string
+ node_to_string, NamespaceBoundList
from swift.common.wsgi import loadapp, ConfigString
from swift.common.http_protocol import SwiftHttpProtocol
from swift.proxy.controllers import base as proxy_base
@@ -513,7 +513,8 @@ class TestController(unittest.TestCase):
def test_get_account_info_returns_values_as_strings(self):
app = mock.MagicMock()
- app._pipeline_final_app.account_existence_skip_cache = 0.0
+ app._pipeline_final_app = app
+ app.account_existence_skip_cache = 0.0
memcache = mock.MagicMock()
memcache.get = mock.MagicMock()
memcache.get.return_value = {
@@ -539,7 +540,8 @@ class TestController(unittest.TestCase):
def test_get_container_info_returns_values_as_strings(self):
app = mock.MagicMock()
- app._pipeline_final_app.container_existence_skip_cache = 0.0
+ app._pipeline_final_app = app
+ app.container_existence_skip_cache = 0.0
memcache = mock.MagicMock()
memcache.get = mock.MagicMock()
memcache.get.return_value = {
@@ -4368,13 +4370,16 @@ class TestReplicatedObjectController(
params={'states': 'updating'},
headers={'X-Backend-Record-Type': 'shard'})
- cache_key = 'shard-updating/a/c'
+ cache_key = 'shard-updating-v2/a/c'
self.assertIn(cache_key, req.environ['swift.cache'].store)
- self.assertEqual(req.environ['swift.cache'].store[cache_key],
- [dict(sr) for sr in shard_ranges])
+ cached_namespaces = NamespaceBoundList.parse(shard_ranges)
+ self.assertEqual(
+ req.environ['swift.cache'].store[cache_key],
+ cached_namespaces.bounds)
self.assertIn(cache_key, req.environ.get('swift.infocache'))
- self.assertEqual(req.environ['swift.infocache'][cache_key],
- tuple(dict(sr) for sr in shard_ranges))
+ self.assertEqual(
+ req.environ['swift.infocache'][cache_key].bounds,
+ cached_namespaces.bounds)
# make sure backend requests included expected container headers
container_headers = {}
@@ -4431,8 +4436,11 @@ class TestReplicatedObjectController(
'.shards_a/c_nope', utils.Timestamp.now(), 'u', ''),
]
cache = FakeMemcache()
- cache.set('shard-updating/a/c', tuple(
- dict(shard_range) for shard_range in shard_ranges))
+ cache.set(
+ 'shard-updating-v2/a/c',
+ tuple(
+ [shard_range.lower_str, str(shard_range.name)]
+ for shard_range in shard_ranges))
req = Request.blank('/v1/a/c/o', {'swift.cache': cache},
method=method, body='',
headers={'Content-Type': 'text/plain'})
@@ -4465,10 +4473,11 @@ class TestReplicatedObjectController(
container_request, method='HEAD', path='/sda/0/a/c')
# infocache gets populated from memcache
- cache_key = 'shard-updating/a/c'
+ cache_key = 'shard-updating-v2/a/c'
self.assertIn(cache_key, req.environ.get('swift.infocache'))
- self.assertEqual(req.environ['swift.infocache'][cache_key],
- tuple(dict(sr) for sr in shard_ranges))
+ self.assertEqual(
+ req.environ['swift.infocache'][cache_key].bounds,
+ NamespaceBoundList.parse(shard_ranges).bounds)
# make sure backend requests included expected container headers
container_headers = {}
@@ -4525,8 +4534,8 @@ class TestReplicatedObjectController(
'.shards_a/c_nope', utils.Timestamp.now(), 'u', ''),
]
infocache = {
- 'shard-updating/a/c':
- tuple(dict(shard_range) for shard_range in shard_ranges)}
+ 'shard-updating-v2/a/c':
+ NamespaceBoundList.parse(shard_ranges)}
req = Request.blank('/v1/a/c/o', {'swift.infocache': infocache},
method=method, body='',
headers={'Content-Type': 'text/plain'})
@@ -4558,10 +4567,11 @@ class TestReplicatedObjectController(
container_request, method='HEAD', path='/sda/0/a/c')
# verify content in infocache.
- cache_key = 'shard-updating/a/c'
+ cache_key = 'shard-updating-v2/a/c'
self.assertIn(cache_key, req.environ.get('swift.infocache'))
- self.assertEqual(req.environ['swift.infocache'][cache_key],
- tuple(dict(sr) for sr in shard_ranges))
+ self.assertEqual(
+ req.environ['swift.infocache'][cache_key].bounds,
+ NamespaceBoundList.parse(shard_ranges).bounds)
# make sure backend requests included expected container headers
container_headers = {}
@@ -4619,8 +4629,10 @@ class TestReplicatedObjectController(
'.shards_a/c_no_way', utils.Timestamp.now(), 'u', ''),
]
cache = FakeMemcache()
- cache.set('shard-updating/a/c', tuple(
- dict(shard_range) for shard_range in cached_shard_ranges))
+ cache.set('shard-updating-v2/a/c',
+ tuple(
+ [sr.lower_str, str(sr.name)]
+ for sr in cached_shard_ranges))
# sanity check: we can get the old shard from cache
req = Request.blank(
@@ -4634,7 +4646,7 @@ class TestReplicatedObjectController(
'x-backend-sharding-state': sharding_state,
'X-Backend-Record-Type': 'shard'}
with mock.patch('random.random', return_value=1), \
- mocked_http_conn(*status_codes, headers=resp_headers):
+ mocked_http_conn(*status_codes, headers=resp_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 202)
@@ -4644,13 +4656,16 @@ class TestReplicatedObjectController(
'object.shard_updating.cache.hit': 1}, stats)
# cached shard ranges are still there
- cache_key = 'shard-updating/a/c'
+ cache_key = 'shard-updating-v2/a/c'
self.assertIn(cache_key, req.environ['swift.cache'].store)
- self.assertEqual(req.environ['swift.cache'].store[cache_key],
- [dict(sr) for sr in cached_shard_ranges])
+ cached_namespaces = NamespaceBoundList.parse(cached_shard_ranges)
+ self.assertEqual(
+ req.environ['swift.cache'].store[cache_key],
+ cached_namespaces.bounds)
self.assertIn(cache_key, req.environ.get('swift.infocache'))
- self.assertEqual(req.environ['swift.infocache'][cache_key],
- tuple(dict(sr) for sr in cached_shard_ranges))
+ self.assertEqual(
+ req.environ['swift.infocache'][cache_key].bounds,
+ cached_namespaces.bounds)
# ...but we have some chance to skip cache
req = Request.blank(
@@ -4673,8 +4688,8 @@ class TestReplicatedObjectController(
dict(shard_range)
for shard_range in shard_ranges]).encode('ascii')
with mock.patch('random.random', return_value=0), \
- mocked_http_conn(*status_codes, headers=resp_headers,
- body=body) as fake_conn:
+ mocked_http_conn(*status_codes, headers=resp_headers,
+ body=body) as fake_conn:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 202)
@@ -4696,13 +4711,16 @@ class TestReplicatedObjectController(
headers={'X-Backend-Record-Type': 'shard'})
# and skipping cache will refresh it
- cache_key = 'shard-updating/a/c'
+ cache_key = 'shard-updating-v2/a/c'
self.assertIn(cache_key, req.environ['swift.cache'].store)
- self.assertEqual(req.environ['swift.cache'].store[cache_key],
- [dict(sr) for sr in shard_ranges])
+ cached_namespaces = NamespaceBoundList.parse(shard_ranges)
+ self.assertEqual(
+ req.environ['swift.cache'].store[cache_key],
+ cached_namespaces.bounds)
self.assertIn(cache_key, req.environ.get('swift.infocache'))
- self.assertEqual(req.environ['swift.infocache'][cache_key],
- tuple(dict(sr) for sr in shard_ranges))
+ self.assertEqual(
+ req.environ['swift.infocache'][cache_key].bounds,
+ cached_namespaces.bounds)
# make sure backend requests included expected container headers
container_headers = {}
@@ -4803,7 +4821,7 @@ class TestReplicatedObjectController(
headers={'X-Backend-Record-Type': 'shard'})
# infocache does not get populated from memcache
- cache_key = 'shard-updating/a/c'
+ cache_key = 'shard-updating-v2/a/c'
self.assertNotIn(cache_key, req.environ.get('swift.infocache'))
# make sure backend requests included expected container headers
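The shard-updating rework above also explains the -v2 key suffix: the cached value changes shape, from a list of full shard-range dicts to the compact bounds of a NamespaceBoundList, i.e. [lower_bound, shard_name] pairs, so the key is versioned to keep proxies running old and new code from misreading each other's entries in a shared memcache. A small sketch of the new shape, using the same helpers the tests import:

    from swift.common.utils import NamespaceBoundList, ShardRange, Timestamp

    ranges = [ShardRange('.shards_a/c_ham', Timestamp.now(), '', 'ham'),
              ShardRange('.shards_a/c_', Timestamp.now(), 'ham', '')]
    bounds = NamespaceBoundList.parse(ranges).bounds
    # bounds is what gets memcached under 'shard-updating-v2/a/c':
    # [['', '.shards_a/c_ham'], ['ham', '.shards_a/c_']]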