-rw-r--r--  .zuul.yaml                                     |    4
-rw-r--r--  doc/manpages/account-server.conf.5             |    2
-rw-r--r--  doc/manpages/container-reconciler.conf.5       |    2
-rw-r--r--  doc/manpages/container-server.conf.5           |    2
-rw-r--r--  doc/manpages/container-sync-realms.conf.5      |    2
-rw-r--r--  doc/manpages/object-expirer.conf.5             |    2
-rw-r--r--  doc/manpages/object-server.conf.5              |    2
-rw-r--r--  doc/manpages/proxy-server.conf.5               |    2
-rw-r--r--  doc/manpages/swift.conf.5                      |    2
-rw-r--r--  doc/source/crossdomain.rst                     |   30
-rw-r--r--  doc/source/development_middleware.rst          |    4
-rw-r--r--  doc/source/getting_started.rst                 |    6
-rw-r--r--  doc/source/howto_installmultinode.rst          |   15
-rw-r--r--  doc/source/index.rst                           |    1
-rw-r--r--  etc/internal-client.conf-sample                |    1
-rw-r--r--  etc/proxy-server.conf-sample                   |    6
-rw-r--r--  swift/cli/info.py                              |    3
-rw-r--r--  swift/cli/ringbuilder.py                       |    6
-rw-r--r--  swift/common/daemon.py                         |   10
-rw-r--r--  swift/common/internal_client.py                |   27
-rw-r--r--  swift/common/memcached.py                      |  114
-rw-r--r--  swift/common/middleware/backend_ratelimit.py   |   10
-rw-r--r--  swift/common/middleware/crossdomain.py         |   29
-rw-r--r--  swift/common/ring/ring.py                      |   41
-rw-r--r--  swift/common/utils/__init__.py                 | 1013
-rw-r--r--  swift/common/utils/libc.py                     |  487
-rw-r--r--  swift/common/utils/timestamp.py                |  399
-rw-r--r--  swift/common/wsgi.py                           |   13
-rw-r--r--  swift/container/backend.py                     |    6
-rw-r--r--  swift/container/sharder.py                     |   20
-rw-r--r--  swift/obj/diskfile.py                          |   23
-rw-r--r--  swift/obj/ssync_receiver.py                    |   16
-rw-r--r--  swift/obj/ssync_sender.py                      |    4
-rw-r--r--  swift/proxy/controllers/base.py                |   23
-rw-r--r--  swift/proxy/controllers/container.py           |  140
-rw-r--r--  swift/proxy/controllers/obj.py                 |  311
-rw-r--r--  test/probe/test_object_versioning.py           |   99
-rw-r--r--  test/unit/__init__.py                          |   44
-rw-r--r--  test/unit/cli/test_info.py                     |   10
-rw-r--r--  test/unit/common/ring/test_ring.py             |   16
-rw-r--r--  test/unit/common/test_daemon.py                |  126
-rw-r--r--  test/unit/common/test_internal_client.py       |   82
-rw-r--r--  test/unit/common/test_memcached.py             |   87
-rw-r--r--  test/unit/common/test_utils.py                 | 1567
-rw-r--r--  test/unit/common/test_wsgi.py                  |  121
-rw-r--r--  test/unit/common/utils/__init__.py             |    0
-rw-r--r--  test/unit/common/utils/test_libc.py            |  599
-rw-r--r--  test/unit/common/utils/test_timestamp.py       |  882
-rw-r--r--  test/unit/container/test_sharder.py            |   88
-rw-r--r--  test/unit/helpers.py                           |    1
-rw-r--r--  test/unit/obj/test_diskfile.py                 |  291
-rw-r--r--  test/unit/obj/test_ssync.py                    |   32
-rw-r--r--  test/unit/obj/test_ssync_receiver.py           |   18
-rw-r--r--  test/unit/obj/test_ssync_sender.py             |   45
-rw-r--r--  test/unit/obj/test_updater.py                  |    3
-rw-r--r--  test/unit/proxy/controllers/test_base.py       |   68
-rw-r--r--  test/unit/proxy/controllers/test_container.py  |  151
-rw-r--r--  test/unit/proxy/controllers/test_obj.py        |   61
-rw-r--r--  test/unit/proxy/test_server.py                 |   95
-rw-r--r--  tox.ini                                        |    7
60 files changed, 4433 insertions, 2838 deletions
diff --git a/.zuul.yaml b/.zuul.yaml
index dacda2200..16e5fd2e2 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -526,13 +526,13 @@
- job:
name: swift-tox-lower-constraints
parent: openstack-tox-lower-constraints
+ # This seems defensible for a l-c job
+ nodeset: ubuntu-jammy
vars:
bindep_profile: test py27
python_version: 2.7
tox_environment:
TMPDIR: '{{ ansible_env.HOME }}/xfstmp'
- # This seems defensible for a l-c job
- ensure_tox_version: '<4'
# Image building jobs
- secret:
diff --git a/doc/manpages/account-server.conf.5 b/doc/manpages/account-server.conf.5
index 53c3cc27d..c4caa9837 100644
--- a/doc/manpages/account-server.conf.5
+++ b/doc/manpages/account-server.conf.5
@@ -42,7 +42,7 @@ certain number of key/value parameters which are described later.
Any line that begins with a '#' symbol is ignored.
You can find more information about python-pastedeploy configuration format at
-\fIhttp://pythonpaste.org/deploy/#config-format\fR
+\fIhttps://docs.pylonsproject.org/projects/pastedeploy/en/latest/#config-format\fR
diff --git a/doc/manpages/container-reconciler.conf.5 b/doc/manpages/container-reconciler.conf.5
index 3c2333d09..79797b649 100644
--- a/doc/manpages/container-reconciler.conf.5
+++ b/doc/manpages/container-reconciler.conf.5
@@ -39,7 +39,7 @@ certain number of key/value parameters which are described later.
Any line that begins with a '#' symbol is ignored.
You can find more information about python-pastedeploy configuration format at
-\fIhttp://pythonpaste.org/deploy/#config-format\fR
+\fIhttps://docs.pylonsproject.org/projects/pastedeploy/en/latest/#config-format\fR
.SH GLOBAL SECTION
diff --git a/doc/manpages/container-server.conf.5 b/doc/manpages/container-server.conf.5
index d0b1778cc..151d03596 100644
--- a/doc/manpages/container-server.conf.5
+++ b/doc/manpages/container-server.conf.5
@@ -42,7 +42,7 @@ certain number of key/value parameters which are described later.
Any line that begins with a '#' symbol is ignored.
You can find more information about python-pastedeploy configuration format at
-\fIhttp://pythonpaste.org/deploy/#config-format\fR
+\fIhttps://docs.pylonsproject.org/projects/pastedeploy/en/latest/#config-format\fR
diff --git a/doc/manpages/container-sync-realms.conf.5 b/doc/manpages/container-sync-realms.conf.5
index 6602615aa..e96b40011 100644
--- a/doc/manpages/container-sync-realms.conf.5
+++ b/doc/manpages/container-sync-realms.conf.5
@@ -47,7 +47,7 @@ certain number of key/value parameters which are described later.
Any line that begins with a '#' symbol is ignored.
You can find more information about python-pastedeploy configuration format at
-\fIhttp://pythonpaste.org/deploy/#config-format\fR
+\fIhttps://docs.pylonsproject.org/projects/pastedeploy/en/latest/#config-format\fR
diff --git a/doc/manpages/object-expirer.conf.5 b/doc/manpages/object-expirer.conf.5
index 2ee94ec85..a822e563b 100644
--- a/doc/manpages/object-expirer.conf.5
+++ b/doc/manpages/object-expirer.conf.5
@@ -43,7 +43,7 @@ certain number of key/value parameters which are described later.
Any line that begins with a '#' symbol is ignored.
You can find more information about python-pastedeploy configuration format at
-\fIhttp://pythonpaste.org/deploy/#config-format\fR
+\fIhttps://docs.pylonsproject.org/projects/pastedeploy/en/latest/#config-format\fR
diff --git a/doc/manpages/object-server.conf.5 b/doc/manpages/object-server.conf.5
index 7150c6c91..3d37af4da 100644
--- a/doc/manpages/object-server.conf.5
+++ b/doc/manpages/object-server.conf.5
@@ -43,7 +43,7 @@ certain number of key/value parameters which are described later.
Any line that begins with a '#' symbol is ignored.
You can find more information about python-pastedeploy configuration format at
-\fIhttp://pythonpaste.org/deploy/#config-format\fR
+\fIhttps://docs.pylonsproject.org/projects/pastedeploy/en/latest/#config-format\fR
diff --git a/doc/manpages/proxy-server.conf.5 b/doc/manpages/proxy-server.conf.5
index 1c03197ea..adeeb6d81 100644
--- a/doc/manpages/proxy-server.conf.5
+++ b/doc/manpages/proxy-server.conf.5
@@ -41,7 +41,7 @@ certain number of key/value parameters which are described later.
Any line that begins with a '#' symbol is ignored.
You can find more information about python-pastedeploy configuration format at
-\fIhttp://pythonpaste.org/deploy/#config-format\fR
+\fIhttps://docs.pylonsproject.org/projects/pastedeploy/en/latest/#config-format\fR
diff --git a/doc/manpages/swift.conf.5 b/doc/manpages/swift.conf.5
index 87659b175..b750cfdd4 100644
--- a/doc/manpages/swift.conf.5
+++ b/doc/manpages/swift.conf.5
@@ -43,7 +43,7 @@ later.
Any line that begins with a '#' symbol is ignored.
You can find more information about python-pastedeploy configuration format at
-\fIhttp://pythonpaste.org/deploy/#config-format\fR
+\fIhttps://docs.pylonsproject.org/projects/pastedeploy/en/latest/#config-format\fR
diff --git a/doc/source/crossdomain.rst b/doc/source/crossdomain.rst
index 3ea578eb5..d2d55facc 100644
--- a/doc/source/crossdomain.rst
+++ b/doc/source/crossdomain.rst
@@ -9,10 +9,12 @@ with the Swift API.
See http://www.adobe.com/devnet/articles/crossdomain_policy_file_spec.html for
a description of the purpose and structure of the cross-domain policy
file. The cross-domain policy file is installed in the root of a web
-server (i.e., the path is /crossdomain.xml).
+server (i.e., the path is ``/crossdomain.xml``).
-The crossdomain middleware responds to a path of /crossdomain.xml with an
-XML document such as::
+The crossdomain middleware responds to a path of ``/crossdomain.xml`` with an
+XML document such as:
+
+.. code:: xml
<?xml version="1.0"?>
<!DOCTYPE cross-domain-policy SYSTEM "http://www.adobe.com/xml/dtds/cross-domain-policy.dtd" >
@@ -31,12 +33,16 @@ Configuration
To enable this middleware, add it to the pipeline in your proxy-server.conf
file. It should be added before any authentication (e.g., tempauth or
keystone) middleware. In this example ellipsis (...) indicate other
-middleware you may have chosen to use::
+middleware you may have chosen to use:
+
+.. code:: cfg
[pipeline:main]
pipeline = ... crossdomain ... authtoken ... proxy-server
-And add a filter section, such as::
+And add a filter section, such as:
+
+.. code:: cfg
[filter:crossdomain]
use = egg:swift#crossdomain
@@ -45,11 +51,19 @@ And add a filter section, such as::
For continuation lines, put some whitespace before the continuation
text. Ensure you put a completely blank line to terminate the
-cross_domain_policy value.
+``cross_domain_policy`` value.
-The cross_domain_policy name/value is optional. If omitted, the policy
-defaults as if you had specified::
+The ``cross_domain_policy`` name/value is optional. If omitted, the policy
+defaults as if you had specified:
+
+.. code:: cfg
cross_domain_policy = <allow-access-from domain="*" secure="false" />
+.. note::
+
+ The default policy is very permissive; this is appropriate
+ for most public cloud deployments, but may not be appropriate
+ for all deployments. See also:
+ `CWE-942 <https://cwe.mitre.org/data/definitions/942.html>`__
diff --git a/doc/source/development_middleware.rst b/doc/source/development_middleware.rst
index 774dab518..2e14e705c 100644
--- a/doc/source/development_middleware.rst
+++ b/doc/source/development_middleware.rst
@@ -18,7 +18,7 @@ Middleware can be added to the Swift WSGI servers by modifying their
`paste`_ configuration file. The majority of Swift middleware is applied
to the :ref:`proxy-server`.
-.. _paste: http://pythonpaste.org/
+.. _paste: https://pypi.org/project/Paste/
Given the following basic configuration::
@@ -172,7 +172,7 @@ documentation for more information about the syntax of the ``use`` option.
All middleware included with Swift is installed to support the ``egg:swift``
syntax.
-.. _PasteDeploy: http://pythonpaste.org/deploy/#egg-uris
+.. _PasteDeploy: https://pypi.org/project/PasteDeploy/
Middleware may advertize its availability and capabilities via Swift's
:ref:`discoverability` support by using
diff --git a/doc/source/getting_started.rst b/doc/source/getting_started.rst
index 284e338ac..75b387b85 100644
--- a/doc/source/getting_started.rst
+++ b/doc/source/getting_started.rst
@@ -6,12 +6,12 @@ Getting Started
System Requirements
-------------------
-Swift development currently targets Ubuntu Server 16.04, but should work on
+Swift development currently targets Ubuntu Server 22.04, but should work on
most Linux platforms.
Swift is written in Python and has these dependencies:
-* Python (2.7 or 3.6-3.9)
+* Python (2.7 or 3.6-3.10)
* rsync 3.x
* `liberasurecode <https://opendev.org/openstack/liberasurecode/>`__
* The Python packages listed in `the requirements file <https://github.com/openstack/swift/blob/master/requirements.txt>`__
@@ -47,4 +47,4 @@ Production
If you want to set up and configure Swift for a production cluster, the
following doc should be useful:
-* :doc:`Multiple Server Swift Installation <howto_installmultinode>`
+* :doc:`install/index`
diff --git a/doc/source/howto_installmultinode.rst b/doc/source/howto_installmultinode.rst
deleted file mode 100644
index bbdd8f1c5..000000000
--- a/doc/source/howto_installmultinode.rst
+++ /dev/null
@@ -1,15 +0,0 @@
-=====================================================
-Instructions for a Multiple Server Swift Installation
-=====================================================
-
-Please refer to the latest official
-`OpenStack Installation Guides <https://docs.openstack.org/latest/install/>`_
-for the most up-to-date documentation.
-
-Current Install Guides
-----------------------
-
-* `Object Storage installation guide for OpenStack Ocata
- <https://docs.openstack.org/project-install-guide/object-storage/ocata/>`__
-* `Object Storage installation guide for OpenStack Newton
- <https://docs.openstack.org/project-install-guide/object-storage/newton/>`__
diff --git a/doc/source/index.rst b/doc/source/index.rst
index f1b6bee6f..41ec26e80 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -96,7 +96,6 @@ Administrator Documentation
.. toctree::
:maxdepth: 1
- howto_installmultinode
deployment_guide
apache_deployment_guide
admin_guide
diff --git a/etc/internal-client.conf-sample b/etc/internal-client.conf-sample
index 7ded5fd8a..d9ed5e24b 100644
--- a/etc/internal-client.conf-sample
+++ b/etc/internal-client.conf-sample
@@ -26,6 +26,7 @@
# log_statsd_metric_prefix =
[pipeline:main]
+# Note: gatekeeper middleware is not allowed in the internal client pipeline
pipeline = catch_errors proxy-logging cache symlink proxy-server
[app:proxy-server]
diff --git a/etc/proxy-server.conf-sample b/etc/proxy-server.conf-sample
index d893ff8d7..c47b0cdb2 100644
--- a/etc/proxy-server.conf-sample
+++ b/etc/proxy-server.conf-sample
@@ -85,8 +85,14 @@ bind_port = 8080
# CORS documentation).
# cors_expose_headers =
#
+# General timeout when sending to or receiving from clients.
# client_timeout = 60.0
#
+# Timeout to use when looking for pipelined requests. Set to zero to disable
+# request pipelining. Defaults to client_timeout. Requires eventlet>=0.33.4;
+# with earlier eventlet, any non-zero value is treated as client_timeout.
+# keepalive_timeout =
+#
# Note: enabling evenlet_debug might reveal sensitive information, for example
# signatures for temp urls
# eventlet_debug = false
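
A sketch of how a server might resolve the new option, matching the fallback to client_timeout documented above (the helper name is hypothetical, not the actual swift.common.wsgi code):

def resolve_keepalive_timeout(conf):
    # unset -> fall back to client_timeout; an explicit 0 disables pipelining
    client_timeout = float(conf.get('client_timeout', 60.0))
    keepalive = conf.get('keepalive_timeout')
    return float(keepalive) if keepalive not in (None, '') else client_timeout

assert resolve_keepalive_timeout({}) == 60.0
assert resolve_keepalive_timeout({'keepalive_timeout': '0'}) == 0.0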
diff --git a/swift/cli/info.py b/swift/cli/info.py
index 7826a17b8..d99fb3b19 100644
--- a/swift/cli/info.py
+++ b/swift/cli/info.py
@@ -30,6 +30,7 @@ from swift.container.backend import ContainerBroker, DATADIR as CBDATADIR
from swift.obj.diskfile import get_data_dir, read_metadata, DATADIR_BASE, \
extract_policy
from swift.common.storage_policy import POLICIES
+from swift.common.swob import wsgi_to_str
from swift.common.middleware.crypto.crypto_utils import load_crypto_meta
from swift.common.utils import md5
@@ -537,6 +538,8 @@ def print_obj(datafile, check_etag=True, swift_dir='/etc/swift',
except EOFError:
print("Invalid metadata")
raise InfoSystemExit()
+ metadata = {wsgi_to_str(k): v if k == 'name' else wsgi_to_str(v)
+ for k, v in metadata.items()}
etag = metadata.pop('ETag', '')
length = metadata.pop('Content-Length', '')
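
The added lines convert diskfile metadata from WSGI strings (arbitrary bytes carried as latin-1 text) back to native strings for display, leaving the raw 'name' value untouched. A minimal sketch of the conversion, assuming Python 3 semantics for swob.wsgi_to_str:

def wsgi_to_str(wsgi_str):
    # WSGI strings smuggle raw bytes as latin-1; re-encode and decode
    # as UTF-8 to recover the original text
    return wsgi_str.encode('latin1').decode('utf-8')

assert wsgi_to_str('caf\xc3\xa9') == u'caf\xe9'  # b'caf\xc3\xa9' is UTF-8 for 'café'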
diff --git a/swift/cli/ringbuilder.py b/swift/cli/ringbuilder.py
index 001919d52..62b956023 100644
--- a/swift/cli/ringbuilder.py
+++ b/swift/cli/ringbuilder.py
@@ -194,7 +194,11 @@ def check_devs(devs, input_question, opts, abort_msg):
print('Matched more than one device:')
for dev in devs:
print(' %s' % format_device(dev))
- if not opts.yes and input(input_question) != 'y':
+ try:
+ abort = not opts.yes and input(input_question) != 'y'
+ except (EOFError, KeyboardInterrupt):
+ abort = True
+ if abort:
print(abort_msg)
exit(EXIT_ERROR)
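
The change above treats EOF (ctrl-D) or KeyboardInterrupt (ctrl-C) at the confirmation prompt the same as answering anything other than 'y'. The pattern in isolation:

def confirmed(question):
    try:
        return input(question) == 'y'
    except (EOFError, KeyboardInterrupt):
        return False  # an aborted prompt counts as "no"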
diff --git a/swift/common/daemon.py b/swift/common/daemon.py
index 59a661189..300710e98 100644
--- a/swift/common/daemon.py
+++ b/swift/common/daemon.py
@@ -20,8 +20,8 @@ import time
import signal
from re import sub
+import eventlet
import eventlet.debug
-from eventlet.hubs import use_hub
from swift.common import utils
@@ -281,7 +281,9 @@ def run_daemon(klass, conf_file, section_name='', once=False, **kwargs):
# and results in an exit code of 1.
sys.exit(e)
- use_hub(utils.get_hub())
+ # patch eventlet/logging early
+ utils.monkey_patch()
+ eventlet.hubs.use_hub(utils.get_hub())
# once on command line (i.e. daemonize=false) will over-ride config
once = once or not utils.config_true_value(conf.get('daemonize', 'true'))
@@ -315,7 +317,9 @@ def run_daemon(klass, conf_file, section_name='', once=False, **kwargs):
logger.notice('Starting %s', os.getpid())
try:
- DaemonStrategy(klass(conf), logger).run(once=once, **kwargs)
+ d = klass(conf)
+ DaemonStrategy(d, logger).run(once=once, **kwargs)
except KeyboardInterrupt:
logger.info('User quit')
logger.notice('Exited %s', os.getpid())
+ return d
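
With this change run_daemon() returns the daemon instance it constructed, which lets callers (notably tests) inspect the daemon after a run. A hedged usage sketch; MyDaemon and the conf path are placeholders, and a valid config file is assumed:

from swift.common.daemon import Daemon, run_daemon

class MyDaemon(Daemon):  # hypothetical daemon for illustration
    def run_once(self, *args, **kwargs):
        self.ran = True

d = run_daemon(MyDaemon, '/etc/swift/my-daemon.conf', once=True)
assert d.ran  # the returned instance carries post-run state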
diff --git a/swift/common/internal_client.py b/swift/common/internal_client.py
index 2c1c99cc0..fc5242ae8 100644
--- a/swift/common/internal_client.py
+++ b/swift/common/internal_client.py
@@ -28,6 +28,7 @@ from zlib import compressobj
from swift.common.exceptions import ClientException
from swift.common.http import (HTTP_NOT_FOUND, HTTP_MULTIPLE_CHOICES,
is_client_error, is_server_error)
+from swift.common.middleware.gatekeeper import GatekeeperMiddleware
from swift.common.request_helpers import USE_REPLICATION_NETWORK_HEADER
from swift.common.swob import Request, bytes_to_wsgi
from swift.common.utils import quote, close_if_possible, drain_and_close
@@ -144,6 +145,8 @@ class InternalClient(object):
:param user_agent: User agent to be sent to requests to Swift.
:param request_tries: Number of tries before InternalClient.make_request()
gives up.
+ :param use_replication_network: Force the client to use the replication
+ network over the cluster.
:param global_conf: a dict of options to update the loaded proxy config.
Options in ``global_conf`` will override those in ``conf_path`` except
where the ``conf_path`` option is preceded by ``set``.
@@ -151,12 +154,17 @@ class InternalClient(object):
"""
def __init__(self, conf_path, user_agent, request_tries,
- allow_modify_pipeline=False, use_replication_network=False,
- global_conf=None, app=None):
+ use_replication_network=False, global_conf=None, app=None,
+ **kwargs):
if request_tries < 1:
raise ValueError('request_tries must be positive')
+ # Internal clients don't use the gatekeeper and the pipeline remains
+ # static so we never allow anything to modify the proxy pipeline.
+ if kwargs.get('allow_modify_pipeline'):
+ raise ValueError("'allow_modify_pipeline' is no longer supported")
self.app = app or loadapp(conf_path, global_conf=global_conf,
- allow_modify_pipeline=allow_modify_pipeline,)
+ allow_modify_pipeline=False,)
+ self.check_gatekeeper_not_loaded(self.app)
self.user_agent = \
self.app._pipeline_final_app.backend_user_agent = user_agent
self.request_tries = request_tries
@@ -167,6 +175,19 @@ class InternalClient(object):
self.auto_create_account_prefix = \
self.app._pipeline_final_app.auto_create_account_prefix
+ @staticmethod
+ def check_gatekeeper_not_loaded(app):
+ # the Gatekeeper middleware would prevent an InternalClient passing
+ # X-Backend-* headers to the proxy app, so ensure it's not present
+ try:
+ for app in app._pipeline:
+ if isinstance(app, GatekeeperMiddleware):
+ raise ValueError(
+ "Gatekeeper middleware is not allowed in the "
+ "InternalClient proxy pipeline")
+ except AttributeError:
+ pass
+
def make_request(
self, method, path, headers, acceptable_statuses, body_file=None,
params=None):
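
Two guards are added above: passing the removed allow_modify_pipeline option now fails fast, and a pipeline containing gatekeeper (which would strip the X-Backend-* headers internal clients rely on) is rejected. For example (the conf path is a placeholder):

from swift.common.internal_client import InternalClient

try:
    InternalClient('/etc/swift/internal-client.conf', 'my-agent',
                   request_tries=3, allow_modify_pipeline=True)
except ValueError as err:
    print(err)  # 'allow_modify_pipeline' is no longer supported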
diff --git a/swift/common/memcached.py b/swift/common/memcached.py
index 74ec8efc7..22ec81c71 100644
--- a/swift/common/memcached.py
+++ b/swift/common/memcached.py
@@ -117,6 +117,13 @@ def set_msg(key, flags, timeout, value):
]) + (b'\r\n' + value + b'\r\n')
+# Get the prefix of a user-provided memcache key by removing everything
+# after the last '/'. All current usages within swift pass keys with such
+# prefixes, e.g. "shard-updating-v2" and "nvratelimit".
+def get_key_prefix(key):
+    return key.rsplit('/', 1)[0]
+
+
class MemcacheConnectionError(Exception):
pass
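
Illustrative behavior of get_key_prefix (key values invented for the example):

from swift.common.memcached import get_key_prefix

assert get_key_prefix('shard-updating-v2/acct/cont') == 'shard-updating-v2/acct'
assert get_key_prefix('nvratelimit/v2/wf/some-key') == 'nvratelimit/v2/wf'
assert get_key_prefix('no-slash-key') == 'no-slash-key'  # no '/': unchanged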
@@ -216,18 +223,24 @@ class MemcacheRing(object):
def memcache_servers(self):
return list(self._client_cache.keys())
- def _exception_occurred(self, server, e, action='talking',
+ def _exception_occurred(self, server, e, key_prefix, action='talking',
sock=None, fp=None, got_connection=True):
if isinstance(e, Timeout):
- self.logger.error("Timeout %(action)s to memcached: %(server)s",
- {'action': action, 'server': server})
+ self.logger.error(
+ "Timeout %(action)s to memcached: %(server)s"
+ ": with key_prefix %(key_prefix)s",
+ {'action': action, 'server': server, 'key_prefix': key_prefix})
elif isinstance(e, (socket.error, MemcacheConnectionError)):
self.logger.error(
- "Error %(action)s to memcached: %(server)s: %(err)s",
- {'action': action, 'server': server, 'err': e})
+ "Error %(action)s to memcached: %(server)s: "
+ "with key_prefix %(key_prefix)s: %(err)s",
+ {'action': action, 'server': server, 'err': e,
+ 'key_prefix': key_prefix})
else:
- self.logger.exception("Error %(action)s to memcached: %(server)s",
- {'action': action, 'server': server})
+ self.logger.exception("Error %(action)s to memcached: %(server)s"
+ ": with key_prefix %(key_prefix)s",
+ {'action': action, 'server': server,
+ 'key_prefix': key_prefix})
try:
if fp:
fp.close()
@@ -257,14 +270,17 @@ class MemcacheRing(object):
self._error_limited[server] = now + self._error_limit_duration
self.logger.error('Error limiting server %s', server)
- def _get_conns(self, key):
+ def _get_conns(self, key_prefix, hash_key):
"""
Retrieves a server conn from the pool, or connects a new one.
Chooses the server based on a consistent hash of "key".
+ :param key_prefix: the prefix of user provided key.
+ :param hash_key: the consistent hash of user key, or server key for
+ set_multi and get_multi.
:return: generator to serve memcached connection
"""
- pos = bisect(self._sorted, key)
+ pos = bisect(self._sorted, hash_key)
served = []
any_yielded = False
while len(served) < self._tries:
@@ -283,14 +299,14 @@ class MemcacheRing(object):
yield server, fp, sock
except MemcachePoolTimeout as e:
self._exception_occurred(
- server, e, action='getting a connection',
+ server, e, key_prefix, action='getting a connection',
got_connection=False)
except (Exception, Timeout) as e:
# Typically a Timeout exception caught here is the one raised
# by the create() method of this server's MemcacheConnPool
# object.
self._exception_occurred(
- server, e, action='connecting', sock=sock)
+ server, e, key_prefix, action='connecting', sock=sock)
if not any_yielded:
self.logger.error('All memcached servers error-limited')
@@ -318,7 +334,8 @@ class MemcacheRing(object):
:param raise_on_error: if True, propagate Timeouts and other errors.
By default, errors are ignored.
"""
- key = md5hash(key)
+ key_prefix = get_key_prefix(key)
+ hash_key = md5hash(key)
timeout = sanitize_timeout(time)
flags = 0
if serialize:
@@ -329,10 +346,10 @@ class MemcacheRing(object):
elif not isinstance(value, bytes):
value = str(value).encode('utf-8')
- for (server, fp, sock) in self._get_conns(key):
+ for (server, fp, sock) in self._get_conns(key_prefix, hash_key):
try:
with Timeout(self._io_timeout):
- sock.sendall(set_msg(key, flags, timeout, value))
+ sock.sendall(set_msg(hash_key, flags, timeout, value))
# Wait for the set to complete
msg = fp.readline().strip()
if msg != b'STORED':
@@ -352,7 +369,8 @@ class MemcacheRing(object):
self._return_conn(server, fp, sock)
return
except (Exception, Timeout) as e:
- self._exception_occurred(server, e, sock=sock, fp=fp)
+ self._exception_occurred(
+ server, e, key_prefix, sock=sock, fp=fp)
if raise_on_error:
raise MemcacheConnectionError(
"No memcached connections succeeded.")
@@ -368,19 +386,20 @@ class MemcacheRing(object):
By default, errors are treated as cache misses.
:returns: value of the key in memcache
"""
- key = md5hash(key)
+ key_prefix = get_key_prefix(key)
+ hash_key = md5hash(key)
value = None
- for (server, fp, sock) in self._get_conns(key):
+ for (server, fp, sock) in self._get_conns(key_prefix, hash_key):
try:
with Timeout(self._io_timeout):
- sock.sendall(b'get ' + key + b'\r\n')
+ sock.sendall(b'get ' + hash_key + b'\r\n')
line = fp.readline().strip().split()
while True:
if not line:
raise MemcacheConnectionError('incomplete read')
if line[0].upper() == b'END':
break
- if line[0].upper() == b'VALUE' and line[1] == key:
+ if line[0].upper() == b'VALUE' and line[1] == hash_key:
size = int(line[3])
value = fp.read(size)
if int(line[2]) & PICKLE_FLAG:
@@ -392,7 +411,8 @@ class MemcacheRing(object):
self._return_conn(server, fp, sock)
return value
except (Exception, Timeout) as e:
- self._exception_occurred(server, e, sock=sock, fp=fp)
+ self._exception_occurred(
+ server, e, key_prefix, sock=sock, fp=fp)
if raise_on_error:
raise MemcacheConnectionError(
"No memcached connections succeeded.")
@@ -415,17 +435,18 @@ class MemcacheRing(object):
:returns: result of incrementing
:raises MemcacheConnectionError:
"""
- key = md5hash(key)
+ key_prefix = get_key_prefix(key)
+ hash_key = md5hash(key)
command = b'incr'
if delta < 0:
command = b'decr'
delta = str(abs(int(delta))).encode('ascii')
timeout = sanitize_timeout(time)
- for (server, fp, sock) in self._get_conns(key):
+ for (server, fp, sock) in self._get_conns(key_prefix, hash_key):
try:
with Timeout(self._io_timeout):
sock.sendall(b' '.join([
- command, key, delta]) + b'\r\n')
+ command, hash_key, delta]) + b'\r\n')
line = fp.readline().strip().split()
if not line:
raise MemcacheConnectionError('incomplete read')
@@ -433,14 +454,16 @@ class MemcacheRing(object):
add_val = delta
if command == b'decr':
add_val = b'0'
- sock.sendall(b' '.join([
- b'add', key, b'0', str(timeout).encode('ascii'),
- str(len(add_val)).encode('ascii')
- ]) + b'\r\n' + add_val + b'\r\n')
+ sock.sendall(
+ b' '.join(
+ [b'add', hash_key, b'0', str(timeout).encode(
+ 'ascii'),
+ str(len(add_val)).encode('ascii')
+ ]) + b'\r\n' + add_val + b'\r\n')
line = fp.readline().strip().split()
if line[0].upper() == b'NOT_STORED':
sock.sendall(b' '.join([
- command, key, delta]) + b'\r\n')
+ command, hash_key, delta]) + b'\r\n')
line = fp.readline().strip().split()
ret = int(line[0].strip())
else:
@@ -450,7 +473,8 @@ class MemcacheRing(object):
self._return_conn(server, fp, sock)
return ret
except (Exception, Timeout) as e:
- self._exception_occurred(server, e, sock=sock, fp=fp)
+ self._exception_occurred(
+ server, e, key_prefix, sock=sock, fp=fp)
raise MemcacheConnectionError("No Memcached connections succeeded.")
@memcached_timing_stats(sample_rate=TIMING_SAMPLE_RATE_LOW)
@@ -478,18 +502,20 @@ class MemcacheRing(object):
:param server_key: key to use in determining which server in the ring
is used
"""
- key = md5hash(key)
- server_key = md5hash(server_key) if server_key else key
- for (server, fp, sock) in self._get_conns(server_key):
+ key_prefix = get_key_prefix(key)
+ hash_key = md5hash(key)
+ server_key = md5hash(server_key) if server_key else hash_key
+ for (server, fp, sock) in self._get_conns(key_prefix, server_key):
try:
with Timeout(self._io_timeout):
- sock.sendall(b'delete ' + key + b'\r\n')
+ sock.sendall(b'delete ' + hash_key + b'\r\n')
# Wait for the delete to complete
fp.readline()
self._return_conn(server, fp, sock)
return
except (Exception, Timeout) as e:
- self._exception_occurred(server, e, sock=sock, fp=fp)
+ self._exception_occurred(
+ server, e, key_prefix, sock=sock, fp=fp)
@memcached_timing_stats(sample_rate=TIMING_SAMPLE_RATE_HIGH)
def set_multi(self, mapping, server_key, serialize=True, time=0,
@@ -508,7 +534,8 @@ class MemcacheRing(object):
python-memcached interface. This implementation
ignores it
"""
- server_key = md5hash(server_key)
+ key_prefix = get_key_prefix(server_key)
+ hash_key = md5hash(server_key)
timeout = sanitize_timeout(time)
msg = []
for key, value in mapping.items():
@@ -520,7 +547,7 @@ class MemcacheRing(object):
value = json.dumps(value).encode('ascii')
flags |= JSON_FLAG
msg.append(set_msg(key, flags, timeout, value))
- for (server, fp, sock) in self._get_conns(server_key):
+ for (server, fp, sock) in self._get_conns(key_prefix, hash_key):
try:
with Timeout(self._io_timeout):
sock.sendall(b''.join(msg))
@@ -530,7 +557,8 @@ class MemcacheRing(object):
self._return_conn(server, fp, sock)
return
except (Exception, Timeout) as e:
- self._exception_occurred(server, e, sock=sock, fp=fp)
+ self._exception_occurred(
+ server, e, key_prefix, sock=sock, fp=fp)
@memcached_timing_stats(sample_rate=TIMING_SAMPLE_RATE_HIGH)
def get_multi(self, keys, server_key):
@@ -542,12 +570,13 @@ class MemcacheRing(object):
is used
:returns: list of values
"""
+ key_prefix = get_key_prefix(server_key)
server_key = md5hash(server_key)
- keys = [md5hash(key) for key in keys]
- for (server, fp, sock) in self._get_conns(server_key):
+ hash_keys = [md5hash(key) for key in keys]
+ for (server, fp, sock) in self._get_conns(key_prefix, server_key):
try:
with Timeout(self._io_timeout):
- sock.sendall(b'get ' + b' '.join(keys) + b'\r\n')
+ sock.sendall(b'get ' + b' '.join(hash_keys) + b'\r\n')
line = fp.readline().strip().split()
responses = {}
while True:
@@ -566,7 +595,7 @@ class MemcacheRing(object):
fp.readline()
line = fp.readline().strip().split()
values = []
- for key in keys:
+ for key in hash_keys:
if key in responses:
values.append(responses[key])
else:
@@ -574,7 +603,8 @@ class MemcacheRing(object):
self._return_conn(server, fp, sock)
return values
except (Exception, Timeout) as e:
- self._exception_occurred(server, e, sock=sock, fp=fp)
+ self._exception_occurred(
+ server, e, key_prefix, sock=sock, fp=fp)
def load_memcache(conf, logger):
diff --git a/swift/common/middleware/backend_ratelimit.py b/swift/common/middleware/backend_ratelimit.py
index 980e9edc4..b4922005f 100644
--- a/swift/common/middleware/backend_ratelimit.py
+++ b/swift/common/middleware/backend_ratelimit.py
@@ -17,7 +17,8 @@ import time
from collections import defaultdict
from swift.common.request_helpers import split_and_validate_path
-from swift.common.swob import Request, HTTPTooManyBackendRequests
+from swift.common.swob import Request, HTTPTooManyBackendRequests, \
+ HTTPException
from swift.common.utils import get_logger, non_negative_float, \
EventletRateLimiter
@@ -66,13 +67,14 @@ class BackendRateLimitMiddleware(object):
try:
device, partition, _ = split_and_validate_path(req, 1, 3, True)
int(partition) # check it's a valid partition
+ except (ValueError, HTTPException):
+ # request may not have device/partition e.g. a healthcheck req
+ pass
+ else:
rate_limiter = self.rate_limiters[device]
if not rate_limiter.is_allowed():
self.logger.increment('backend.ratelimit')
handler = HTTPTooManyBackendRequests()
- except Exception: # noqa
- # request may not have device/partition e.g. a healthcheck req
- pass
return handler(env, start_response)
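
The rewrite replaces a broad except Exception with try/except/else, so only requests whose path parses as device/partition reach the rate limiter, while parse failures (e.g. a healthcheck request) fall through untouched. The control flow in isolation, with invented paths:

def classify(path):
    try:
        device, partition = path.strip('/').split('/')[:2]
        int(partition)  # check it's a valid partition
    except ValueError:
        return 'pass through'          # e.g. /healthcheck
    else:
        return 'maybe rate-limit %s' % device

assert classify('/sda1/123/a/c/o') == 'maybe rate-limit sda1'
assert classify('/healthcheck') == 'pass through'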
diff --git a/swift/common/middleware/crossdomain.py b/swift/common/middleware/crossdomain.py
index ffe73d43f..c15e52454 100644
--- a/swift/common/middleware/crossdomain.py
+++ b/swift/common/middleware/crossdomain.py
@@ -23,20 +23,24 @@ class CrossDomainMiddleware(object):
Cross domain middleware used to respond to requests for cross domain
policy information.
- If the path is /crossdomain.xml it will respond with an xml cross domain
- policy document. This allows web pages hosted elsewhere to use client
- side technologies such as Flash, Java and Silverlight to interact
+ If the path is ``/crossdomain.xml`` it will respond with an xml cross
+ domain policy document. This allows web pages hosted elsewhere to use
+ client side technologies such as Flash, Java and Silverlight to interact
with the Swift API.
To enable this middleware, add it to the pipeline in your proxy-server.conf
file. It should be added before any authentication (e.g., tempauth or
keystone) middleware. In this example ellipsis (...) indicate other
- middleware you may have chosen to use::
+ middleware you may have chosen to use:
+
+ .. code:: cfg
[pipeline:main]
pipeline = ... crossdomain ... authtoken ... proxy-server
- And add a filter section, such as::
+ And add a filter section, such as:
+
+ .. code:: cfg
[filter:crossdomain]
use = egg:swift#crossdomain
@@ -45,13 +49,22 @@ class CrossDomainMiddleware(object):
For continuation lines, put some whitespace before the continuation
text. Ensure you put a completely blank line to terminate the
- cross_domain_policy value.
+ ``cross_domain_policy`` value.
+
+ The ``cross_domain_policy`` name/value is optional. If omitted, the policy
+ defaults as if you had specified:
- The cross_domain_policy name/value is optional. If omitted, the policy
- defaults as if you had specified::
+ .. code:: cfg
cross_domain_policy = <allow-access-from domain="*" secure="false" />
+ .. note::
+
+ The default policy is very permissive; this is appropriate
+ for most public cloud deployments, but may not be appropriate
+ for all deployments. See also:
+ `CWE-942 <https://cwe.mitre.org/data/definitions/942.html>`__
+
"""
diff --git a/swift/common/ring/ring.py b/swift/common/ring/ring.py
index 98bc591f0..c3f726df6 100644
--- a/swift/common/ring/ring.py
+++ b/swift/common/ring/ring.py
@@ -48,6 +48,23 @@ def calc_replica_count(replica2part2dev_id):
return base + extra
+def normalize_devices(devs):
+    # NOTE(akscram): Replication parameters like replication_ip and
+    #                replication_port are required for the replication
+    #                process. An old replication ring doesn't contain
+    #                these parameters in its devices, and old-style
+    #                pickled rings won't have region information.
+ for dev in devs:
+ if dev is None:
+ continue
+ dev.setdefault('region', 1)
+ if 'ip' in dev:
+ dev.setdefault('replication_ip', dev['ip'])
+ if 'port' in dev:
+ dev.setdefault('replication_port', dev['port'])
+
+
class RingReader(object):
chunk_size = 2 ** 16
@@ -118,6 +135,7 @@ class RingData(object):
def __init__(self, replica2part2dev_id, devs, part_shift,
next_part_power=None, version=None):
+ normalize_devices(devs)
self.devs = devs
self._replica2part2dev_id = replica2part2dev_id
self._part_shift = part_shift
@@ -125,10 +143,6 @@ class RingData(object):
self.version = version
self.md5 = self.size = self.raw_size = None
- for dev in self.devs:
- if dev is not None:
- dev.setdefault("region", 1)
-
@property
def replica_count(self):
"""Number of replicas (full or partial) used in the ring."""
@@ -194,7 +208,10 @@ class RingData(object):
gz_file.seek(0)
ring_data = pickle.load(gz_file)
- if not hasattr(ring_data, 'devs'):
+ if hasattr(ring_data, 'devs'):
+ # pickled RingData; make sure we've got region/replication info
+ normalize_devices(ring_data.devs)
+ else:
ring_data = RingData(ring_data['replica2part2dev_id'],
ring_data['devs'], ring_data['part_shift'],
ring_data.get('next_part_power'),
@@ -306,20 +323,6 @@ class Ring(object):
self._mtime = getmtime(self.serialized_path)
self._devs = ring_data.devs
- # NOTE(akscram): Replication parameters like replication_ip
- # and replication_port are required for
- # replication process. An old replication
- # ring doesn't contain this parameters into
- # device. Old-style pickled rings won't have
- # region information.
- for dev in self._devs:
- if dev:
- dev.setdefault('region', 1)
- if 'ip' in dev:
- dev.setdefault('replication_ip', dev['ip'])
- if 'port' in dev:
- dev.setdefault('replication_port', dev['port'])
-
self._replica2part2dev_id = ring_data._replica2part2dev_id
self._part_shift = ring_data._part_shift
self._rebuild_tier_data()
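
The new normalize_devices() consolidates the defaulting that RingData.__init__ and Ring._reload previously did separately. Its effect on a made-up old-style device dict:

from swift.common.ring.ring import normalize_devices

dev = {'id': 0, 'ip': '10.0.0.1', 'port': 6200}
normalize_devices([dev, None])             # None holes are skipped
assert dev['region'] == 1                  # old pickled rings lack regions
assert dev['replication_ip'] == '10.0.0.1'
assert dev['replication_port'] == 6200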
diff --git a/swift/common/utils/__init__.py b/swift/common/utils/__init__.py
index 3b4db177e..ef6b0180e 100644
--- a/swift/common/utils/__init__.py
+++ b/swift/common/utils/__init__.py
@@ -26,7 +26,6 @@ import fcntl
import grp
import hashlib
import json
-import math
import operator
import os
import pwd
@@ -37,12 +36,9 @@ import sys
import time
import uuid
import functools
-import platform
import email.parser
from random import random, shuffle
from contextlib import contextmanager, closing
-import ctypes
-import ctypes.util
from optparse import OptionParser
import traceback
import warnings
@@ -97,90 +93,36 @@ from swift.common.linkat import linkat
# For backwards compatability with 3rd party middlewares
from swift.common.registry import register_swift_info, get_swift_info # noqa
+from swift.common.utils.libc import ( # noqa
+ F_SETPIPE_SZ,
+ load_libc_function,
+ config_fallocate_value,
+ disable_fallocate,
+ fallocate,
+ punch_hole,
+ drop_buffer_cache,
+ get_md5_socket,
+ modify_priority,
+)
+from swift.common.utils.timestamp import ( # noqa
+ NORMAL_FORMAT,
+ INTERNAL_FORMAT,
+ SHORT_FORMAT,
+ MAX_OFFSET,
+ PRECISION,
+ Timestamp,
+ encode_timestamps,
+ decode_timestamps,
+ normalize_timestamp,
+ EPOCH,
+ last_modified_date_to_timestamp,
+ normalize_delete_at_timestamp,
+)
-# logging doesn't import patched as cleanly as one would like
from logging.handlers import SysLogHandler
import logging
-logging.thread = eventlet.green.thread
-logging.threading = eventlet.green.threading
-logging._lock = logging.threading.RLock()
-# setup notice level logging
-NOTICE = 25
-logging.addLevelName(NOTICE, 'NOTICE')
-SysLogHandler.priority_map['NOTICE'] = 'notice'
-
-# These are lazily pulled from libc elsewhere
-_sys_fallocate = None
-_posix_fadvise = None
-_libc_socket = None
-_libc_bind = None
-_libc_accept = None
-# see man -s 2 setpriority
-_libc_setpriority = None
-# see man -s 2 syscall
-_posix_syscall = None
-
-# If set to non-zero, fallocate routines will fail based on free space
-# available being at or below this amount, in bytes.
-FALLOCATE_RESERVE = 0
-# Indicates if FALLOCATE_RESERVE is the percentage of free space (True) or
-# the number of bytes (False).
-FALLOCATE_IS_PERCENT = False
-
-# from /usr/include/linux/falloc.h
-FALLOC_FL_KEEP_SIZE = 1
-FALLOC_FL_PUNCH_HOLE = 2
-
-# from /usr/src/linux-headers-*/include/uapi/linux/resource.h
-PRIO_PROCESS = 0
-
-
-# /usr/include/x86_64-linux-gnu/asm/unistd_64.h defines syscalls there
-# are many like it, but this one is mine, see man -s 2 ioprio_set
-def NR_ioprio_set():
- """Give __NR_ioprio_set value for your system."""
- architecture = os.uname()[4]
- arch_bits = platform.architecture()[0]
- # check if supported system, now support x86_64 and AArch64
- if architecture == 'x86_64' and arch_bits == '64bit':
- return 251
- elif architecture == 'aarch64' and arch_bits == '64bit':
- return 30
- raise OSError("Swift doesn't support ionice priority for %s %s" %
- (architecture, arch_bits))
-
-
-# this syscall integer probably only works on x86_64 linux systems, you
-# can check if it's correct on yours with something like this:
-"""
-#include <stdio.h>
-#include <sys/syscall.h>
-
-int main(int argc, const char* argv[]) {
- printf("%d\n", __NR_ioprio_set);
- return 0;
-}
-"""
-
-# this is the value for "which" that says our who value will be a pid
-# pulled out of /usr/src/linux-headers-*/include/linux/ioprio.h
-IOPRIO_WHO_PROCESS = 1
-
-
-IO_CLASS_ENUM = {
- 'IOPRIO_CLASS_RT': 1,
- 'IOPRIO_CLASS_BE': 2,
- 'IOPRIO_CLASS_IDLE': 3,
-}
-
-# the IOPRIO_PRIO_VALUE "macro" is also pulled from
-# /usr/src/linux-headers-*/include/linux/ioprio.h
-IOPRIO_CLASS_SHIFT = 13
-
-
-def IOPRIO_PRIO_VALUE(class_, data):
- return (((class_) << IOPRIO_CLASS_SHIFT) | data)
+NOTICE = 25
# Used by hash_path to offer a bit more security when generating hashes for
# paths. It simply appends this value to all paths; guessing the hash a path
@@ -190,12 +132,6 @@ HASH_PATH_PREFIX = b''
SWIFT_CONF_FILE = '/etc/swift/swift.conf'
-# These constants are Linux-specific, and Python doesn't seem to know
-# about them. We ask anyway just in case that ever gets fixed.
-#
-# The values were copied from the Linux 3.x kernel headers.
-AF_ALG = getattr(socket, 'AF_ALG', 38)
-F_SETPIPE_SZ = getattr(fcntl, 'F_SETPIPE_SZ', 1031)
O_TMPFILE = getattr(os, 'O_TMPFILE', 0o20000000 | os.O_DIRECTORY)
# Used by the parse_socket_string() function to validate IPv6 addresses
@@ -500,6 +436,17 @@ def config_read_prefixed_options(conf, prefix_name, defaults):
return params
+def logging_monkey_patch():
+ # explicitly patch the logging lock
+ logging._lock = logging.threading.RLock()
+ # setup notice level logging
+ logging.addLevelName(NOTICE, 'NOTICE')
+ SysLogHandler.priority_map['NOTICE'] = 'notice'
+ # Trying to log threads while monkey-patched can lead to deadlocks; see
+ # https://bugs.launchpad.net/swift/+bug/1895739
+ logging.logThreads = 0
+
+
def eventlet_monkey_patch():
"""
Install the appropriate Eventlet monkey patches.
@@ -510,13 +457,14 @@ def eventlet_monkey_patch():
# if thread is monkey-patched.
eventlet.patcher.monkey_patch(all=False, socket=True, select=True,
thread=True)
- # Trying to log threads while monkey-patched can lead to deadlocks; see
- # https://bugs.launchpad.net/swift/+bug/1895739
- logging.logThreads = 0
-def noop_libc_function(*args):
- return 0
+def monkey_patch():
+ """
+ Apply all swift monkey patching consistently in one place.
+ """
+ eventlet_monkey_patch()
+ logging_monkey_patch()
def validate_configuration():
@@ -526,39 +474,6 @@ def validate_configuration():
sys.exit("Error: %s" % e)
-def load_libc_function(func_name, log_error=True,
- fail_if_missing=False, errcheck=False):
- """
- Attempt to find the function in libc, otherwise return a no-op func.
-
- :param func_name: name of the function to pull from libc.
- :param log_error: log an error when a function can't be found
- :param fail_if_missing: raise an exception when a function can't be found.
- Default behavior is to return a no-op function.
- :param errcheck: boolean, if true install a wrapper on the function
- to check for a return values of -1 and call
- ctype.get_errno and raise an OSError
- """
- try:
- libc = ctypes.CDLL(ctypes.util.find_library('c'), use_errno=True)
- func = getattr(libc, func_name)
- except AttributeError:
- if fail_if_missing:
- raise
- if log_error:
- logging.warning(_("Unable to locate %s in libc. Leaving as a "
- "no-op."), func_name)
- return noop_libc_function
- if errcheck:
- def _errcheck(result, f, args):
- if result == -1:
- errcode = ctypes.get_errno()
- raise OSError(errcode, os.strerror(errcode))
- return result
- func.errcheck = _errcheck
- return func
-
-
def generate_trans_id(trans_id_suffix):
return 'tx%s-%010x%s' % (
uuid.uuid4().hex[:21], int(time.time()), quote(trans_id_suffix))
@@ -755,25 +670,6 @@ def get_trans_id_time(trans_id):
return None
-def config_fallocate_value(reserve_value):
- """
- Returns fallocate reserve_value as an int or float.
- Returns is_percent as a boolean.
- Returns a ValueError on invalid fallocate value.
- """
- try:
- if str(reserve_value[-1:]) == '%':
- reserve_value = float(reserve_value[:-1])
- is_percent = True
- else:
- reserve_value = int(reserve_value)
- is_percent = False
- except ValueError:
- raise ValueError('Error: %s is an invalid value for fallocate'
- '_reserve.' % reserve_value)
- return reserve_value, is_percent
-
-
class FileLikeIter(object):
def __init__(self, iterable):
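
config_fallocate_value keeps its behavior in its new swift.common.utils.libc home (re-exported via the import block added earlier); for reference:

from swift.common.utils.libc import config_fallocate_value

assert config_fallocate_value('2%') == (2.0, True)      # percent of free space
assert config_fallocate_value('1024') == (1024, False)  # absolute bytes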
@@ -924,164 +820,6 @@ def fs_has_free_space(fs_path, space_needed, is_percent):
return free_bytes >= space_needed
-class _LibcWrapper(object):
- """
- A callable object that forwards its calls to a C function from libc.
-
- These objects are lazy. libc will not be checked until someone tries to
- either call the function or check its availability.
-
- _LibcWrapper objects have an "available" property; if true, then libc
- has the function of that name. If false, then calls will fail with a
- NotImplementedError.
- """
-
- def __init__(self, func_name):
- self._func_name = func_name
- self._func_handle = None
- self._loaded = False
-
- def _ensure_loaded(self):
- if not self._loaded:
- func_name = self._func_name
- try:
- # Keep everything in this try-block in local variables so
- # that a typo in self.some_attribute_name doesn't raise a
- # spurious AttributeError.
- func_handle = load_libc_function(
- func_name, fail_if_missing=True)
- self._func_handle = func_handle
- except AttributeError:
- # We pass fail_if_missing=True to load_libc_function and
- # then ignore the error. It's weird, but otherwise we have
- # to check if self._func_handle is noop_libc_function, and
- # that's even weirder.
- pass
- self._loaded = True
-
- @property
- def available(self):
- self._ensure_loaded()
- return bool(self._func_handle)
-
- def __call__(self, *args):
- if self.available:
- return self._func_handle(*args)
- else:
- raise NotImplementedError(
- "No function %r found in libc" % self._func_name)
-
-
-_fallocate_enabled = True
-_fallocate_warned_about_missing = False
-_sys_fallocate = _LibcWrapper('fallocate')
-_sys_posix_fallocate = _LibcWrapper('posix_fallocate')
-
-
-def disable_fallocate():
- global _fallocate_enabled
- _fallocate_enabled = False
-
-
-def fallocate(fd, size, offset=0):
- """
- Pre-allocate disk space for a file.
-
- This function can be disabled by calling disable_fallocate(). If no
- suitable C function is available in libc, this function is a no-op.
-
- :param fd: file descriptor
- :param size: size to allocate (in bytes)
- """
- global _fallocate_enabled
- if not _fallocate_enabled:
- return
-
- if size < 0:
- size = 0 # Done historically; not really sure why
- if size >= (1 << 63):
- raise ValueError('size must be less than 2 ** 63')
- if offset < 0:
- raise ValueError('offset must be non-negative')
- if offset >= (1 << 63):
- raise ValueError('offset must be less than 2 ** 63')
-
- # Make sure there's some (configurable) amount of free space in
- # addition to the number of bytes we're allocating.
- if FALLOCATE_RESERVE:
- st = os.fstatvfs(fd)
- free = st.f_frsize * st.f_bavail - size
- if FALLOCATE_IS_PERCENT:
- free = (float(free) / float(st.f_frsize * st.f_blocks)) * 100
- if float(free) <= float(FALLOCATE_RESERVE):
- raise OSError(
- errno.ENOSPC,
- 'FALLOCATE_RESERVE fail %g <= %g' %
- (free, FALLOCATE_RESERVE))
-
- if _sys_fallocate.available:
- # Parameters are (fd, mode, offset, length).
- #
- # mode=FALLOC_FL_KEEP_SIZE pre-allocates invisibly (without
- # affecting the reported file size).
- ret = _sys_fallocate(
- fd, FALLOC_FL_KEEP_SIZE, ctypes.c_uint64(offset),
- ctypes.c_uint64(size))
- err = ctypes.get_errno()
- elif _sys_posix_fallocate.available:
- # Parameters are (fd, offset, length).
- ret = _sys_posix_fallocate(fd, ctypes.c_uint64(offset),
- ctypes.c_uint64(size))
- err = ctypes.get_errno()
- else:
- # No suitable fallocate-like function is in our libc. Warn about it,
- # but just once per process, and then do nothing.
- global _fallocate_warned_about_missing
- if not _fallocate_warned_about_missing:
- logging.warning(_("Unable to locate fallocate, posix_fallocate in "
- "libc. Leaving as a no-op."))
- _fallocate_warned_about_missing = True
- return
-
- if ret and err not in (0, errno.ENOSYS, errno.EOPNOTSUPP,
- errno.EINVAL):
- raise OSError(err, 'Unable to fallocate(%s)' % size)
-
-
-def punch_hole(fd, offset, length):
- """
- De-allocate disk space in the middle of a file.
-
- :param fd: file descriptor
- :param offset: index of first byte to de-allocate
- :param length: number of bytes to de-allocate
- """
- if offset < 0:
- raise ValueError('offset must be non-negative')
- if offset >= (1 << 63):
- raise ValueError('offset must be less than 2 ** 63')
- if length <= 0:
- raise ValueError('length must be positive')
- if length >= (1 << 63):
- raise ValueError('length must be less than 2 ** 63')
-
- if _sys_fallocate.available:
- # Parameters are (fd, mode, offset, length).
- ret = _sys_fallocate(
- fd,
- FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE,
- ctypes.c_uint64(offset),
- ctypes.c_uint64(length))
- err = ctypes.get_errno()
- if ret and err:
- mode_str = "FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE"
- raise OSError(err, "Unable to fallocate(%d, %s, %d, %d)" % (
- fd, mode_str, offset, length))
- else:
- raise OSError(errno.ENOTSUP,
- 'No suitable C function found for hole punching')
-
-
def fsync(fd):
"""
Sync modified file data and metadata to disk.
@@ -1131,402 +869,6 @@ def fsync_dir(dirpath):
os.close(dirfd)
-def drop_buffer_cache(fd, offset, length):
- """
- Drop 'buffer' cache for the given range of the given file.
-
- :param fd: file descriptor
- :param offset: start offset
- :param length: length
- """
- global _posix_fadvise
- if _posix_fadvise is None:
- _posix_fadvise = load_libc_function('posix_fadvise64')
- # 4 means "POSIX_FADV_DONTNEED"
- ret = _posix_fadvise(fd, ctypes.c_uint64(offset),
- ctypes.c_uint64(length), 4)
- if ret != 0:
- logging.warning("posix_fadvise64(%(fd)s, %(offset)s, %(length)s, 4) "
- "-> %(ret)s", {'fd': fd, 'offset': offset,
- 'length': length, 'ret': ret})
-
-
-NORMAL_FORMAT = "%016.05f"
-INTERNAL_FORMAT = NORMAL_FORMAT + '_%016x'
-SHORT_FORMAT = NORMAL_FORMAT + '_%x'
-MAX_OFFSET = (16 ** 16) - 1
-PRECISION = 1e-5
-# Setting this to True will cause the internal format to always display
-# extended digits - even when the value is equivalent to the normalized form.
-# This isn't ideal during an upgrade when some servers might not understand
-# the new time format - but flipping it to True works great for testing.
-FORCE_INTERNAL = False # or True
-
-
-@functools.total_ordering
-class Timestamp(object):
- """
- Internal Representation of Swift Time.
-
- The normalized form of the X-Timestamp header looks like a float
- with a fixed width to ensure stable string sorting - normalized
- timestamps look like "1402464677.04188"
-
- To support overwrites of existing data without modifying the original
- timestamp but still maintain consistency a second internal offset vector
- is append to the normalized timestamp form which compares and sorts
- greater than the fixed width float format but less than a newer timestamp.
- The internalized format of timestamps looks like
- "1402464677.04188_0000000000000000" - the portion after the underscore is
- the offset and is a formatted hexadecimal integer.
-
- The internalized form is not exposed to clients in responses from
- Swift. Normal client operations will not create a timestamp with an
- offset.
-
- The Timestamp class in common.utils supports internalized and
- normalized formatting of timestamps and also comparison of timestamp
- values. When the offset value of a Timestamp is 0 - it's considered
- insignificant and need not be represented in the string format; to
- support backwards compatibility during a Swift upgrade the
- internalized and normalized form of a Timestamp with an
- insignificant offset are identical. When a timestamp includes an
- offset it will always be represented in the internalized form, but
- is still excluded from the normalized form. Timestamps with an
- equivalent timestamp portion (the float part) will compare and order
- by their offset. Timestamps with a greater timestamp portion will
- always compare and order greater than a Timestamp with a lesser
- timestamp regardless of it's offset. String comparison and ordering
- is guaranteed for the internalized string format, and is backwards
- compatible for normalized timestamps which do not include an offset.
- """
-
- def __init__(self, timestamp, offset=0, delta=0, check_bounds=True):
- """
- Create a new Timestamp.
-
- :param timestamp: time in seconds since the Epoch, may be any of:
-
- * a float or integer
- * normalized/internalized string
- * another instance of this class (offset is preserved)
-
- :param offset: the second internal offset vector, an int
- :param delta: deca-microsecond difference from the base timestamp
- param, an int
- """
- if isinstance(timestamp, bytes):
- timestamp = timestamp.decode('ascii')
- if isinstance(timestamp, six.string_types):
- base, base_offset = timestamp.partition('_')[::2]
- self.timestamp = float(base)
- if '_' in base_offset:
- raise ValueError('invalid literal for int() with base 16: '
- '%r' % base_offset)
- if base_offset:
- self.offset = int(base_offset, 16)
- else:
- self.offset = 0
- else:
- self.timestamp = float(timestamp)
- self.offset = getattr(timestamp, 'offset', 0)
- # increment offset
- if offset >= 0:
- self.offset += offset
- else:
- raise ValueError('offset must be non-negative')
- if self.offset > MAX_OFFSET:
- raise ValueError('offset must be smaller than %d' % MAX_OFFSET)
- self.raw = int(round(self.timestamp / PRECISION))
- # add delta
- if delta:
- self.raw = self.raw + delta
- if self.raw <= 0:
- raise ValueError(
- 'delta must be greater than %d' % (-1 * self.raw))
- self.timestamp = float(self.raw * PRECISION)
- if check_bounds:
- if self.timestamp < 0:
- raise ValueError('timestamp cannot be negative')
- if self.timestamp >= 10000000000:
- raise ValueError('timestamp too large')
-
- @classmethod
- def now(cls, offset=0, delta=0):
- return cls(time.time(), offset=offset, delta=delta)
-
- def __repr__(self):
- return INTERNAL_FORMAT % (self.timestamp, self.offset)
-
- def __str__(self):
- raise TypeError('You must specify which string format is required')
-
- def __float__(self):
- return self.timestamp
-
- def __int__(self):
- return int(self.timestamp)
-
- def __nonzero__(self):
- return bool(self.timestamp or self.offset)
-
- def __bool__(self):
- return self.__nonzero__()
-
- @property
- def normal(self):
- return NORMAL_FORMAT % self.timestamp
-
- @property
- def internal(self):
- if self.offset or FORCE_INTERNAL:
- return INTERNAL_FORMAT % (self.timestamp, self.offset)
- else:
- return self.normal
-
- @property
- def short(self):
- if self.offset or FORCE_INTERNAL:
- return SHORT_FORMAT % (self.timestamp, self.offset)
- else:
- return self.normal
-
- @property
- def isoformat(self):
- """
- Get an isoformat string representation of the 'normal' part of the
- Timestamp with microsecond precision and no trailing timezone, for
- example::
-
- 1970-01-01T00:00:00.000000
-
- :return: an isoformat string
- """
- t = float(self.normal)
- if six.PY3:
- # On Python 3, round manually using ROUND_HALF_EVEN rounding
- # method, to use the same rounding method than Python 2. Python 3
- # used a different rounding method, but Python 3.4.4 and 3.5.1 use
- # again ROUND_HALF_EVEN as Python 2.
- # See https://bugs.python.org/issue23517
- frac, t = math.modf(t)
- us = round(frac * 1e6)
- if us >= 1000000:
- t += 1
- us -= 1000000
- elif us < 0:
- t -= 1
- us += 1000000
- dt = datetime.datetime.utcfromtimestamp(t)
- dt = dt.replace(microsecond=us)
- else:
- dt = datetime.datetime.utcfromtimestamp(t)
-
- isoformat = dt.isoformat()
- # python isoformat() doesn't include msecs when zero
- if len(isoformat) < len("1970-01-01T00:00:00.000000"):
- isoformat += ".000000"
- return isoformat
-
- @classmethod
- def from_isoformat(cls, date_string):
- """
- Parse an isoformat string representation of time to a Timestamp object.
-
- :param date_string: a string formatted as per an Timestamp.isoformat
- property.
- :return: an instance of this class.
- """
- start = datetime.datetime.strptime(date_string, "%Y-%m-%dT%H:%M:%S.%f")
- delta = start - EPOCH
- # This calculation is based on Python 2.7's Modules/datetimemodule.c,
- # function delta_to_microseconds(), but written in Python.
- return cls(delta.total_seconds())
-
- def ceil(self):
- """
- Return the 'normal' part of the timestamp rounded up to the nearest
- integer number of seconds.
-
- This value should be used whenever the second-precision Last-Modified
- time of a resource is required.
-
- :return: a float value with second precision.
- """
- return math.ceil(float(self))
-
- def __eq__(self, other):
- if other is None:
- return False
- if not isinstance(other, Timestamp):
- try:
- other = Timestamp(other, check_bounds=False)
- except ValueError:
- return False
- return self.internal == other.internal
-
- def __ne__(self, other):
- return not (self == other)
-
- def __lt__(self, other):
- if other is None:
- return False
- if not isinstance(other, Timestamp):
- other = Timestamp(other, check_bounds=False)
- if other.timestamp < 0:
- return False
- if other.timestamp >= 10000000000:
- return True
- return self.internal < other.internal
-
- def __hash__(self):
- return hash(self.internal)
-
- def __invert__(self):
- if self.offset:
- raise ValueError('Cannot invert timestamps with offsets')
- return Timestamp((999999999999999 - self.raw) * PRECISION)
-
-
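
The Timestamp class above moves verbatim to swift.common.utils.timestamp (re-exported via the import block added earlier), and its normalized and internalized forms behave as the docstring describes. A quick example with arbitrary values:

from swift.common.utils.timestamp import Timestamp

t = Timestamp(1402464677.04188, offset=1)
assert t.normal == '1402464677.04188'                     # fixed-width float
assert t.internal == '1402464677.04188_0000000000000001'  # hex offset appended
assert Timestamp(t.internal) == t                         # round-trips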
-def encode_timestamps(t1, t2=None, t3=None, explicit=False):
- """
- Encode up to three timestamps into a string. Unlike a Timestamp object, the
- encoded string does NOT used fixed width fields and consequently no
- relative chronology of the timestamps can be inferred from lexicographic
- sorting of encoded timestamp strings.
-
- The format of the encoded string is:
- <t1>[<+/-><t2 - t1>[<+/-><t3 - t2>]]
-
- i.e. if t1 = t2 = t3 then just the string representation of t1 is returned,
- otherwise the time offsets for t2 and t3 are appended. If explicit is True
- then the offsets for t2 and t3 are always appended even if zero.
-
- Note: any offset value in t1 will be preserved, but offsets on t2 and t3
- are not preserved. In the anticipated use cases for this method (and the
- inverse decode_timestamps method) the timestamps passed as t2 and t3 are
- not expected to have offsets as they will be timestamps associated with a
- POST request. In the case where the encoding is used in a container objects
- table row, t1 could be the PUT or DELETE time but t2 and t3 represent the
- content type and metadata times (if different from the data file) i.e.
- correspond to POST timestamps. In the case where the encoded form is used
- in a .meta file name, t1 and t2 both correspond to POST timestamps.
- """
- form = '{0}'
- values = [t1.short]
- if t2 is not None:
- t2_t1_delta = t2.raw - t1.raw
- explicit = explicit or (t2_t1_delta != 0)
- values.append(t2_t1_delta)
- if t3 is not None:
- t3_t2_delta = t3.raw - t2.raw
- explicit = explicit or (t3_t2_delta != 0)
- values.append(t3_t2_delta)
- if explicit:
- form += '{1:+x}'
- if t3 is not None:
- form += '{2:+x}'
- return form.format(*values)
-
-
-def decode_timestamps(encoded, explicit=False):
- """
- Parses a string of the form generated by encode_timestamps and returns
- a tuple of the three component timestamps. If explicit is False, component
- timestamps that are not explicitly encoded will be assumed to have zero
- delta from the previous component and therefore take the value of the
- previous component. If explicit is True, component timestamps that are
- not explicitly encoded will be returned with value None.
- """
- # TODO: some tests, e.g. in test_replicator, put float timestamps values
- # into container db's, hence this defensive check, but in real world
- # this may never happen.
- if not isinstance(encoded, six.string_types):
- ts = Timestamp(encoded)
- return ts, ts, ts
-
- parts = []
- signs = []
- pos_parts = encoded.split('+')
- for part in pos_parts:
- # parse time components and their signs
- # e.g. x-y+z --> parts = [x, y, z] and signs = [+1, -1, +1]
- neg_parts = part.split('-')
- parts = parts + neg_parts
- signs = signs + [1] + [-1] * (len(neg_parts) - 1)
- t1 = Timestamp(parts[0])
- t2 = t3 = None
- if len(parts) > 1:
- t2 = t1
- delta = signs[1] * int(parts[1], 16)
- # if delta = 0 we want t2 = t3 = t1 in order to
- # preserve any offset in t1 - only construct a distinct
- # timestamp if there is a non-zero delta.
- if delta:
- t2 = Timestamp((t1.raw + delta) * PRECISION)
- elif not explicit:
- t2 = t1
- if len(parts) > 2:
- t3 = t2
- delta = signs[2] * int(parts[2], 16)
- if delta:
- t3 = Timestamp((t2.raw + delta) * PRECISION)
- elif not explicit:
- t3 = t2
- return t1, t2, t3
-
-
-def normalize_timestamp(timestamp):
- """
- Format a timestamp (string or numeric) into a standardized
- xxxxxxxxxx.xxxxx (10.5) format.
-
- Note that timestamps using values greater than or equal to November 20th,
- 2286 at 17:46 UTC will use 11 digits to represent the number of
- seconds.
-
- :param timestamp: unix timestamp
- :returns: normalized timestamp as a string
- """
- return Timestamp(timestamp).normal
-
-
-EPOCH = datetime.datetime(1970, 1, 1)
-
-
-def last_modified_date_to_timestamp(last_modified_date_str):
- """
- Convert a last modified date (like you'd get from a container listing,
- e.g. 2014-02-28T23:22:36.698390) to a float.
- """
- return Timestamp.from_isoformat(last_modified_date_str)
-
-
-def normalize_delete_at_timestamp(timestamp, high_precision=False):
- """
- Format a timestamp (string or numeric) into a standardized
- xxxxxxxxxx (10) or xxxxxxxxxx.xxxxx (10.5) format.
-
- Note that timestamps less than 0000000000 are raised to
- 0000000000 and values greater than November 20th, 2286 at
- 17:46:39 UTC will be capped at that date and time, resulting in
- no return value exceeding 9999999999.99999 (or 9999999999 if
- using low-precision).
-
- This cap is because the expirer is already working through a
- sorted list of strings that were all a length of 10. Adding
- another digit would mess up the sort and cause the expirer to
- break from processing early. By 2286, this problem will need to
- be fixed, probably by creating an additional .expiring_objects
- account to work from with 11 (or more) digit container names.
-
- :param timestamp: unix timestamp
- :returns: normalized timestamp as a string
- """
- fmt = '%016.5f' if high_precision else '%010d'
- return fmt % min(max(0, float(timestamp)), 9999999999.99999)
-
-
def mkdirs(path):
"""
Ensures the path is a directory or makes it if not. Errors if the path
@@ -2073,6 +1415,11 @@ class SwiftLoggerAdapter(logging.LoggerAdapter):
process() method to accomplish anything useful.
"""
+ @property
+ def name(self):
+ # py3 does this for us already; add it for py2
+ return self.logger.name
+
def get_metric_name(self, metric):
# subclasses may override this method to annotate the metric name
return metric
@@ -2274,8 +1621,10 @@ class LogAdapter(logging.LoggerAdapter, object):
emsg = '%s: %s' % (exc.__class__.__name__, exc.line)
elif isinstance(exc, eventlet.Timeout):
emsg = exc.__class__.__name__
- if hasattr(exc, 'seconds'):
- emsg += ' (%ss)' % exc.seconds
+ detail = '%ss' % exc.seconds
+ if hasattr(exc, 'created_at'):
+ detail += ' after %0.2fs' % (time.time() - exc.created_at)
+ emsg += ' (%s)' % detail
if isinstance(exc, swift.common.exceptions.MessageTimeout):
if exc.msg:
emsg += ' %s' % exc.msg
@@ -3205,6 +2554,7 @@ def readconf(conf_path, section_name=None, log_name=None, defaults=None,
# values like "1%" (which we want to support for
# fallocate_reserve).
c = ConfigParser(defaults, interpolation=NicerInterpolation())
+ c.optionxform = str # Don't lower-case keys
if hasattr(conf_path, 'readline'):
if hasattr(conf_path, 'seek'):
@@ -5107,87 +4457,6 @@ def parse_content_disposition(header):
return header, attributes
-class sockaddr_alg(ctypes.Structure):
- _fields_ = [("salg_family", ctypes.c_ushort),
- ("salg_type", ctypes.c_ubyte * 14),
- ("salg_feat", ctypes.c_uint),
- ("salg_mask", ctypes.c_uint),
- ("salg_name", ctypes.c_ubyte * 64)]
-
-
-_bound_md5_sockfd = None
-
-
-def get_md5_socket():
- """
- Get an MD5 socket file descriptor. One can MD5 data with it by writing it
- to the socket with os.write, then os.read the 16 bytes of the checksum out
- later.
-
- NOTE: It is the caller's responsibility to ensure that os.close() is
- called on the returned file descriptor. This is a bare file descriptor,
- not a Python object. It doesn't close itself.
- """
-
- # Linux's AF_ALG sockets work like this:
- #
- # First, initialize a socket with socket() and bind(). This tells the
- # socket what algorithm to use, as well as setting up any necessary bits
- # like crypto keys. Of course, MD5 doesn't need any keys, so it's just the
- # algorithm name.
- #
- # Second, to hash some data, get a second socket by calling accept() on
- # the first socket. Write data to the socket, then when finished, read the
- # checksum from the socket and close it. This lets you checksum multiple
- # things without repeating all the setup code each time.
- #
- # Since we only need to bind() one socket, we do that here and save it for
- # future re-use. That way, we only use one file descriptor to get an MD5
- # socket instead of two, and we also get to save some syscalls.
-
- global _bound_md5_sockfd
- global _libc_socket
- global _libc_bind
- global _libc_accept
-
- if _libc_accept is None:
- _libc_accept = load_libc_function('accept', fail_if_missing=True)
- if _libc_socket is None:
- _libc_socket = load_libc_function('socket', fail_if_missing=True)
- if _libc_bind is None:
- _libc_bind = load_libc_function('bind', fail_if_missing=True)
-
- # Do this at first call rather than at import time so that we don't use a
- # file descriptor on systems that aren't using any MD5 sockets.
- if _bound_md5_sockfd is None:
- sockaddr_setup = sockaddr_alg(
- AF_ALG,
- (ord('h'), ord('a'), ord('s'), ord('h'), 0),
- 0, 0,
- (ord('m'), ord('d'), ord('5'), 0))
- hash_sockfd = _libc_socket(ctypes.c_int(AF_ALG),
- ctypes.c_int(socket.SOCK_SEQPACKET),
- ctypes.c_int(0))
- if hash_sockfd < 0:
- raise IOError(ctypes.get_errno(),
- "Failed to initialize MD5 socket")
-
- bind_result = _libc_bind(ctypes.c_int(hash_sockfd),
- ctypes.pointer(sockaddr_setup),
- ctypes.c_int(ctypes.sizeof(sockaddr_alg)))
- if bind_result < 0:
- os.close(hash_sockfd)
- raise IOError(ctypes.get_errno(), "Failed to bind MD5 socket")
-
- _bound_md5_sockfd = hash_sockfd
-
- md5_sockfd = _libc_accept(ctypes.c_int(_bound_md5_sockfd), None, 0)
- if md5_sockfd < 0:
- raise IOError(ctypes.get_errno(), "Failed to accept MD5 socket")
-
- return md5_sockfd
-
-
try:
_test_md5 = hashlib.md5(usedforsecurity=False) # nosec
@@ -5443,6 +4712,12 @@ class NamespaceBoundList(object):
"""
self.bounds = [] if bounds is None else bounds
+ def __eq__(self, other):
+ # test for equality of NamespaceBoundList objects only
+ if not isinstance(other, NamespaceBoundList):
+ return False
+ return self.bounds == other.bounds
+
@classmethod
def parse(cls, namespaces):
"""
@@ -5498,7 +4773,12 @@ class NamespaceBoundList(object):
def get_namespace(self, item):
"""
- Get a Namespace instance that contains ``item``.
+ Get a Namespace instance that contains ``item`` by bisecting on the
+ lower bounds directly. This function is used on performance-sensitive
+ paths, for example '_get_update_shard' in the proxy object controller.
+ For normal paths, convert the NamespaceBoundList to a list of
+ Namespaces and use `~swift.common.utils.find_namespace` or
+ `~swift.common.utils.filter_namespaces`.
:param item: The item for which a Namespace is to be found.
:return: the Namespace that contains ``item``.
@@ -5509,6 +4789,24 @@ class NamespaceBoundList(object):
else self.bounds[pos + 1][0])
return Namespace(name, lower, upper)
+ def get_namespaces(self):
+ """
+ Get the contained namespaces as a list of contiguous Namespaces ordered
+ by lower bound.
+
+ :return: A list of Namespace objects which are ordered by
+ ``lower bound``.
+ """
+ if not self.bounds:
+ return []
+ namespaces = []
+ num_ns = len(self.bounds)
+ for i in range(num_ns):
+ lower, name = self.bounds[i]
+ upper = ('' if i + 1 == num_ns else self.bounds[i + 1][0])
+ namespaces.append(Namespace(name, lower, upper))
+ return namespaces
+
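The expansion above is easiest to see with a concrete bounds list. A minimal sketch, assuming a Swift checkout is importable; the shard names and bound values are invented for illustration:

    from swift.common.utils import NamespaceBoundList

    # each entry is [lower_bound, name]; each namespace's upper bound is
    # the next entry's lower bound, and the last namespace is unbounded
    ns_bounds = NamespaceBoundList([['', '.shards_a/c_0'],
                                    ['m', '.shards_a/c_1']])
    for ns in ns_bounds.get_namespaces():
        print(ns.name, str(ns.lower), str(ns.upper))
    # .shards_a/c_0 covers ('', 'm']; .shards_a/c_1 covers ('m', '')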
class ShardName(object):
"""
@@ -5693,11 +4991,11 @@ class ShardRange(Namespace):
'_deleted', '_state', '_count', '_bytes',
'_tombstones', '_reported')
- def __init__(self, name, timestamp,
+ def __init__(self, name, timestamp=0,
lower=Namespace.MIN, upper=Namespace.MAX,
object_count=0, bytes_used=0, meta_timestamp=None,
deleted=False, state=None, state_timestamp=None, epoch=None,
- reported=False, tombstones=-1):
+ reported=False, tombstones=-1, **kwargs):
super(ShardRange, self).__init__(name=name, lower=lower, upper=upper)
self.account = self.container = self._timestamp = \
self._meta_timestamp = self._state_timestamp = self._epoch = None
@@ -5720,7 +5018,8 @@ class ShardRange(Namespace):
def sort_key(cls, sr):
# defines the sort order for shard ranges
# note if this ever changes to *not* sort by upper first then it breaks
- # a key assumption for bisect, which is used by utils.find_shard_range
+ # a key assumption for bisect, which is used by utils.find_namespace
+ # with shard ranges.
return sr.upper, sr.state, sr.lower, sr.name
def is_child_of(self, parent):
@@ -6276,7 +5575,7 @@ class ShardRangeList(UserList):
containing the filtered shard ranges.
"""
return ShardRangeList(
- filter_shard_ranges(self, includes, marker, end_marker))
+ filter_namespaces(self, includes, marker, end_marker))
def find_lower(self, condition):
"""
@@ -6297,44 +5596,45 @@ class ShardRangeList(UserList):
return self.upper
-def find_shard_range(item, ranges):
+def find_namespace(item, namespaces):
"""
- Find a ShardRange in given list of ``shard_ranges`` whose namespace
+ Find a Namespace/ShardRange in a given list of ``namespaces`` whose namespace
contains ``item``.
- :param item: The item for a which a ShardRange is to be found.
- :param ranges: a sorted list of ShardRanges.
- :return: the ShardRange whose namespace contains ``item``, or None if
- no suitable range is found.
+ :param item: The item for which a Namespace is to be found.
+ :param namespaces: a sorted list of Namespaces.
+ :return: the Namespace/ShardRange whose namespace contains ``item``, or
+ None if no suitable Namespace is found.
"""
- index = bisect.bisect_left(ranges, item)
- if index != len(ranges) and item in ranges[index]:
- return ranges[index]
+ index = bisect.bisect_left(namespaces, item)
+ if index != len(namespaces) and item in namespaces[index]:
+ return namespaces[index]
return None
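The bisect trick relies on Namespaces ordering against bare item names, per the sort_key comment elsewhere in this patch. A quick sketch of the lookup (names and bounds invented):

    from swift.common.utils import Namespace, find_namespace

    namespaces = [Namespace('a/c_0', '', 'cat'),
                  Namespace('a/c_1', 'cat', 'giraffe'),
                  Namespace('a/c_2', 'giraffe', '')]
    found = find_namespace('dog', namespaces)
    print(found.name)  # a/c_1, since 'cat' < 'dog' <= 'giraffe'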
-def filter_shard_ranges(shard_ranges, includes, marker, end_marker):
+def filter_namespaces(namespaces, includes, marker, end_marker):
"""
- Filter the given shard ranges to those whose namespace includes the
- ``includes`` name or any part of the namespace between ``marker`` and
+ Filter the given Namespaces/ShardRanges to those whose namespace includes
+ the ``includes`` name or any part of the namespace between ``marker`` and
``end_marker``. If none of ``includes``, ``marker`` or ``end_marker`` are
- specified then all shard ranges will be returned.
+ specified then all Namespaces will be returned.
- :param shard_ranges: A list of :class:`~swift.common.utils.ShardRange`.
- :param includes: a string; if not empty then only the shard range, if any,
- whose namespace includes this string will be returned, and ``marker``
- and ``end_marker`` will be ignored.
+ :param namespaces: A list of :class:`~swift.common.utils.Namespace` or
+ :class:`~swift.common.utils.ShardRange`.
+ :param includes: a string; if not empty then only the Namespace,
+ if any, whose namespace includes this string will be returned,
+ and ``marker`` and ``end_marker`` will be ignored.
:param marker: if specified then only shard ranges whose upper bound is
greater than this value will be returned.
:param end_marker: if specified then only shard ranges whose lower bound is
less than this value will be returned.
- :return: A filtered list of :class:`~swift.common.utils.ShardRange`.
+ :return: A filtered list of :class:`~swift.common.utils.Namespace`.
"""
if includes:
- shard_range = find_shard_range(includes, shard_ranges)
- return [shard_range] if shard_range else []
+ namespace = find_namespace(includes, namespaces)
+ return [namespace] if namespace else []
- def shard_range_filter(sr):
+ def namespace_filter(sr):
end = start = True
if end_marker:
end = end_marker > sr.lower
@@ -6343,79 +5643,13 @@ def filter_shard_ranges(shard_ranges, includes, marker, end_marker):
return start and end
if marker or end_marker:
- return list(filter(shard_range_filter, shard_ranges))
+ return list(filter(namespace_filter, namespaces))
if marker == Namespace.MAX or end_marker == Namespace.MIN:
- # MIN and MAX are both Falsy so not handled by shard_range_filter
+ # MIN and MAX are both Falsy so not handled by namespace_filter
return []
- return shard_ranges
-
-
-def modify_priority(conf, logger):
- """
- Modify priority by nice and ionice.
- """
-
- global _libc_setpriority
- if _libc_setpriority is None:
- _libc_setpriority = load_libc_function('setpriority',
- errcheck=True)
-
- def _setpriority(nice_priority):
- """
- setpriority for this pid
-
- :param nice_priority: valid values are -19 to 20
- """
- try:
- _libc_setpriority(PRIO_PROCESS, os.getpid(),
- int(nice_priority))
- except (ValueError, OSError):
- print(_("WARNING: Unable to modify scheduling priority of process."
- " Keeping unchanged! Check logs for more info. "))
- logger.exception('Unable to modify nice priority')
- else:
- logger.debug('set nice priority to %s' % nice_priority)
-
- nice_priority = conf.get('nice_priority')
- if nice_priority is not None:
- _setpriority(nice_priority)
-
- global _posix_syscall
- if _posix_syscall is None:
- _posix_syscall = load_libc_function('syscall', errcheck=True)
-
- def _ioprio_set(io_class, io_priority):
- """
- ioprio_set for this process
-
- :param io_class: the I/O class component, can be
- IOPRIO_CLASS_RT, IOPRIO_CLASS_BE,
- or IOPRIO_CLASS_IDLE
- :param io_priority: priority value in the I/O class
- """
- try:
- io_class = IO_CLASS_ENUM[io_class]
- io_priority = int(io_priority)
- _posix_syscall(NR_ioprio_set(),
- IOPRIO_WHO_PROCESS,
- os.getpid(),
- IOPRIO_PRIO_VALUE(io_class, io_priority))
- except (KeyError, ValueError, OSError):
- print(_("WARNING: Unable to modify I/O scheduling class "
- "and priority of process. Keeping unchanged! "
- "Check logs for more info."))
- logger.exception("Unable to modify ionice priority")
- else:
- logger.debug('set ionice class %s priority %s',
- io_class, io_priority)
-
- io_class = conf.get("ionice_class")
- if io_class is None:
- return
- io_priority = conf.get("ionice_priority", 0)
- _ioprio_set(io_class, io_priority)
+ return namespaces
def o_tmpfile_in_path_supported(dirpath):
@@ -6995,14 +6229,15 @@ class Watchdog(object):
:param timeout: duration before the timeout expires
:param exc: exception to throw when the timeout expires, must inherit
- from eventlet.timeouts.Timeout
+ from eventlet.Timeout
:param timeout_at: allows forcing the expiration timestamp
:return: id of the scheduled timeout, needed to cancel it
"""
+ now = time.time()
if not timeout_at:
- timeout_at = time.time() + timeout
+ timeout_at = now + timeout
gth = eventlet.greenthread.getcurrent()
- timeout_definition = (timeout, timeout_at, gth, exc)
+ timeout_definition = (timeout, timeout_at, gth, exc, now)
key = id(timeout_definition)
self._timeouts[key] = timeout_definition
@@ -7025,8 +6260,7 @@ class Watchdog(object):
:param key: timeout id, as returned by start()
"""
try:
- if key in self._timeouts:
- del(self._timeouts[key])
+ del(self._timeouts[key])
except KeyError:
pass
@@ -7046,15 +6280,14 @@ class Watchdog(object):
self._next_expiration = None
if self._evt.ready():
self._evt.reset()
- for k, (timeout, timeout_at, gth, exc) in list(self._timeouts.items()):
+ for k, (timeout, timeout_at, gth, exc,
+ created_at) in list(self._timeouts.items()):
if timeout_at <= now:
- try:
- if k in self._timeouts:
- del(self._timeouts[k])
- except KeyError:
- pass
+ self.stop(k)
e = exc()
+ # set this after __init__ to keep it off the eventlet scheduler
e.seconds = timeout
+ e.created_at = created_at
eventlet.hubs.get_hub().schedule_call_global(0, gth.throw, e)
else:
if (self._next_expiration is None
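Threading ``created_at`` through the timeout tuple is what feeds the new LogAdapter detail shown earlier, e.g. ``Timeout (5s after 6.28s)``. A rough sketch of that rendering, with invented values and the exception built inertly the same way ``_evt_loop`` does:

    import time
    import eventlet

    e = eventlet.Timeout(None)         # None: not scheduled with the hub
    e.seconds = 5                      # set after __init__, as in _evt_loop
    e.created_at = time.time() - 6.28  # when Watchdog.start() ran

    detail = '%ss' % e.seconds
    if hasattr(e, 'created_at'):
        detail += ' after %0.2fs' % (time.time() - e.created_at)
    print('%s (%s)' % (e.__class__.__name__, detail))
    # Timeout (5s after 6.28s)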
diff --git a/swift/common/utils/libc.py b/swift/common/utils/libc.py
new file mode 100644
index 000000000..df2179020
--- /dev/null
+++ b/swift/common/utils/libc.py
@@ -0,0 +1,487 @@
+# Copyright (c) 2010-2023 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Functions Swift uses to interact with libc and other low-level APIs."""
+
+import ctypes
+import ctypes.util
+import errno
+import fcntl
+import logging
+import os
+import platform
+import socket
+
+
+# These are lazily pulled from libc elsewhere
+_sys_fallocate = None
+_posix_fadvise = None
+_libc_socket = None
+_libc_bind = None
+_libc_accept = None
+# see man -s 2 setpriority
+_libc_setpriority = None
+# see man -s 2 syscall
+_posix_syscall = None
+
+# If set to non-zero, fallocate routines will fail based on free space
+# available being at or below this amount, in bytes.
+FALLOCATE_RESERVE = 0
+# Indicates if FALLOCATE_RESERVE is the percentage of free space (True) or
+# the number of bytes (False).
+FALLOCATE_IS_PERCENT = False
+
+# from /usr/include/linux/falloc.h
+FALLOC_FL_KEEP_SIZE = 1
+FALLOC_FL_PUNCH_HOLE = 2
+
+# from /usr/src/linux-headers-*/include/uapi/linux/resource.h
+PRIO_PROCESS = 0
+
+
+# /usr/include/x86_64-linux-gnu/asm/unistd_64.h defines syscalls; there
+# are many like it, but this one is mine. See man -s 2 ioprio_set
+def NR_ioprio_set():
+ """Give __NR_ioprio_set value for your system."""
+ architecture = os.uname()[4]
+ arch_bits = platform.architecture()[0]
+ # check for a supported system; currently x86_64 and AArch64
+ if architecture == 'x86_64' and arch_bits == '64bit':
+ return 251
+ elif architecture == 'aarch64' and arch_bits == '64bit':
+ return 30
+ raise OSError("Swift doesn't support ionice priority for %s %s" %
+ (architecture, arch_bits))
+
+
+# this syscall integer probably only works on x86_64 linux systems, you
+# can check if it's correct on yours with something like this:
+"""
+#include <stdio.h>
+#include <sys/syscall.h>
+
+int main(int argc, const char* argv[]) {
+ printf("%d\n", __NR_ioprio_set);
+ return 0;
+}
+"""
+
+# this is the value for "which" that says our who value will be a pid
+# pulled out of /usr/src/linux-headers-*/include/linux/ioprio.h
+IOPRIO_WHO_PROCESS = 1
+
+
+IO_CLASS_ENUM = {
+ 'IOPRIO_CLASS_RT': 1,
+ 'IOPRIO_CLASS_BE': 2,
+ 'IOPRIO_CLASS_IDLE': 3,
+}
+
+# the IOPRIO_PRIO_VALUE "macro" is also pulled from
+# /usr/src/linux-headers-*/include/linux/ioprio.h
+IOPRIO_CLASS_SHIFT = 13
+
+
+def IOPRIO_PRIO_VALUE(class_, data):
+ return (((class_) << IOPRIO_CLASS_SHIFT) | data)
+
+
+# These constants are Linux-specific, and Python doesn't seem to know
+# about them. We ask anyway just in case that ever gets fixed.
+#
+# The values were copied from the Linux 3.x kernel headers.
+AF_ALG = getattr(socket, 'AF_ALG', 38)
+F_SETPIPE_SZ = getattr(fcntl, 'F_SETPIPE_SZ', 1031)
+
+
+def noop_libc_function(*args):
+ return 0
+
+
+def load_libc_function(func_name, log_error=True,
+ fail_if_missing=False, errcheck=False):
+ """
+ Attempt to find the function in libc, otherwise return a no-op func.
+
+ :param func_name: name of the function to pull from libc.
+ :param log_error: log an error when a function can't be found
+ :param fail_if_missing: raise an exception when a function can't be found.
+ Default behavior is to return a no-op function.
+ :param errcheck: boolean; if true, install a wrapper on the function
+ to check for a return value of -1 and, if found, call
+ ctypes.get_errno and raise an OSError
+ """
+ try:
+ libc = ctypes.CDLL(ctypes.util.find_library('c'), use_errno=True)
+ func = getattr(libc, func_name)
+ except AttributeError:
+ if fail_if_missing:
+ raise
+ if log_error:
+ logging.warning("Unable to locate %s in libc. Leaving as a "
+ "no-op.", func_name)
+ return noop_libc_function
+ if errcheck:
+ def _errcheck(result, f, args):
+ if result == -1:
+ errcode = ctypes.get_errno()
+ raise OSError(errcode, os.strerror(errcode))
+ return result
+ func.errcheck = _errcheck
+ return func
+
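A short sketch of the loader's three behaviours, assuming a Linux libc is present:

    from swift.common.utils.libc import load_libc_function

    getpid = load_libc_function('getpid')   # real symbol: a foreign function
    print(getpid())                         # same value as os.getpid()

    noop = load_libc_function('no_such_symbol', log_error=False)
    print(noop())                           # 0 - fell back to the no-op

    try:
        load_libc_function('no_such_symbol', fail_if_missing=True)
    except AttributeError:
        print('raised instead of substituting the no-op')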
+
+class _LibcWrapper(object):
+ """
+ A callable object that forwards its calls to a C function from libc.
+
+ These objects are lazy. libc will not be checked until someone tries to
+ either call the function or check its availability.
+
+ _LibcWrapper objects have an "available" property; if true, then libc
+ has the function of that name. If false, then calls will fail with a
+ NotImplementedError.
+ """
+
+ def __init__(self, func_name):
+ self._func_name = func_name
+ self._func_handle = None
+ self._loaded = False
+
+ def _ensure_loaded(self):
+ if not self._loaded:
+ func_name = self._func_name
+ try:
+ # Keep everything in this try-block in local variables so
+ # that a typo in self.some_attribute_name doesn't raise a
+ # spurious AttributeError.
+ func_handle = load_libc_function(
+ func_name, fail_if_missing=True)
+ self._func_handle = func_handle
+ except AttributeError:
+ # We pass fail_if_missing=True to load_libc_function and
+ # then ignore the error. It's weird, but otherwise we have
+ # to check if self._func_handle is noop_libc_function, and
+ # that's even weirder.
+ pass
+ self._loaded = True
+
+ @property
+ def available(self):
+ self._ensure_loaded()
+ return bool(self._func_handle)
+
+ def __call__(self, *args):
+ if self.available:
+ return self._func_handle(*args)
+ else:
+ raise NotImplementedError(
+ "No function %r found in libc" % self._func_name)
+
+
+def config_fallocate_value(reserve_value):
+ """
+ Returns fallocate reserve_value as an int or float.
+ Returns is_percent as a boolean.
+ Raises a ValueError on an invalid fallocate value.
+ """
+ try:
+ if str(reserve_value[-1:]) == '%':
+ reserve_value = float(reserve_value[:-1])
+ is_percent = True
+ else:
+ reserve_value = int(reserve_value)
+ is_percent = False
+ except ValueError:
+ raise ValueError('Error: %s is an invalid value for fallocate'
+ '_reserve.' % reserve_value)
+ return reserve_value, is_percent
+
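The two parse modes, percentage versus absolute bytes, in brief:

    from swift.common.utils.libc import config_fallocate_value

    print(config_fallocate_value('1%'))     # (1.0, True): percent of free space
    print(config_fallocate_value('10240'))  # (10240, False): absolute bytes
    try:
        config_fallocate_value('ten')
    except ValueError as err:
        print(err)  # Error: ten is an invalid value for fallocate_reserve.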
+
+_fallocate_enabled = True
+_fallocate_warned_about_missing = False
+_sys_fallocate = _LibcWrapper('fallocate')
+_sys_posix_fallocate = _LibcWrapper('posix_fallocate')
+
+
+def disable_fallocate():
+ global _fallocate_enabled
+ _fallocate_enabled = False
+
+
+def fallocate(fd, size, offset=0):
+ """
+ Pre-allocate disk space for a file.
+
+ This function can be disabled by calling disable_fallocate(). If no
+ suitable C function is available in libc, this function is a no-op.
+
+ :param fd: file descriptor
+ :param size: size to allocate (in bytes)
+ """
+ global _fallocate_enabled
+ if not _fallocate_enabled:
+ return
+
+ if size < 0:
+ size = 0 # Done historically; not really sure why
+ if size >= (1 << 63):
+ raise ValueError('size must be less than 2 ** 63')
+ if offset < 0:
+ raise ValueError('offset must be non-negative')
+ if offset >= (1 << 63):
+ raise ValueError('offset must be less than 2 ** 63')
+
+ # Make sure there's some (configurable) amount of free space in
+ # addition to the number of bytes we're allocating.
+ if FALLOCATE_RESERVE:
+ st = os.fstatvfs(fd)
+ free = st.f_frsize * st.f_bavail - size
+ if FALLOCATE_IS_PERCENT:
+ free = (float(free) / float(st.f_frsize * st.f_blocks)) * 100
+ if float(free) <= float(FALLOCATE_RESERVE):
+ raise OSError(
+ errno.ENOSPC,
+ 'FALLOCATE_RESERVE fail %g <= %g' %
+ (free, FALLOCATE_RESERVE))
+
+ if _sys_fallocate.available:
+ # Parameters are (fd, mode, offset, length).
+ #
+ # mode=FALLOC_FL_KEEP_SIZE pre-allocates invisibly (without
+ # affecting the reported file size).
+ ret = _sys_fallocate(
+ fd, FALLOC_FL_KEEP_SIZE, ctypes.c_uint64(offset),
+ ctypes.c_uint64(size))
+ err = ctypes.get_errno()
+ elif _sys_posix_fallocate.available:
+ # Parameters are (fd, offset, length).
+ ret = _sys_posix_fallocate(fd, ctypes.c_uint64(offset),
+ ctypes.c_uint64(size))
+ err = ctypes.get_errno()
+ else:
+ # No suitable fallocate-like function is in our libc. Warn about it,
+ # but just once per process, and then do nothing.
+ global _fallocate_warned_about_missing
+ if not _fallocate_warned_about_missing:
+ logging.warning("Unable to locate fallocate, posix_fallocate in "
+ "libc. Leaving as a no-op.")
+ _fallocate_warned_about_missing = True
+ return
+
+ if ret and err not in (0, errno.ENOSYS, errno.EOPNOTSUPP,
+ errno.EINVAL):
+ raise OSError(err, 'Unable to fallocate(%s)' % size)
+
+
+def punch_hole(fd, offset, length):
+ """
+ De-allocate disk space in the middle of a file.
+
+ :param fd: file descriptor
+ :param offset: index of first byte to de-allocate
+ :param length: number of bytes to de-allocate
+ """
+ if offset < 0:
+ raise ValueError('offset must be non-negative')
+ if offset >= (1 << 63):
+ raise ValueError('offset must be less than 2 ** 63')
+ if length <= 0:
+ raise ValueError('length must be positive')
+ if length >= (1 << 63):
+ raise ValueError('length must be less than 2 ** 63')
+
+ if _sys_fallocate.available:
+ # Parameters are (fd, mode, offset, length).
+ ret = _sys_fallocate(
+ fd,
+ FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE,
+ ctypes.c_uint64(offset),
+ ctypes.c_uint64(length))
+ err = ctypes.get_errno()
+ if ret and err:
+ mode_str = "FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE"
+ raise OSError(err, "Unable to fallocate(%d, %s, %d, %d)" % (
+ fd, mode_str, offset, length))
+ else:
+ raise OSError(errno.ENOTSUP,
+ 'No suitable C function found for hole punching')
+
+
+def drop_buffer_cache(fd, offset, length):
+ """
+ Drop 'buffer' cache for the given range of the given file.
+
+ :param fd: file descriptor
+ :param offset: start offset
+ :param length: length
+ """
+ global _posix_fadvise
+ if _posix_fadvise is None:
+ _posix_fadvise = load_libc_function('posix_fadvise64')
+ # 4 means "POSIX_FADV_DONTNEED"
+ ret = _posix_fadvise(fd, ctypes.c_uint64(offset),
+ ctypes.c_uint64(length), 4)
+ if ret != 0:
+ logging.warning("posix_fadvise64(%(fd)s, %(offset)s, %(length)s, 4) "
+ "-> %(ret)s", {'fd': fd, 'offset': offset,
+ 'length': length, 'ret': ret})
+
+
+class sockaddr_alg(ctypes.Structure):
+ _fields_ = [("salg_family", ctypes.c_ushort),
+ ("salg_type", ctypes.c_ubyte * 14),
+ ("salg_feat", ctypes.c_uint),
+ ("salg_mask", ctypes.c_uint),
+ ("salg_name", ctypes.c_ubyte * 64)]
+
+
+_bound_md5_sockfd = None
+
+
+def get_md5_socket():
+ """
+ Get an MD5 socket file descriptor. One can MD5 data with it by writing
+ the data to the socket with os.write, then reading the 16 bytes of the
+ checksum back out with os.read.
+
+ NOTE: It is the caller's responsibility to ensure that os.close() is
+ called on the returned file descriptor. This is a bare file descriptor,
+ not a Python object. It doesn't close itself.
+ """
+
+ # Linux's AF_ALG sockets work like this:
+ #
+ # First, initialize a socket with socket() and bind(). This tells the
+ # socket what algorithm to use, as well as setting up any necessary bits
+ # like crypto keys. Of course, MD5 doesn't need any keys, so it's just the
+ # algorithm name.
+ #
+ # Second, to hash some data, get a second socket by calling accept() on
+ # the first socket. Write data to the socket, then when finished, read the
+ # checksum from the socket and close it. This lets you checksum multiple
+ # things without repeating all the setup code each time.
+ #
+ # Since we only need to bind() one socket, we do that here and save it for
+ # future re-use. That way, we only use one file descriptor to get an MD5
+ # socket instead of two, and we also get to save some syscalls.
+
+ global _bound_md5_sockfd
+ global _libc_socket
+ global _libc_bind
+ global _libc_accept
+
+ if _libc_accept is None:
+ _libc_accept = load_libc_function('accept', fail_if_missing=True)
+ if _libc_socket is None:
+ _libc_socket = load_libc_function('socket', fail_if_missing=True)
+ if _libc_bind is None:
+ _libc_bind = load_libc_function('bind', fail_if_missing=True)
+
+ # Do this at first call rather than at import time so that we don't use a
+ # file descriptor on systems that aren't using any MD5 sockets.
+ if _bound_md5_sockfd is None:
+ sockaddr_setup = sockaddr_alg(
+ AF_ALG,
+ (ord('h'), ord('a'), ord('s'), ord('h'), 0),
+ 0, 0,
+ (ord('m'), ord('d'), ord('5'), 0))
+ hash_sockfd = _libc_socket(ctypes.c_int(AF_ALG),
+ ctypes.c_int(socket.SOCK_SEQPACKET),
+ ctypes.c_int(0))
+ if hash_sockfd < 0:
+ raise IOError(ctypes.get_errno(),
+ "Failed to initialize MD5 socket")
+
+ bind_result = _libc_bind(ctypes.c_int(hash_sockfd),
+ ctypes.pointer(sockaddr_setup),
+ ctypes.c_int(ctypes.sizeof(sockaddr_alg)))
+ if bind_result < 0:
+ os.close(hash_sockfd)
+ raise IOError(ctypes.get_errno(), "Failed to bind MD5 socket")
+
+ _bound_md5_sockfd = hash_sockfd
+
+ md5_sockfd = _libc_accept(ctypes.c_int(_bound_md5_sockfd), None, 0)
+ if md5_sockfd < 0:
+ raise IOError(ctypes.get_errno(), "Failed to accept MD5 socket")
+
+ return md5_sockfd
+
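Given the accept()-per-use lifecycle described in the comment above, a caller's session looks roughly like this (a sketch assuming Linux with AF_ALG support; closing the bare fd is the caller's job):

    import binascii
    import os
    from swift.common.utils.libc import get_md5_socket

    fd = get_md5_socket()
    try:
        os.write(fd, b'some data to hash')
        digest = os.read(fd, 16)            # raw 16-byte MD5 digest
        print(binascii.hexlify(digest))
    finally:
        os.close(fd)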
+
+def modify_priority(conf, logger):
+ """
+ Modify priority by nice and ionice.
+ """
+
+ global _libc_setpriority
+ if _libc_setpriority is None:
+ _libc_setpriority = load_libc_function('setpriority',
+ errcheck=True)
+
+ def _setpriority(nice_priority):
+ """
+ setpriority for this pid
+
+ :param nice_priority: valid values are -19 to 20
+ """
+ try:
+ _libc_setpriority(PRIO_PROCESS, os.getpid(),
+ int(nice_priority))
+ except (ValueError, OSError):
+ print("WARNING: Unable to modify scheduling priority of process."
+ " Keeping unchanged! Check logs for more info. ")
+ logger.exception('Unable to modify nice priority')
+ else:
+ logger.debug('set nice priority to %s' % nice_priority)
+
+ nice_priority = conf.get('nice_priority')
+ if nice_priority is not None:
+ _setpriority(nice_priority)
+
+ global _posix_syscall
+ if _posix_syscall is None:
+ _posix_syscall = load_libc_function('syscall', errcheck=True)
+
+ def _ioprio_set(io_class, io_priority):
+ """
+ ioprio_set for this process
+
+ :param io_class: the I/O class component, can be
+ IOPRIO_CLASS_RT, IOPRIO_CLASS_BE,
+ or IOPRIO_CLASS_IDLE
+ :param io_priority: priority value in the I/O class
+ """
+ try:
+ io_class = IO_CLASS_ENUM[io_class]
+ io_priority = int(io_priority)
+ _posix_syscall(NR_ioprio_set(),
+ IOPRIO_WHO_PROCESS,
+ os.getpid(),
+ IOPRIO_PRIO_VALUE(io_class, io_priority))
+ except (KeyError, ValueError, OSError):
+ print("WARNING: Unable to modify I/O scheduling class "
+ "and priority of process. Keeping unchanged! "
+ "Check logs for more info.")
+ logger.exception("Unable to modify ionice priority")
+ else:
+ logger.debug('set ionice class %s priority %s',
+ io_class, io_priority)
+
+ io_class = conf.get("ionice_class")
+ if io_class is None:
+ return
+ io_priority = conf.get("ionice_priority", 0)
+ _ioprio_set(io_class, io_priority)
diff --git a/swift/common/utils/timestamp.py b/swift/common/utils/timestamp.py
new file mode 100644
index 000000000..be83fe512
--- /dev/null
+++ b/swift/common/utils/timestamp.py
@@ -0,0 +1,399 @@
+# Copyright (c) 2010-2023 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Timestamp-related functions for use with Swift."""
+
+import datetime
+import functools
+import math
+import time
+
+import six
+
+
+NORMAL_FORMAT = "%016.05f"
+INTERNAL_FORMAT = NORMAL_FORMAT + '_%016x'
+SHORT_FORMAT = NORMAL_FORMAT + '_%x'
+MAX_OFFSET = (16 ** 16) - 1
+PRECISION = 1e-5
+# Setting this to True will cause the internal format to always display
+# extended digits - even when the value is equivalent to the normalized form.
+# This isn't ideal during an upgrade when some servers might not understand
+# the new time format - but flipping it to True works great for testing.
+FORCE_INTERNAL = False # or True
+
+
+@functools.total_ordering
+class Timestamp(object):
+ """
+ Internal Representation of Swift Time.
+
+ The normalized form of the X-Timestamp header looks like a float
+ with a fixed width to ensure stable string sorting - normalized
+ timestamps look like "1402464677.04188"
+
+ To support overwrites of existing data without modifying the original
+ timestamp, but still maintain consistency, a second internal offset
+ vector is appended to the normalized timestamp form, which compares and
+ sorts greater than the fixed-width float format but less than a newer
+ timestamp. The internalized format of timestamps looks like
+ "1402464677.04188_0000000000000000" - the portion after the underscore
+ is the offset and is a formatted hexadecimal integer.
+
+ The internalized form is not exposed to clients in responses from
+ Swift. Normal client operations will not create a timestamp with an
+ offset.
+
+ The Timestamp class in common.utils supports internalized and
+ normalized formatting of timestamps and also comparison of timestamp
+ values. When the offset value of a Timestamp is 0 - it's considered
+ insignificant and need not be represented in the string format; to
+ support backwards compatibility during a Swift upgrade the
+ internalized and normalized form of a Timestamp with an
+ insignificant offset are identical. When a timestamp includes an
+ offset it will always be represented in the internalized form, but
+ is still excluded from the normalized form. Timestamps with an
+ equivalent timestamp portion (the float part) will compare and order
+ by their offset. Timestamps with a greater timestamp portion will
+ always compare and order greater than a Timestamp with a lesser
+ timestamp regardless of its offset. String comparison and ordering
+ is guaranteed for the internalized string format, and is backwards
+ compatible for normalized timestamps which do not include an offset.
+ """
+
+ def __init__(self, timestamp, offset=0, delta=0, check_bounds=True):
+ """
+ Create a new Timestamp.
+
+ :param timestamp: time in seconds since the Epoch, may be any of:
+
+ * a float or integer
+ * normalized/internalized string
+ * another instance of this class (offset is preserved)
+
+ :param offset: the second internal offset vector, an int
+ :param delta: deca-microsecond difference from the base timestamp
+ param, an int
+ """
+ if isinstance(timestamp, bytes):
+ timestamp = timestamp.decode('ascii')
+ if isinstance(timestamp, six.string_types):
+ base, base_offset = timestamp.partition('_')[::2]
+ self.timestamp = float(base)
+ if '_' in base_offset:
+ raise ValueError('invalid literal for int() with base 16: '
+ '%r' % base_offset)
+ if base_offset:
+ self.offset = int(base_offset, 16)
+ else:
+ self.offset = 0
+ else:
+ self.timestamp = float(timestamp)
+ self.offset = getattr(timestamp, 'offset', 0)
+ # increment offset
+ if offset >= 0:
+ self.offset += offset
+ else:
+ raise ValueError('offset must be non-negative')
+ if self.offset > MAX_OFFSET:
+ raise ValueError('offset must be smaller than %d' % MAX_OFFSET)
+ self.raw = int(round(self.timestamp / PRECISION))
+ # add delta
+ if delta:
+ self.raw = self.raw + delta
+ if self.raw <= 0:
+ raise ValueError(
+ 'delta must be greater than %d' % (-1 * self.raw))
+ self.timestamp = float(self.raw * PRECISION)
+ if check_bounds:
+ if self.timestamp < 0:
+ raise ValueError('timestamp cannot be negative')
+ if self.timestamp >= 10000000000:
+ raise ValueError('timestamp too large')
+
+ @classmethod
+ def now(cls, offset=0, delta=0):
+ return cls(time.time(), offset=offset, delta=delta)
+
+ def __repr__(self):
+ return INTERNAL_FORMAT % (self.timestamp, self.offset)
+
+ def __str__(self):
+ raise TypeError('You must specify which string format is required')
+
+ def __float__(self):
+ return self.timestamp
+
+ def __int__(self):
+ return int(self.timestamp)
+
+ def __nonzero__(self):
+ return bool(self.timestamp or self.offset)
+
+ def __bool__(self):
+ return self.__nonzero__()
+
+ @property
+ def normal(self):
+ return NORMAL_FORMAT % self.timestamp
+
+ @property
+ def internal(self):
+ if self.offset or FORCE_INTERNAL:
+ return INTERNAL_FORMAT % (self.timestamp, self.offset)
+ else:
+ return self.normal
+
+ @property
+ def short(self):
+ if self.offset or FORCE_INTERNAL:
+ return SHORT_FORMAT % (self.timestamp, self.offset)
+ else:
+ return self.normal
+
+ @property
+ def isoformat(self):
+ """
+ Get an isoformat string representation of the 'normal' part of the
+ Timestamp with microsecond precision and no trailing timezone, for
+ example::
+
+ 1970-01-01T00:00:00.000000
+
+ :return: an isoformat string
+ """
+ t = float(self.normal)
+ if six.PY3:
+ # On Python 3, round manually using the ROUND_HALF_EVEN rounding
+ # method, to match Python 2. Early Python 3 releases used a
+ # different rounding method, but Python 3.4.4 and 3.5.1 returned
+ # to ROUND_HALF_EVEN, as used by Python 2.
+ # See https://bugs.python.org/issue23517
+ frac, t = math.modf(t)
+ us = round(frac * 1e6)
+ if us >= 1000000:
+ t += 1
+ us -= 1000000
+ elif us < 0:
+ t -= 1
+ us += 1000000
+ dt = datetime.datetime.utcfromtimestamp(t)
+ dt = dt.replace(microsecond=us)
+ else:
+ dt = datetime.datetime.utcfromtimestamp(t)
+
+ isoformat = dt.isoformat()
+ # python isoformat() doesn't include microseconds when zero
+ if len(isoformat) < len("1970-01-01T00:00:00.000000"):
+ isoformat += ".000000"
+ return isoformat
+
+ @classmethod
+ def from_isoformat(cls, date_string):
+ """
+ Parse an isoformat string representation of time to a Timestamp object.
+
+ :param date_string: a string formatted as per a Timestamp.isoformat
+ property.
+ :return: an instance of this class.
+ """
+ start = datetime.datetime.strptime(date_string, "%Y-%m-%dT%H:%M:%S.%f")
+ delta = start - EPOCH
+ # This calculation is based on Python 2.7's Modules/datetimemodule.c,
+ # function delta_to_microseconds(), but written in Python.
+ return cls(delta.total_seconds())
+
+ def ceil(self):
+ """
+ Return the 'normal' part of the timestamp rounded up to the nearest
+ integer number of seconds.
+
+ This value should be used whenever the second-precision Last-Modified
+ time of a resource is required.
+
+ :return: a float value with second precision.
+ """
+ return math.ceil(float(self))
+
+ def __eq__(self, other):
+ if other is None:
+ return False
+ if not isinstance(other, Timestamp):
+ try:
+ other = Timestamp(other, check_bounds=False)
+ except ValueError:
+ return False
+ return self.internal == other.internal
+
+ def __ne__(self, other):
+ return not (self == other)
+
+ def __lt__(self, other):
+ if other is None:
+ return False
+ if not isinstance(other, Timestamp):
+ other = Timestamp(other, check_bounds=False)
+ if other.timestamp < 0:
+ return False
+ if other.timestamp >= 10000000000:
+ return True
+ return self.internal < other.internal
+
+ def __hash__(self):
+ return hash(self.internal)
+
+ def __invert__(self):
+ if self.offset:
+ raise ValueError('Cannot invert timestamps with offsets')
+ return Timestamp((999999999999999 - self.raw) * PRECISION)
+
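The normalized/internalized split and offset ordering described in the class docstring, concretely (a sketch; values arbitrary):

    from swift.common.utils import Timestamp

    t = Timestamp(1402464677.04188)
    print(t.normal)      # 1402464677.04188
    print(t.internal)    # same as normal: a zero offset is insignificant

    t_off = Timestamp(t, offset=1)
    print(t_off.internal)  # 1402464677.04188_0000000000000001
    print(t < t_off)       # True: equal float part, greater offset
    print(t_off < Timestamp(1402464677.04189))  # True: newer float part wins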
+
+def encode_timestamps(t1, t2=None, t3=None, explicit=False):
+ """
+ Encode up to three timestamps into a string. Unlike a Timestamp object, the
+ encoded string does NOT use fixed-width fields and consequently no
+ relative chronology of the timestamps can be inferred from lexicographic
+ sorting of encoded timestamp strings.
+
+ The format of the encoded string is:
+ <t1>[<+/-><t2 - t1>[<+/-><t3 - t2>]]
+
+ i.e. if t1 = t2 = t3 then just the string representation of t1 is returned,
+ otherwise the time offsets for t2 and t3 are appended. If explicit is True
+ then the offsets for t2 and t3 are always appended even if zero.
+
+ Note: any offset value in t1 will be preserved, but offsets on t2 and t3
+ are not preserved. In the anticipated use cases for this method (and the
+ inverse decode_timestamps method) the timestamps passed as t2 and t3 are
+ not expected to have offsets as they will be timestamps associated with a
+ POST request. In the case where the encoding is used in a container objects
+ table row, t1 could be the PUT or DELETE time but t2 and t3 represent the
+ content type and metadata times (if different from the data file) i.e.
+ correspond to POST timestamps. In the case where the encoded form is used
+ in a .meta file name, t1 and t2 both correspond to POST timestamps.
+ """
+ form = '{0}'
+ values = [t1.short]
+ if t2 is not None:
+ t2_t1_delta = t2.raw - t1.raw
+ explicit = explicit or (t2_t1_delta != 0)
+ values.append(t2_t1_delta)
+ if t3 is not None:
+ t3_t2_delta = t3.raw - t2.raw
+ explicit = explicit or (t3_t2_delta != 0)
+ values.append(t3_t2_delta)
+ if explicit:
+ form += '{1:+x}'
+ if t3 is not None:
+ form += '{2:+x}'
+ return form.format(*values)
+
+
+def decode_timestamps(encoded, explicit=False):
+ """
+ Parses a string of the form generated by encode_timestamps and returns
+ a tuple of the three component timestamps. If explicit is False, component
+ timestamps that are not explicitly encoded will be assumed to have zero
+ delta from the previous component and therefore take the value of the
+ previous component. If explicit is True, component timestamps that are
+ not explicitly encoded will be returned with value None.
+ """
+ # TODO: some tests, e.g. in test_replicator, put float timestamp values
+ # into container dbs, hence this defensive check; in the real world
+ # this may never happen.
+ if not isinstance(encoded, six.string_types):
+ ts = Timestamp(encoded)
+ return ts, ts, ts
+
+ parts = []
+ signs = []
+ pos_parts = encoded.split('+')
+ for part in pos_parts:
+ # parse time components and their signs
+ # e.g. x-y+z --> parts = [x, y, z] and signs = [+1, -1, +1]
+ neg_parts = part.split('-')
+ parts = parts + neg_parts
+ signs = signs + [1] + [-1] * (len(neg_parts) - 1)
+ t1 = Timestamp(parts[0])
+ t2 = t3 = None
+ if len(parts) > 1:
+ t2 = t1
+ delta = signs[1] * int(parts[1], 16)
+ # if delta = 0 we want t2 = t3 = t1 in order to
+ # preserve any offset in t1 - only construct a distinct
+ # timestamp if there is a non-zero delta.
+ if delta:
+ t2 = Timestamp((t1.raw + delta) * PRECISION)
+ elif not explicit:
+ t2 = t1
+ if len(parts) > 2:
+ t3 = t2
+ delta = signs[2] * int(parts[2], 16)
+ if delta:
+ t3 = Timestamp((t2.raw + delta) * PRECISION)
+ elif not explicit:
+ t3 = t2
+ return t1, t2, t3
+
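A round trip through the delta encoding (values arbitrary; the hex deltas count deca-microsecond Timestamp.raw units):

    from swift.common.utils.timestamp import (
        Timestamp, encode_timestamps, decode_timestamps)

    t1 = Timestamp(1234567890.12345)
    t2 = Timestamp(1234567890.12355)        # 10 raw units after t1

    print(encode_timestamps(t1))            # 1234567890.12345
    encoded = encode_timestamps(t1, t2, t2)
    print(encoded)                          # 1234567890.12345+a+0
    t1_, t2_, t3_ = decode_timestamps(encoded)
    print(t2_ == t2, t3_ == t2)             # True True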
+
+def normalize_timestamp(timestamp):
+ """
+ Format a timestamp (string or numeric) into a standardized
+ xxxxxxxxxx.xxxxx (10.5) format.
+
+ Note that timestamps using values greater than or equal to November 20th,
+ 2286 at 17:46 UTC will use 11 digits to represent the number of
+ seconds.
+
+ :param timestamp: unix timestamp
+ :returns: normalized timestamp as a string
+ """
+ return Timestamp(timestamp).normal
+
+
+EPOCH = datetime.datetime(1970, 1, 1)
+
+
+def last_modified_date_to_timestamp(last_modified_date_str):
+ """
+ Convert a last modified date (like you'd get from a container listing,
+ e.g. 2014-02-28T23:22:36.698390) to a float.
+ """
+ return Timestamp.from_isoformat(last_modified_date_str)
+
+
+def normalize_delete_at_timestamp(timestamp, high_precision=False):
+ """
+ Format a timestamp (string or numeric) into a standardized
+ xxxxxxxxxx (10) or xxxxxxxxxx.xxxxx (10.5) format.
+
+ Note that timestamps less than 0000000000 are raised to
+ 0000000000 and values greater than November 20th, 2286 at
+ 17:46:39 UTC will be capped at that date and time, resulting in
+ no return value exceeding 9999999999.99999 (or 9999999999 if
+ using low-precision).
+
+ This cap is because the expirer is already working through a
+ sorted list of strings that were all a length of 10. Adding
+ another digit would mess up the sort and cause the expirer to
+ break from processing early. By 2286, this problem will need to
+ be fixed, probably by creating an additional .expiring_objects
+ account to work from with 11 (or more) digit container names.
+
+ :param timestamp: unix timestamp
+ :returns: normalized timestamp as a string
+ """
+ fmt = '%016.5f' if high_precision else '%010d'
+ return fmt % min(max(0, float(timestamp)), 9999999999.99999)
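The floor/cap behaviour described above, in brief:

    from swift.common.utils.timestamp import normalize_delete_at_timestamp

    print(normalize_delete_at_timestamp(12))    # 0000000012
    print(normalize_delete_at_timestamp(-1))    # 0000000000 (floored)
    print(normalize_delete_at_timestamp(1e11))  # 9999999999 (capped)
    print(normalize_delete_at_timestamp(12, high_precision=True))
    # 0000000012.00000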
diff --git a/swift/common/wsgi.py b/swift/common/wsgi.py
index 4fa4946dd..910d0051c 100644
--- a/swift/common/wsgi.py
+++ b/swift/common/wsgi.py
@@ -361,10 +361,14 @@ def loadapp(conf_file, global_conf=None, allow_modify_pipeline=True):
if func and allow_modify_pipeline:
func(PipelineWrapper(ctx))
filters = [c.create() for c in reversed(ctx.filter_contexts)]
+ pipeline = [ultimate_app]
+ ultimate_app._pipeline = pipeline
+ ultimate_app._pipeline_final_app = ultimate_app
app = ultimate_app
- app._pipeline_final_app = ultimate_app
for filter_app in filters:
- app = filter_app(app)
+ app = filter_app(pipeline[0])
+ pipeline.insert(0, app)
+ app._pipeline = pipeline
app._pipeline_final_app = ultimate_app
return app
return ctx.create()
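The effect of the loop is that every element, filters and final app alike, shares one ``_pipeline`` list ordered outermost-first. A toy mirror of the resulting structure, using invented stand-in classes rather than real WSGI filters:

    class App(object):
        pass

    class Filter(object):
        def __init__(self, app):
            self.app = app

    # mirrors the loadapp() loop above
    ultimate_app = App()
    pipeline = [ultimate_app]
    ultimate_app._pipeline = pipeline
    ultimate_app._pipeline_final_app = ultimate_app
    for _ in range(2):                    # two filters, innermost first
        app = Filter(pipeline[0])
        pipeline.insert(0, app)
        app._pipeline = pipeline
        app._pipeline_final_app = ultimate_app

    assert pipeline[0]._pipeline is ultimate_app._pipeline
    assert pipeline[-1] is ultimate_app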
@@ -430,6 +434,9 @@ def run_server(conf, logger, sock, global_conf=None, ready_callback=None,
# header; "Etag" just won't do).
'capitalize_response_headers': False,
}
+ if conf.get('keepalive_timeout'):
+ server_kwargs['keepalive'] = float(conf['keepalive_timeout']) or False
+
if ready_callback:
ready_callback()
# Yes, eventlet, we know -- we have to support bad clients, though
@@ -834,7 +841,7 @@ def run_wsgi(conf_path, app_section, *args, **kwargs):
return 1
# patch event before loadapp
- utils.eventlet_monkey_patch()
+ utils.monkey_patch()
# Ensure the configuration and application can be loaded before proceeding.
global_conf = {'log_name': log_name}
diff --git a/swift/container/backend.py b/swift/container/backend.py
index c1842d9bd..e6648038f 100644
--- a/swift/container/backend.py
+++ b/swift/container/backend.py
@@ -32,7 +32,7 @@ from swift.common.utils import Timestamp, encode_timestamps, \
decode_timestamps, extract_swift_bytes, storage_directory, hash_path, \
ShardRange, renamer, MD5_OF_EMPTY_STRING, mkdirs, get_db_files, \
parse_db_filename, make_db_file_path, split_path, RESERVED_BYTE, \
- filter_shard_ranges, ShardRangeList
+ filter_namespaces, ShardRangeList
from swift.common.db import DatabaseBroker, utf8encode, BROKER_TIMEOUT, \
zero_like, DatabaseAlreadyExists, SQLITE_ARG_LIMIT
@@ -1866,8 +1866,8 @@ class ContainerBroker(DatabaseBroker):
if includes:
return shard_ranges[:1] if shard_ranges else []
- shard_ranges = filter_shard_ranges(shard_ranges, includes,
- marker, end_marker)
+ shard_ranges = filter_namespaces(
+ shard_ranges, includes, marker, end_marker)
if fill_gaps:
own_shard_range = self.get_own_shard_range()
diff --git a/swift/container/sharder.py b/swift/container/sharder.py
index 0cba5cf9f..adff1df97 100644
--- a/swift/container/sharder.py
+++ b/swift/container/sharder.py
@@ -896,7 +896,6 @@ class ContainerSharder(ContainerSharderConf, ContainerReplicator):
internal_client_conf_path,
'Swift Container Sharder',
request_tries,
- allow_modify_pipeline=False,
use_replication_network=True,
global_conf={'log_name': '%s-ic' % conf.get(
'log_name', self.log_route)})
@@ -2332,9 +2331,13 @@ class ContainerSharder(ContainerSharderConf, ContainerReplicator):
return
# now look and deal with misplaced objects.
+ move_start_ts = time.time()
self._move_misplaced_objects(broker)
+ self.logger.timing_since(
+ 'sharder.sharding.move_misplaced', move_start_ts)
is_leader = node['index'] == 0 and self.auto_shard and not is_deleted
+
if state in (UNSHARDED, COLLAPSED):
if is_leader and broker.is_root_container():
# bootstrap sharding of root container
@@ -2349,11 +2352,14 @@ class ContainerSharder(ContainerSharderConf, ContainerReplicator):
# container has been given shard ranges rather than
# found them e.g. via replication or a shrink event,
# or manually triggered cleaving.
+ db_start_ts = time.time()
if broker.set_sharding_state():
state = SHARDING
self.info(broker, 'Kick off container cleaving, '
'own shard range in state %r',
own_shard_range.state_text)
+ self.logger.timing_since(
+ 'sharder.sharding.set_state', db_start_ts)
elif is_leader:
if broker.set_sharding_state():
state = SHARDING
@@ -2364,6 +2370,7 @@ class ContainerSharder(ContainerSharderConf, ContainerReplicator):
own_shard_range.state_text)
if state == SHARDING:
+ cleave_start_ts = time.time()
if is_leader:
num_found = self._find_shard_ranges(broker)
else:
@@ -2378,6 +2385,8 @@ class ContainerSharder(ContainerSharderConf, ContainerReplicator):
# always try to cleave any pending shard ranges
cleave_complete = self._cleave(broker)
+ self.logger.timing_since(
+ 'sharder.sharding.cleave', cleave_start_ts)
if cleave_complete:
if self._complete_sharding(broker):
@@ -2385,6 +2394,9 @@ class ContainerSharder(ContainerSharderConf, ContainerReplicator):
self._increment_stat('visited', 'completed', statsd=True)
self.info(broker, 'Completed cleaving, DB set to sharded '
'state')
+ self.logger.timing_since(
+ 'sharder.sharding.completed',
+ broker.get_own_shard_range().epoch)
else:
self.info(broker, 'Completed cleaving, DB remaining in '
'sharding state')
@@ -2392,6 +2404,7 @@ class ContainerSharder(ContainerSharderConf, ContainerReplicator):
if not broker.is_deleted():
if state == SHARDED and broker.is_root_container():
# look for shrink stats
+ send_start_ts = time.time()
self._identify_shrinking_candidate(broker, node)
if is_leader:
self._find_and_enable_shrinking_candidates(broker)
@@ -2401,6 +2414,8 @@ class ContainerSharder(ContainerSharderConf, ContainerReplicator):
self._send_shard_ranges(broker, shard_range.account,
shard_range.container,
[shard_range])
+ self.logger.timing_since(
+ 'sharder.sharding.send_sr', send_start_ts)
if not broker.is_root_container():
# Update the root container with this container's shard range
@@ -2409,7 +2424,10 @@ class ContainerSharder(ContainerSharderConf, ContainerReplicator):
# sharding a shard, this is when the root will see the new
# shards move to ACTIVE state and the sharded shard
# simultaneously become deleted.
+ update_start_ts = time.time()
self._update_root_container(broker)
+ self.logger.timing_since(
+ 'sharder.sharding.update_root', update_start_ts)
self.debug(broker,
'Finished processing, state %s%s',
diff --git a/swift/obj/diskfile.py b/swift/obj/diskfile.py
index efd897907..8b6b7d01b 100644
--- a/swift/obj/diskfile.py
+++ b/swift/obj/diskfile.py
@@ -167,24 +167,36 @@ def _encode_metadata(metadata):
return dict(((encode_str(k), encode_str(v)) for k, v in metadata.items()))
-def _decode_metadata(metadata):
+def _decode_metadata(metadata, metadata_written_by_py3):
"""
Given a metadata dict from disk, convert keys and values to native strings.
:param metadata: a dict
+ :param metadata_written_by_py3:
"""
if six.PY2:
- def to_str(item):
+ def to_str(item, is_name=False):
+ # For years, py2 and py3 handled non-ascii metadata differently;
+ # see https://bugs.launchpad.net/swift/+bug/2012531
+ if metadata_written_by_py3 and not is_name:
+ # do our best to read new-style data replicated from a py3 node
+ item = item.decode('utf8').encode('latin1')
if isinstance(item, six.text_type):
return item.encode('utf8')
return item
else:
- def to_str(item):
+ def to_str(item, is_name=False):
+ # For years, py2 and py3 handled non-ascii metadata differently;
+ # see https://bugs.launchpad.net/swift/+bug/2012531
+ if not metadata_written_by_py3 and isinstance(item, bytes) \
+ and not is_name:
+ # do our best to read old py2 data
+ item = item.decode('latin1')
if isinstance(item, six.binary_type):
return item.decode('utf8', 'surrogateescape')
return item
- return dict(((to_str(k), to_str(v)) for k, v in metadata.items()))
+ return {to_str(k): to_str(v, k == b'name') for k, v in metadata.items()}
def read_metadata(fd, add_missing_checksum=False):
@@ -238,6 +250,7 @@ def read_metadata(fd, add_missing_checksum=False):
"stored checksum='%s', computed='%s'" % (
fd, metadata_checksum, computed_checksum))
+ metadata_written_by_py3 = (b'_codecs\nencode' in metadata[:32])
# strings are utf-8 encoded when written, but have not always been
# (see https://bugs.launchpad.net/swift/+bug/1678018) so encode them again
# when read
@@ -245,7 +258,7 @@ def read_metadata(fd, add_missing_checksum=False):
metadata = pickle.loads(metadata)
else:
metadata = pickle.loads(metadata, encoding='bytes')
- return _decode_metadata(metadata)
+ return _decode_metadata(metadata, metadata_written_by_py3)
def write_metadata(fd, metadata, xattr_size=65536):
diff --git a/swift/obj/ssync_receiver.py b/swift/obj/ssync_receiver.py
index 345728a83..fb125fca2 100644
--- a/swift/obj/ssync_receiver.py
+++ b/swift/obj/ssync_receiver.py
@@ -45,8 +45,8 @@ def decode_missing(line):
parts = line.decode('ascii').split()
result['object_hash'] = urllib.parse.unquote(parts[0])
t_data = urllib.parse.unquote(parts[1])
- result['ts_data'] = Timestamp(t_data)
- result['ts_meta'] = result['ts_ctype'] = result['ts_data']
+ result['ts_data'] = ts_data = Timestamp(t_data)
+ result['ts_meta'] = result['ts_ctype'] = ts_data
result['durable'] = True # default to True in case this key isn't sent
if len(parts) > 2:
# allow for a comma separated list of k:v pairs to future-proof
@@ -54,9 +54,17 @@ def decode_missing(line):
for item in [subpart for subpart in subparts if ':' in subpart]:
k, v = item.split(':')
if k == 'm':
- result['ts_meta'] = Timestamp(t_data, delta=int(v, 16))
+ v, _, o = v.partition('__')
+ # ignore ts_data offset when calculating ts_meta
+ result['ts_meta'] = Timestamp(ts_data.normal,
+ delta=int(v, 16),
+ offset=int(o or '0', 16))
elif k == 't':
- result['ts_ctype'] = Timestamp(t_data, delta=int(v, 16))
+ v, _, o = v.partition('__')
+ # ignore ts_data offset when calculating ts_ctype
+ result['ts_ctype'] = Timestamp(Timestamp(ts_data).normal,
+ delta=int(v, 16),
+ offset=int(o or '0', 16))
elif k == 'durable':
result['durable'] = utils.config_true_value(v)
return result
diff --git a/swift/obj/ssync_sender.py b/swift/obj/ssync_sender.py
index 57f02e0e2..b132b8b3d 100644
--- a/swift/obj/ssync_sender.py
+++ b/swift/obj/ssync_sender.py
@@ -42,9 +42,13 @@ def encode_missing(object_hash, ts_data, ts_meta=None, ts_ctype=None,
if ts_meta and ts_meta != ts_data:
delta = ts_meta.raw - ts_data.raw
extra_parts.append('m:%x' % delta)
+ if ts_meta.offset:
+ extra_parts[-1] += '__%x' % ts_meta.offset
if ts_ctype and ts_ctype != ts_data:
delta = ts_ctype.raw - ts_data.raw
extra_parts.append('t:%x' % delta)
+ if ts_ctype.offset:
+ extra_parts[-1] += '__%x' % ts_ctype.offset
if 'durable' in kwargs and kwargs['durable'] is False:
# only send durable in the less common case that it is False
extra_parts.append('durable:%s' % kwargs['durable'])
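For the sender side, here is a hedged round-trip sketch of what goes on the wire (assuming Timestamp semantics from swift.common.utils; values chosen for illustration):

    from swift.common.utils import Timestamp

    ts_data = Timestamp(1402436408.91203)
    ts_meta = Timestamp(ts_data.normal, delta=1000, offset=2)
    part = 'm:%x' % (ts_meta.raw - ts_data.raw)
    if ts_meta.offset:
        part += '__%x' % ts_meta.offset
    assert part == 'm:3e8__2'      # decode_missing() reverses this exactly

Old receivers cannot parse the `__<offset>` suffix, which is presumably why it is only appended when an offset is actually present.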
diff --git a/swift/proxy/controllers/base.py b/swift/proxy/controllers/base.py
index 758aed72b..93bb056d2 100644
--- a/swift/proxy/controllers/base.py
+++ b/swift/proxy/controllers/base.py
@@ -615,10 +615,7 @@ def get_cache_key(account, container=None, obj=None, shard=None):
raise ValueError('Shard cache key requires account and container')
if obj:
raise ValueError('Shard cache key cannot have obj')
- if shard == 'updating':
- cache_key = 'shard-%s-v2/%s/%s' % (shard, account, container)
- else:
- cache_key = 'shard-%s/%s/%s' % (shard, account, container)
+ cache_key = 'shard-%s-v2/%s/%s' % (shard, account, container)
elif obj:
if not (account and container):
raise ValueError('Object cache key requires account and container')
@@ -1848,16 +1845,22 @@ class Controller(object):
:param transfer: If True, transfer headers from original client request
:returns: a dictionary of headers
"""
- # Use the additional headers first so they don't overwrite the headers
- # we require.
- headers = HeaderKeyDict(additional) if additional else HeaderKeyDict()
- if transfer:
- self.transfer_headers(orig_req.headers, headers)
- headers.setdefault('x-timestamp', Timestamp.now().internal)
+ headers = HeaderKeyDict()
if orig_req:
+ headers.update((k.lower(), v)
+ for k, v in orig_req.headers.items()
+ if k.lower().startswith('x-backend-'))
referer = orig_req.as_referer()
else:
referer = ''
+ # additional headers can override x-backend-* headers from orig_req
+ if additional:
+ headers.update(additional)
+ if orig_req and transfer:
+ # transfer headers from orig_req can override additional headers
+ self.transfer_headers(orig_req.headers, headers)
+ headers.setdefault('x-timestamp', Timestamp.now().internal)
+ # orig_req and additional headers cannot override the following...
headers['x-trans-id'] = self.trans_id
headers['connection'] = 'close'
headers['user-agent'] = self.app.backend_user_agent
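The reordering above gives the backend headers a well-defined precedence. A toy demo of the layering (a plain dict standing in for HeaderKeyDict; names illustrative):

    headers = {}
    headers.update({'x-backend-foo': 'client'})   # 1. x-backend-* copied from orig_req
    headers.update({'x-backend-foo': 'caller'})   # 2. 'additional' overrides those
    headers['x-backend-foo'] = 'transferred'      # 3. transfer_headers() overrides both
    headers.setdefault('x-timestamp', '12345.0')  # 4. default only if still unset
    headers['x-trans-id'] = 'tx-abc'              # 5. always forced last
    assert headers['x-backend-foo'] == 'transferred'

The x-backend-* copying from the client request is new and sits at the lowest precedence, so 'additional' and transferred headers keep their previous override behaviour.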
diff --git a/swift/proxy/controllers/container.py b/swift/proxy/controllers/container.py
index 4102d652a..fe8480ba3 100644
--- a/swift/proxy/controllers/container.py
+++ b/swift/proxy/controllers/container.py
@@ -21,7 +21,8 @@ from six.moves.urllib.parse import unquote
from swift.common.memcached import MemcacheConnectionError
from swift.common.utils import public, private, csv_append, Timestamp, \
- config_true_value, ShardRange, cache_from_env, filter_shard_ranges
+ config_true_value, ShardRange, cache_from_env, filter_namespaces, \
+ NamespaceBoundList
from swift.common.constraints import check_metadata, CONTAINER_LISTING_LIMIT
from swift.common.http import HTTP_ACCEPTED, is_success
from swift.common.request_helpers import get_sys_meta_prefix, get_param, \
@@ -109,25 +110,42 @@ class ContainerController(Controller):
req.swift_entity_path, concurrency)
return resp
- def _make_shard_ranges_response_body(self, req, shard_range_dicts):
- # filter shard ranges according to request constraints and return a
- # serialised list of shard ranges
+ def _make_namespaces_response_body(self, req, ns_bound_list):
+ """
+ Filter namespaces according to request constraints and return a
+ serialised list of namespaces.
+
+ :param req: the request object.
+ :param ns_bound_list: an instance of
+ :class:`~swift.common.utils.NamespaceBoundList`.
+ :return: a serialised list of namespaces.
+ """
marker = get_param(req, 'marker', '')
end_marker = get_param(req, 'end_marker')
includes = get_param(req, 'includes')
reverse = config_true_value(get_param(req, 'reverse'))
if reverse:
marker, end_marker = end_marker, marker
- shard_ranges = [
- ShardRange.from_dict(shard_range)
- for shard_range in shard_range_dicts]
- shard_ranges = filter_shard_ranges(shard_ranges, includes, marker,
- end_marker)
+ namespaces = ns_bound_list.get_namespaces()
+ namespaces = filter_namespaces(
+ namespaces, includes, marker, end_marker)
if reverse:
- shard_ranges.reverse()
- return json.dumps([dict(sr) for sr in shard_ranges]).encode('ascii')
+ namespaces.reverse()
+ return json.dumps([dict(ns) for ns in namespaces]).encode('ascii')
def _get_shard_ranges_from_cache(self, req, headers):
+ """
+ Try to fetch shard namespace data from cache and, if successful, return
+ a response. Also return the cache state.
+
+ The response body will be a list of dicts each of which describes
+ a Namespace (i.e. includes the keys ``lower``, ``upper`` and ``name``).
+
+ :param req: an instance of ``swob.Request``.
+ :param headers: Headers to be sent with request.
+ :return: a tuple comprising (an instance of ``swob.Response`` or
+ ``None`` if no namespaces were found in cache, the cache state).
+ """
infocache = req.environ.setdefault('swift.infocache', {})
memcache = cache_from_env(req.environ, True)
cache_key = get_cache_key(self.account_name,
@@ -135,11 +153,10 @@ class ContainerController(Controller):
shard='listing')
resp_body = None
- cached_range_dicts = infocache.get(cache_key)
- if cached_range_dicts:
+ ns_bound_list = infocache.get(cache_key)
+ if ns_bound_list:
cache_state = 'infocache_hit'
- resp_body = self._make_shard_ranges_response_body(
- req, cached_range_dicts)
+ resp_body = self._make_namespaces_response_body(req, ns_bound_list)
elif memcache:
skip_chance = \
self.app.container_listing_shard_ranges_skip_cache
@@ -147,12 +164,20 @@ class ContainerController(Controller):
cache_state = 'skip'
else:
try:
- cached_range_dicts = memcache.get(
+ cached_namespaces = memcache.get(
cache_key, raise_on_error=True)
- if cached_range_dicts:
+ if cached_namespaces:
cache_state = 'hit'
- resp_body = self._make_shard_ranges_response_body(
- req, cached_range_dicts)
+ if six.PY2:
+ # json.loads() in memcache.get will convert json
+ # 'string' to 'unicode' with python2, here we cast
+ # 'unicode' back to 'str'
+ cached_namespaces = [
+ [lower.encode('utf-8'), name.encode('utf-8')]
+ for lower, name in cached_namespaces]
+ ns_bound_list = NamespaceBoundList(cached_namespaces)
+ resp_body = self._make_namespaces_response_body(
+ req, ns_bound_list)
else:
cache_state = 'miss'
except MemcacheConnectionError:
@@ -162,9 +187,9 @@ class ContainerController(Controller):
resp = None
else:
# shard ranges can be returned from cache
- infocache[cache_key] = tuple(cached_range_dicts)
+ infocache[cache_key] = ns_bound_list
self.logger.debug('Found %d shards in cache for %s',
- len(cached_range_dicts), req.path_qs)
+ len(ns_bound_list.bounds), req.path_qs)
headers.update({'x-backend-record-type': 'shard',
'x-backend-cached-results': 'true'})
# mimic GetOrHeadHandler.get_working_response...
@@ -180,36 +205,62 @@ class ContainerController(Controller):
return resp, cache_state
def _store_shard_ranges_in_cache(self, req, resp):
- # parse shard ranges returned from backend, store them in infocache and
- # memcache, and return a list of dicts
- cache_key = get_cache_key(self.account_name, self.container_name,
- shard='listing')
+ """
+ Parse shard ranges returned from backend, store them in both infocache
+ and memcache.
+
+ :param req: the request object.
+ :param resp: the response object for the shard range listing.
+ :return: an instance of
+ :class:`~swift.common.utils.NamespaceBoundList`.
+ """
+ # Note: Any gaps in the response's shard ranges will be 'lost' as a
+ # result of compacting the list of shard ranges to a
+ # NamespaceBoundList. That is ok. When the cached NamespaceBoundList is
+ # transformed back to shard range Namespaces to perform a listing, the
+ # Namespace before each gap will have expanded to include the gap,
+ # which means that the backend GET to that shard will have an
+ # end_marker beyond that shard's upper bound, and equal to the next
+ # available shard's lower. At worst, some misplaced objects, in the gap
+ # above the shard's upper, may be included in the shard's response.
data = self._parse_listing_response(req, resp)
backend_shard_ranges = self._parse_shard_ranges(req, data, resp)
if backend_shard_ranges is None:
return None
- cached_range_dicts = [dict(sr) for sr in backend_shard_ranges]
+ ns_bound_list = NamespaceBoundList.parse(backend_shard_ranges)
if resp.headers.get('x-backend-sharding-state') == 'sharded':
# cache in infocache even if no shard ranges returned; this
# is unexpected but use that result for this request
infocache = req.environ.setdefault('swift.infocache', {})
- infocache[cache_key] = tuple(cached_range_dicts)
+ cache_key = get_cache_key(
+ self.account_name, self.container_name, shard='listing')
+ infocache[cache_key] = ns_bound_list
memcache = cache_from_env(req.environ, True)
- if memcache and cached_range_dicts:
+ if memcache and ns_bound_list:
# cache in memcache only if shard ranges as expected
self.logger.debug('Caching %d shards for %s',
- len(cached_range_dicts), req.path_qs)
- memcache.set(cache_key, cached_range_dicts,
+ len(ns_bound_list.bounds), req.path_qs)
+ memcache.set(cache_key, ns_bound_list.bounds,
time=self.app.recheck_listing_shard_ranges)
- return cached_range_dicts
+ return ns_bound_list
def _get_shard_ranges_from_backend(self, req):
- # Make a backend request for shard ranges. The response is cached and
- # then returned as a list of dicts.
+ """
+ Make a backend request for shard ranges and return a response.
+
+ The response body will be a list of dicts each of which describes
+ a Namespace (i.e. includes the keys ``lower``, ``upper`` and ``name``).
+ If the response headers indicate that the response body contains a
+ complete list of shard ranges for a sharded container then the response
+ body will be transformed to a ``NamespaceBoundList`` and cached.
+
+ :param req: an instance of ``swob.Request``.
+ :return: an instance of ``swob.Response``.
+ """
# Note: We instruct the backend server to ignore name constraints in
# request params if returning shard ranges so that the response can
- # potentially be cached. Only do this if the container state is
+ # potentially be cached, but we only cache it if the container state is
# 'sharded'. We don't attempt to cache shard ranges for a 'sharding'
# container as they may include the container itself as a 'gap filler'
# for shard ranges that have not yet cleaved; listings from 'gap
@@ -232,10 +283,10 @@ class ContainerController(Controller):
if (resp_record_type == 'shard' and
sharding_state == 'sharded' and
complete_listing):
- cached_range_dicts = self._store_shard_ranges_in_cache(req, resp)
- if cached_range_dicts:
- resp.body = self._make_shard_ranges_response_body(
- req, cached_range_dicts)
+ ns_bound_list = self._store_shard_ranges_in_cache(req, resp)
+ if ns_bound_list:
+ resp.body = self._make_namespaces_response_body(
+ req, ns_bound_list)
return resp
def _record_shard_listing_cache_metrics(
@@ -334,7 +385,6 @@ class ContainerController(Controller):
params['states'] = 'listing'
req.params = params
- memcache = cache_from_env(req.environ, True)
if (req.method == 'GET'
and get_param(req, 'states') == 'listing'
and record_type != 'object'):
@@ -346,6 +396,7 @@ class ContainerController(Controller):
info = None
may_get_listing_shards = False
+ memcache = cache_from_env(req.environ, True)
sr_cache_state = None
if (may_get_listing_shards and
self.app.recheck_listing_shard_ranges > 0
@@ -424,8 +475,15 @@ class ContainerController(Controller):
# 'X-Backend-Storage-Policy-Index'.
req.headers[policy_key] = resp.headers[policy_key]
shard_listing_history.append((self.account_name, self.container_name))
- shard_ranges = [ShardRange.from_dict(data)
- for data in json.loads(resp.body)]
+ # Note: when the response body has been synthesised from cached data,
+ # each item in the list only has 'name', 'lower' and 'upper' keys. We
+ # therefore cannot use ShardRange.from_dict(), and the ShardRange
+ # instances constructed here will only have 'name', 'lower' and 'upper'
+ # attributes set.
+ # Ideally we would construct Namespace objects here, but later we use
+ # the ShardRange account and container properties to access parsed
+ # parts of the name.
+ shard_ranges = [ShardRange(**data) for data in json.loads(resp.body)]
self.logger.debug('GET listing from %s shards for: %s',
len(shard_ranges), req.path_qs)
if not shard_ranges:
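The gap-absorbing behaviour described in the note above falls out of how NamespaceBoundList compacts shard ranges to (lower, name) pairs. A hedged sketch (assuming NamespaceBoundList/ShardRange semantics from swift.common.utils):

    from swift.common.utils import NamespaceBoundList, ShardRange

    srs = [ShardRange('.shards_a/c-0', 0, lower='', upper='g'),
           ShardRange('.shards_a/c-1', 0, lower='m', upper='')]
    bounds = NamespaceBoundList.parse(srs)
    # bounds.bounds == [['', '.shards_a/c-0'], ['m', '.shards_a/c-1']]
    ns0, ns1 = bounds.get_namespaces()
    # the gap (g, m] is absorbed into the first reconstructed namespace:
    assert str(ns0.upper) == 'm' and str(ns1.lower) == 'm'

Only lowers and names are cached, so each upper is implied by the next entry's lower; that is also why listings synthesised from cache only carry 'name', 'lower' and 'upper' back out.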
diff --git a/swift/proxy/controllers/obj.py b/swift/proxy/controllers/obj.py
index b69631538..fc0f8a6d1 100644
--- a/swift/proxy/controllers/obj.py
+++ b/swift/proxy/controllers/obj.py
@@ -48,7 +48,7 @@ from swift.common.utils import (
normalize_delete_at_timestamp, public, get_expirer_container,
document_iters_to_http_response_body, parse_content_range,
quorum_size, reiterate, close_if_possible, safe_json_loads, md5,
- ShardRange, find_shard_range, cache_from_env, NamespaceBoundList)
+ ShardRange, find_namespace, cache_from_env, NamespaceBoundList)
from swift.common.bufferedhttp import http_connect
from swift.common.constraints import check_metadata, check_object_creation
from swift.common import constraints
@@ -388,7 +388,7 @@ class BaseObjectController(Controller):
memcache.set(
cache_key, cached_namespaces.bounds,
time=self.app.recheck_updating_shard_ranges)
- update_shard = find_shard_range(obj, shard_ranges or [])
+ update_shard = find_namespace(obj, shard_ranges or [])
record_cache_op_metrics(
self.logger, 'shard_updating', cache_state, response)
return update_shard
@@ -1518,7 +1518,7 @@ class ECAppIter(object):
except ChunkWriteTimeout:
# slow client disconnect
self.logger.exception(
- "ChunkWriteTimeout fetching fragments for %r",
+ "ChunkWriteTimeout feeding fragments for %r",
quote(self.path))
except: # noqa
self.logger.exception("Exception fetching fragments for %r",
@@ -2497,10 +2497,10 @@ class ECFragGetter(object):
self.backend_headers = backend_headers
self.header_provider = header_provider
self.req_query_string = req.query_string
- self.client_chunk_size = policy.fragment_size
+ self.fragment_size = policy.fragment_size
self.skip_bytes = 0
self.bytes_used_from_backend = 0
- self.source = None
+ self.source = self.node = None
self.logger_thread_locals = logger_thread_locals
self.logger = logger
@@ -2578,8 +2578,8 @@ class ECFragGetter(object):
def learn_size_from_content_range(self, start, end, length):
"""
- If client_chunk_size is set, makes sure we yield things starting on
- chunk boundaries based on the Content-Range header in the response.
+ Make sure we yield things starting on fragment boundaries based on the
+ Content-Range header in the response.
Sets our Range header's first byterange to the value learned from
the Content-Range header in the response; if we were given a
@@ -2593,8 +2593,7 @@ class ECFragGetter(object):
if length == 0:
return
- if self.client_chunk_size:
- self.skip_bytes = bytes_to_skip(self.client_chunk_size, start)
+ self.skip_bytes = bytes_to_skip(self.fragment_size, start)
if 'Range' in self.backend_headers:
try:
@@ -2620,170 +2619,155 @@ class ECFragGetter(object):
it = self._get_response_parts_iter(req)
return it
- def _get_response_parts_iter(self, req):
- try:
- client_chunk_size = self.client_chunk_size
- node_timeout = self.app.recoverable_node_timeout
-
- # This is safe; it sets up a generator but does not call next()
- # on it, so no IO is performed.
- parts_iter = [
- http_response_to_document_iters(
- self.source, read_chunk_size=self.app.object_chunk_size)]
+ def get_next_doc_part(self):
+ node_timeout = self.app.recoverable_node_timeout
- def get_next_doc_part():
- while True:
- # the loop here is to resume if trying to parse
- # multipart/byteranges response raises a ChunkReadTimeout
- # and resets the parts_iter
- try:
- with WatchdogTimeout(self.app.watchdog, node_timeout,
- ChunkReadTimeout):
- # If we don't have a multipart/byteranges response,
- # but just a 200 or a single-range 206, then this
- # performs no IO, and just returns source (or
- # raises StopIteration).
- # Otherwise, this call to next() performs IO when
- # we have a multipart/byteranges response; as it
- # will read the MIME boundary and part headers.
- start_byte, end_byte, length, headers, part = next(
- parts_iter[0])
- return (start_byte, end_byte, length, headers, part)
- except ChunkReadTimeout:
- new_source, new_node = self._dig_for_source_and_node()
- if not new_source:
- raise
- self.app.error_occurred(
- self.node, 'Trying to read next part of '
- 'EC multi-part GET (retrying)')
- # Close-out the connection as best as possible.
- if getattr(self.source, 'swift_conn', None):
- close_swift_conn(self.source)
- self.source = new_source
- self.node = new_node
- # This is safe; it sets up a generator but does
- # not call next() on it, so no IO is performed.
- parts_iter[0] = http_response_to_document_iters(
+ while True:
+ # the loop here is to resume if trying to parse
+ # multipart/byteranges response raises a ChunkReadTimeout
+ # and resets the source_parts_iter
+ try:
+ with WatchdogTimeout(self.app.watchdog, node_timeout,
+ ChunkReadTimeout):
+ # If we don't have a multipart/byteranges response,
+ # but just a 200 or a single-range 206, then this
+ # performs no IO, and just returns source (or
+ # raises StopIteration).
+ # Otherwise, this call to next() performs IO when
+ # we have a multipart/byteranges response; as it
+ # will read the MIME boundary and part headers.
+ start_byte, end_byte, length, headers, part = next(
+ self.source_parts_iter)
+ return (start_byte, end_byte, length, headers, part)
+ except ChunkReadTimeout:
+ new_source, new_node = self._dig_for_source_and_node()
+ if not new_source:
+ raise
+ self.app.error_occurred(
+ self.node, 'Trying to read next part of '
+ 'EC multi-part GET (retrying)')
+ # Close-out the connection as best as possible.
+ if getattr(self.source, 'swift_conn', None):
+ close_swift_conn(self.source)
+ self.source = new_source
+ self.node = new_node
+ # This is safe; it sets up a generator but does
+ # not call next() on it, so no IO is performed.
+ self.source_parts_iter = \
+ http_response_to_document_iters(
+ new_source,
+ read_chunk_size=self.app.object_chunk_size)
+
+ def iter_bytes_from_response_part(self, part_file, nbytes):
+ nchunks = 0
+ buf = b''
+ part_file = ByteCountEnforcer(part_file, nbytes)
+ while True:
+ try:
+ with WatchdogTimeout(self.app.watchdog,
+ self.app.recoverable_node_timeout,
+ ChunkReadTimeout):
+ chunk = part_file.read(self.app.object_chunk_size)
+ nchunks += 1
+ # NB: this append must be *inside* the context
+ # manager for test.unit.SlowBody to do its thing
+ buf += chunk
+ if nbytes is not None:
+ nbytes -= len(chunk)
+ except (ChunkReadTimeout, ShortReadError):
+ exc_type, exc_value, exc_traceback = sys.exc_info()
+ try:
+ self.fast_forward(self.bytes_used_from_backend)
+ except (HTTPException, ValueError):
+ self.logger.exception('Unable to fast forward')
+ six.reraise(exc_type, exc_value, exc_traceback)
+ except RangeAlreadyComplete:
+ break
+ buf = b''
+ old_node = self.node
+ new_source, new_node = self._dig_for_source_and_node()
+ if new_source:
+ self.app.error_occurred(
+ old_node, 'Trying to read EC fragment '
+ 'during GET (retrying)')
+ # Close-out the connection as best as possible.
+ if getattr(self.source, 'swift_conn', None):
+ close_swift_conn(self.source)
+ self.source = new_source
+ self.node = new_node
+ # This is safe; it just sets up a generator but
+ # does not call next() on it, so no IO is
+ # performed.
+ self.source_parts_iter = \
+ http_response_to_document_iters(
new_source,
read_chunk_size=self.app.object_chunk_size)
-
- def iter_bytes_from_response_part(part_file, nbytes):
- nchunks = 0
- buf = b''
- part_file = ByteCountEnforcer(part_file, nbytes)
- while True:
try:
- with WatchdogTimeout(self.app.watchdog, node_timeout,
- ChunkReadTimeout):
- chunk = part_file.read(self.app.object_chunk_size)
- nchunks += 1
- # NB: this append must be *inside* the context
- # manager for test.unit.SlowBody to do its thing
- buf += chunk
- if nbytes is not None:
- nbytes -= len(chunk)
- except (ChunkReadTimeout, ShortReadError):
- exc_type, exc_value, exc_traceback = sys.exc_info()
- try:
- self.fast_forward(self.bytes_used_from_backend)
- except (HTTPException, ValueError):
- self.logger.exception('Unable to fast forward')
- six.reraise(exc_type, exc_value, exc_traceback)
- except RangeAlreadyComplete:
- break
- buf = b''
- old_node = self.node
- new_source, new_node = self._dig_for_source_and_node()
- if new_source:
- self.app.error_occurred(
- old_node, 'Trying to read EC fragment '
- 'during GET (retrying)')
- # Close-out the connection as best as possible.
- if getattr(self.source, 'swift_conn', None):
- close_swift_conn(self.source)
- self.source = new_source
- self.node = new_node
- # This is safe; it just sets up a generator but
- # does not call next() on it, so no IO is
- # performed.
- parts_iter[0] = http_response_to_document_iters(
- new_source,
- read_chunk_size=self.app.object_chunk_size)
- try:
- _junk, _junk, _junk, _junk, part_file = \
- get_next_doc_part()
- except StopIteration:
- # it's not clear to me how to make
- # get_next_doc_part raise StopIteration for the
- # first doc part of a new request
- six.reraise(exc_type, exc_value, exc_traceback)
- part_file = ByteCountEnforcer(part_file, nbytes)
- else:
- six.reraise(exc_type, exc_value, exc_traceback)
+ _junk, _junk, _junk, _junk, part_file = \
+ self.get_next_doc_part()
+ except StopIteration:
+ # it's not clear to me how to make
+ # get_next_doc_part raise StopIteration for the
+ # first doc part of a new request
+ six.reraise(exc_type, exc_value, exc_traceback)
+ part_file = ByteCountEnforcer(part_file, nbytes)
+ else:
+ six.reraise(exc_type, exc_value, exc_traceback)
+ else:
+ if buf and self.skip_bytes:
+ if self.skip_bytes < len(buf):
+ buf = buf[self.skip_bytes:]
+ self.bytes_used_from_backend += self.skip_bytes
+ self.skip_bytes = 0
else:
- if buf and self.skip_bytes:
- if self.skip_bytes < len(buf):
- buf = buf[self.skip_bytes:]
- self.bytes_used_from_backend += self.skip_bytes
- self.skip_bytes = 0
- else:
- self.skip_bytes -= len(buf)
- self.bytes_used_from_backend += len(buf)
- buf = b''
-
- if not chunk:
- if buf:
- with WatchdogTimeout(self.app.watchdog,
- self.app.client_timeout,
- ChunkWriteTimeout):
- self.bytes_used_from_backend += len(buf)
- yield buf
- buf = b''
- break
-
- if client_chunk_size is not None:
- while len(buf) >= client_chunk_size:
- client_chunk = buf[:client_chunk_size]
- buf = buf[client_chunk_size:]
- with WatchdogTimeout(self.app.watchdog,
- self.app.client_timeout,
- ChunkWriteTimeout):
- self.bytes_used_from_backend += \
- len(client_chunk)
- yield client_chunk
- else:
- with WatchdogTimeout(self.app.watchdog,
- self.app.client_timeout,
- ChunkWriteTimeout):
- self.bytes_used_from_backend += len(buf)
- yield buf
- buf = b''
-
- # This is for fairness; if the network is outpacing
- # the CPU, we'll always be able to read and write
- # data without encountering an EWOULDBLOCK, and so
- # eventlet will not switch greenthreads on its own.
- # We do it manually so that clients don't starve.
- #
- # The number 5 here was chosen by making stuff up.
- # It's not every single chunk, but it's not too big
- # either, so it seemed like it would probably be an
- # okay choice.
- #
- # Note that we may trampoline to other greenthreads
- # more often than once every 5 chunks, depending on
- # how blocking our network IO is; the explicit sleep
- # here simply provides a lower bound on the rate of
- # trampolining.
- if nchunks % 5 == 0:
- sleep()
+ self.skip_bytes -= len(buf)
+ self.bytes_used_from_backend += len(buf)
+ buf = b''
+
+ while buf and (len(buf) >= self.fragment_size or not chunk):
+ client_chunk = buf[:self.fragment_size]
+ buf = buf[self.fragment_size:]
+ with WatchdogTimeout(self.app.watchdog,
+ self.app.client_timeout,
+ ChunkWriteTimeout):
+ self.bytes_used_from_backend += len(client_chunk)
+ yield client_chunk
+
+ if not chunk:
+ break
+
+ # This is for fairness; if the network is outpacing
+ # the CPU, we'll always be able to read and write
+ # data without encountering an EWOULDBLOCK, and so
+ # eventlet will not switch greenthreads on its own.
+ # We do it manually so that clients don't starve.
+ #
+ # The number 5 here was chosen by making stuff up.
+ # It's not every single chunk, but it's not too big
+ # either, so it seemed like it would probably be an
+ # okay choice.
+ #
+ # Note that we may trampoline to other greenthreads
+ # more often than once every 5 chunks, depending on
+ # how blocking our network IO is; the explicit sleep
+ # here simply provides a lower bound on the rate of
+ # trampolining.
+ if nchunks % 5 == 0:
+ sleep()
+
+ def _get_response_parts_iter(self, req):
+ try:
+ # This is safe; it sets up a generator but does not call next()
+ # on it, so no IO is performed.
+ self.source_parts_iter = http_response_to_document_iters(
+ self.source, read_chunk_size=self.app.object_chunk_size)
part_iter = None
try:
while True:
try:
start_byte, end_byte, length, headers, part = \
- get_next_doc_part()
+ self.get_next_doc_part()
except StopIteration:
# it seems this is the only way out of the loop; not
# sure why the req.environ update is always needed
@@ -2800,7 +2784,8 @@ class ECFragGetter(object):
if (end_byte is not None
and start_byte is not None)
else None)
- part_iter = iter_bytes_from_response_part(part, byte_count)
+ part_iter = self.iter_bytes_from_response_part(
+ part, byte_count)
yield {'start_byte': start_byte, 'end_byte': end_byte,
'entity_length': length, 'headers': headers,
'part_iter': part_iter}
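The refactored iter_bytes_from_response_part() replaces the old two-branch chunking with a single loop over fragment_size. Its flushing rule, in isolation (plain Python; sizes chosen for illustration):

    buf, fragment_size, chunk = b'x' * 10, 4, b''   # chunk == b'' models EOF
    out = []
    while buf and (len(buf) >= fragment_size or not chunk):
        out.append(buf[:fragment_size])
        buf = buf[fragment_size:]
    assert out == [b'xxxx', b'xxxx', b'xx']   # full fragments, then the tail at EOF

Mid-stream (chunk truthy) only whole fragments are yielded; a partial tail is held back until more data or EOF arrives, which is what keeps clients receiving on fragment boundaries.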
diff --git a/test/probe/test_object_versioning.py b/test/probe/test_object_versioning.py
index 147cf84f4..09a209f54 100644
--- a/test/probe/test_object_versioning.py
+++ b/test/probe/test_object_versioning.py
@@ -15,12 +15,15 @@
# limitations under the License.
from unittest import main
+import random
from swiftclient import client
+from swift.common import direct_client
from swift.common.request_helpers import get_reserved_name
+from swift.obj import reconstructor
-from test.probe.common import ReplProbeTest
+from test.probe.common import ReplProbeTest, ECProbeTest
class TestObjectVersioning(ReplProbeTest):
@@ -229,5 +232,99 @@ class TestObjectVersioning(ReplProbeTest):
self.assertEqual(data, b'new version')
+class TestECObjectVersioning(ECProbeTest):
+
+ def setUp(self):
+ super(TestECObjectVersioning, self).setUp()
+ self.part, self.nodes = self.object_ring.get_nodes(
+ self.account, self.container_name, self.object_name)
+
+ def test_versioning_with_metadata_replication(self):
+ # Enable versioning
+ client.put_container(self.url, self.token, self.container_name,
+ headers={
+ 'X-Storage-Policy': self.policy.name,
+ 'X-Versions-Enabled': 'True',
+ })
+ # create version with metadata in a handoff location
+ failed_primary = random.choice(self.nodes)
+ failed_primary_device_path = self.device_dir(failed_primary)
+ self.kill_drive(failed_primary_device_path)
+ headers = {'x-object-meta-foo': 'meta-foo'}
+ client.put_object(self.url, self.token, self.container_name,
+ self.object_name, contents='some data',
+ headers=headers)
+ headers_post = {'x-object-meta-bar': 'meta-bar'}
+ client.post_object(self.url, self.token, self.container_name,
+ self.object_name, headers=headers_post)
+ # find the handoff
+ primary_ids = [n['id'] for n in self.nodes]
+ for handoff in self.object_ring.devs:
+ if handoff['id'] in primary_ids:
+ continue
+ try:
+ headers, etag = self.direct_get(handoff, self.part)
+ except direct_client.DirectClientException as err:
+ if err.http_status != 404:
+ raise
+ else:
+ break
+ else:
+ self.fail('unable to find object on handoffs')
+ # we want to repair the fault, but avoid doing the handoff revert
+ self.revive_drive(failed_primary_device_path)
+ handoff_config = self.config_number(handoff)
+ failed_config = self.config_number(failed_primary)
+ partner_nodes = reconstructor._get_partners(
+ failed_primary['index'], self.nodes)
+ random.shuffle(partner_nodes)
+ for partner in partner_nodes:
+ fix_config = self.config_number(partner)
+ if fix_config not in (handoff_config, failed_config):
+ break
+ else:
+ self.fail('unable to find fix_config in %r excluding %r & %r' % (
+ [(d['device'], self.config_number(d)) for d in partner_nodes],
+ handoff_config, failed_config))
+
+ self.reconstructor.once(number=fix_config)
+ # validate object in all locations
+ missing = []
+ etags = set()
+ metadata = []
+ for node in self.nodes:
+ try:
+ headers, etag = self.direct_get(node, self.part)
+ except direct_client.DirectClientException as err:
+ if err.http_status != 404:
+ raise
+ missing.append(node)
+ continue
+ etags.add(headers['X-Object-Sysmeta-Ec-Etag'])
+ metadata.append(headers['X-Object-Meta-Bar'])
+ if missing:
+ self.fail('Ran reconstructor config #%s to repair %r but '
+ 'found 404 on primary: %r' % (
+ fix_config, failed_primary['device'],
+ [d['device'] for d in missing]))
+ self.assertEqual(1, len(etags))
+ self.assertEqual(['meta-bar'] * len(self.nodes), metadata)
+ # process revert
+ self.reconstructor.once(number=handoff_config)
+ # validate object (still?) in primary locations
+ etags = set()
+ metadata = []
+ for node in self.nodes:
+ headers, etag = self.direct_get(node, self.part)
+ etags.add(headers['X-Object-Sysmeta-Ec-Etag'])
+ metadata.append(headers['X-Object-Meta-Bar'])
+ self.assertEqual(1, len(etags))
+ self.assertEqual(['meta-bar'] * len(self.nodes), metadata)
+ # and removed from the handoff
+ with self.assertRaises(direct_client.DirectClientException) as ctx:
+ headers, etag = self.direct_get(handoff, self.part)
+ self.assertEqual(ctx.exception.http_status, 404)
+
+
if __name__ == '__main__':
main()
diff --git a/test/unit/__init__.py b/test/unit/__init__.py
index 6f731b70a..0d0206f08 100644
--- a/test/unit/__init__.py
+++ b/test/unit/__init__.py
@@ -512,6 +512,17 @@ def readuntil2crlfs(fd):
return rv
+def readlength(fd, size, timeout=1.0):
+ buf = b''
+ with eventlet.Timeout(timeout):
+ while len(buf) < size:
+ chunk = fd.read(min(64, size - len(buf)))
+ buf += chunk
+ if len(buf) >= size:
+ break
+ return buf
+
+
def connect_tcp(hostport):
rv = socket.socket()
rv.connect(hostport)
@@ -1408,3 +1419,36 @@ def generate_db_path(tempdir, server_type):
return os.path.join(
tempdir, '%ss' % server_type, 'part', 'suffix', 'hash',
'%s-%s.db' % (server_type, uuid4()))
+
+
+class ConfigAssertMixin(object):
+ """
+ Use this with a TestCase to get py2/py3-compatible assertions for duplicate config options
+ """
+ def assertDuplicateOption(self, app_config, option_name, option_value):
+ """
+ PY3 added a DuplicateOptionError, PY2 didn't seem to care
+ """
+ if six.PY3:
+ self.assertDuplicateOptionError(app_config, option_name)
+ else:
+ self.assertDuplicateOptionOK(app_config, option_name, option_value)
+
+ def assertDuplicateOptionError(self, app_config, option_name):
+ with self.assertRaises(
+ utils.configparser.DuplicateOptionError) as ctx:
+ app_config()
+ msg = str(ctx.exception)
+ self.assertIn(option_name, msg)
+ self.assertIn('already exists', msg)
+
+ def assertDuplicateOptionOK(self, app_config, option_name, option_value):
+ app = app_config()
+ if hasattr(app, 'conf'):
+ found_value = app.conf[option_name]
+ else:
+ if hasattr(app, '_pipeline_final_app'):
+ # special case for proxy app!
+ app = app._pipeline_final_app
+ found_value = getattr(app, option_name)
+ self.assertEqual(found_value, option_value)
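Typical use of the mixin, for reference (hypothetical load_my_app and conf path, shown only to illustrate the calling convention):

    import unittest

    class TestMyApp(unittest.TestCase, ConfigAssertMixin):
        def test_duplicate_conn_timeout(self):
            app_config = lambda: load_my_app('/etc/swift/my-app.conf')
            # py3: expects DuplicateOptionError; py2: expects the last value to win
            self.assertDuplicateOption(app_config, 'conn_timeout', '1.2')

The app_config callable is deferred so the assertion helper controls when (and how often) the config is actually parsed.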
diff --git a/test/unit/cli/test_info.py b/test/unit/cli/test_info.py
index 80813db87..85fb79819 100644
--- a/test/unit/cli/test_info.py
+++ b/test/unit/cli/test_info.py
@@ -1219,7 +1219,7 @@ class TestPrintObjFullMeta(TestCliInfoBase):
md = {'name': '/AUTH_admin/c/obj',
'Content-Type': 'application/octet-stream',
'ETag': 'd41d8cd98f00b204e9800998ecf8427e',
- 'Content-Length': 0}
+ 'Content-Length': '0'}
write_metadata(fp, md)
def test_print_obj(self):
@@ -1256,7 +1256,7 @@ class TestPrintObjFullMeta(TestCliInfoBase):
md = {'name': '/AUTH_admin/c/obj',
'Content-Type': 'application/octet-stream',
'ETag': 'd41d8cd98f00b204e9800998ecf8427e',
- 'Content-Length': 0}
+ 'Content-Length': '0'}
write_metadata(fp, md)
object_ring = ring.Ring(self.testdir, ring_name='object-2')
@@ -1297,7 +1297,7 @@ class TestPrintObjFullMeta(TestCliInfoBase):
md = {'name': '/AUTH_admin/c/obj',
'Content-Type': 'application/octet-stream',
'ETag': 'd41d8cd98f00b204e9800998ecf8427e',
- 'Content-Length': 0}
+ 'Content-Length': '0'}
write_metadata(fp, md)
object_ring = ring.Ring(self.testdir, ring_name='object-3')
@@ -1368,7 +1368,7 @@ class TestPrintObjFullMeta(TestCliInfoBase):
md = {'name': '/AUTH_admin/c/obj',
'Content-Type': 'application/octet-stream',
'ETag': 'badetag',
- 'Content-Length': 0}
+ 'Content-Length': '0'}
write_metadata(fp, md)
out = StringIO()
@@ -1703,5 +1703,5 @@ class TestPrintObjWeirdPath(TestPrintObjFullMeta):
md = {'name': '/AUTH_admin/c/obj',
'Content-Type': 'application/octet-stream',
'ETag': 'd41d8cd98f00b204e9800998ecf8427e',
- 'Content-Length': 0}
+ 'Content-Length': '0'}
write_metadata(fp, md)
diff --git a/test/unit/common/ring/test_ring.py b/test/unit/common/ring/test_ring.py
index 0f7e58e0c..55f45862e 100644
--- a/test/unit/common/ring/test_ring.py
+++ b/test/unit/common/ring/test_ring.py
@@ -68,8 +68,10 @@ class TestRingData(unittest.TestCase):
def test_attrs(self):
r2p2d = [[0, 1, 0, 1], [0, 1, 0, 1]]
- d = [{'id': 0, 'zone': 0, 'region': 0, 'ip': '10.1.1.0', 'port': 7000},
- {'id': 1, 'zone': 1, 'region': 1, 'ip': '10.1.1.1', 'port': 7000}]
+ d = [{'id': 0, 'zone': 0, 'region': 0, 'ip': '10.1.1.0', 'port': 7000,
+ 'replication_ip': '10.1.1.0', 'replication_port': 7000},
+ {'id': 1, 'zone': 1, 'region': 1, 'ip': '10.1.1.1', 'port': 7000,
+ 'replication_ip': '10.1.1.1', 'replication_port': 7000}]
s = 30
rd = ring.RingData(r2p2d, d, s)
self.assertEqual(rd._replica2part2dev_id, r2p2d)
@@ -88,10 +90,12 @@ class TestRingData(unittest.TestCase):
pickle.dump(rd, f, protocol=p)
meta_only = ring.RingData.load(ring_fname, metadata_only=True)
self.assertEqual([
- {'id': 0, 'zone': 0, 'region': 1, 'ip': '10.1.1.0',
- 'port': 7000},
- {'id': 1, 'zone': 1, 'region': 1, 'ip': '10.1.1.1',
- 'port': 7000},
+ {'id': 0, 'zone': 0, 'region': 1,
+ 'ip': '10.1.1.0', 'port': 7000,
+ 'replication_ip': '10.1.1.0', 'replication_port': 7000},
+ {'id': 1, 'zone': 1, 'region': 1,
+ 'ip': '10.1.1.1', 'port': 7000,
+ 'replication_ip': '10.1.1.1', 'replication_port': 7000},
], meta_only.devs)
# Pickled rings can't load only metadata, so you get it all
self.assert_ring_data_equal(rd, meta_only)
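These test updates reflect that device dicts are now normalised with replication fields on load, so metadata-only loads match full loads. The backfill amounts to something like this sketch (field names from the test data; the exact load-time hook is an assumption):

    dev = {'id': 0, 'zone': 0, 'region': 0, 'ip': '10.1.1.0', 'port': 7000}
    dev.setdefault('replication_ip', dev['ip'])
    dev.setdefault('replication_port', dev['port'])
    assert dev['replication_ip'] == '10.1.1.0'

Rings built without explicit replication endpoints therefore behave as if they duplicated the client-facing ones.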
diff --git a/test/unit/common/test_daemon.py b/test/unit/common/test_daemon.py
index d53d15f10..fc49fd4e4 100644
--- a/test/unit/common/test_daemon.py
+++ b/test/unit/common/test_daemon.py
@@ -14,18 +14,19 @@
# limitations under the License.
import os
-from six import StringIO
+import six
import time
import unittest
from getpass import getuser
import logging
-from test.unit import tmpfile
+from test.unit import tmpfile, with_tempdir, ConfigAssertMixin
import mock
import signal
from contextlib import contextmanager
import itertools
from collections import defaultdict
import errno
+from textwrap import dedent
from swift.common import daemon, utils
from test.debug_logger import debug_logger
@@ -106,7 +107,7 @@ class TestWorkerDaemon(unittest.TestCase):
self.assertTrue(d.is_healthy())
-class TestRunDaemon(unittest.TestCase):
+class TestRunDaemon(unittest.TestCase, ConfigAssertMixin):
def setUp(self):
for patcher in [
@@ -147,9 +148,12 @@ class TestRunDaemon(unittest.TestCase):
])
def test_run_daemon(self):
+ logging.logThreads = 1 # reset to default
sample_conf = "[my-daemon]\nuser = %s\n" % getuser()
with tmpfile(sample_conf) as conf_file, \
- mock.patch('swift.common.daemon.use_hub') as mock_use_hub:
+ mock.patch('swift.common.utils.eventlet') as _utils_evt, \
+ mock.patch('eventlet.hubs.use_hub') as mock_use_hub, \
+ mock.patch('eventlet.debug') as _debug_evt:
with mock.patch.dict('os.environ', {'TZ': ''}), \
mock.patch('time.tzset') as mock_tzset:
daemon.run_daemon(MyDaemon, conf_file)
@@ -159,6 +163,12 @@ class TestRunDaemon(unittest.TestCase):
self.assertEqual(mock_use_hub.mock_calls,
[mock.call(utils.get_hub())])
daemon.run_daemon(MyDaemon, conf_file, once=True)
+ _utils_evt.patcher.monkey_patch.assert_called_with(all=False,
+ socket=True,
+ select=True,
+ thread=True)
+ self.assertEqual(0, logging.logThreads) # fixed in monkey_patch
+ _debug_evt.hub_exceptions.assert_called_with(False)
self.assertEqual(MyDaemon.once_called, True)
# test raise in daemon code
@@ -167,7 +177,7 @@ class TestRunDaemon(unittest.TestCase):
conf_file, once=True)
# test user quit
- sio = StringIO()
+ sio = six.StringIO()
logger = logging.getLogger('server')
logger.addHandler(logging.StreamHandler(sio))
logger = utils.get_logger(None, 'server', log_route='server')
@@ -195,7 +205,9 @@ class TestRunDaemon(unittest.TestCase):
sample_conf = "[my-daemon]\nuser = %s\n" % getuser()
with tmpfile(sample_conf) as conf_file, \
- mock.patch('swift.common.daemon.use_hub'):
+ mock.patch('swift.common.utils.eventlet'), \
+ mock.patch('eventlet.hubs.use_hub'), \
+ mock.patch('eventlet.debug'):
daemon.run_daemon(MyDaemon, conf_file)
self.assertFalse(MyDaemon.once_called)
self.assertTrue(MyDaemon.forever_called)
@@ -207,6 +219,107 @@ class TestRunDaemon(unittest.TestCase):
os.environ['TZ'] = old_tz
time.tzset()
+ @with_tempdir
+ def test_run_daemon_from_conf_file(self, tempdir):
+ conf_path = os.path.join(tempdir, 'test-daemon.conf')
+ conf_body = """
+ [DEFAULT]
+ conn_timeout = 5
+ client_timeout = 1
+ [my-daemon]
+ CONN_timeout = 10
+ client_timeout = 2
+ """
+ contents = dedent(conf_body)
+ with open(conf_path, 'w') as f:
+ f.write(contents)
+ with mock.patch('swift.common.utils.eventlet'), \
+ mock.patch('eventlet.hubs.use_hub'), \
+ mock.patch('eventlet.debug'):
+ d = daemon.run_daemon(MyDaemon, conf_path)
+ # my-daemon section takes priority (!?)
+ self.assertEqual('2', d.conf['client_timeout'])
+ self.assertEqual('10', d.conf['CONN_timeout'])
+ self.assertEqual('5', d.conf['conn_timeout'])
+
+ @with_tempdir
+ def test_run_daemon_from_conf_file_with_duplicate_var(self, tempdir):
+ conf_path = os.path.join(tempdir, 'test-daemon.conf')
+ conf_body = """
+ [DEFAULT]
+ client_timeout = 3
+ [my-daemon]
+ CLIENT_TIMEOUT = 2
+ client_timeout = 1
+ conn_timeout = 1.1
+ conn_timeout = 1.2
+ """
+ contents = dedent(conf_body)
+ with open(conf_path, 'w') as f:
+ f.write(contents)
+ with mock.patch('swift.common.utils.eventlet'), \
+ mock.patch('eventlet.hubs.use_hub'), \
+ mock.patch('eventlet.debug'):
+ app_config = lambda: daemon.run_daemon(MyDaemon, conf_path)
+ # N.B. CLIENT_TIMEOUT/client_timeout are unique options
+ self.assertDuplicateOption(app_config, 'conn_timeout', '1.2')
+
+ @with_tempdir
+ def test_run_daemon_from_conf_dir(self, tempdir):
+ conf_files = {
+ 'default': """
+ [DEFAULT]
+ conn_timeout = 5
+ client_timeout = 1
+ """,
+ 'daemon': """
+ [DEFAULT]
+ CONN_timeout = 3
+ CLIENT_TIMEOUT = 4
+ [my-daemon]
+ CONN_timeout = 10
+ client_timeout = 2
+ """,
+ }
+ for filename, conf_body in conf_files.items():
+ path = os.path.join(tempdir, filename + '.conf')
+ with open(path, 'wt') as fd:
+ fd.write(dedent(conf_body))
+ with mock.patch('swift.common.utils.eventlet'), \
+ mock.patch('eventlet.hubs.use_hub'), \
+ mock.patch('eventlet.debug'):
+ d = daemon.run_daemon(MyDaemon, tempdir)
+ # my-daemon section takes priority (!?)
+ self.assertEqual('2', d.conf['client_timeout'])
+ self.assertEqual('10', d.conf['CONN_timeout'])
+ self.assertEqual('5', d.conf['conn_timeout'])
+
+ @with_tempdir
+ def test_run_daemon_from_conf_dir_with_duplicate_var(self, tempdir):
+ conf_files = {
+ 'default': """
+ [DEFAULT]
+ client_timeout = 3
+ """,
+ 'daemon': """
+ [my-daemon]
+ client_timeout = 2
+ CLIENT_TIMEOUT = 4
+ conn_timeout = 1.1
+ conn_timeout = 1.2
+ """,
+ }
+ for filename, conf_body in conf_files.items():
+ path = os.path.join(tempdir, filename + '.conf')
+ with open(path, 'wt') as fd:
+ fd.write(dedent(conf_body))
+ with mock.patch('swift.common.utils.eventlet'), \
+ mock.patch('eventlet.hubs.use_hub'), \
+ mock.patch('eventlet.debug'):
+ app_config = lambda: daemon.run_daemon(MyDaemon, tempdir)
+ # N.B. CLIENT_TIMEOUT/client_timeout are unique options
+ self.assertDuplicateOption(app_config, 'conn_timeout', '1.2')
+
@contextmanager
def mock_os(self, child_worker_cycles=3):
self.waitpid_calls = defaultdict(int)
@@ -228,6 +341,7 @@ class TestRunDaemon(unittest.TestCase):
yield
def test_fork_workers(self):
+ utils.logging_monkey_patch() # needed to log at notice
d = MyWorkerDaemon({'workers': 3})
strategy = daemon.DaemonStrategy(d, d.logger)
with self.mock_os():
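The duplicate-option behaviour these tests pin down comes straight from stdlib configparser, which is strict by default on py3. A minimal reproduction:

    import configparser

    parser = configparser.ConfigParser()   # strict=True is the py3 default
    try:
        parser.read_string("[my-daemon]\nconn_timeout = 1.1\nconn_timeout = 1.2\n")
    except configparser.DuplicateOptionError as err:
        assert 'already exists' in str(err)

py2's ConfigParser silently kept the last value, which is exactly the split that assertDuplicateOption() papers over.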
diff --git a/test/unit/common/test_internal_client.py b/test/unit/common/test_internal_client.py
index d26ef0e2d..65bbed9a9 100644
--- a/test/unit/common/test_internal_client.py
+++ b/test/unit/common/test_internal_client.py
@@ -30,6 +30,7 @@ from swift.common import exceptions, internal_client, request_helpers, swob, \
from swift.common.header_key_dict import HeaderKeyDict
from swift.common.storage_policy import StoragePolicy
from swift.common.middleware.proxy_logging import ProxyLoggingMiddleware
+from swift.common.middleware.gatekeeper import GatekeeperMiddleware
from test.debug_logger import debug_logger
from test.unit import with_tempdir, write_fake_ring, patch_policies
@@ -392,6 +393,21 @@ class TestInternalClient(unittest.TestCase):
conf_path, user_agent, request_tries=0)
mock_loadapp.assert_not_called()
+ # if we load it with the gatekeeper middleware then we also get a
+ # value error
+ gate_keeper_app = GatekeeperMiddleware(app, {})
+ gate_keeper_app._pipeline_final_app = app
+ gate_keeper_app._pipeline = [gate_keeper_app, app]
+ with mock.patch.object(
+ internal_client, 'loadapp', return_value=gate_keeper_app) \
+ as mock_loadapp, self.assertRaises(ValueError) as err:
+ internal_client.InternalClient(
+ conf_path, user_agent, request_tries)
+ self.assertEqual(
+ str(err.exception),
+ ('Gatekeeper middleware is not allowed in the InternalClient '
+ 'proxy pipeline'))
+
with mock.patch.object(
internal_client, 'loadapp', return_value=app) as mock_loadapp:
client = internal_client.InternalClient(
@@ -421,6 +437,72 @@ class TestInternalClient(unittest.TestCase):
self.assertEqual(request_tries, client.request_tries)
self.assertTrue(client.use_replication_network)
+ def test_init_allow_modify_pipeline(self):
+ conf_path = 'some_path'
+ app = FakeSwift()
+ user_agent = 'some_user_agent'
+
+ with mock.patch.object(
+ internal_client, 'loadapp', return_value=app) as mock_loadapp,\
+ self.assertRaises(ValueError) as cm:
+ internal_client.InternalClient(
+ conf_path, user_agent, 1, allow_modify_pipeline=True)
+ mock_loadapp.assert_not_called()
+ self.assertIn("'allow_modify_pipeline' is no longer supported",
+ str(cm.exception))
+
+ with mock.patch.object(
+ internal_client, 'loadapp', return_value=app) as mock_loadapp:
+ internal_client.InternalClient(
+ conf_path, user_agent, 1, allow_modify_pipeline=False)
+ mock_loadapp.assert_called_once_with(
+ conf_path, allow_modify_pipeline=False, global_conf=None)
+
+ def test_gatekeeper_not_loaded(self):
+ app = FakeSwift()
+ pipeline = [app]
+
+ class RandomMiddleware(object):
+ def __init__(self, app):
+ self.app = app
+ self._pipeline_final_app = app
+ self._pipeline = pipeline
+ self._pipeline.insert(0, self)
+
+ # if there is no Gatekeeper middleware then it's false
+ # just the final app
+ self.assertFalse(
+ internal_client.InternalClient.check_gatekeeper_not_loaded(app))
+
+ # now with a bunch of middlewares
+ app_no_gatekeeper = app
+ for i in range(5):
+ app_no_gatekeeper = RandomMiddleware(app_no_gatekeeper)
+ self.assertFalse(
+ internal_client.InternalClient.check_gatekeeper_not_loaded(
+ app_no_gatekeeper))
+
+ # But if we put the gatekeeper on the end, it will be found
+ app_with_gatekeeper = GatekeeperMiddleware(app_no_gatekeeper, {})
+ pipeline.insert(0, app_with_gatekeeper)
+ app_with_gatekeeper._pipeline = pipeline
+ with self.assertRaises(ValueError) as err:
+ internal_client.InternalClient.check_gatekeeper_not_loaded(
+ app_with_gatekeeper)
+ self.assertEqual(str(err.exception),
+ ('Gatekeeper middleware is not allowed in the '
+ 'InternalClient proxy pipeline'))
+
+ # even if we bury deep into the pipeline
+ for i in range(5):
+ app_with_gatekeeper = RandomMiddleware(app_with_gatekeeper)
+ with self.assertRaises(ValueError) as err:
+ internal_client.InternalClient.check_gatekeeper_not_loaded(
+ app_with_gatekeeper)
+ self.assertEqual(str(err.exception),
+ ('Gatekeeper middleware is not allowed in the '
+ 'InternalClient proxy pipeline'))
+
def test_make_request_sets_user_agent(self):
class FakeApp(FakeSwift):
def __init__(self, test):
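For orientation, the pipeline walk these tests exercise can be thought of as the following hedged sketch (the real check lives on InternalClient; exposing the chain via a _pipeline attribute, as the fakes above do, is the assumption):

    def check_gatekeeper_not_loaded(app):
        pipeline = getattr(app, '_pipeline', None) or []
        for middleware in pipeline:
            if isinstance(middleware, GatekeeperMiddleware):
                raise ValueError('Gatekeeper middleware is not allowed in '
                                 'the InternalClient proxy pipeline')

Burying the gatekeeper under more middleware does not hide it, because the fakes share one _pipeline list along the chain rather than rebuilding it per wrapper.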
diff --git a/test/unit/common/test_memcached.py b/test/unit/common/test_memcached.py
index ace7f3008..7de13ff10 100644
--- a/test/unit/common/test_memcached.py
+++ b/test/unit/common/test_memcached.py
@@ -34,7 +34,8 @@ from eventlet.pools import Pool
from eventlet.green import ssl
from swift.common import memcached
-from swift.common.memcached import MemcacheConnectionError
+from swift.common.memcached import MemcacheConnectionError, md5hash, \
+ get_key_prefix
from swift.common.utils import md5, human_readable
from mock import patch, MagicMock
from test.debug_logger import debug_logger
@@ -200,6 +201,21 @@ class TestMemcached(unittest.TestCase):
def setUp(self):
self.logger = debug_logger()
+ def test_get_key_prefix(self):
+ self.assertEqual(
+ get_key_prefix("shard-updating-v2/a/c"),
+ "shard-updating-v2/a")
+ self.assertEqual(
+ get_key_prefix("shard-listing-v2/accout/container3"),
+ "shard-listing-v2/accout")
+ self.assertEqual(
+ get_key_prefix("auth_reseller_name/token/X58E34EL2SDFLEY3"),
+ "auth_reseller_name/token")
+ self.assertEqual(
+ get_key_prefix("nvratelimit/v2/wf/2345392374"),
+ "nvratelimit/v2/wf")
+ self.assertEqual(get_key_prefix("some_key"), "some_key")
+
def test_logger_kwarg(self):
server_socket = '%s:%s' % ('[::1]', 11211)
client = memcached.MemcacheRing([server_socket])
@@ -219,7 +235,7 @@ class TestMemcached(unittest.TestCase):
self.assertIs(client._client_cache[server]._tls_context, context)
key = uuid4().hex.encode('ascii')
- list(client._get_conns(key))
+ list(client._get_conns('test', key))
context.wrap_socket.assert_called_once()
def test_get_conns(self):
@@ -241,7 +257,7 @@ class TestMemcached(unittest.TestCase):
one = two = True
while one or two: # Run until we match hosts one and two
key = uuid4().hex.encode('ascii')
- for conn in memcache_client._get_conns(key):
+ for conn in memcache_client._get_conns('test', key):
if 'b' not in getattr(conn[1], 'mode', ''):
self.assertIsInstance(conn[1], (
io.RawIOBase, io.BufferedIOBase))
@@ -268,7 +284,7 @@ class TestMemcached(unittest.TestCase):
memcache_client = memcached.MemcacheRing([server_socket],
logger=self.logger)
key = uuid4().hex.encode('ascii')
- for conn in memcache_client._get_conns(key):
+ for conn in memcache_client._get_conns('test', key):
peer_sockaddr = conn[2].getpeername()
peer_socket = '[%s]:%s' % (peer_sockaddr[0], peer_sockaddr[1])
self.assertEqual(peer_socket, server_socket)
@@ -290,7 +306,7 @@ class TestMemcached(unittest.TestCase):
memcache_client = memcached.MemcacheRing([server_host],
logger=self.logger)
key = uuid4().hex.encode('ascii')
- for conn in memcache_client._get_conns(key):
+ for conn in memcache_client._get_conns('test', key):
peer_sockaddr = conn[2].getpeername()
peer_socket = '[%s]:%s' % (peer_sockaddr[0], peer_sockaddr[1])
self.assertEqual(peer_socket, server_socket)
@@ -319,7 +335,7 @@ class TestMemcached(unittest.TestCase):
memcache_client = memcached.MemcacheRing([server_socket],
logger=self.logger)
key = uuid4().hex.encode('ascii')
- for conn in memcache_client._get_conns(key):
+ for conn in memcache_client._get_conns('test', key):
peer_sockaddr = conn[2].getpeername()
peer_socket = '%s:%s' % (peer_sockaddr[0],
peer_sockaddr[1])
@@ -345,7 +361,7 @@ class TestMemcached(unittest.TestCase):
memcache_client = memcached.MemcacheRing([server_socket],
logger=self.logger)
key = uuid4().hex.encode('ascii')
- for conn in memcache_client._get_conns(key):
+ for conn in memcache_client._get_conns('test', key):
peer_sockaddr = conn[2].getpeername()
peer_socket = '[%s]:%s' % (peer_sockaddr[0],
peer_sockaddr[1])
@@ -539,7 +555,7 @@ class TestMemcached(unittest.TestCase):
self.assertEqual(mock1.exploded, True)
self.assertEqual(self.logger.get_lines_for_level('error'), [
'Error talking to memcached: 1.2.3.5:11211: '
- '[Errno 32] Broken pipe',
+ 'with key_prefix some_key: [Errno 32] Broken pipe',
])
self.logger.clear()
@@ -548,7 +564,7 @@ class TestMemcached(unittest.TestCase):
self.assertEqual(mock1.exploded, True)
self.assertEqual(self.logger.get_lines_for_level('error'), [
'Error talking to memcached: 1.2.3.5:11211: '
- '[Errno 32] Broken pipe',
+ 'with key_prefix some_key: [Errno 32] Broken pipe',
])
# Check that we really did call create() twice
self.assertEqual(memcache_client._client_cache['1.2.3.5:11211'].mocks,
@@ -571,7 +587,7 @@ class TestMemcached(unittest.TestCase):
# to .4
self.assertEqual(self.logger.get_lines_for_level('error'), [
'Error talking to memcached: 1.2.3.5:11211: '
- '[Errno 32] Broken pipe',
+ 'with key_prefix some_key: [Errno 32] Broken pipe',
] * 11 + [
'Error limiting server 1.2.3.5:11211'
])
@@ -583,10 +599,10 @@ class TestMemcached(unittest.TestCase):
# as we keep going, eventually .4 gets error limited, too
self.assertEqual(self.logger.get_lines_for_level('error'), [
'Error talking to memcached: 1.2.3.4:11211: '
- '[Errno 32] Broken pipe',
+ 'with key_prefix some_key: [Errno 32] Broken pipe',
] * 10 + [
'Error talking to memcached: 1.2.3.4:11211: '
- '[Errno 32] Broken pipe',
+ 'with key_prefix some_key: [Errno 32] Broken pipe',
'Error limiting server 1.2.3.4:11211',
'All memcached servers error-limited',
])
@@ -619,7 +635,7 @@ class TestMemcached(unittest.TestCase):
# to .4
self.assertEqual(self.logger.get_lines_for_level('error'), [
'Error talking to memcached: 1.2.3.4:11211: '
- '[Errno 32] Broken pipe',
+ 'with key_prefix some_key: [Errno 32] Broken pipe',
] * 20)
def test_error_raising(self):
@@ -634,7 +650,7 @@ class TestMemcached(unittest.TestCase):
memcache_client.set('some_key', [1, 2, 3], raise_on_error=True)
self.assertEqual(self.logger.get_lines_for_level('error'), [
'Error talking to memcached: 1.2.3.4:11211: '
- '[Errno 32] Broken pipe',
+ 'with key_prefix some_key: [Errno 32] Broken pipe',
])
self.logger.clear()
@@ -642,7 +658,17 @@ class TestMemcached(unittest.TestCase):
memcache_client.get('some_key', raise_on_error=True)
self.assertEqual(self.logger.get_lines_for_level('error'), [
'Error talking to memcached: 1.2.3.4:11211: '
- '[Errno 32] Broken pipe',
+ 'with key_prefix some_key: [Errno 32] Broken pipe',
+ ])
+ self.logger.clear()
+
+ with self.assertRaises(MemcacheConnectionError):
+ memcache_client.set(
+ 'shard-updating-v2/acc/container', [1, 2, 3],
+ raise_on_error=True)
+ self.assertEqual(self.logger.get_lines_for_level('error'), [
+ 'Error talking to memcached: 1.2.3.4:11211: '
+ 'with key_prefix shard-updating-v2/acc: [Errno 32] Broken pipe',
])
self.logger.clear()
@@ -650,14 +676,21 @@ class TestMemcached(unittest.TestCase):
memcache_client.set('some_key', [1, 2, 3])
self.assertEqual(self.logger.get_lines_for_level('error'), [
'Error talking to memcached: 1.2.3.4:11211: '
- '[Errno 32] Broken pipe',
+ 'with key_prefix some_key: [Errno 32] Broken pipe',
])
self.logger.clear()
memcache_client.get('some_key')
self.assertEqual(self.logger.get_lines_for_level('error'), [
'Error talking to memcached: 1.2.3.4:11211: '
- '[Errno 32] Broken pipe',
+ 'with key_prefix some_key: [Errno 32] Broken pipe',
+ ])
+ self.logger.clear()
+
+ memcache_client.set('shard-updating-v2/acc/container', [1, 2, 3])
+ self.assertEqual(self.logger.get_lines_for_level('error'), [
+ 'Error talking to memcached: 1.2.3.4:11211: '
+ 'with key_prefix shard-updating-v2/acc: [Errno 32] Broken pipe',
])
def test_error_limiting_custom_config(self):
@@ -680,10 +713,10 @@ class TestMemcached(unittest.TestCase):
do_calls(5, 12)
self.assertEqual(self.logger.get_lines_for_level('error'), [
'Error talking to memcached: 1.2.3.5:11211: '
- '[Errno 32] Broken pipe',
+ 'with key_prefix some_key: [Errno 32] Broken pipe',
] * 10 + [
'Error talking to memcached: 1.2.3.5:11211: '
- '[Errno 32] Broken pipe',
+ 'with key_prefix some_key: [Errno 32] Broken pipe',
'Error limiting server 1.2.3.5:11211',
'All memcached servers error-limited',
])
@@ -693,7 +726,7 @@ class TestMemcached(unittest.TestCase):
do_calls(6, 20)
self.assertEqual(self.logger.get_lines_for_level('error'), [
'Error talking to memcached: 1.2.3.5:11211: '
- '[Errno 32] Broken pipe',
+ 'with key_prefix some_key: [Errno 32] Broken pipe',
] * 20)
# with error_limit_time of 66, one call per 6 secs, twelfth one
@@ -701,10 +734,10 @@ class TestMemcached(unittest.TestCase):
do_calls(6, 12, error_limit_time=66)
self.assertEqual(self.logger.get_lines_for_level('error'), [
'Error talking to memcached: 1.2.3.5:11211: '
- '[Errno 32] Broken pipe',
+ 'with key_prefix some_key: [Errno 32] Broken pipe',
] * 10 + [
'Error talking to memcached: 1.2.3.5:11211: '
- '[Errno 32] Broken pipe',
+ 'with key_prefix some_key: [Errno 32] Broken pipe',
'Error limiting server 1.2.3.5:11211',
'All memcached servers error-limited',
])
@@ -714,10 +747,10 @@ class TestMemcached(unittest.TestCase):
do_calls(6, 13, error_limit_time=70, error_limit_count=11)
self.assertEqual(self.logger.get_lines_for_level('error'), [
'Error talking to memcached: 1.2.3.5:11211: '
- '[Errno 32] Broken pipe',
+ 'with key_prefix some_key: [Errno 32] Broken pipe',
] * 11 + [
'Error talking to memcached: 1.2.3.5:11211: '
- '[Errno 32] Broken pipe',
+ 'with key_prefix some_key: [Errno 32] Broken pipe',
'Error limiting server 1.2.3.5:11211',
'All memcached servers error-limited',
])
@@ -953,7 +986,8 @@ class TestMemcached(unittest.TestCase):
self.assertEqual(len(memcache_client._errors['1.2.3.5:11211']), 8)
self.assertEqual(
self.logger.get_lines_for_level('error'),
- ['Timeout getting a connection to memcached: 1.2.3.5:11211'] * 8)
+ ['Timeout getting a connection to memcached: 1.2.3.5:11211'
+ ': with key_prefix key'] * 8)
self.assertEqual(served['1.2.3.5'], 2)
self.assertEqual(pending['1.2.3.4'], 0)
self.assertEqual(len(memcache_client._errors['1.2.3.4:11211']), 0)
@@ -992,7 +1026,8 @@ class TestMemcached(unittest.TestCase):
# try to get connect and no connection found
# so it will result in StopIteration
- conn_generator = memcache_client._get_conns(b'key')
+ conn_generator = memcache_client._get_conns(
+ 'key', md5hash(b'key'))
with self.assertRaises(StopIteration):
next(conn_generator)
diff --git a/test/unit/common/test_utils.py b/test/unit/common/test_utils.py
index 3ad0432c3..b92d492b3 100644
--- a/test/unit/common/test_utils.py
+++ b/test/unit/common/test_utils.py
@@ -23,7 +23,6 @@ from test.debug_logger import debug_logger
from test.unit import temptree, make_timestamp_iter, with_tempdir, \
mock_timestamp_now, FakeIterable
-import ctypes
import contextlib
import errno
import eventlet
@@ -33,7 +32,6 @@ import eventlet.patcher
import functools
import grp
import logging
-import platform
import os
import mock
import posix
@@ -198,855 +196,6 @@ class TestUTC(unittest.TestCase):
self.assertEqual(utils.UTC.tzname(None), 'UTC')
-class TestTimestamp(unittest.TestCase):
- """Tests for swift.common.utils.Timestamp"""
-
- def test_invalid_input(self):
- self.assertRaises(ValueError, utils.Timestamp, time.time(), offset=-1)
- self.assertRaises(ValueError, utils.Timestamp, '123.456_78_90')
-
- def test_invalid_string_conversion(self):
- t = utils.Timestamp.now()
- self.assertRaises(TypeError, str, t)
-
- def test_offset_limit(self):
- t = 1417462430.78693
- # can't have a offset above MAX_OFFSET
- self.assertRaises(ValueError, utils.Timestamp, t,
- offset=utils.MAX_OFFSET + 1)
- # exactly max offset is fine
- ts = utils.Timestamp(t, offset=utils.MAX_OFFSET)
- self.assertEqual(ts.internal, '1417462430.78693_ffffffffffffffff')
- # but you can't offset it further
- self.assertRaises(ValueError, utils.Timestamp, ts.internal, offset=1)
- # unless you start below it
- ts = utils.Timestamp(t, offset=utils.MAX_OFFSET - 1)
- self.assertEqual(utils.Timestamp(ts.internal, offset=1),
- '1417462430.78693_ffffffffffffffff')
-
- def test_normal_format_no_offset(self):
- expected = '1402436408.91203'
- test_values = (
- '1402436408.91203',
- '1402436408.91203_00000000',
- '1402436408.912030000',
- '1402436408.912030000_0000000000000',
- '000001402436408.912030000',
- '000001402436408.912030000_0000000000',
- 1402436408.91203,
- 1402436408.912029,
- 1402436408.9120300000000000,
- 1402436408.91202999999999999,
- utils.Timestamp(1402436408.91203),
- utils.Timestamp(1402436408.91203, offset=0),
- utils.Timestamp(1402436408.912029),
- utils.Timestamp(1402436408.912029, offset=0),
- utils.Timestamp('1402436408.91203'),
- utils.Timestamp('1402436408.91203', offset=0),
- utils.Timestamp('1402436408.91203_00000000'),
- utils.Timestamp('1402436408.91203_00000000', offset=0),
- )
- for value in test_values:
- timestamp = utils.Timestamp(value)
- self.assertEqual(timestamp.normal, expected)
- # timestamp instance can also compare to string or float
- self.assertEqual(timestamp, expected)
- self.assertEqual(timestamp, float(expected))
- self.assertEqual(timestamp, utils.normalize_timestamp(expected))
-
- def test_isoformat(self):
- expected = '2014-06-10T22:47:32.054580'
- test_values = (
- '1402440452.05458',
- '1402440452.054579',
- '1402440452.05458_00000000',
- '1402440452.054579_00000000',
- '1402440452.054580000',
- '1402440452.054579999',
- '1402440452.054580000_0000000000000',
- '1402440452.054579999_0000ff00',
- '000001402440452.054580000',
- '000001402440452.0545799',
- '000001402440452.054580000_0000000000',
- '000001402440452.054579999999_00000fffff',
- 1402440452.05458,
- 1402440452.054579,
- 1402440452.0545800000000000,
- 1402440452.054579999,
- utils.Timestamp(1402440452.05458),
- utils.Timestamp(1402440452.0545799),
- utils.Timestamp(1402440452.05458, offset=0),
- utils.Timestamp(1402440452.05457999999, offset=0),
- utils.Timestamp(1402440452.05458, offset=100),
- utils.Timestamp(1402440452.054579, offset=100),
- utils.Timestamp('1402440452.05458'),
- utils.Timestamp('1402440452.054579999'),
- utils.Timestamp('1402440452.05458', offset=0),
- utils.Timestamp('1402440452.054579', offset=0),
- utils.Timestamp('1402440452.05458', offset=300),
- utils.Timestamp('1402440452.05457999', offset=300),
- utils.Timestamp('1402440452.05458_00000000'),
- utils.Timestamp('1402440452.05457999_00000000'),
- utils.Timestamp('1402440452.05458_00000000', offset=0),
- utils.Timestamp('1402440452.05457999_00000aaa', offset=0),
- utils.Timestamp('1402440452.05458_00000000', offset=400),
- utils.Timestamp('1402440452.054579_0a', offset=400),
- )
- for value in test_values:
- self.assertEqual(utils.Timestamp(value).isoformat, expected)
- expected = '1970-01-01T00:00:00.000000'
- test_values = (
- '0',
- '0000000000.00000',
- '0000000000.00000_ffffffffffff',
- 0,
- 0.0,
- )
- for value in test_values:
- self.assertEqual(utils.Timestamp(value).isoformat, expected)
-
- def test_from_isoformat(self):
- ts = utils.Timestamp.from_isoformat('2014-06-10T22:47:32.054580')
- self.assertIsInstance(ts, utils.Timestamp)
- self.assertEqual(1402440452.05458, float(ts))
- self.assertEqual('2014-06-10T22:47:32.054580', ts.isoformat)
-
- ts = utils.Timestamp.from_isoformat('1970-01-01T00:00:00.000000')
- self.assertIsInstance(ts, utils.Timestamp)
- self.assertEqual(0.0, float(ts))
- self.assertEqual('1970-01-01T00:00:00.000000', ts.isoformat)
-
- ts = utils.Timestamp(1402440452.05458)
- self.assertIsInstance(ts, utils.Timestamp)
- self.assertEqual(ts, utils.Timestamp.from_isoformat(ts.isoformat))
-
- def test_ceil(self):
- self.assertEqual(0.0, utils.Timestamp(0).ceil())
- self.assertEqual(1.0, utils.Timestamp(0.00001).ceil())
- self.assertEqual(1.0, utils.Timestamp(0.000001).ceil())
- self.assertEqual(12345678.0, utils.Timestamp(12345678.0).ceil())
- self.assertEqual(12345679.0, utils.Timestamp(12345678.000001).ceil())
-
- def test_not_equal(self):
- ts = '1402436408.91203_0000000000000001'
- test_values = (
- utils.Timestamp('1402436408.91203_0000000000000002'),
- utils.Timestamp('1402436408.91203'),
- utils.Timestamp(1402436408.91203),
- utils.Timestamp(1402436408.91204),
- utils.Timestamp(1402436408.91203, offset=0),
- utils.Timestamp(1402436408.91203, offset=2),
- )
- for value in test_values:
- self.assertTrue(value != ts)
-
- self.assertIs(True, utils.Timestamp(ts) == ts) # sanity
- self.assertIs(False, utils.Timestamp(ts) != utils.Timestamp(ts))
- self.assertIs(False, utils.Timestamp(ts) != ts)
- self.assertIs(False, utils.Timestamp(ts) is None)
- self.assertIs(True, utils.Timestamp(ts) is not None)
-
- def test_no_force_internal_no_offset(self):
- """Test that internal is the same as normal with no offset"""
- with mock.patch('swift.common.utils.FORCE_INTERNAL', new=False):
- self.assertEqual(utils.Timestamp(0).internal, '0000000000.00000')
- self.assertEqual(utils.Timestamp(1402437380.58186).internal,
- '1402437380.58186')
- self.assertEqual(utils.Timestamp(1402437380.581859).internal,
- '1402437380.58186')
- self.assertEqual(utils.Timestamp(0).internal,
- utils.normalize_timestamp(0))
-
- def test_no_force_internal_with_offset(self):
- """Test that internal always includes the offset if significant"""
- with mock.patch('swift.common.utils.FORCE_INTERNAL', new=False):
- self.assertEqual(utils.Timestamp(0, offset=1).internal,
- '0000000000.00000_0000000000000001')
- self.assertEqual(
- utils.Timestamp(1402437380.58186, offset=16).internal,
- '1402437380.58186_0000000000000010')
- self.assertEqual(
- utils.Timestamp(1402437380.581859, offset=240).internal,
- '1402437380.58186_00000000000000f0')
- self.assertEqual(
- utils.Timestamp('1402437380.581859_00000001',
- offset=240).internal,
- '1402437380.58186_00000000000000f1')
-
- def test_force_internal(self):
- """Test that internal always includes the offset if forced"""
- with mock.patch('swift.common.utils.FORCE_INTERNAL', new=True):
- self.assertEqual(utils.Timestamp(0).internal,
- '0000000000.00000_0000000000000000')
- self.assertEqual(utils.Timestamp(1402437380.58186).internal,
- '1402437380.58186_0000000000000000')
- self.assertEqual(utils.Timestamp(1402437380.581859).internal,
- '1402437380.58186_0000000000000000')
- self.assertEqual(utils.Timestamp(0, offset=1).internal,
- '0000000000.00000_0000000000000001')
- self.assertEqual(
- utils.Timestamp(1402437380.58186, offset=16).internal,
- '1402437380.58186_0000000000000010')
- self.assertEqual(
- utils.Timestamp(1402437380.581859, offset=16).internal,
- '1402437380.58186_0000000000000010')
-
- def test_internal_format_no_offset(self):
- expected = '1402436408.91203_0000000000000000'
- test_values = (
- '1402436408.91203',
- '1402436408.91203_00000000',
- '1402436408.912030000',
- '1402436408.912030000_0000000000000',
- '000001402436408.912030000',
- '000001402436408.912030000_0000000000',
- 1402436408.91203,
- 1402436408.9120300000000000,
- 1402436408.912029,
- 1402436408.912029999999999999,
- utils.Timestamp(1402436408.91203),
- utils.Timestamp(1402436408.91203, offset=0),
- utils.Timestamp(1402436408.912029),
- utils.Timestamp(1402436408.91202999999999999, offset=0),
- utils.Timestamp('1402436408.91203'),
- utils.Timestamp('1402436408.91203', offset=0),
- utils.Timestamp('1402436408.912029'),
- utils.Timestamp('1402436408.912029', offset=0),
- utils.Timestamp('1402436408.912029999999999'),
- utils.Timestamp('1402436408.912029999999999', offset=0),
- )
- for value in test_values:
- # timestamp instance is always equivalent
- self.assertEqual(utils.Timestamp(value), expected)
- if utils.FORCE_INTERNAL:
- # the FORCE_INTERNAL flag makes the internal format always
- # include the offset portion of the timestamp even when it's
- # not significant, which would be bad during upgrades
- self.assertEqual(utils.Timestamp(value).internal, expected)
- else:
- # unless we FORCE_INTERNAL, when there's no offset the
- # internal format is equivalent to the normalized format
- self.assertEqual(utils.Timestamp(value).internal,
- '1402436408.91203')
-
- def test_internal_format_with_offset(self):
- expected = '1402436408.91203_00000000000000f0'
- test_values = (
- '1402436408.91203_000000f0',
- u'1402436408.91203_000000f0',
- b'1402436408.91203_000000f0',
- '1402436408.912030000_0000000000f0',
- '1402436408.912029_000000f0',
- '1402436408.91202999999_0000000000f0',
- '000001402436408.912030000_000000000f0',
- '000001402436408.9120299999_000000000f0',
- utils.Timestamp(1402436408.91203, offset=240),
- utils.Timestamp(1402436408.912029, offset=240),
- utils.Timestamp('1402436408.91203', offset=240),
- utils.Timestamp('1402436408.91203_00000000', offset=240),
- utils.Timestamp('1402436408.91203_0000000f', offset=225),
- utils.Timestamp('1402436408.9120299999', offset=240),
- utils.Timestamp('1402436408.9120299999_00000000', offset=240),
- utils.Timestamp('1402436408.9120299999_00000010', offset=224),
- )
- for value in test_values:
- timestamp = utils.Timestamp(value)
- self.assertEqual(timestamp.internal, expected)
- # can compare with offset if the string is internalized
- self.assertEqual(timestamp, expected)
- # if comparison value only includes the normalized portion and the
- # timestamp includes an offset, it is considered greater
- normal = utils.Timestamp(expected).normal
- self.assertTrue(timestamp > normal,
- '%r is not bigger than %r given %r' % (
- timestamp, normal, value))
- self.assertTrue(timestamp > float(normal),
- '%r is not bigger than %f given %r' % (
- timestamp, float(normal), value))
-
- def test_short_format_with_offset(self):
- expected = '1402436408.91203_f0'
- timestamp = utils.Timestamp(1402436408.91203, 0xf0)
- self.assertEqual(expected, timestamp.short)
-
- expected = '1402436408.91203'
- timestamp = utils.Timestamp(1402436408.91203)
- self.assertEqual(expected, timestamp.short)
-
- def test_raw(self):
- expected = 140243640891203
- timestamp = utils.Timestamp(1402436408.91203)
- self.assertEqual(expected, timestamp.raw)
-
- # 'raw' does not include offset
- timestamp = utils.Timestamp(1402436408.91203, 0xf0)
- self.assertEqual(expected, timestamp.raw)
-
- def test_delta(self):
- def _assertWithinBounds(expected, timestamp):
- tolerance = 0.00001
- minimum = expected - tolerance
- maximum = expected + tolerance
- self.assertTrue(float(timestamp) > minimum)
- self.assertTrue(float(timestamp) < maximum)
-
- timestamp = utils.Timestamp(1402436408.91203, delta=100)
- _assertWithinBounds(1402436408.91303, timestamp)
- self.assertEqual(140243640891303, timestamp.raw)
-
- timestamp = utils.Timestamp(1402436408.91203, delta=-100)
- _assertWithinBounds(1402436408.91103, timestamp)
- self.assertEqual(140243640891103, timestamp.raw)
-
- timestamp = utils.Timestamp(1402436408.91203, delta=0)
- _assertWithinBounds(1402436408.91203, timestamp)
- self.assertEqual(140243640891203, timestamp.raw)
-
- # delta is independent of offset
- timestamp = utils.Timestamp(1402436408.91203, offset=42, delta=100)
- self.assertEqual(140243640891303, timestamp.raw)
- self.assertEqual(42, timestamp.offset)
-
- # cannot go negative
- self.assertRaises(ValueError, utils.Timestamp, 1402436408.91203,
- delta=-140243640891203)
-
- def test_int(self):
- expected = 1402437965
- test_values = (
- '1402437965.91203',
- '1402437965.91203_00000000',
- '1402437965.912030000',
- '1402437965.912030000_0000000000000',
- '000001402437965.912030000',
- '000001402437965.912030000_0000000000',
- 1402437965.91203,
- 1402437965.9120300000000000,
- 1402437965.912029,
- 1402437965.912029999999999999,
- utils.Timestamp(1402437965.91203),
- utils.Timestamp(1402437965.91203, offset=0),
- utils.Timestamp(1402437965.91203, offset=500),
- utils.Timestamp(1402437965.912029),
- utils.Timestamp(1402437965.91202999999999999, offset=0),
- utils.Timestamp(1402437965.91202999999999999, offset=300),
- utils.Timestamp('1402437965.91203'),
- utils.Timestamp('1402437965.91203', offset=0),
- utils.Timestamp('1402437965.91203', offset=400),
- utils.Timestamp('1402437965.912029'),
- utils.Timestamp('1402437965.912029', offset=0),
- utils.Timestamp('1402437965.912029', offset=200),
- utils.Timestamp('1402437965.912029999999999'),
- utils.Timestamp('1402437965.912029999999999', offset=0),
- utils.Timestamp('1402437965.912029999999999', offset=100),
- )
- for value in test_values:
- timestamp = utils.Timestamp(value)
- self.assertEqual(int(timestamp), expected)
- self.assertTrue(timestamp > expected)
-
- def test_float(self):
- expected = 1402438115.91203
- test_values = (
- '1402438115.91203',
- '1402438115.91203_00000000',
- '1402438115.912030000',
- '1402438115.912030000_0000000000000',
- '000001402438115.912030000',
- '000001402438115.912030000_0000000000',
- 1402438115.91203,
- 1402438115.9120300000000000,
- 1402438115.912029,
- 1402438115.912029999999999999,
- utils.Timestamp(1402438115.91203),
- utils.Timestamp(1402438115.91203, offset=0),
- utils.Timestamp(1402438115.91203, offset=500),
- utils.Timestamp(1402438115.912029),
- utils.Timestamp(1402438115.91202999999999999, offset=0),
- utils.Timestamp(1402438115.91202999999999999, offset=300),
- utils.Timestamp('1402438115.91203'),
- utils.Timestamp('1402438115.91203', offset=0),
- utils.Timestamp('1402438115.91203', offset=400),
- utils.Timestamp('1402438115.912029'),
- utils.Timestamp('1402438115.912029', offset=0),
- utils.Timestamp('1402438115.912029', offset=200),
- utils.Timestamp('1402438115.912029999999999'),
- utils.Timestamp('1402438115.912029999999999', offset=0),
- utils.Timestamp('1402438115.912029999999999', offset=100),
- )
- tolerance = 0.00001
- minimum = expected - tolerance
- maximum = expected + tolerance
- for value in test_values:
- timestamp = utils.Timestamp(value)
- self.assertTrue(float(timestamp) > minimum,
- '%f is not bigger than %f given %r' % (
- timestamp, minimum, value))
- self.assertTrue(float(timestamp) < maximum,
- '%f is not smaller than %f given %r' % (
- timestamp, maximum, value))
- # direct comparison of timestamp works too
- self.assertTrue(timestamp > minimum,
- '%s is not bigger than %f given %r' % (
- timestamp.normal, minimum, value))
- self.assertTrue(timestamp < maximum,
- '%s is not smaller than %f given %r' % (
- timestamp.normal, maximum, value))
- # ... even against strings
- self.assertTrue(timestamp > '%f' % minimum,
- '%s is not bigger than %s given %r' % (
- timestamp.normal, minimum, value))
- self.assertTrue(timestamp < '%f' % maximum,
- '%s is not smaller than %s given %r' % (
- timestamp.normal, maximum, value))
-
- def test_false(self):
- self.assertFalse(utils.Timestamp(0))
- self.assertFalse(utils.Timestamp(0, offset=0))
- self.assertFalse(utils.Timestamp('0'))
- self.assertFalse(utils.Timestamp('0', offset=0))
- self.assertFalse(utils.Timestamp(0.0))
- self.assertFalse(utils.Timestamp(0.0, offset=0))
- self.assertFalse(utils.Timestamp('0.0'))
- self.assertFalse(utils.Timestamp('0.0', offset=0))
- self.assertFalse(utils.Timestamp(00000000.00000000))
- self.assertFalse(utils.Timestamp(00000000.00000000, offset=0))
- self.assertFalse(utils.Timestamp('00000000.00000000'))
- self.assertFalse(utils.Timestamp('00000000.00000000', offset=0))
-
- def test_true(self):
- self.assertTrue(utils.Timestamp(1))
- self.assertTrue(utils.Timestamp(1, offset=1))
- self.assertTrue(utils.Timestamp(0, offset=1))
- self.assertTrue(utils.Timestamp('1'))
- self.assertTrue(utils.Timestamp('1', offset=1))
- self.assertTrue(utils.Timestamp('0', offset=1))
- self.assertTrue(utils.Timestamp(1.1))
- self.assertTrue(utils.Timestamp(1.1, offset=1))
- self.assertTrue(utils.Timestamp(0.0, offset=1))
- self.assertTrue(utils.Timestamp('1.1'))
- self.assertTrue(utils.Timestamp('1.1', offset=1))
- self.assertTrue(utils.Timestamp('0.0', offset=1))
- self.assertTrue(utils.Timestamp(11111111.11111111))
- self.assertTrue(utils.Timestamp(11111111.11111111, offset=1))
- self.assertTrue(utils.Timestamp(00000000.00000000, offset=1))
- self.assertTrue(utils.Timestamp('11111111.11111111'))
- self.assertTrue(utils.Timestamp('11111111.11111111', offset=1))
- self.assertTrue(utils.Timestamp('00000000.00000000', offset=1))
-
- def test_greater_no_offset(self):
- now = time.time()
- older = now - 1
- timestamp = utils.Timestamp(now)
- test_values = (
- 0, '0', 0.0, '0.0', '0000.0000', '000.000_000',
- 1, '1', 1.1, '1.1', '1111.1111', '111.111_111',
- 1402443112.213252, '1402443112.213252', '1402443112.213252_ffff',
- older, '%f' % older, '%f_0000ffff' % older,
- )
- for value in test_values:
- other = utils.Timestamp(value)
- self.assertNotEqual(timestamp, other) # sanity
- self.assertTrue(timestamp > value,
- '%r is not greater than %r given %r' % (
- timestamp, value, value))
- self.assertTrue(timestamp > other,
- '%r is not greater than %r given %r' % (
- timestamp, other, value))
- self.assertTrue(timestamp > other.normal,
- '%r is not greater than %r given %r' % (
- timestamp, other.normal, value))
- self.assertTrue(timestamp > other.internal,
- '%r is not greater than %r given %r' % (
- timestamp, other.internal, value))
- self.assertTrue(timestamp > float(other),
- '%r is not greater than %r given %r' % (
- timestamp, float(other), value))
- self.assertTrue(timestamp > int(other),
- '%r is not greater than %r given %r' % (
- timestamp, int(other), value))
-
- def _test_greater_with_offset(self, now, test_values):
- for offset in range(1, 1000, 100):
- timestamp = utils.Timestamp(now, offset=offset)
- for value in test_values:
- other = utils.Timestamp(value)
- self.assertNotEqual(timestamp, other) # sanity
- self.assertTrue(timestamp > value,
- '%r is not greater than %r given %r' % (
- timestamp, value, value))
- self.assertTrue(timestamp > other,
- '%r is not greater than %r given %r' % (
- timestamp, other, value))
- self.assertTrue(timestamp > other.normal,
- '%r is not greater than %r given %r' % (
- timestamp, other.normal, value))
- self.assertTrue(timestamp > other.internal,
- '%r is not greater than %r given %r' % (
- timestamp, other.internal, value))
- self.assertTrue(timestamp > float(other),
- '%r is not greater than %r given %r' % (
- timestamp, float(other), value))
- self.assertTrue(timestamp > int(other),
- '%r is not greater than %r given %r' % (
- timestamp, int(other), value))
-
- def test_greater_with_offset(self):
- # Part 1: use the machine's current time. This is deliciously
- # unpredictable, but completely legitimate and realistic. Finds bugs!
- now = time.time()
- older = now - 1
- test_values = (
- 0, '0', 0.0, '0.0', '0000.0000', '000.000_000',
- 1, '1', 1.1, '1.1', '1111.1111', '111.111_111',
- 1402443346.935174, '1402443346.93517', '1402443346.935169_ffff',
- older, now,
- )
- self._test_greater_with_offset(now, test_values)
- # Part 2: Same as above, but with fixed time values that reproduce
- # specific corner cases.
- now = 1519830570.6949348
- older = now - 1
- test_values = (
- 0, '0', 0.0, '0.0', '0000.0000', '000.000_000',
- 1, '1', 1.1, '1.1', '1111.1111', '111.111_111',
- 1402443346.935174, '1402443346.93517', '1402443346.935169_ffff',
- older, now,
- )
- self._test_greater_with_offset(now, test_values)
- # Part 3: The '%f' problem. Timestamps cannot be converted to %f
- # strings, then back to timestamps, then compared with originals.
- # You can only "import" a floating point representation once.
- now = 1519830570.6949348
- now = float('%f' % now)
- older = now - 1
- test_values = (
- 0, '0', 0.0, '0.0', '0000.0000', '000.000_000',
- 1, '1', 1.1, '1.1', '1111.1111', '111.111_111',
- older, '%f' % older, '%f_0000ffff' % older,
- now, '%f' % now, '%s_00000000' % now,
- )
- self._test_greater_with_offset(now, test_values)
-
- def test_smaller_no_offset(self):
- now = time.time()
- newer = now + 1
- timestamp = utils.Timestamp(now)
- test_values = (
- 9999999999.99999, '9999999999.99999', '9999999999.99999_ffff',
- newer, '%f' % newer, '%f_0000ffff' % newer,
- )
- for value in test_values:
- other = utils.Timestamp(value)
- self.assertNotEqual(timestamp, other) # sanity
- self.assertTrue(timestamp < value,
- '%r is not smaller than %r given %r' % (
- timestamp, value, value))
- self.assertTrue(timestamp < other,
- '%r is not smaller than %r given %r' % (
- timestamp, other, value))
- self.assertTrue(timestamp < other.normal,
- '%r is not smaller than %r given %r' % (
- timestamp, other.normal, value))
- self.assertTrue(timestamp < other.internal,
- '%r is not smaller than %r given %r' % (
- timestamp, other.internal, value))
- self.assertTrue(timestamp < float(other),
- '%r is not smaller than %r given %r' % (
- timestamp, float(other), value))
- self.assertTrue(timestamp < int(other),
- '%r is not smaller than %r given %r' % (
- timestamp, int(other), value))
-
- def test_smaller_with_offset(self):
- now = time.time()
- newer = now + 1
- test_values = (
- 9999999999.99999, '9999999999.99999', '9999999999.99999_ffff',
- newer, '%f' % newer, '%f_0000ffff' % newer,
- )
- for offset in range(1, 1000, 100):
- timestamp = utils.Timestamp(now, offset=offset)
- for value in test_values:
- other = utils.Timestamp(value)
- self.assertNotEqual(timestamp, other) # sanity
- self.assertTrue(timestamp < value,
- '%r is not smaller than %r given %r' % (
- timestamp, value, value))
- self.assertTrue(timestamp < other,
- '%r is not smaller than %r given %r' % (
- timestamp, other, value))
- self.assertTrue(timestamp < other.normal,
- '%r is not smaller than %r given %r' % (
- timestamp, other.normal, value))
- self.assertTrue(timestamp < other.internal,
- '%r is not smaller than %r given %r' % (
- timestamp, other.internal, value))
- self.assertTrue(timestamp < float(other),
- '%r is not smaller than %r given %r' % (
- timestamp, float(other), value))
- self.assertTrue(timestamp < int(other),
- '%r is not smaller than %r given %r' % (
- timestamp, int(other), value))
-
- def test_cmp_with_none(self):
- self.assertGreater(utils.Timestamp(0), None)
- self.assertGreater(utils.Timestamp(1.0), None)
- self.assertGreater(utils.Timestamp(1.0, 42), None)
-
- def test_ordering(self):
- given = [
- '1402444820.62590_000000000000000a',
- '1402444820.62589_0000000000000001',
- '1402444821.52589_0000000000000004',
- '1402444920.62589_0000000000000004',
- '1402444821.62589_000000000000000a',
- '1402444821.72589_000000000000000a',
- '1402444920.62589_0000000000000002',
- '1402444820.62589_0000000000000002',
- '1402444820.62589_000000000000000a',
- '1402444820.62590_0000000000000004',
- '1402444920.62589_000000000000000a',
- '1402444820.62590_0000000000000002',
- '1402444821.52589_0000000000000002',
- '1402444821.52589_0000000000000000',
- '1402444920.62589',
- '1402444821.62589_0000000000000004',
- '1402444821.72589_0000000000000001',
- '1402444820.62590',
- '1402444820.62590_0000000000000001',
- '1402444820.62589_0000000000000004',
- '1402444821.72589_0000000000000000',
- '1402444821.52589_000000000000000a',
- '1402444821.72589_0000000000000004',
- '1402444821.62589',
- '1402444821.52589_0000000000000001',
- '1402444821.62589_0000000000000001',
- '1402444821.62589_0000000000000002',
- '1402444821.72589_0000000000000002',
- '1402444820.62589',
- '1402444920.62589_0000000000000001']
- expected = [
- '1402444820.62589',
- '1402444820.62589_0000000000000001',
- '1402444820.62589_0000000000000002',
- '1402444820.62589_0000000000000004',
- '1402444820.62589_000000000000000a',
- '1402444820.62590',
- '1402444820.62590_0000000000000001',
- '1402444820.62590_0000000000000002',
- '1402444820.62590_0000000000000004',
- '1402444820.62590_000000000000000a',
- '1402444821.52589',
- '1402444821.52589_0000000000000001',
- '1402444821.52589_0000000000000002',
- '1402444821.52589_0000000000000004',
- '1402444821.52589_000000000000000a',
- '1402444821.62589',
- '1402444821.62589_0000000000000001',
- '1402444821.62589_0000000000000002',
- '1402444821.62589_0000000000000004',
- '1402444821.62589_000000000000000a',
- '1402444821.72589',
- '1402444821.72589_0000000000000001',
- '1402444821.72589_0000000000000002',
- '1402444821.72589_0000000000000004',
- '1402444821.72589_000000000000000a',
- '1402444920.62589',
- '1402444920.62589_0000000000000001',
- '1402444920.62589_0000000000000002',
- '1402444920.62589_0000000000000004',
- '1402444920.62589_000000000000000a',
- ]
- # less visual version
- """
- now = time.time()
- given = [
- utils.Timestamp(now + i, offset=offset).internal
- for i in (0, 0.00001, 0.9, 1.0, 1.1, 100.0)
- for offset in (0, 1, 2, 4, 10)
- ]
- expected = [t for t in given]
- random.shuffle(given)
- """
- self.assertEqual(len(given), len(expected)) # sanity
- timestamps = [utils.Timestamp(t) for t in given]
- # our expected values don't include insignificant offsets
- with mock.patch('swift.common.utils.FORCE_INTERNAL', new=False):
- self.assertEqual(
- [t.internal for t in sorted(timestamps)], expected)
- # string sorting works as well
- self.assertEqual(
- sorted([t.internal for t in timestamps]), expected)
-
- def test_hashable(self):
- ts_0 = utils.Timestamp('1402444821.72589')
- ts_0_also = utils.Timestamp('1402444821.72589')
- self.assertEqual(ts_0, ts_0_also) # sanity
- self.assertEqual(hash(ts_0), hash(ts_0_also))
- d = {ts_0: 'whatever'}
- self.assertIn(ts_0, d) # sanity
- self.assertIn(ts_0_also, d)
-
- def test_out_of_range_comparisons(self):
- now = utils.Timestamp.now()
-
- def check_is_later(val):
- self.assertTrue(now != val)
- self.assertFalse(now == val)
- self.assertTrue(now <= val)
- self.assertTrue(now < val)
- self.assertTrue(val > now)
- self.assertTrue(val >= now)
-
- check_is_later(1e30)
- check_is_later(1579753284000) # someone gave us ms instead of s!
- check_is_later('1579753284000')
- check_is_later(b'1e15')
- check_is_later(u'1.e+10_f')
-
- def check_is_earlier(val):
- self.assertTrue(now != val)
- self.assertFalse(now == val)
- self.assertTrue(now >= val)
- self.assertTrue(now > val)
- self.assertTrue(val < now)
- self.assertTrue(val <= now)
-
- check_is_earlier(-1)
- check_is_earlier(-0.1)
- check_is_earlier('-9999999')
- check_is_earlier(b'-9999.999')
- check_is_earlier(u'-1234_5678')
-
- def test_inversion(self):
- ts = utils.Timestamp(0)
- self.assertIsInstance(~ts, utils.Timestamp)
- self.assertEqual((~ts).internal, '9999999999.99999')
-
- ts = utils.Timestamp(123456.789)
- self.assertIsInstance(~ts, utils.Timestamp)
- self.assertEqual(ts.internal, '0000123456.78900')
- self.assertEqual((~ts).internal, '9999876543.21099')
-
- timestamps = sorted(utils.Timestamp(random.random() * 1e10)
- for _ in range(20))
- self.assertEqual([x.internal for x in timestamps],
- sorted(x.internal for x in timestamps))
- self.assertEqual([(~x).internal for x in reversed(timestamps)],
- sorted((~x).internal for x in timestamps))
-
- ts = utils.Timestamp.now()
- self.assertGreater(~ts, ts) # NB: will break around 2128
-
- ts = utils.Timestamp.now(offset=1)
- with self.assertRaises(ValueError) as caught:
- ~ts
- self.assertEqual(caught.exception.args[0],
- 'Cannot invert timestamps with offsets')
-
-
-class TestTimestampEncoding(unittest.TestCase):
-
- def setUp(self):
- t0 = utils.Timestamp(0.0)
- t1 = utils.Timestamp(997.9996)
- t2 = utils.Timestamp(999)
- t3 = utils.Timestamp(1000, 24)
- t4 = utils.Timestamp(1001)
- t5 = utils.Timestamp(1002.00040)
-
- # encodings that are expected when explicit = False
- self.non_explicit_encodings = (
- ('0000001000.00000_18', (t3, t3, t3)),
- ('0000001000.00000_18', (t3, t3, None)),
- )
-
- # mappings that are expected when explicit = True
- self.explicit_encodings = (
- ('0000001000.00000_18+0+0', (t3, t3, t3)),
- ('0000001000.00000_18+0', (t3, t3, None)),
- )
-
- # mappings that are expected when explicit = True or False
- self.encodings = (
- ('0000001000.00000_18+0+186a0', (t3, t3, t4)),
- ('0000001000.00000_18+186a0+186c8', (t3, t4, t5)),
- ('0000001000.00000_18-186a0+0', (t3, t2, t2)),
- ('0000001000.00000_18+0-186a0', (t3, t3, t2)),
- ('0000001000.00000_18-186a0-186c8', (t3, t2, t1)),
- ('0000001000.00000_18', (t3, None, None)),
- ('0000001000.00000_18+186a0', (t3, t4, None)),
- ('0000001000.00000_18-186a0', (t3, t2, None)),
- ('0000001000.00000_18', (t3, None, t1)),
- ('0000001000.00000_18-5f5e100', (t3, t0, None)),
- ('0000001000.00000_18+0-5f5e100', (t3, t3, t0)),
- ('0000001000.00000_18-5f5e100+5f45a60', (t3, t0, t2)),
- )
-
- # decodings that are expected when explicit = False
- self.non_explicit_decodings = (
- ('0000001000.00000_18', (t3, t3, t3)),
- ('0000001000.00000_18+186a0', (t3, t4, t4)),
- ('0000001000.00000_18-186a0', (t3, t2, t2)),
- ('0000001000.00000_18+186a0', (t3, t4, t4)),
- ('0000001000.00000_18-186a0', (t3, t2, t2)),
- ('0000001000.00000_18-5f5e100', (t3, t0, t0)),
- )
-
- # decodings that are expected when explicit = True
- self.explicit_decodings = (
- ('0000001000.00000_18+0+0', (t3, t3, t3)),
- ('0000001000.00000_18+0', (t3, t3, None)),
- ('0000001000.00000_18', (t3, None, None)),
- ('0000001000.00000_18+186a0', (t3, t4, None)),
- ('0000001000.00000_18-186a0', (t3, t2, None)),
- ('0000001000.00000_18-5f5e100', (t3, t0, None)),
- )
-
- # decodings that are expected when explicit = True or False
- self.decodings = (
- ('0000001000.00000_18+0+186a0', (t3, t3, t4)),
- ('0000001000.00000_18+186a0+186c8', (t3, t4, t5)),
- ('0000001000.00000_18-186a0+0', (t3, t2, t2)),
- ('0000001000.00000_18+0-186a0', (t3, t3, t2)),
- ('0000001000.00000_18-186a0-186c8', (t3, t2, t1)),
- ('0000001000.00000_18-5f5e100+5f45a60', (t3, t0, t2)),
- )
-
- def _assertEqual(self, expected, actual, test):
- self.assertEqual(expected, actual,
- 'Got %s but expected %s for parameters %s'
- % (actual, expected, test))
-
- def test_encoding(self):
- for test in self.explicit_encodings:
- actual = utils.encode_timestamps(test[1][0], test[1][1],
- test[1][2], True)
- self._assertEqual(test[0], actual, test[1])
- for test in self.non_explicit_encodings:
- actual = utils.encode_timestamps(test[1][0], test[1][1],
- test[1][2], False)
- self._assertEqual(test[0], actual, test[1])
- for explicit in (True, False):
- for test in self.encodings:
- actual = utils.encode_timestamps(test[1][0], test[1][1],
- test[1][2], explicit)
- self._assertEqual(test[0], actual, test[1])
-
- def test_decoding(self):
- for test in self.explicit_decodings:
- actual = utils.decode_timestamps(test[0], True)
- self._assertEqual(test[1], actual, test[0])
- for test in self.non_explicit_decodings:
- actual = utils.decode_timestamps(test[0], False)
- self._assertEqual(test[1], actual, test[0])
- for explicit in (True, False):
- for test in self.decodings:
- actual = utils.decode_timestamps(test[0], explicit)
- self._assertEqual(test[1], actual, test[0])
-
-
class TestUtils(unittest.TestCase):
"""Tests for swift.common.utils """
@@ -1061,6 +210,61 @@ class TestUtils(unittest.TestCase):
self.md5_digest = '0d6dc3c588ae71a04ce9a6beebbbba06'
self.fips_enabled = True
+ def test_monkey_patch(self):
+ def take_and_release(lock):
+ try:
+ lock.acquire()
+ finally:
+ lock.release()
+
+ def do_test():
+ res = 0
+ try:
+ # this module imports eventlet's original threading, so re-import
+ # locally...
+ import threading
+ import traceback
+ logging_lock_before = logging._lock
+ my_lock_before = threading.RLock()
+ self.assertIsInstance(logging_lock_before,
+ type(my_lock_before))
+
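+ # monkey_patch() is expected to swap logging's module-level lock
+ # for a greenlet-friendly one, so that a thread blocked on it no
+ # longer blocks the main greenthread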
+ utils.monkey_patch()
+
+ logging_lock_after = logging._lock
+ my_lock_after = threading.RLock()
+ self.assertIsInstance(logging_lock_after,
+ type(my_lock_after))
+
+ self.assertTrue(logging_lock_after.acquire())
+ thread = threading.Thread(target=take_and_release,
+ args=(logging_lock_after,))
+ thread.start()
+ self.assertTrue(thread.is_alive())
+ # we should time out while the thread is still blocked on the lock
+ eventlet.sleep()
+ thread.join(timeout=0.1)
+ self.assertTrue(thread.is_alive())
+
+ logging._lock.release()
+ thread.join(timeout=0.1)
+ self.assertFalse(thread.is_alive())
+ except AssertionError:
+ traceback.print_exc()
+ res = 1
+ finally:
+ os._exit(res)
+
+ pid = os.fork()
+ if pid == 0:
+ # run the test in a forked process so that the monkey patching
+ # does not leak into this one
+ do_test()
+ else:
+ child_pid, errcode = os.waitpid(pid, 0)
+ self.assertEqual(0, os.WEXITSTATUS(errcode),
+ 'Forked do_test failed')
+
def test_get_zero_indexed_base_string(self):
self.assertEqual(utils.get_zero_indexed_base_string('something', 0),
'something')
@@ -1995,11 +1199,15 @@ class TestUtils(unittest.TestCase):
# test eventlet.Timeout
with ConnectionTimeout(42, 'my error message') \
as connection_timeout:
- log_exception(connection_timeout)
+ now = time.time()
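+ # pretend the timeout was created 123.456s ago, so that the log
+ # message can report how long it had been outstanding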
+ connection_timeout.created_at = now - 123.456
+ with mock.patch('swift.common.utils.time.time',
+ return_value=now):
+ log_exception(connection_timeout)
log_msg = strip_value(sio)
self.assertNotIn('Traceback', log_msg)
self.assertTrue('ConnectionTimeout' in log_msg)
- self.assertTrue('(42s)' in log_msg)
+ self.assertTrue('(42s after 123.46s)' in log_msg)
self.assertNotIn('my error message', log_msg)
with MessageTimeout(42, 'my error message') as message_timeout:
@@ -2204,6 +1412,36 @@ class TestUtils(unittest.TestCase):
finally:
base_logger.logger.removeHandler(handler)
+ @reset_logger_state
+ def test_nested_prefixlogger(self):
+ # setup stream logging
+ sio = StringIO()
+ base_logger = utils.get_logger(None)
+ handler = logging.StreamHandler(sio)
+ base_logger.logger.addHandler(handler)
+ inner_logger = utils.PrefixLoggerAdapter(base_logger, {})
+ inner_logger.set_prefix('one: ')
+ outer_logger = utils.PrefixLoggerAdapter(inner_logger, {})
+ outer_logger.set_prefix('two: ')
+
+ def strip_value(sio):
+ sio.seek(0)
+ v = sio.getvalue()
+ sio.truncate(0)
+ return v
+
+ try:
+ # establish base case
+ self.assertEqual(strip_value(sio), '')
+ inner_logger.info('test')
+ self.assertEqual(strip_value(sio), 'one: test\n')
+
+ outer_logger.info('test')
+ self.assertEqual(strip_value(sio), 'one: two: test\n')
+ self.assertEqual(strip_value(sio), '')
+ finally:
+ base_logger.logger.removeHandler(handler)
+
def test_storage_directory(self):
self.assertEqual(utils.storage_directory('objects', '1', 'ABCDEF'),
'objects/1/DEF/ABCDEF')
@@ -3498,44 +2736,6 @@ cluster_dfw1 = http://dfw1.host/v1/
ts = utils.get_trans_id_time('tx1df4ff4f55ea45f7b2ec2-almostright')
self.assertIsNone(ts)
- def test_config_fallocate_value(self):
- fallocate_value, is_percent = utils.config_fallocate_value('10%')
- self.assertEqual(fallocate_value, 10)
- self.assertTrue(is_percent)
- fallocate_value, is_percent = utils.config_fallocate_value('10')
- self.assertEqual(fallocate_value, 10)
- self.assertFalse(is_percent)
- try:
- fallocate_value, is_percent = utils.config_fallocate_value('ab%')
- except ValueError as err:
- exc = err
- self.assertEqual(str(exc), 'Error: ab% is an invalid value for '
- 'fallocate_reserve.')
- try:
- fallocate_value, is_percent = utils.config_fallocate_value('ab')
- except ValueError as err:
- exc = err
- self.assertEqual(str(exc), 'Error: ab is an invalid value for '
- 'fallocate_reserve.')
- try:
- fallocate_value, is_percent = utils.config_fallocate_value('1%%')
- except ValueError as err:
- exc = err
- self.assertEqual(str(exc), 'Error: 1%% is an invalid value for '
- 'fallocate_reserve.')
- try:
- fallocate_value, is_percent = utils.config_fallocate_value('10.0')
- except ValueError as err:
- exc = err
- self.assertEqual(str(exc), 'Error: 10.0 is an invalid value for '
- 'fallocate_reserve.')
- fallocate_value, is_percent = utils.config_fallocate_value('10.5%')
- self.assertEqual(fallocate_value, 10.5)
- self.assertTrue(is_percent)
- fallocate_value, is_percent = utils.config_fallocate_value('10.000%')
- self.assertEqual(fallocate_value, 10.000)
- self.assertTrue(is_percent)
-
def test_lock_file(self):
flags = os.O_CREAT | os.O_RDWR
with NamedTemporaryFile(delete=False) as nt:
@@ -4262,7 +3462,7 @@ cluster_dfw1 = http://dfw1.host/v1/
if tempdir:
shutil.rmtree(tempdir)
- def test_find_shard_range(self):
+ def test_find_namespace(self):
ts = utils.Timestamp.now().internal
start = utils.ShardRange('a/-a', ts, '', 'a')
atof = utils.ShardRange('a/a-f', ts, 'a', 'f')
@@ -4272,29 +3472,29 @@ cluster_dfw1 = http://dfw1.host/v1/
end = utils.ShardRange('a/z-', ts, 'z', '')
ranges = [start, atof, ftol, ltor, rtoz, end]
- found = utils.find_shard_range('', ranges)
+ found = utils.find_namespace('', ranges)
self.assertEqual(found, None)
- found = utils.find_shard_range(' ', ranges)
+ found = utils.find_namespace(' ', ranges)
self.assertEqual(found, start)
- found = utils.find_shard_range(' ', ranges[1:])
+ found = utils.find_namespace(' ', ranges[1:])
self.assertEqual(found, None)
- found = utils.find_shard_range('b', ranges)
+ found = utils.find_namespace('b', ranges)
self.assertEqual(found, atof)
- found = utils.find_shard_range('f', ranges)
+ found = utils.find_namespace('f', ranges)
self.assertEqual(found, atof)
- found = utils.find_shard_range('f\x00', ranges)
+ found = utils.find_namespace('f\x00', ranges)
self.assertEqual(found, ftol)
- found = utils.find_shard_range('x', ranges)
+ found = utils.find_namespace('x', ranges)
self.assertEqual(found, rtoz)
- found = utils.find_shard_range('r', ranges)
+ found = utils.find_namespace('r', ranges)
self.assertEqual(found, ltor)
- found = utils.find_shard_range('}', ranges)
+ found = utils.find_namespace('}', ranges)
self.assertEqual(found, end)
- found = utils.find_shard_range('}', ranges[:-1])
+ found = utils.find_namespace('}', ranges[:-1])
self.assertEqual(found, None)
# remove l-r from list of ranges and try and find a shard range for an
# item in that range.
- found = utils.find_shard_range('p', ranges[:-3] + ranges[-2:])
+ found = utils.find_namespace('p', ranges[:-3] + ranges[-2:])
self.assertEqual(found, None)
# add some sub-shards; a sub-shard's state is less than its parent
@@ -4304,20 +3504,20 @@ cluster_dfw1 = http://dfw1.host/v1/
htok = utils.ShardRange('a/h-k', ts, 'h', 'k')
overlapping_ranges = ranges[:2] + [ftoh, htok] + ranges[2:]
- found = utils.find_shard_range('g', overlapping_ranges)
+ found = utils.find_namespace('g', overlapping_ranges)
self.assertEqual(found, ftoh)
- found = utils.find_shard_range('h', overlapping_ranges)
+ found = utils.find_namespace('h', overlapping_ranges)
self.assertEqual(found, ftoh)
- found = utils.find_shard_range('k', overlapping_ranges)
+ found = utils.find_namespace('k', overlapping_ranges)
self.assertEqual(found, htok)
- found = utils.find_shard_range('l', overlapping_ranges)
+ found = utils.find_namespace('l', overlapping_ranges)
self.assertEqual(found, ftol)
- found = utils.find_shard_range('m', overlapping_ranges)
+ found = utils.find_namespace('m', overlapping_ranges)
self.assertEqual(found, ltor)
ktol = utils.ShardRange('a/k-l', ts, 'k', 'l')
overlapping_ranges = ranges[:2] + [ftoh, htok, ktol] + ranges[2:]
- found = utils.find_shard_range('l', overlapping_ranges)
+ found = utils.find_namespace('l', overlapping_ranges)
self.assertEqual(found, ktol)
def test_parse_db_filename(self):
@@ -4363,110 +3563,6 @@ cluster_dfw1 = http://dfw1.host/v1/
self.assertRaises(ValueError, utils.make_db_file_path,
'/path/to/hash.db', 'bad epoch')
- def test_modify_priority(self):
- pid = os.getpid()
- logger = debug_logger()
- called = {}
-
- def _fake_setpriority(*args):
- called['setpriority'] = args
-
- def _fake_syscall(*args):
- called['syscall'] = args
-
- # Test whether the current architecture supports changing priority
- try:
- utils.NR_ioprio_set()
- except OSError as e:
- raise unittest.SkipTest(e)
-
- with patch('swift.common.utils._libc_setpriority',
- _fake_setpriority), \
- patch('swift.common.utils._posix_syscall', _fake_syscall):
- called = {}
- # not set / default
- utils.modify_priority({}, logger)
- self.assertEqual(called, {})
- called = {}
- # just nice
- utils.modify_priority({'nice_priority': '1'}, logger)
- self.assertEqual(called, {'setpriority': (0, pid, 1)})
- called = {}
- # just ionice class uses default priority 0
- utils.modify_priority({'ionice_class': 'IOPRIO_CLASS_RT'}, logger)
- architecture = os.uname()[4]
- arch_bits = platform.architecture()[0]
- if architecture == 'x86_64' and arch_bits == '64bit':
- self.assertEqual(called, {'syscall': (251, 1, pid, 1 << 13)})
- elif architecture == 'aarch64' and arch_bits == '64bit':
- self.assertEqual(called, {'syscall': (30, 1, pid, 1 << 13)})
- else:
- self.fail("Unexpected call: %r" % called)
- called = {}
- # just ionice priority is ignored
- utils.modify_priority({'ionice_priority': '4'}, logger)
- self.assertEqual(called, {})
- called = {}
- # bad ionice class
- utils.modify_priority({'ionice_class': 'class_foo'}, logger)
- self.assertEqual(called, {})
- called = {}
- # ionice class & priority
- utils.modify_priority({
- 'ionice_class': 'IOPRIO_CLASS_BE',
- 'ionice_priority': '4',
- }, logger)
- if architecture == 'x86_64' and arch_bits == '64bit':
- self.assertEqual(called, {
- 'syscall': (251, 1, pid, 2 << 13 | 4)
- })
- elif architecture == 'aarch64' and arch_bits == '64bit':
- self.assertEqual(called, {
- 'syscall': (30, 1, pid, 2 << 13 | 4)
- })
- else:
- self.fail("Unexpected call: %r" % called)
- called = {}
- # all
- utils.modify_priority({
- 'nice_priority': '-15',
- 'ionice_class': 'IOPRIO_CLASS_IDLE',
- 'ionice_priority': '6',
- }, logger)
- if architecture == 'x86_64' and arch_bits == '64bit':
- self.assertEqual(called, {
- 'setpriority': (0, pid, -15),
- 'syscall': (251, 1, pid, 3 << 13 | 6),
- })
- elif architecture == 'aarch64' and arch_bits == '64bit':
- self.assertEqual(called, {
- 'setpriority': (0, pid, -15),
- 'syscall': (30, 1, pid, 3 << 13 | 6),
- })
- else:
- self.fail("Unexpected call: %r" % called)
-
- def test__NR_ioprio_set(self):
- with patch('os.uname', return_value=('', '', '', '', 'x86_64')), \
- patch('platform.architecture', return_value=('64bit', '')):
- self.assertEqual(251, utils.NR_ioprio_set())
-
- with patch('os.uname', return_value=('', '', '', '', 'x86_64')), \
- patch('platform.architecture', return_value=('32bit', '')):
- self.assertRaises(OSError, utils.NR_ioprio_set)
-
- with patch('os.uname', return_value=('', '', '', '', 'aarch64')), \
- patch('platform.architecture', return_value=('64bit', '')):
- self.assertEqual(30, utils.NR_ioprio_set())
-
- with patch('os.uname', return_value=('', '', '', '', 'aarch64')), \
- patch('platform.architecture', return_value=('32bit', '')):
- self.assertRaises(OSError, utils.NR_ioprio_set)
-
- with patch('os.uname', return_value=('', '', '', '', 'alpha')), \
- patch('platform.architecture', return_value=('64bit', '')):
- self.assertRaises(OSError, utils.NR_ioprio_set)
-
@requires_o_tmpfile_support_in_tmp
def test_link_fd_to_path_linkat_success(self):
tempdir = mkdtemp()
@@ -6666,6 +5762,24 @@ class TestMetricsPrefixLoggerAdapter(unittest.TestCase):
mock.call('test3'), mock.call('test')],
mock_increment.call_args_list)
+ def test_wrapped_prefixing(self):
+ logger = utils.get_logger({}, 'logger_name')
+ adapter1 = utils.MetricsPrefixLoggerAdapter(logger, {}, 'one')
+ adapter2 = utils.MetricsPrefixLoggerAdapter(adapter1, {}, 'two')
+ self.assertEqual('logger_name', logger.name)
+ self.assertEqual('logger_name', adapter1.logger.name)
+ self.assertEqual('logger_name', adapter2.logger.name)
+
+ with mock.patch.object(logger, 'increment') as mock_increment:
+ adapter1.increment('test1')
+ adapter2.increment('test2')
+ logger.increment('test')
+ self.assertEqual(
+ [mock.call('one.test1'),
+ mock.call('one.two.test2'),
+ mock.call('test')],
+ mock_increment.call_args_list)
+
class TestAuditLocationGenerator(unittest.TestCase):
@@ -8914,7 +8028,7 @@ class TestShardRange(unittest.TestCase):
with self.assertRaises(KeyError):
utils.ShardRange.from_dict(bad_dict)
# But __init__ still (generally) works!
- if key not in ('name', 'timestamp'):
+ if key != 'name':
utils.ShardRange(**bad_dict)
else:
with self.assertRaises(TypeError):
@@ -9692,435 +8806,22 @@ class TestShardRangeList(unittest.TestCase):
do_test([utils.ShardRange.ACTIVE]))
-@patch('ctypes.get_errno')
-@patch.object(utils, '_sys_posix_fallocate')
-@patch.object(utils, '_sys_fallocate')
-@patch.object(utils, 'FALLOCATE_RESERVE', 0)
-class TestFallocate(unittest.TestCase):
- def test_fallocate(self, sys_fallocate_mock,
- sys_posix_fallocate_mock, get_errno_mock):
- sys_fallocate_mock.available = True
- sys_fallocate_mock.return_value = 0
-
- utils.fallocate(1234, 5000 * 2 ** 20)
-
- # We can't use sys_fallocate_mock.assert_called_once_with because no
- # two ctypes.c_uint64 objects are equal even if their values are
- # equal. Yes, ctypes.c_uint64(123) != ctypes.c_uint64(123).
- calls = sys_fallocate_mock.mock_calls
- self.assertEqual(len(calls), 1)
- args = calls[0][1]
- self.assertEqual(len(args), 4)
- self.assertEqual(args[0], 1234)
- self.assertEqual(args[1], utils.FALLOC_FL_KEEP_SIZE)
- self.assertEqual(args[2].value, 0)
- self.assertEqual(args[3].value, 5000 * 2 ** 20)
-
- sys_posix_fallocate_mock.assert_not_called()
-
- def test_fallocate_offset(self, sys_fallocate_mock,
- sys_posix_fallocate_mock, get_errno_mock):
- sys_fallocate_mock.available = True
- sys_fallocate_mock.return_value = 0
-
- utils.fallocate(1234, 5000 * 2 ** 20, offset=3 * 2 ** 30)
- calls = sys_fallocate_mock.mock_calls
- self.assertEqual(len(calls), 1)
- args = calls[0][1]
- self.assertEqual(len(args), 4)
- self.assertEqual(args[0], 1234)
- self.assertEqual(args[1], utils.FALLOC_FL_KEEP_SIZE)
- self.assertEqual(args[2].value, 3 * 2 ** 30)
- self.assertEqual(args[3].value, 5000 * 2 ** 20)
-
- sys_posix_fallocate_mock.assert_not_called()
-
- def test_fallocate_fatal_error(self, sys_fallocate_mock,
- sys_posix_fallocate_mock, get_errno_mock):
- sys_fallocate_mock.available = True
- sys_fallocate_mock.return_value = -1
- get_errno_mock.return_value = errno.EIO
-
- with self.assertRaises(OSError) as cm:
- utils.fallocate(1234, 5000 * 2 ** 20)
- self.assertEqual(cm.exception.errno, errno.EIO)
-
- def test_fallocate_silent_errors(self, sys_fallocate_mock,
- sys_posix_fallocate_mock, get_errno_mock):
- sys_fallocate_mock.available = True
- sys_fallocate_mock.return_value = -1
-
- for silent_error in (0, errno.ENOSYS, errno.EOPNOTSUPP, errno.EINVAL):
- get_errno_mock.return_value = silent_error
- try:
- utils.fallocate(1234, 5678)
- except OSError:
- self.fail("fallocate() raised an error on %d" % silent_error)
-
- def test_posix_fallocate_fallback(self, sys_fallocate_mock,
- sys_posix_fallocate_mock,
- get_errno_mock):
- sys_fallocate_mock.available = False
- sys_fallocate_mock.side_effect = NotImplementedError
-
- sys_posix_fallocate_mock.available = True
- sys_posix_fallocate_mock.return_value = 0
-
- utils.fallocate(1234, 567890)
- sys_fallocate_mock.assert_not_called()
-
- calls = sys_posix_fallocate_mock.mock_calls
- self.assertEqual(len(calls), 1)
- args = calls[0][1]
- self.assertEqual(len(args), 3)
- self.assertEqual(args[0], 1234)
- self.assertEqual(args[1].value, 0)
- self.assertEqual(args[2].value, 567890)
-
- def test_posix_fallocate_offset(self, sys_fallocate_mock,
- sys_posix_fallocate_mock, get_errno_mock):
- sys_fallocate_mock.available = False
- sys_fallocate_mock.side_effect = NotImplementedError
-
- sys_posix_fallocate_mock.available = True
- sys_posix_fallocate_mock.return_value = 0
-
- utils.fallocate(1234, 5000 * 2 ** 20, offset=3 * 2 ** 30)
- calls = sys_posix_fallocate_mock.mock_calls
- self.assertEqual(len(calls), 1)
- args = calls[0][1]
- self.assertEqual(len(args), 3)
- self.assertEqual(args[0], 1234)
- self.assertEqual(args[1].value, 3 * 2 ** 30)
- self.assertEqual(args[2].value, 5000 * 2 ** 20)
-
- sys_fallocate_mock.assert_not_called()
-
- def test_no_fallocates_available(self, sys_fallocate_mock,
- sys_posix_fallocate_mock, get_errno_mock):
- sys_fallocate_mock.available = False
- sys_posix_fallocate_mock.available = False
-
- with mock.patch("logging.warning") as warning_mock, \
- mock.patch.object(utils, "_fallocate_warned_about_missing",
- False):
- utils.fallocate(321, 654)
- utils.fallocate(321, 654)
-
- sys_fallocate_mock.assert_not_called()
- sys_posix_fallocate_mock.assert_not_called()
- get_errno_mock.assert_not_called()
-
- self.assertEqual(len(warning_mock.mock_calls), 1)
-
- def test_arg_bounds(self, sys_fallocate_mock,
- sys_posix_fallocate_mock, get_errno_mock):
- sys_fallocate_mock.available = True
- sys_fallocate_mock.return_value = 0
- with self.assertRaises(ValueError):
- utils.fallocate(0, 1 << 64, 0)
- with self.assertRaises(ValueError):
- utils.fallocate(0, 0, -1)
- with self.assertRaises(ValueError):
- utils.fallocate(0, 0, 1 << 64)
- self.assertEqual([], sys_fallocate_mock.mock_calls)
- # sanity check
- utils.fallocate(0, 0, 0)
- self.assertEqual(
- [mock.call(0, utils.FALLOC_FL_KEEP_SIZE, mock.ANY, mock.ANY)],
- sys_fallocate_mock.mock_calls)
- # Go confirm the ctypes values separately; apparently == doesn't
- # work the way you'd expect with ctypes :-/
- self.assertEqual(sys_fallocate_mock.mock_calls[0][1][2].value, 0)
- self.assertEqual(sys_fallocate_mock.mock_calls[0][1][3].value, 0)
- sys_fallocate_mock.reset_mock()
-
- # a negative size will be treated as 0
- utils.fallocate(0, -1, 0)
- self.assertEqual(
- [mock.call(0, utils.FALLOC_FL_KEEP_SIZE, mock.ANY, mock.ANY)],
- sys_fallocate_mock.mock_calls)
- self.assertEqual(sys_fallocate_mock.mock_calls[0][1][2].value, 0)
- self.assertEqual(sys_fallocate_mock.mock_calls[0][1][3].value, 0)
-
-
-@patch.object(os, 'fstatvfs')
-@patch.object(utils, '_sys_fallocate', available=True, return_value=0)
-@patch.object(utils, 'FALLOCATE_RESERVE', 0)
-@patch.object(utils, 'FALLOCATE_IS_PERCENT', False)
-@patch.object(utils, '_fallocate_enabled', True)
-class TestFallocateReserve(unittest.TestCase):
- def _statvfs_result(self, f_frsize, f_bavail):
- # Only 3 values are relevant to us, so use zeros for the rest
- f_blocks = 100
- return posix.statvfs_result((0, f_frsize, f_blocks, 0, f_bavail,
- 0, 0, 0, 0, 0))
-
- def test_disabled(self, sys_fallocate_mock, fstatvfs_mock):
- utils.disable_fallocate()
- utils.fallocate(123, 456)
-
- sys_fallocate_mock.assert_not_called()
- fstatvfs_mock.assert_not_called()
-
- def test_zero_reserve(self, sys_fallocate_mock, fstatvfs_mock):
- utils.fallocate(123, 456)
-
- fstatvfs_mock.assert_not_called()
- self.assertEqual(len(sys_fallocate_mock.mock_calls), 1)
-
- def test_enough_space(self, sys_fallocate_mock, fstatvfs_mock):
- # Want 1024 bytes in reserve plus 1023 allocated, and have 2 blocks
- # of size 1024 free, so succeed
- utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
- utils.config_fallocate_value('1024')
-
- fstatvfs_mock.return_value = self._statvfs_result(1024, 2)
- utils.fallocate(88, 1023)
-
- def test_not_enough_space(self, sys_fallocate_mock, fstatvfs_mock):
- # Want 1024 bytes in reserve plus 1024 allocated, and have 2 blocks
- # of size 1024 free, so fail
- utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
- utils.config_fallocate_value('1024')
-
- fstatvfs_mock.return_value = self._statvfs_result(1024, 2)
- with self.assertRaises(OSError) as catcher:
- utils.fallocate(88, 1024)
- self.assertEqual(
- str(catcher.exception),
- '[Errno %d] FALLOCATE_RESERVE fail 1024 <= 1024'
- % errno.ENOSPC)
- sys_fallocate_mock.assert_not_called()
-
- def test_not_enough_space_large(self, sys_fallocate_mock, fstatvfs_mock):
- # Want 1024 bytes in reserve plus 1GB allocated, and have 2 blocks
- # of size 1024 free, so fail
- utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
- utils.config_fallocate_value('1024')
-
- fstatvfs_mock.return_value = self._statvfs_result(1024, 2)
- with self.assertRaises(OSError) as catcher:
- utils.fallocate(88, 1 << 30)
- self.assertEqual(
- str(catcher.exception),
- '[Errno %d] FALLOCATE_RESERVE fail %g <= 1024'
- % (errno.ENOSPC, ((2 * 1024) - (1 << 30))))
- sys_fallocate_mock.assert_not_called()
-
- def test_enough_space_small_blocks(self, sys_fallocate_mock,
- fstatvfs_mock):
- # Want 1024 bytes in reserve plus 1023 allocated, and have 4 blocks
- # of size 512 free, so succeed
- utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
- utils.config_fallocate_value('1024')
-
- fstatvfs_mock.return_value = self._statvfs_result(512, 4)
- utils.fallocate(88, 1023)
-
- def test_not_enough_space_small_blocks(self, sys_fallocate_mock,
- fstatvfs_mock):
- # Want 1024 bytes in reserve plus 1024 allocated, and have 4 blocks
- # of size 512 free, so fail
- utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
- utils.config_fallocate_value('1024')
-
- fstatvfs_mock.return_value = self._statvfs_result(512, 4)
- with self.assertRaises(OSError) as catcher:
- utils.fallocate(88, 1024)
- self.assertEqual(
- str(catcher.exception),
- '[Errno %d] FALLOCATE_RESERVE fail 1024 <= 1024'
- % errno.ENOSPC)
- sys_fallocate_mock.assert_not_called()
-
- def test_free_space_under_reserve(self, sys_fallocate_mock, fstatvfs_mock):
- # Want 2048 bytes in reserve but have only 3 blocks of size 512, so
- # allocating even 0 bytes fails
- utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
- utils.config_fallocate_value('2048')
-
- fstatvfs_mock.return_value = self._statvfs_result(512, 3)
- with self.assertRaises(OSError) as catcher:
- utils.fallocate(88, 0)
- self.assertEqual(
- str(catcher.exception),
- '[Errno %d] FALLOCATE_RESERVE fail 1536 <= 2048'
- % errno.ENOSPC)
- sys_fallocate_mock.assert_not_called()
-
- def test_all_reserved(self, sys_fallocate_mock, fstatvfs_mock):
- # Filesystem is empty, but our reserve is bigger than the
- # filesystem, so any allocation will fail
- utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
- utils.config_fallocate_value('9999999999999')
-
- fstatvfs_mock.return_value = self._statvfs_result(1024, 100)
- self.assertRaises(OSError, utils.fallocate, 88, 0)
- sys_fallocate_mock.assert_not_called()
-
- def test_enough_space_pct(self, sys_fallocate_mock, fstatvfs_mock):
- # Want 1% reserved, filesystem has 3/100 blocks of size 1024 free
- # and file size is 2047, so succeed
- utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
- utils.config_fallocate_value('1%')
-
- fstatvfs_mock.return_value = self._statvfs_result(1024, 3)
- utils.fallocate(88, 2047)
-
- def test_not_enough_space_pct(self, sys_fallocate_mock, fstatvfs_mock):
- # Want 1% reserved, filesystem has 3/100 blocks of size 1024 free
- # and file size is 2048, so fail
- utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
- utils.config_fallocate_value('1%')
-
- fstatvfs_mock.return_value = self._statvfs_result(1024, 3)
- with self.assertRaises(OSError) as catcher:
- utils.fallocate(88, 2048)
- self.assertEqual(
- str(catcher.exception),
- '[Errno %d] FALLOCATE_RESERVE fail 1 <= 1'
- % errno.ENOSPC)
- sys_fallocate_mock.assert_not_called()
-
- def test_all_space_reserved_pct(self, sys_fallocate_mock, fstatvfs_mock):
- # Filesystem is empty, but our reserve is the whole filesystem, so
- # any allocation will fail
- utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
- utils.config_fallocate_value('100%')
-
- fstatvfs_mock.return_value = self._statvfs_result(1024, 100)
- with self.assertRaises(OSError) as catcher:
- utils.fallocate(88, 0)
- self.assertEqual(
- str(catcher.exception),
- '[Errno %d] FALLOCATE_RESERVE fail 100 <= 100'
- % errno.ENOSPC)
- sys_fallocate_mock.assert_not_called()
-
-
-@patch('ctypes.get_errno')
-@patch.object(utils, '_sys_fallocate')
-class TestPunchHole(unittest.TestCase):
- def test_punch_hole(self, sys_fallocate_mock, get_errno_mock):
- sys_fallocate_mock.available = True
- sys_fallocate_mock.return_value = 0
-
- utils.punch_hole(123, 456, 789)
-
- calls = sys_fallocate_mock.mock_calls
- self.assertEqual(len(calls), 1)
- args = calls[0][1]
- self.assertEqual(len(args), 4)
- self.assertEqual(args[0], 123)
- self.assertEqual(
- args[1], utils.FALLOC_FL_PUNCH_HOLE | utils.FALLOC_FL_KEEP_SIZE)
- self.assertEqual(args[2].value, 456)
- self.assertEqual(args[3].value, 789)
-
- def test_error(self, sys_fallocate_mock, get_errno_mock):
- sys_fallocate_mock.available = True
- sys_fallocate_mock.return_value = -1
- get_errno_mock.return_value = errno.EISDIR
-
- with self.assertRaises(OSError) as cm:
- utils.punch_hole(123, 456, 789)
- self.assertEqual(cm.exception.errno, errno.EISDIR)
-
- def test_arg_bounds(self, sys_fallocate_mock, get_errno_mock):
- sys_fallocate_mock.available = True
- sys_fallocate_mock.return_value = 0
-
- with self.assertRaises(ValueError):
- utils.punch_hole(0, 1, -1)
- with self.assertRaises(ValueError):
- utils.punch_hole(0, 1 << 64, 1)
- with self.assertRaises(ValueError):
- utils.punch_hole(0, -1, 1)
- with self.assertRaises(ValueError):
- utils.punch_hole(0, 1, 0)
- with self.assertRaises(ValueError):
- utils.punch_hole(0, 1, 1 << 64)
- self.assertEqual([], sys_fallocate_mock.mock_calls)
-
- # sanity check
- utils.punch_hole(0, 0, 1)
- self.assertEqual(
- [mock.call(
- 0, utils.FALLOC_FL_PUNCH_HOLE | utils.FALLOC_FL_KEEP_SIZE,
- mock.ANY, mock.ANY)],
- sys_fallocate_mock.mock_calls)
- # Go confirm the ctypes values separately; apparently == doesn't
- # work the way you'd expect with ctypes :-/
- self.assertEqual(sys_fallocate_mock.mock_calls[0][1][2].value, 0)
- self.assertEqual(sys_fallocate_mock.mock_calls[0][1][3].value, 1)
-
- def test_no_fallocate(self, sys_fallocate_mock, get_errno_mock):
- sys_fallocate_mock.available = False
-
- with self.assertRaises(OSError) as cm:
- utils.punch_hole(123, 456, 789)
- self.assertEqual(cm.exception.errno, errno.ENOTSUP)
-
-
-class TestPunchHoleReally(unittest.TestCase):
- def setUp(self):
- if not utils._sys_fallocate.available:
- raise unittest.SkipTest("utils._sys_fallocate not available")
-
- def test_punch_a_hole(self):
- with TemporaryFile() as tf:
- tf.write(b"x" * 64 + b"y" * 64 + b"z" * 64)
- tf.flush()
-
- # knock out the first half of the "y"s
- utils.punch_hole(tf.fileno(), 64, 32)
-
- tf.seek(0)
- contents = tf.read(4096)
- self.assertEqual(
- contents,
- b"x" * 64 + b"\0" * 32 + b"y" * 32 + b"z" * 64)
-
-
-class Test_LibcWrapper(unittest.TestCase):
- def test_available_function(self):
- # This should pretty much always exist
- getpid_wrapper = utils._LibcWrapper('getpid')
- self.assertTrue(getpid_wrapper.available)
- self.assertEqual(getpid_wrapper(), os.getpid())
-
- def test_unavailable_function(self):
- # This won't exist
- no_func_wrapper = utils._LibcWrapper('diffractively_protectorship')
- self.assertFalse(no_func_wrapper.available)
- self.assertRaises(NotImplementedError, no_func_wrapper)
-
- def test_argument_plumbing(self):
- lseek_wrapper = utils._LibcWrapper('lseek')
- with TemporaryFile() as tf:
- tf.write(b"abcdefgh")
- tf.flush()
- lseek_wrapper(tf.fileno(),
- ctypes.c_uint64(3),
- # 0 is SEEK_SET
- 0)
- self.assertEqual(tf.read(100), b"defgh")
-
-
class TestWatchdog(unittest.TestCase):
def test_start_stop(self):
w = utils.Watchdog()
w._evt.send = mock.Mock(side_effect=w._evt.send)
gth = object()
+ now = time.time()
+ timeout_value = 1.0
with patch('eventlet.greenthread.getcurrent', return_value=gth),\
- patch('time.time', return_value=10.0):
+ patch('time.time', return_value=now):
            # On the first call, _next_expiration is None; it should unblock
            # a greenthread that is blocked forever
- key = w.start(1.0, Timeout)
+ key = w.start(timeout_value, Timeout)
self.assertIn(key, w._timeouts)
- self.assertEqual(w._timeouts[key], (1.0, 11.0, gth, Timeout))
+ self.assertEqual(w._timeouts[key], (
+ timeout_value, now + timeout_value, gth, Timeout, now))
w._evt.send.assert_called_once()
w.stop(key)
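
The Watchdog hunk above now expects each registered timeout to be stored as a
five-element tuple, with the start time appended. A minimal sketch of that
bookkeeping, under the assumption of a simplified class (WatchdogSketch is
hypothetical; the real Watchdog also wakes a sleeping greenthread via _evt):

import time

class WatchdogSketch(object):
    """Toy model of the timeout bookkeeping asserted above."""
    def __init__(self):
        self._timeouts = {}
        self._key = 0

    def start(self, timeout, exc, gth=None):
        now = time.time()
        self._key += 1
        # (timeout, expiration, greenthread, exception class, start time)
        self._timeouts[self._key] = (timeout, now + timeout, gth, exc, now)
        return self._key

    def stop(self, key):
        self._timeouts.pop(key, None)
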
diff --git a/test/unit/common/test_wsgi.py b/test/unit/common/test_wsgi.py
index d43f6730b..d2f13b205 100644
--- a/test/unit/common/test_wsgi.py
+++ b/test/unit/common/test_wsgi.py
@@ -43,7 +43,7 @@ from swift.common.storage_policy import POLICIES
from test import listen_zero
from test.debug_logger import debug_logger
from test.unit import (
- temptree, with_tempdir, write_fake_ring, patch_policies)
+ temptree, with_tempdir, write_fake_ring, patch_policies, ConfigAssertMixin)
from paste.deploy import loadwsgi
@@ -60,7 +60,7 @@ def _fake_rings(tmpdir):
@patch_policies
-class TestWSGI(unittest.TestCase):
+class TestWSGI(unittest.TestCase, ConfigAssertMixin):
"""Tests for swift.common.wsgi"""
def test_init_request_processor(self):
@@ -133,14 +133,40 @@ class TestWSGI(unittest.TestCase):
def test_loadapp_from_file(self, tempdir):
conf_path = os.path.join(tempdir, 'object-server.conf')
conf_body = """
+ [DEFAULT]
+ CONN_timeout = 10
+ client_timeout = 1
[app:main]
use = egg:swift#object
+ conn_timeout = 5
+ client_timeout = 2
+ CLIENT_TIMEOUT = 3
"""
contents = dedent(conf_body)
with open(conf_path, 'w') as f:
f.write(contents)
app = wsgi.loadapp(conf_path)
self.assertIsInstance(app, obj_server.ObjectController)
+        # N.B. paste config loading from a *file* is already case-sensitive,
+        # so CLIENT_TIMEOUT and client_timeout are distinct options
+ self.assertEqual(1, app.client_timeout)
+ self.assertEqual(5, app.conn_timeout)
+
+ @with_tempdir
+ def test_loadapp_from_file_with_duplicate_var(self, tempdir):
+ conf_path = os.path.join(tempdir, 'object-server.conf')
+ conf_body = """
+ [app:main]
+ use = egg:swift#object
+ client_timeout = 2
+ client_timeout = 3
+ """
+ contents = dedent(conf_body)
+ with open(conf_path, 'w') as f:
+ f.write(contents)
+ app_config = lambda: wsgi.loadapp(conf_path)
+ self.assertDuplicateOption(app_config, 'client_timeout', 3.0)
@with_tempdir
def test_loadapp_from_file_with_global_conf(self, tempdir):
@@ -204,11 +230,89 @@ class TestWSGI(unittest.TestCase):
def test_loadapp_from_string(self):
conf_body = """
+ [DEFAULT]
+ CONN_timeout = 10
+ client_timeout = 1
[app:main]
use = egg:swift#object
+ conn_timeout = 5
+ client_timeout = 2
"""
app = wsgi.loadapp(wsgi.ConfigString(conf_body))
self.assertTrue(isinstance(app, obj_server.ObjectController))
+ self.assertEqual(1, app.client_timeout)
+ self.assertEqual(5, app.conn_timeout)
+
+ @with_tempdir
+ def test_loadapp_from_dir(self, tempdir):
+ conf_files = {
+ 'pipeline': """
+ [pipeline:main]
+ pipeline = tempauth proxy-server
+ """,
+ 'tempauth': """
+ [DEFAULT]
+ swift_dir = %s
+ random_VAR = foo
+ [filter:tempauth]
+ use = egg:swift#tempauth
+ random_var = bar
+ """ % tempdir,
+ 'proxy': """
+ [DEFAULT]
+ conn_timeout = 5
+ client_timeout = 1
+ [app:proxy-server]
+ use = egg:swift#proxy
+ CONN_timeout = 10
+ client_timeout = 2
+ """,
+ }
+ _fake_rings(tempdir)
+ for filename, conf_body in conf_files.items():
+ path = os.path.join(tempdir, filename + '.conf')
+ with open(path, 'wt') as fd:
+ fd.write(dedent(conf_body))
+ app = wsgi.loadapp(tempdir)
+ # DEFAULT takes priority (!?)
+ self.assertEqual(5, app._pipeline_final_app.conn_timeout)
+ self.assertEqual(1, app._pipeline_final_app.client_timeout)
+ self.assertEqual('foo', app.app.app.app.conf['random_VAR'])
+ self.assertEqual('bar', app.app.app.app.conf['random_var'])
+
+ @with_tempdir
+ def test_loadapp_from_dir_with_duplicate_var(self, tempdir):
+ conf_files = {
+ 'pipeline': """
+ [pipeline:main]
+ pipeline = tempauth proxy-server
+ """,
+ 'tempauth': """
+ [DEFAULT]
+ swift_dir = %s
+ random_VAR = foo
+ [filter:tempauth]
+ use = egg:swift#tempauth
+ random_var = bar
+ """ % tempdir,
+ 'proxy': """
+ [app:proxy-server]
+ use = egg:swift#proxy
+ client_timeout = 2
+ CLIENT_TIMEOUT = 1
+ conn_timeout = 3
+ conn_timeout = 4
+ """,
+ }
+ _fake_rings(tempdir)
+ for filename, conf_body in conf_files.items():
+ path = os.path.join(tempdir, filename + '.conf')
+ with open(path, 'wt') as fd:
+ fd.write(dedent(conf_body))
+ app_config = lambda: wsgi.loadapp(tempdir)
+        # N.B. our paste conf.d parsing re-uses readconf,
+        # so CLIENT_TIMEOUT and client_timeout are distinct options
+ self.assertDuplicateOption(app_config, 'conn_timeout', 4.0)
@with_tempdir
def test_load_app_config(self, tempdir):
@@ -496,6 +600,7 @@ class TestWSGI(unittest.TestCase):
config = """
[DEFAULT]
client_timeout = 30
+ keepalive_timeout = 10
max_clients = 1000
swift_dir = TEMPDIR
@@ -535,6 +640,7 @@ class TestWSGI(unittest.TestCase):
self.assertTrue('custom_pool' in kwargs)
self.assertEqual(1000, kwargs['custom_pool'].size)
self.assertEqual(30, kwargs['socket_timeout'])
+ self.assertEqual(10, kwargs['keepalive'])
proto_class = kwargs['protocol']
self.assertEqual(proto_class, wsgi.SwiftHttpProtocol)
@@ -585,6 +691,7 @@ class TestWSGI(unittest.TestCase):
self.assertTrue('custom_pool' in kwargs)
self.assertEqual(10, kwargs['custom_pool'].size)
self.assertEqual(2.5, kwargs['socket_timeout'])
+ self.assertNotIn('keepalive', kwargs) # eventlet defaults to True
proto_class = kwargs['protocol']
self.assertEqual(proto_class, wsgi.SwiftHttpProxiedProtocol)
@@ -594,6 +701,7 @@ class TestWSGI(unittest.TestCase):
config = """
[DEFAULT]
swift_dir = TEMPDIR
+ keepalive_timeout = 0
[pipeline:main]
pipeline = proxy-server
@@ -623,6 +731,7 @@ class TestWSGI(unittest.TestCase):
self.assertTrue('protocol' in kwargs)
self.assertEqual('HTTP/1.0',
kwargs['protocol'].default_request_version)
+ self.assertIs(False, kwargs['keepalive'])
def test_run_server_conf_dir(self):
config_dir = {
@@ -891,6 +1000,7 @@ class TestWSGI(unittest.TestCase):
def _loadapp(uri, name=None, **kwargs):
calls['_loadapp'] += 1
+ logging.logThreads = 1 # reset to default
with mock.patch.object(wsgi, '_initrp', _initrp), \
mock.patch.object(wsgi, 'get_socket'), \
mock.patch.object(wsgi, 'drop_privileges') as _d_privs, \
@@ -911,6 +1021,7 @@ class TestWSGI(unittest.TestCase):
        # just clean_up_daemon_hygiene()
self.assertEqual([], _d_privs.mock_calls)
self.assertEqual([mock.call()], _c_hyg.mock_calls)
+ self.assertEqual(0, logging.logThreads) # fixed in our monkey_patch
@mock.patch('swift.common.wsgi.run_server')
@mock.patch('swift.common.wsgi.WorkersStrategy')
@@ -1642,6 +1753,12 @@ class TestPipelineModification(unittest.TestCase):
self.assertIs(app.app.app, app._pipeline_final_app)
self.assertIs(app.app.app, app.app._pipeline_final_app)
self.assertIs(app.app.app, app.app.app._pipeline_final_app)
+ exp_pipeline = [app, app.app, app.app.app]
+ self.assertEqual(exp_pipeline, app._pipeline)
+ self.assertEqual(exp_pipeline, app.app._pipeline)
+ self.assertEqual(exp_pipeline, app.app.app._pipeline)
+ self.assertIs(app._pipeline, app.app._pipeline)
+ self.assertIs(app._pipeline, app.app.app._pipeline)
# make sure you can turn off the pipeline modification if you want
def blow_up(*_, **__):
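
The assertDuplicateOption cases above hinge on two parsing behaviors:
literally repeated options are an error, while differently-cased names may or
may not collide depending on the loader. The stdlib configparser exposes the
same two knobs (this is an illustrative analogue, not Swift's actual loader):

import configparser

dup = "[app:main]\nclient_timeout = 2\nclient_timeout = 3\n"
try:
    configparser.ConfigParser(strict=True).read_string(dup)
except configparser.DuplicateOptionError as err:
    print(err)  # a literally duplicated option is rejected

# Case handling is controlled by optionxform: the default lowercases
# names (CLIENT_TIMEOUT would collide with client_timeout); setting it
# to str keeps them distinct, like the case-sensitive file loader above.
parser = configparser.ConfigParser(strict=True)
parser.optionxform = str
parser.read_string("[app:main]\nclient_timeout = 2\nCLIENT_TIMEOUT = 3\n")
print(parser['app:main']['client_timeout'])  # 2
print(parser['app:main']['CLIENT_TIMEOUT'])  # 3
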
diff --git a/test/unit/common/utils/__init__.py b/test/unit/common/utils/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/test/unit/common/utils/__init__.py
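
The TestFallocateReserve cases in the new test_libc.py below encode a simple
rule: allocation fails when the space that would remain after the write is at
or below the configured reserve, measured in bytes or as a percentage of the
filesystem. A sketch derived from the expected error messages in those tests
(check_reserve is a hypothetical name, not Swift's function):

import errno
import os

def check_reserve(fd, size, reserve, is_percent):
    # free space that would remain if we allocated `size` bytes now
    st = os.fstatvfs(fd)
    free = st.f_bavail * st.f_frsize - size
    if is_percent:
        # express the remainder as a percentage of the whole filesystem
        free = free * 100.0 / (st.f_blocks * st.f_frsize)
    if free <= reserve:
        raise OSError(errno.ENOSPC,
                      'FALLOCATE_RESERVE fail %g <= %g' % (free, reserve))
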
diff --git a/test/unit/common/utils/test_libc.py b/test/unit/common/utils/test_libc.py
new file mode 100644
index 000000000..5357ce34d
--- /dev/null
+++ b/test/unit/common/utils/test_libc.py
@@ -0,0 +1,599 @@
+# Copyright (c) 2010-2023 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Tests for swift.common.utils.libc"""
+
+import ctypes
+import errno
+import os
+import platform
+import posix
+import tempfile
+import unittest
+
+import mock
+
+from swift.common.utils import libc
+
+from test.debug_logger import debug_logger
+
+
+@mock.patch('ctypes.get_errno')
+@mock.patch.object(libc, '_sys_posix_fallocate')
+@mock.patch.object(libc, '_sys_fallocate')
+@mock.patch.object(libc, 'FALLOCATE_RESERVE', 0)
+class TestFallocate(unittest.TestCase):
+ def test_config_fallocate_value(self, sys_fallocate_mock,
+ sys_posix_fallocate_mock, get_errno_mock):
+ fallocate_value, is_percent = libc.config_fallocate_value('10%')
+ self.assertEqual(fallocate_value, 10)
+ self.assertTrue(is_percent)
+ fallocate_value, is_percent = libc.config_fallocate_value('10')
+ self.assertEqual(fallocate_value, 10)
+ self.assertFalse(is_percent)
+        for value in ('ab%', 'ab', '1%%', '10.0'):
+            with self.assertRaises(ValueError) as cm:
+                libc.config_fallocate_value(value)
+            self.assertEqual(str(cm.exception),
+                             'Error: %s is an invalid value for '
+                             'fallocate_reserve.' % value)
+ fallocate_value, is_percent = libc.config_fallocate_value('10.5%')
+ self.assertEqual(fallocate_value, 10.5)
+ self.assertTrue(is_percent)
+ fallocate_value, is_percent = libc.config_fallocate_value('10.000%')
+ self.assertEqual(fallocate_value, 10.000)
+ self.assertTrue(is_percent)
+
+ def test_fallocate(self, sys_fallocate_mock,
+ sys_posix_fallocate_mock, get_errno_mock):
+ sys_fallocate_mock.available = True
+ sys_fallocate_mock.return_value = 0
+
+ libc.fallocate(1234, 5000 * 2 ** 20)
+
+ # We can't use sys_fallocate_mock.assert_called_once_with because no
+ # two ctypes.c_uint64 objects are equal even if their values are
+ # equal. Yes, ctypes.c_uint64(123) != ctypes.c_uint64(123).
+ calls = sys_fallocate_mock.mock_calls
+ self.assertEqual(len(calls), 1)
+ args = calls[0][1]
+ self.assertEqual(len(args), 4)
+ self.assertEqual(args[0], 1234)
+ self.assertEqual(args[1], libc.FALLOC_FL_KEEP_SIZE)
+ self.assertEqual(args[2].value, 0)
+ self.assertEqual(args[3].value, 5000 * 2 ** 20)
+
+ sys_posix_fallocate_mock.assert_not_called()
+
+ def test_fallocate_offset(self, sys_fallocate_mock,
+ sys_posix_fallocate_mock, get_errno_mock):
+ sys_fallocate_mock.available = True
+ sys_fallocate_mock.return_value = 0
+
+ libc.fallocate(1234, 5000 * 2 ** 20, offset=3 * 2 ** 30)
+ calls = sys_fallocate_mock.mock_calls
+ self.assertEqual(len(calls), 1)
+ args = calls[0][1]
+ self.assertEqual(len(args), 4)
+ self.assertEqual(args[0], 1234)
+ self.assertEqual(args[1], libc.FALLOC_FL_KEEP_SIZE)
+ self.assertEqual(args[2].value, 3 * 2 ** 30)
+ self.assertEqual(args[3].value, 5000 * 2 ** 20)
+
+ sys_posix_fallocate_mock.assert_not_called()
+
+ def test_fallocate_fatal_error(self, sys_fallocate_mock,
+ sys_posix_fallocate_mock, get_errno_mock):
+ sys_fallocate_mock.available = True
+ sys_fallocate_mock.return_value = -1
+ get_errno_mock.return_value = errno.EIO
+
+ with self.assertRaises(OSError) as cm:
+ libc.fallocate(1234, 5000 * 2 ** 20)
+ self.assertEqual(cm.exception.errno, errno.EIO)
+
+ def test_fallocate_silent_errors(self, sys_fallocate_mock,
+ sys_posix_fallocate_mock, get_errno_mock):
+ sys_fallocate_mock.available = True
+ sys_fallocate_mock.return_value = -1
+
+ for silent_error in (0, errno.ENOSYS, errno.EOPNOTSUPP, errno.EINVAL):
+ get_errno_mock.return_value = silent_error
+ try:
+ libc.fallocate(1234, 5678)
+ except OSError:
+                self.fail("fallocate() raised an error on %d" % silent_error)
+
+ def test_posix_fallocate_fallback(self, sys_fallocate_mock,
+ sys_posix_fallocate_mock,
+ get_errno_mock):
+ sys_fallocate_mock.available = False
+ sys_fallocate_mock.side_effect = NotImplementedError
+
+ sys_posix_fallocate_mock.available = True
+ sys_posix_fallocate_mock.return_value = 0
+
+ libc.fallocate(1234, 567890)
+ sys_fallocate_mock.assert_not_called()
+
+ calls = sys_posix_fallocate_mock.mock_calls
+ self.assertEqual(len(calls), 1)
+ args = calls[0][1]
+ self.assertEqual(len(args), 3)
+ self.assertEqual(args[0], 1234)
+ self.assertEqual(args[1].value, 0)
+ self.assertEqual(args[2].value, 567890)
+
+ def test_posix_fallocate_offset(self, sys_fallocate_mock,
+ sys_posix_fallocate_mock, get_errno_mock):
+ sys_fallocate_mock.available = False
+ sys_fallocate_mock.side_effect = NotImplementedError
+
+ sys_posix_fallocate_mock.available = True
+ sys_posix_fallocate_mock.return_value = 0
+
+ libc.fallocate(1234, 5000 * 2 ** 20, offset=3 * 2 ** 30)
+ calls = sys_posix_fallocate_mock.mock_calls
+ self.assertEqual(len(calls), 1)
+ args = calls[0][1]
+ self.assertEqual(len(args), 3)
+ self.assertEqual(args[0], 1234)
+ self.assertEqual(args[1].value, 3 * 2 ** 30)
+ self.assertEqual(args[2].value, 5000 * 2 ** 20)
+
+ sys_fallocate_mock.assert_not_called()
+
+ def test_no_fallocates_available(self, sys_fallocate_mock,
+ sys_posix_fallocate_mock, get_errno_mock):
+ sys_fallocate_mock.available = False
+ sys_posix_fallocate_mock.available = False
+
+ with mock.patch("logging.warning") as warning_mock, \
+ mock.patch.object(libc, "_fallocate_warned_about_missing",
+ False):
+ libc.fallocate(321, 654)
+ libc.fallocate(321, 654)
+
+ sys_fallocate_mock.assert_not_called()
+ sys_posix_fallocate_mock.assert_not_called()
+ get_errno_mock.assert_not_called()
+
+ self.assertEqual(len(warning_mock.mock_calls), 1)
+
+ def test_arg_bounds(self, sys_fallocate_mock,
+ sys_posix_fallocate_mock, get_errno_mock):
+ sys_fallocate_mock.available = True
+ sys_fallocate_mock.return_value = 0
+ with self.assertRaises(ValueError):
+ libc.fallocate(0, 1 << 64, 0)
+ with self.assertRaises(ValueError):
+ libc.fallocate(0, 0, -1)
+ with self.assertRaises(ValueError):
+ libc.fallocate(0, 0, 1 << 64)
+ self.assertEqual([], sys_fallocate_mock.mock_calls)
+ # sanity check
+ libc.fallocate(0, 0, 0)
+ self.assertEqual(
+ [mock.call(0, libc.FALLOC_FL_KEEP_SIZE, mock.ANY, mock.ANY)],
+ sys_fallocate_mock.mock_calls)
+ # Go confirm the ctypes values separately; apparently == doesn't
+ # work the way you'd expect with ctypes :-/
+ self.assertEqual(sys_fallocate_mock.mock_calls[0][1][2].value, 0)
+ self.assertEqual(sys_fallocate_mock.mock_calls[0][1][3].value, 0)
+ sys_fallocate_mock.reset_mock()
+
+ # negative size will be adjusted as 0
+ libc.fallocate(0, -1, 0)
+ self.assertEqual(
+ [mock.call(0, libc.FALLOC_FL_KEEP_SIZE, mock.ANY, mock.ANY)],
+ sys_fallocate_mock.mock_calls)
+ self.assertEqual(sys_fallocate_mock.mock_calls[0][1][2].value, 0)
+ self.assertEqual(sys_fallocate_mock.mock_calls[0][1][3].value, 0)
+
+
+@mock.patch.object(os, 'fstatvfs')
+@mock.patch.object(libc, '_sys_fallocate', available=True, return_value=0)
+@mock.patch.object(libc, 'FALLOCATE_RESERVE', 0)
+@mock.patch.object(libc, 'FALLOCATE_IS_PERCENT', False)
+@mock.patch.object(libc, '_fallocate_enabled', True)
+class TestFallocateReserve(unittest.TestCase):
+ def _statvfs_result(self, f_frsize, f_bavail):
+ # Only 3 values are relevant to us, so use zeros for the rest
+ f_blocks = 100
+ return posix.statvfs_result((0, f_frsize, f_blocks, 0, f_bavail,
+ 0, 0, 0, 0, 0))
+
+ def test_disabled(self, sys_fallocate_mock, fstatvfs_mock):
+ libc.disable_fallocate()
+ libc.fallocate(123, 456)
+
+ sys_fallocate_mock.assert_not_called()
+ fstatvfs_mock.assert_not_called()
+
+ def test_zero_reserve(self, sys_fallocate_mock, fstatvfs_mock):
+ libc.fallocate(123, 456)
+
+ fstatvfs_mock.assert_not_called()
+ self.assertEqual(len(sys_fallocate_mock.mock_calls), 1)
+
+ def test_enough_space(self, sys_fallocate_mock, fstatvfs_mock):
+ # Want 1024 bytes in reserve plus 1023 allocated, and have 2 blocks
+ # of size 1024 free, so succeed
+ libc.FALLOCATE_RESERVE, libc.FALLOCATE_IS_PERCENT = \
+ libc.config_fallocate_value('1024')
+
+ fstatvfs_mock.return_value = self._statvfs_result(1024, 2)
+ libc.fallocate(88, 1023)
+
+ def test_not_enough_space(self, sys_fallocate_mock, fstatvfs_mock):
+ # Want 1024 bytes in reserve plus 1024 allocated, and have 2 blocks
+ # of size 1024 free, so fail
+ libc.FALLOCATE_RESERVE, libc.FALLOCATE_IS_PERCENT = \
+ libc.config_fallocate_value('1024')
+
+ fstatvfs_mock.return_value = self._statvfs_result(1024, 2)
+ with self.assertRaises(OSError) as catcher:
+ libc.fallocate(88, 1024)
+ self.assertEqual(
+ str(catcher.exception),
+ '[Errno %d] FALLOCATE_RESERVE fail 1024 <= 1024'
+ % errno.ENOSPC)
+ sys_fallocate_mock.assert_not_called()
+
+ def test_not_enough_space_large(self, sys_fallocate_mock, fstatvfs_mock):
+ # Want 1024 bytes in reserve plus 1GB allocated, and have 2 blocks
+ # of size 1024 free, so fail
+ libc.FALLOCATE_RESERVE, libc.FALLOCATE_IS_PERCENT = \
+ libc.config_fallocate_value('1024')
+
+ fstatvfs_mock.return_value = self._statvfs_result(1024, 2)
+ with self.assertRaises(OSError) as catcher:
+ libc.fallocate(88, 1 << 30)
+ self.assertEqual(
+ str(catcher.exception),
+ '[Errno %d] FALLOCATE_RESERVE fail %g <= 1024'
+ % (errno.ENOSPC, ((2 * 1024) - (1 << 30))))
+ sys_fallocate_mock.assert_not_called()
+
+ def test_enough_space_small_blocks(self, sys_fallocate_mock,
+ fstatvfs_mock):
+ # Want 1024 bytes in reserve plus 1023 allocated, and have 4 blocks
+ # of size 512 free, so succeed
+ libc.FALLOCATE_RESERVE, libc.FALLOCATE_IS_PERCENT = \
+ libc.config_fallocate_value('1024')
+
+ fstatvfs_mock.return_value = self._statvfs_result(512, 4)
+ libc.fallocate(88, 1023)
+
+ def test_not_enough_space_small_blocks(self, sys_fallocate_mock,
+ fstatvfs_mock):
+ # Want 1024 bytes in reserve plus 1024 allocated, and have 4 blocks
+ # of size 512 free, so fail
+ libc.FALLOCATE_RESERVE, libc.FALLOCATE_IS_PERCENT = \
+ libc.config_fallocate_value('1024')
+
+ fstatvfs_mock.return_value = self._statvfs_result(512, 4)
+ with self.assertRaises(OSError) as catcher:
+ libc.fallocate(88, 1024)
+ self.assertEqual(
+ str(catcher.exception),
+ '[Errno %d] FALLOCATE_RESERVE fail 1024 <= 1024'
+ % errno.ENOSPC)
+ sys_fallocate_mock.assert_not_called()
+
+ def test_free_space_under_reserve(self, sys_fallocate_mock, fstatvfs_mock):
+ # Want 2048 bytes in reserve but have only 3 blocks of size 512, so
+ # allocating even 0 bytes fails
+ libc.FALLOCATE_RESERVE, libc.FALLOCATE_IS_PERCENT = \
+ libc.config_fallocate_value('2048')
+
+ fstatvfs_mock.return_value = self._statvfs_result(512, 3)
+ with self.assertRaises(OSError) as catcher:
+ libc.fallocate(88, 0)
+ self.assertEqual(
+ str(catcher.exception),
+ '[Errno %d] FALLOCATE_RESERVE fail 1536 <= 2048'
+ % errno.ENOSPC)
+ sys_fallocate_mock.assert_not_called()
+
+ def test_all_reserved(self, sys_fallocate_mock, fstatvfs_mock):
+ # Filesystem is empty, but our reserve is bigger than the
+ # filesystem, so any allocation will fail
+ libc.FALLOCATE_RESERVE, libc.FALLOCATE_IS_PERCENT = \
+ libc.config_fallocate_value('9999999999999')
+
+ fstatvfs_mock.return_value = self._statvfs_result(1024, 100)
+ self.assertRaises(OSError, libc.fallocate, 88, 0)
+ sys_fallocate_mock.assert_not_called()
+
+ def test_enough_space_pct(self, sys_fallocate_mock, fstatvfs_mock):
+ # Want 1% reserved, filesystem has 3/100 blocks of size 1024 free
+ # and file size is 2047, so succeed
+ libc.FALLOCATE_RESERVE, libc.FALLOCATE_IS_PERCENT = \
+ libc.config_fallocate_value('1%')
+
+ fstatvfs_mock.return_value = self._statvfs_result(1024, 3)
+ libc.fallocate(88, 2047)
+
+ def test_not_enough_space_pct(self, sys_fallocate_mock, fstatvfs_mock):
+ # Want 1% reserved, filesystem has 3/100 blocks of size 1024 free
+ # and file size is 2048, so fail
+ libc.FALLOCATE_RESERVE, libc.FALLOCATE_IS_PERCENT = \
+ libc.config_fallocate_value('1%')
+
+ fstatvfs_mock.return_value = self._statvfs_result(1024, 3)
+ with self.assertRaises(OSError) as catcher:
+ libc.fallocate(88, 2048)
+ self.assertEqual(
+ str(catcher.exception),
+ '[Errno %d] FALLOCATE_RESERVE fail 1 <= 1'
+ % errno.ENOSPC)
+ sys_fallocate_mock.assert_not_called()
+
+ def test_all_space_reserved_pct(self, sys_fallocate_mock, fstatvfs_mock):
+ # Filesystem is empty, but our reserve is the whole filesystem, so
+ # any allocation will fail
+ libc.FALLOCATE_RESERVE, libc.FALLOCATE_IS_PERCENT = \
+ libc.config_fallocate_value('100%')
+
+ fstatvfs_mock.return_value = self._statvfs_result(1024, 100)
+ with self.assertRaises(OSError) as catcher:
+ libc.fallocate(88, 0)
+ self.assertEqual(
+ str(catcher.exception),
+ '[Errno %d] FALLOCATE_RESERVE fail 100 <= 100'
+ % errno.ENOSPC)
+ sys_fallocate_mock.assert_not_called()
+
+
+@mock.patch('ctypes.get_errno')
+@mock.patch.object(libc, '_sys_fallocate')
+class TestPunchHole(unittest.TestCase):
+ def test_punch_hole(self, sys_fallocate_mock, get_errno_mock):
+ sys_fallocate_mock.available = True
+ sys_fallocate_mock.return_value = 0
+
+ libc.punch_hole(123, 456, 789)
+
+ calls = sys_fallocate_mock.mock_calls
+ self.assertEqual(len(calls), 1)
+ args = calls[0][1]
+ self.assertEqual(len(args), 4)
+ self.assertEqual(args[0], 123)
+ self.assertEqual(
+ args[1], libc.FALLOC_FL_PUNCH_HOLE | libc.FALLOC_FL_KEEP_SIZE)
+ self.assertEqual(args[2].value, 456)
+ self.assertEqual(args[3].value, 789)
+
+ def test_error(self, sys_fallocate_mock, get_errno_mock):
+ sys_fallocate_mock.available = True
+ sys_fallocate_mock.return_value = -1
+ get_errno_mock.return_value = errno.EISDIR
+
+ with self.assertRaises(OSError) as cm:
+ libc.punch_hole(123, 456, 789)
+ self.assertEqual(cm.exception.errno, errno.EISDIR)
+
+ def test_arg_bounds(self, sys_fallocate_mock, get_errno_mock):
+ sys_fallocate_mock.available = True
+ sys_fallocate_mock.return_value = 0
+
+ with self.assertRaises(ValueError):
+ libc.punch_hole(0, 1, -1)
+ with self.assertRaises(ValueError):
+ libc.punch_hole(0, 1 << 64, 1)
+ with self.assertRaises(ValueError):
+ libc.punch_hole(0, -1, 1)
+ with self.assertRaises(ValueError):
+ libc.punch_hole(0, 1, 0)
+ with self.assertRaises(ValueError):
+ libc.punch_hole(0, 1, 1 << 64)
+ self.assertEqual([], sys_fallocate_mock.mock_calls)
+
+ # sanity check
+ libc.punch_hole(0, 0, 1)
+ self.assertEqual(
+ [mock.call(
+ 0, libc.FALLOC_FL_PUNCH_HOLE | libc.FALLOC_FL_KEEP_SIZE,
+ mock.ANY, mock.ANY)],
+ sys_fallocate_mock.mock_calls)
+ # Go confirm the ctypes values separately; apparently == doesn't
+ # work the way you'd expect with ctypes :-/
+ self.assertEqual(sys_fallocate_mock.mock_calls[0][1][2].value, 0)
+ self.assertEqual(sys_fallocate_mock.mock_calls[0][1][3].value, 1)
+
+ def test_no_fallocate(self, sys_fallocate_mock, get_errno_mock):
+ sys_fallocate_mock.available = False
+
+ with self.assertRaises(OSError) as cm:
+ libc.punch_hole(123, 456, 789)
+ self.assertEqual(cm.exception.errno, errno.ENOTSUP)
+
+
+class TestPunchHoleReally(unittest.TestCase):
+ def setUp(self):
+ if not libc._sys_fallocate.available:
+ raise unittest.SkipTest("libc._sys_fallocate not available")
+
+ def test_punch_a_hole(self):
+ with tempfile.TemporaryFile() as tf:
+ tf.write(b"x" * 64 + b"y" * 64 + b"z" * 64)
+ tf.flush()
+
+ # knock out the first half of the "y"s
+ libc.punch_hole(tf.fileno(), 64, 32)
+
+ tf.seek(0)
+ contents = tf.read(4096)
+ self.assertEqual(
+ contents,
+ b"x" * 64 + b"\0" * 32 + b"y" * 32 + b"z" * 64)
+
+
+class Test_LibcWrapper(unittest.TestCase):
+ def test_available_function(self):
+ # This should pretty much always exist
+ getpid_wrapper = libc._LibcWrapper('getpid')
+ self.assertTrue(getpid_wrapper.available)
+ self.assertEqual(getpid_wrapper(), os.getpid())
+
+ def test_unavailable_function(self):
+ # This won't exist
+ no_func_wrapper = libc._LibcWrapper('diffractively_protectorship')
+ self.assertFalse(no_func_wrapper.available)
+ self.assertRaises(NotImplementedError, no_func_wrapper)
+
+ def test_argument_plumbing(self):
+ lseek_wrapper = libc._LibcWrapper('lseek')
+ with tempfile.TemporaryFile() as tf:
+ tf.write(b"abcdefgh")
+ tf.flush()
+ lseek_wrapper(tf.fileno(),
+ ctypes.c_uint64(3),
+ # 0 is SEEK_SET
+ 0)
+ self.assertEqual(tf.read(100), b"defgh")
+
+
+class TestModifyPriority(unittest.TestCase):
+ def test_modify_priority(self):
+ pid = os.getpid()
+ logger = debug_logger()
+ called = {}
+
+ def _fake_setpriority(*args):
+ called['setpriority'] = args
+
+ def _fake_syscall(*args):
+ called['syscall'] = args
+
+        # Check whether the current architecture supports changing priority
+ try:
+ libc.NR_ioprio_set()
+ except OSError as e:
+ raise unittest.SkipTest(e)
+
+ with mock.patch('swift.common.utils.libc._libc_setpriority',
+ _fake_setpriority), \
+ mock.patch('swift.common.utils.libc._posix_syscall',
+ _fake_syscall):
+ called = {}
+ # not set / default
+ libc.modify_priority({}, logger)
+ self.assertEqual(called, {})
+ called = {}
+ # just nice
+ libc.modify_priority({'nice_priority': '1'}, logger)
+ self.assertEqual(called, {'setpriority': (0, pid, 1)})
+ called = {}
+ # just ionice class uses default priority 0
+ libc.modify_priority({'ionice_class': 'IOPRIO_CLASS_RT'}, logger)
+ architecture = os.uname()[4]
+ arch_bits = platform.architecture()[0]
+ if architecture == 'x86_64' and arch_bits == '64bit':
+ self.assertEqual(called, {'syscall': (251, 1, pid, 1 << 13)})
+ elif architecture == 'aarch64' and arch_bits == '64bit':
+ self.assertEqual(called, {'syscall': (30, 1, pid, 1 << 13)})
+ else:
+ self.fail("Unexpected call: %r" % called)
+ called = {}
+ # just ionice priority is ignored
+ libc.modify_priority({'ionice_priority': '4'}, logger)
+ self.assertEqual(called, {})
+ called = {}
+ # bad ionice class
+ libc.modify_priority({'ionice_class': 'class_foo'}, logger)
+ self.assertEqual(called, {})
+ called = {}
+ # ionice class & priority
+ libc.modify_priority({
+ 'ionice_class': 'IOPRIO_CLASS_BE',
+ 'ionice_priority': '4',
+ }, logger)
+ if architecture == 'x86_64' and arch_bits == '64bit':
+ self.assertEqual(called, {
+ 'syscall': (251, 1, pid, 2 << 13 | 4)
+ })
+ elif architecture == 'aarch64' and arch_bits == '64bit':
+ self.assertEqual(called, {
+ 'syscall': (30, 1, pid, 2 << 13 | 4)
+ })
+ else:
+ self.fail("Unexpected call: %r" % called)
+ called = {}
+ # all
+ libc.modify_priority({
+ 'nice_priority': '-15',
+ 'ionice_class': 'IOPRIO_CLASS_IDLE',
+ 'ionice_priority': '6',
+ }, logger)
+ if architecture == 'x86_64' and arch_bits == '64bit':
+ self.assertEqual(called, {
+ 'setpriority': (0, pid, -15),
+ 'syscall': (251, 1, pid, 3 << 13 | 6),
+ })
+ elif architecture == 'aarch64' and arch_bits == '64bit':
+ self.assertEqual(called, {
+ 'setpriority': (0, pid, -15),
+ 'syscall': (30, 1, pid, 3 << 13 | 6),
+ })
+ else:
+ self.fail("Unexpected call: %r" % called)
+
+ def test__NR_ioprio_set(self):
+ with mock.patch('os.uname', return_value=('', '', '', '', 'x86_64')), \
+ mock.patch('platform.architecture',
+ return_value=('64bit', '')):
+ self.assertEqual(251, libc.NR_ioprio_set())
+
+ with mock.patch('os.uname', return_value=('', '', '', '', 'x86_64')), \
+ mock.patch('platform.architecture',
+ return_value=('32bit', '')):
+ self.assertRaises(OSError, libc.NR_ioprio_set)
+
+ with mock.patch('os.uname',
+ return_value=('', '', '', '', 'aarch64')), \
+ mock.patch('platform.architecture',
+ return_value=('64bit', '')):
+ self.assertEqual(30, libc.NR_ioprio_set())
+
+ with mock.patch('os.uname',
+ return_value=('', '', '', '', 'aarch64')), \
+ mock.patch('platform.architecture',
+ return_value=('32bit', '')):
+ self.assertRaises(OSError, libc.NR_ioprio_set)
+
+ with mock.patch('os.uname', return_value=('', '', '', '', 'alpha')), \
+ mock.patch('platform.architecture',
+ return_value=('64bit', '')):
+ self.assertRaises(OSError, libc.NR_ioprio_set)
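
For reference, the magic numbers in TestModifyPriority above follow the Linux
ioprio encoding: the scheduling class sits above a 13-bit priority field, and
ioprio_set is syscall 251 on x86_64 and 30 on aarch64. A short sketch of the
packing (the names here are illustrative, not Swift's):

IOPRIO_CLASS_SHIFT = 13
IOPRIO_WHO_PROCESS = 1  # the `which` argument asserted as 1 above
IOPRIO_CLASSES = {'IOPRIO_CLASS_RT': 1,
                  'IOPRIO_CLASS_BE': 2,
                  'IOPRIO_CLASS_IDLE': 3}

def ioprio_value(io_class, priority=0):
    # pack the class above the 13-bit priority field
    return (IOPRIO_CLASSES[io_class] << IOPRIO_CLASS_SHIFT) | priority

assert ioprio_value('IOPRIO_CLASS_BE', 4) == 2 << 13 | 4
assert ioprio_value('IOPRIO_CLASS_IDLE', 6) == 3 << 13 | 6
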
diff --git a/test/unit/common/utils/test_timestamp.py b/test/unit/common/utils/test_timestamp.py
new file mode 100644
index 000000000..23f2535e4
--- /dev/null
+++ b/test/unit/common/utils/test_timestamp.py
@@ -0,0 +1,882 @@
+# Copyright (c) 2010-2023 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Tests for swift.common.utils.timestamp"""
+import random
+import time
+import unittest
+
+import mock
+
+from swift.common.utils import timestamp
+
+
+class TestTimestamp(unittest.TestCase):
+ """Tests for swift.common.utils.timestamp.Timestamp"""
+
+ def test_invalid_input(self):
+ with self.assertRaises(ValueError):
+ timestamp.Timestamp(time.time(), offset=-1)
+ with self.assertRaises(ValueError):
+ timestamp.Timestamp('123.456_78_90')
+
+ def test_invalid_string_conversion(self):
+ t = timestamp.Timestamp.now()
+ self.assertRaises(TypeError, str, t)
+
+ def test_offset_limit(self):
+ t = 1417462430.78693
+        # can't have an offset above MAX_OFFSET
+ with self.assertRaises(ValueError):
+ timestamp.Timestamp(t, offset=timestamp.MAX_OFFSET + 1)
+ # exactly max offset is fine
+ ts = timestamp.Timestamp(t, offset=timestamp.MAX_OFFSET)
+ self.assertEqual(ts.internal, '1417462430.78693_ffffffffffffffff')
+ # but you can't offset it further
+ with self.assertRaises(ValueError):
+ timestamp.Timestamp(ts.internal, offset=1)
+ # unless you start below it
+ ts = timestamp.Timestamp(t, offset=timestamp.MAX_OFFSET - 1)
+ self.assertEqual(timestamp.Timestamp(ts.internal, offset=1),
+ '1417462430.78693_ffffffffffffffff')
+
+ def test_normal_format_no_offset(self):
+ expected = '1402436408.91203'
+ test_values = (
+ '1402436408.91203',
+ '1402436408.91203_00000000',
+ '1402436408.912030000',
+ '1402436408.912030000_0000000000000',
+ '000001402436408.912030000',
+ '000001402436408.912030000_0000000000',
+ 1402436408.91203,
+ 1402436408.912029,
+ 1402436408.9120300000000000,
+ 1402436408.91202999999999999,
+ timestamp.Timestamp(1402436408.91203),
+ timestamp.Timestamp(1402436408.91203, offset=0),
+ timestamp.Timestamp(1402436408.912029),
+ timestamp.Timestamp(1402436408.912029, offset=0),
+ timestamp.Timestamp('1402436408.91203'),
+ timestamp.Timestamp('1402436408.91203', offset=0),
+ timestamp.Timestamp('1402436408.91203_00000000'),
+ timestamp.Timestamp('1402436408.91203_00000000', offset=0),
+ )
+ for value in test_values:
+ ts = timestamp.Timestamp(value)
+ self.assertEqual(ts.normal, expected)
+ # timestamp instance can also compare to string or float
+ self.assertEqual(ts, expected)
+ self.assertEqual(ts, float(expected))
+ self.assertEqual(ts, timestamp.normalize_timestamp(expected))
+
+ def test_isoformat(self):
+ expected = '2014-06-10T22:47:32.054580'
+ test_values = (
+ '1402440452.05458',
+ '1402440452.054579',
+ '1402440452.05458_00000000',
+ '1402440452.054579_00000000',
+ '1402440452.054580000',
+ '1402440452.054579999',
+ '1402440452.054580000_0000000000000',
+ '1402440452.054579999_0000ff00',
+ '000001402440452.054580000',
+ '000001402440452.0545799',
+ '000001402440452.054580000_0000000000',
+ '000001402440452.054579999999_00000fffff',
+ 1402440452.05458,
+ 1402440452.054579,
+ 1402440452.0545800000000000,
+ 1402440452.054579999,
+ timestamp.Timestamp(1402440452.05458),
+ timestamp.Timestamp(1402440452.0545799),
+ timestamp.Timestamp(1402440452.05458, offset=0),
+ timestamp.Timestamp(1402440452.05457999999, offset=0),
+ timestamp.Timestamp(1402440452.05458, offset=100),
+ timestamp.Timestamp(1402440452.054579, offset=100),
+ timestamp.Timestamp('1402440452.05458'),
+ timestamp.Timestamp('1402440452.054579999'),
+ timestamp.Timestamp('1402440452.05458', offset=0),
+ timestamp.Timestamp('1402440452.054579', offset=0),
+ timestamp.Timestamp('1402440452.05458', offset=300),
+ timestamp.Timestamp('1402440452.05457999', offset=300),
+ timestamp.Timestamp('1402440452.05458_00000000'),
+ timestamp.Timestamp('1402440452.05457999_00000000'),
+ timestamp.Timestamp('1402440452.05458_00000000', offset=0),
+ timestamp.Timestamp('1402440452.05457999_00000aaa', offset=0),
+ timestamp.Timestamp('1402440452.05458_00000000', offset=400),
+ timestamp.Timestamp('1402440452.054579_0a', offset=400),
+ )
+ for value in test_values:
+ self.assertEqual(timestamp.Timestamp(value).isoformat, expected)
+ expected = '1970-01-01T00:00:00.000000'
+ test_values = (
+ '0',
+ '0000000000.00000',
+ '0000000000.00000_ffffffffffff',
+ 0,
+ 0.0,
+ )
+ for value in test_values:
+ self.assertEqual(timestamp.Timestamp(value).isoformat, expected)
+
+ def test_from_isoformat(self):
+ ts = timestamp.Timestamp.from_isoformat('2014-06-10T22:47:32.054580')
+ self.assertIsInstance(ts, timestamp.Timestamp)
+ self.assertEqual(1402440452.05458, float(ts))
+ self.assertEqual('2014-06-10T22:47:32.054580', ts.isoformat)
+
+ ts = timestamp.Timestamp.from_isoformat('1970-01-01T00:00:00.000000')
+ self.assertIsInstance(ts, timestamp.Timestamp)
+ self.assertEqual(0.0, float(ts))
+ self.assertEqual('1970-01-01T00:00:00.000000', ts.isoformat)
+
+ ts = timestamp.Timestamp(1402440452.05458)
+ self.assertIsInstance(ts, timestamp.Timestamp)
+ self.assertEqual(ts, timestamp.Timestamp.from_isoformat(ts.isoformat))
+
+ def test_ceil(self):
+ self.assertEqual(0.0, timestamp.Timestamp(0).ceil())
+ self.assertEqual(1.0, timestamp.Timestamp(0.00001).ceil())
+ self.assertEqual(1.0, timestamp.Timestamp(0.000001).ceil())
+ self.assertEqual(12345678.0, timestamp.Timestamp(12345678.0).ceil())
+ self.assertEqual(12345679.0,
+ timestamp.Timestamp(12345678.000001).ceil())
+
+ def test_not_equal(self):
+ ts = '1402436408.91203_0000000000000001'
+ test_values = (
+ timestamp.Timestamp('1402436408.91203_0000000000000002'),
+ timestamp.Timestamp('1402436408.91203'),
+ timestamp.Timestamp(1402436408.91203),
+ timestamp.Timestamp(1402436408.91204),
+ timestamp.Timestamp(1402436408.91203, offset=0),
+ timestamp.Timestamp(1402436408.91203, offset=2),
+ )
+ for value in test_values:
+ self.assertTrue(value != ts)
+
+ self.assertIs(True, timestamp.Timestamp(ts) == ts) # sanity
+ self.assertIs(False,
+ timestamp.Timestamp(ts) != timestamp.Timestamp(ts))
+ self.assertIs(False, timestamp.Timestamp(ts) != ts)
+ self.assertIs(False, timestamp.Timestamp(ts) is None)
+ self.assertIs(True, timestamp.Timestamp(ts) is not None)
+
+ def test_no_force_internal_no_offset(self):
+ """Test that internal is the same as normal with no offset"""
+ with mock.patch('swift.common.utils.timestamp.FORCE_INTERNAL',
+ new=False):
+ self.assertEqual(timestamp.Timestamp(0).internal,
+ '0000000000.00000')
+ self.assertEqual(timestamp.Timestamp(1402437380.58186).internal,
+ '1402437380.58186')
+ self.assertEqual(timestamp.Timestamp(1402437380.581859).internal,
+ '1402437380.58186')
+ self.assertEqual(timestamp.Timestamp(0).internal,
+ timestamp.normalize_timestamp(0))
+
+ def test_no_force_internal_with_offset(self):
+ """Test that internal always includes the offset if significant"""
+ with mock.patch('swift.common.utils.timestamp.FORCE_INTERNAL',
+ new=False):
+ self.assertEqual(timestamp.Timestamp(0, offset=1).internal,
+ '0000000000.00000_0000000000000001')
+ self.assertEqual(
+ timestamp.Timestamp(1402437380.58186, offset=16).internal,
+ '1402437380.58186_0000000000000010')
+ self.assertEqual(
+ timestamp.Timestamp(1402437380.581859, offset=240).internal,
+ '1402437380.58186_00000000000000f0')
+ self.assertEqual(
+ timestamp.Timestamp('1402437380.581859_00000001',
+ offset=240).internal,
+ '1402437380.58186_00000000000000f1')
+
+ def test_force_internal(self):
+ """Test that internal always includes the offset if forced"""
+ with mock.patch('swift.common.utils.timestamp.FORCE_INTERNAL',
+ new=True):
+ self.assertEqual(timestamp.Timestamp(0).internal,
+ '0000000000.00000_0000000000000000')
+ self.assertEqual(timestamp.Timestamp(1402437380.58186).internal,
+ '1402437380.58186_0000000000000000')
+ self.assertEqual(timestamp.Timestamp(1402437380.581859).internal,
+ '1402437380.58186_0000000000000000')
+ self.assertEqual(timestamp.Timestamp(0, offset=1).internal,
+ '0000000000.00000_0000000000000001')
+ self.assertEqual(
+ timestamp.Timestamp(1402437380.58186, offset=16).internal,
+ '1402437380.58186_0000000000000010')
+ self.assertEqual(
+ timestamp.Timestamp(1402437380.581859, offset=16).internal,
+ '1402437380.58186_0000000000000010')
+
+ def test_internal_format_no_offset(self):
+ expected = '1402436408.91203_0000000000000000'
+ test_values = (
+ '1402436408.91203',
+ '1402436408.91203_00000000',
+ '1402436408.912030000',
+ '1402436408.912030000_0000000000000',
+ '000001402436408.912030000',
+ '000001402436408.912030000_0000000000',
+ 1402436408.91203,
+ 1402436408.9120300000000000,
+ 1402436408.912029,
+ 1402436408.912029999999999999,
+ timestamp.Timestamp(1402436408.91203),
+ timestamp.Timestamp(1402436408.91203, offset=0),
+ timestamp.Timestamp(1402436408.912029),
+ timestamp.Timestamp(1402436408.91202999999999999, offset=0),
+ timestamp.Timestamp('1402436408.91203'),
+ timestamp.Timestamp('1402436408.91203', offset=0),
+ timestamp.Timestamp('1402436408.912029'),
+ timestamp.Timestamp('1402436408.912029', offset=0),
+ timestamp.Timestamp('1402436408.912029999999999'),
+ timestamp.Timestamp('1402436408.912029999999999', offset=0),
+ )
+ for value in test_values:
+ # timestamp instance is always equivalent
+ self.assertEqual(timestamp.Timestamp(value), expected)
+ if timestamp.FORCE_INTERNAL:
+                # the FORCE_INTERNAL flag makes the internal format always
+                # include the offset portion of the timestamp even when it's
+                # not significant, which would be bad during upgrades
+ self.assertEqual(timestamp.Timestamp(value).internal, expected)
+ else:
+ # unless we FORCE_INTERNAL, when there's no offset the
+ # internal format is equivalent to the normalized format
+ self.assertEqual(timestamp.Timestamp(value).internal,
+ '1402436408.91203')
+
+ def test_internal_format_with_offset(self):
+ expected = '1402436408.91203_00000000000000f0'
+ test_values = (
+ '1402436408.91203_000000f0',
+ u'1402436408.91203_000000f0',
+ b'1402436408.91203_000000f0',
+ '1402436408.912030000_0000000000f0',
+ '1402436408.912029_000000f0',
+ '1402436408.91202999999_0000000000f0',
+ '000001402436408.912030000_000000000f0',
+ '000001402436408.9120299999_000000000f0',
+ timestamp.Timestamp(1402436408.91203, offset=240),
+ timestamp.Timestamp(1402436408.912029, offset=240),
+ timestamp.Timestamp('1402436408.91203', offset=240),
+ timestamp.Timestamp('1402436408.91203_00000000', offset=240),
+ timestamp.Timestamp('1402436408.91203_0000000f', offset=225),
+ timestamp.Timestamp('1402436408.9120299999', offset=240),
+ timestamp.Timestamp('1402436408.9120299999_00000000', offset=240),
+ timestamp.Timestamp('1402436408.9120299999_00000010', offset=224),
+ )
+ for value in test_values:
+ ts = timestamp.Timestamp(value)
+ self.assertEqual(ts.internal, expected)
+ # can compare with offset if the string is internalized
+ self.assertEqual(ts, expected)
+ # if comparison value only includes the normalized portion and the
+ # timestamp includes an offset, it is considered greater
+ normal = timestamp.Timestamp(expected).normal
+ self.assertTrue(ts > normal,
+ '%r is not bigger than %r given %r' % (
+ ts, normal, value))
+ self.assertTrue(ts > float(normal),
+ '%r is not bigger than %f given %r' % (
+ ts, float(normal), value))
+
+ def test_short_format_with_offset(self):
+ expected = '1402436408.91203_f0'
+ ts = timestamp.Timestamp(1402436408.91203, 0xf0)
+ self.assertEqual(expected, ts.short)
+
+ expected = '1402436408.91203'
+ ts = timestamp.Timestamp(1402436408.91203)
+ self.assertEqual(expected, ts.short)
+
+ def test_raw(self):
+ expected = 140243640891203
+ ts = timestamp.Timestamp(1402436408.91203)
+ self.assertEqual(expected, ts.raw)
+
+ # 'raw' does not include offset
+ ts = timestamp.Timestamp(1402436408.91203, 0xf0)
+ self.assertEqual(expected, ts.raw)
+
+ def test_delta(self):
+        def _assertWithinBounds(expected, ts):
+            # NB: the parameter is named ts so it doesn't shadow the
+            # imported ``timestamp`` module
+            tolerance = 0.00001
+            minimum = expected - tolerance
+            maximum = expected + tolerance
+            self.assertTrue(float(ts) > minimum)
+            self.assertTrue(float(ts) < maximum)
+
+ ts = timestamp.Timestamp(1402436408.91203, delta=100)
+ _assertWithinBounds(1402436408.91303, ts)
+ self.assertEqual(140243640891303, ts.raw)
+
+ ts = timestamp.Timestamp(1402436408.91203, delta=-100)
+ _assertWithinBounds(1402436408.91103, ts)
+ self.assertEqual(140243640891103, ts.raw)
+
+ ts = timestamp.Timestamp(1402436408.91203, delta=0)
+ _assertWithinBounds(1402436408.91203, ts)
+ self.assertEqual(140243640891203, ts.raw)
+
+ # delta is independent of offset
+ ts = timestamp.Timestamp(1402436408.91203, offset=42, delta=100)
+ self.assertEqual(140243640891303, ts.raw)
+ self.assertEqual(42, ts.offset)
+
+ # cannot go negative
+ self.assertRaises(ValueError, timestamp.Timestamp, 1402436408.91203,
+ delta=-140243640891203)
+
+ def test_int(self):
+ expected = 1402437965
+ test_values = (
+ '1402437965.91203',
+ '1402437965.91203_00000000',
+ '1402437965.912030000',
+ '1402437965.912030000_0000000000000',
+ '000001402437965.912030000',
+ '000001402437965.912030000_0000000000',
+ 1402437965.91203,
+ 1402437965.9120300000000000,
+ 1402437965.912029,
+ 1402437965.912029999999999999,
+ timestamp.Timestamp(1402437965.91203),
+ timestamp.Timestamp(1402437965.91203, offset=0),
+ timestamp.Timestamp(1402437965.91203, offset=500),
+ timestamp.Timestamp(1402437965.912029),
+ timestamp.Timestamp(1402437965.91202999999999999, offset=0),
+ timestamp.Timestamp(1402437965.91202999999999999, offset=300),
+ timestamp.Timestamp('1402437965.91203'),
+ timestamp.Timestamp('1402437965.91203', offset=0),
+ timestamp.Timestamp('1402437965.91203', offset=400),
+ timestamp.Timestamp('1402437965.912029'),
+ timestamp.Timestamp('1402437965.912029', offset=0),
+ timestamp.Timestamp('1402437965.912029', offset=200),
+ timestamp.Timestamp('1402437965.912029999999999'),
+ timestamp.Timestamp('1402437965.912029999999999', offset=0),
+ timestamp.Timestamp('1402437965.912029999999999', offset=100),
+ )
+ for value in test_values:
+ ts = timestamp.Timestamp(value)
+ self.assertEqual(int(ts), expected)
+ self.assertTrue(ts > expected)
+
+ def test_float(self):
+ expected = 1402438115.91203
+ test_values = (
+ '1402438115.91203',
+ '1402438115.91203_00000000',
+ '1402438115.912030000',
+ '1402438115.912030000_0000000000000',
+ '000001402438115.912030000',
+ '000001402438115.912030000_0000000000',
+ 1402438115.91203,
+ 1402438115.9120300000000000,
+ 1402438115.912029,
+ 1402438115.912029999999999999,
+ timestamp.Timestamp(1402438115.91203),
+ timestamp.Timestamp(1402438115.91203, offset=0),
+ timestamp.Timestamp(1402438115.91203, offset=500),
+ timestamp.Timestamp(1402438115.912029),
+ timestamp.Timestamp(1402438115.91202999999999999, offset=0),
+ timestamp.Timestamp(1402438115.91202999999999999, offset=300),
+ timestamp.Timestamp('1402438115.91203'),
+ timestamp.Timestamp('1402438115.91203', offset=0),
+ timestamp.Timestamp('1402438115.91203', offset=400),
+ timestamp.Timestamp('1402438115.912029'),
+ timestamp.Timestamp('1402438115.912029', offset=0),
+ timestamp.Timestamp('1402438115.912029', offset=200),
+ timestamp.Timestamp('1402438115.912029999999999'),
+ timestamp.Timestamp('1402438115.912029999999999', offset=0),
+ timestamp.Timestamp('1402438115.912029999999999', offset=100),
+ )
+ tolerance = 0.00001
+ minimum = expected - tolerance
+ maximum = expected + tolerance
+ for value in test_values:
+ ts = timestamp.Timestamp(value)
+ self.assertTrue(float(ts) > minimum,
+ '%f is not bigger than %f given %r' % (
+ ts, minimum, value))
+ self.assertTrue(float(ts) < maximum,
+ '%f is not smaller than %f given %r' % (
+ ts, maximum, value))
+ # direct comparison of timestamp works too
+ self.assertTrue(ts > minimum,
+ '%s is not bigger than %f given %r' % (
+ ts.normal, minimum, value))
+ self.assertTrue(ts < maximum,
+ '%s is not smaller than %f given %r' % (
+ ts.normal, maximum, value))
+ # ... even against strings
+ self.assertTrue(ts > '%f' % minimum,
+ '%s is not bigger than %s given %r' % (
+ ts.normal, minimum, value))
+ self.assertTrue(ts < '%f' % maximum,
+ '%s is not smaller than %s given %r' % (
+ ts.normal, maximum, value))
+
+ def test_false(self):
+ self.assertFalse(timestamp.Timestamp(0))
+ self.assertFalse(timestamp.Timestamp(0, offset=0))
+ self.assertFalse(timestamp.Timestamp('0'))
+ self.assertFalse(timestamp.Timestamp('0', offset=0))
+ self.assertFalse(timestamp.Timestamp(0.0))
+ self.assertFalse(timestamp.Timestamp(0.0, offset=0))
+ self.assertFalse(timestamp.Timestamp('0.0'))
+ self.assertFalse(timestamp.Timestamp('0.0', offset=0))
+ self.assertFalse(timestamp.Timestamp(00000000.00000000))
+ self.assertFalse(timestamp.Timestamp(00000000.00000000, offset=0))
+ self.assertFalse(timestamp.Timestamp('00000000.00000000'))
+ self.assertFalse(timestamp.Timestamp('00000000.00000000', offset=0))
+
+ def test_true(self):
+ self.assertTrue(timestamp.Timestamp(1))
+ self.assertTrue(timestamp.Timestamp(1, offset=1))
+ self.assertTrue(timestamp.Timestamp(0, offset=1))
+ self.assertTrue(timestamp.Timestamp('1'))
+ self.assertTrue(timestamp.Timestamp('1', offset=1))
+ self.assertTrue(timestamp.Timestamp('0', offset=1))
+ self.assertTrue(timestamp.Timestamp(1.1))
+ self.assertTrue(timestamp.Timestamp(1.1, offset=1))
+ self.assertTrue(timestamp.Timestamp(0.0, offset=1))
+ self.assertTrue(timestamp.Timestamp('1.1'))
+ self.assertTrue(timestamp.Timestamp('1.1', offset=1))
+ self.assertTrue(timestamp.Timestamp('0.0', offset=1))
+ self.assertTrue(timestamp.Timestamp(11111111.11111111))
+ self.assertTrue(timestamp.Timestamp(11111111.11111111, offset=1))
+ self.assertTrue(timestamp.Timestamp(00000000.00000000, offset=1))
+ self.assertTrue(timestamp.Timestamp('11111111.11111111'))
+ self.assertTrue(timestamp.Timestamp('11111111.11111111', offset=1))
+ self.assertTrue(timestamp.Timestamp('00000000.00000000', offset=1))
+
+ def test_greater_no_offset(self):
+ now = time.time()
+ older = now - 1
+ ts = timestamp.Timestamp(now)
+ test_values = (
+ 0, '0', 0.0, '0.0', '0000.0000', '000.000_000',
+ 1, '1', 1.1, '1.1', '1111.1111', '111.111_111',
+ 1402443112.213252, '1402443112.213252', '1402443112.213252_ffff',
+ older, '%f' % older, '%f_0000ffff' % older,
+ )
+ for value in test_values:
+ other = timestamp.Timestamp(value)
+ self.assertNotEqual(ts, other) # sanity
+ self.assertTrue(ts > value,
+ '%r is not greater than %r given %r' % (
+ ts, value, value))
+ self.assertTrue(ts > other,
+ '%r is not greater than %r given %r' % (
+ ts, other, value))
+ self.assertTrue(ts > other.normal,
+ '%r is not greater than %r given %r' % (
+ ts, other.normal, value))
+ self.assertTrue(ts > other.internal,
+ '%r is not greater than %r given %r' % (
+ ts, other.internal, value))
+ self.assertTrue(ts > float(other),
+ '%r is not greater than %r given %r' % (
+ ts, float(other), value))
+ self.assertTrue(ts > int(other),
+ '%r is not greater than %r given %r' % (
+ ts, int(other), value))
+
+ def _test_greater_with_offset(self, now, test_values):
+ for offset in range(1, 1000, 100):
+ ts = timestamp.Timestamp(now, offset=offset)
+ for value in test_values:
+ other = timestamp.Timestamp(value)
+ self.assertNotEqual(ts, other) # sanity
+ self.assertTrue(ts > value,
+ '%r is not greater than %r given %r' % (
+ ts, value, value))
+ self.assertTrue(ts > other,
+ '%r is not greater than %r given %r' % (
+ ts, other, value))
+ self.assertTrue(ts > other.normal,
+ '%r is not greater than %r given %r' % (
+ ts, other.normal, value))
+ self.assertTrue(ts > other.internal,
+ '%r is not greater than %r given %r' % (
+ ts, other.internal, value))
+ self.assertTrue(ts > float(other),
+ '%r is not greater than %r given %r' % (
+ ts, float(other), value))
+ self.assertTrue(ts > int(other),
+ '%r is not greater than %r given %r' % (
+ ts, int(other), value))
+
+ def test_greater_with_offset(self):
+        # Part 1: use Python's natural clock. This is deliciously
+        # unpredictable, but completely legitimate and realistic. Finds bugs!
+ now = time.time()
+ older = now - 1
+ test_values = (
+ 0, '0', 0.0, '0.0', '0000.0000', '000.000_000',
+ 1, '1', 1.1, '1.1', '1111.1111', '111.111_111',
+ 1402443346.935174, '1402443346.93517', '1402443346.935169_ffff',
+ older, now,
+ )
+ self._test_greater_with_offset(now, test_values)
+ # Part 2: Same as above, but with fixed time values that reproduce
+ # specific corner cases.
+ now = 1519830570.6949348
+ older = now - 1
+ test_values = (
+ 0, '0', 0.0, '0.0', '0000.0000', '000.000_000',
+ 1, '1', 1.1, '1.1', '1111.1111', '111.111_111',
+ 1402443346.935174, '1402443346.93517', '1402443346.935169_ffff',
+ older, now,
+ )
+ self._test_greater_with_offset(now, test_values)
+ # Part 3: The '%f' problem. Timestamps cannot be converted to %f
+ # strings, then back to timestamps, then compared with originals.
+ # You can only "import" a floating point representation once.
+ now = 1519830570.6949348
+ now = float('%f' % now)
+ older = now - 1
+ test_values = (
+ 0, '0', 0.0, '0.0', '0000.0000', '000.000_000',
+ 1, '1', 1.1, '1.1', '1111.1111', '111.111_111',
+ older, '%f' % older, '%f_0000ffff' % older,
+ now, '%f' % now, '%s_00000000' % now,
+ )
+ self._test_greater_with_offset(now, test_values)
+
+ def test_smaller_no_offset(self):
+ now = time.time()
+ newer = now + 1
+ ts = timestamp.Timestamp(now)
+ test_values = (
+ 9999999999.99999, '9999999999.99999', '9999999999.99999_ffff',
+ newer, '%f' % newer, '%f_0000ffff' % newer,
+ )
+ for value in test_values:
+ other = timestamp.Timestamp(value)
+ self.assertNotEqual(ts, other) # sanity
+ self.assertTrue(ts < value,
+ '%r is not smaller than %r given %r' % (
+ ts, value, value))
+ self.assertTrue(ts < other,
+ '%r is not smaller than %r given %r' % (
+ ts, other, value))
+ self.assertTrue(ts < other.normal,
+ '%r is not smaller than %r given %r' % (
+ ts, other.normal, value))
+ self.assertTrue(ts < other.internal,
+ '%r is not smaller than %r given %r' % (
+ ts, other.internal, value))
+ self.assertTrue(ts < float(other),
+ '%r is not smaller than %r given %r' % (
+ ts, float(other), value))
+ self.assertTrue(ts < int(other),
+ '%r is not smaller than %r given %r' % (
+ ts, int(other), value))
+
+ def test_smaller_with_offset(self):
+ now = time.time()
+ newer = now + 1
+ test_values = (
+ 9999999999.99999, '9999999999.99999', '9999999999.99999_ffff',
+ newer, '%f' % newer, '%f_0000ffff' % newer,
+ )
+ for offset in range(1, 1000, 100):
+ ts = timestamp.Timestamp(now, offset=offset)
+ for value in test_values:
+ other = timestamp.Timestamp(value)
+ self.assertNotEqual(ts, other) # sanity
+ self.assertTrue(ts < value,
+ '%r is not smaller than %r given %r' % (
+ ts, value, value))
+ self.assertTrue(ts < other,
+ '%r is not smaller than %r given %r' % (
+ ts, other, value))
+ self.assertTrue(ts < other.normal,
+ '%r is not smaller than %r given %r' % (
+ ts, other.normal, value))
+ self.assertTrue(ts < other.internal,
+ '%r is not smaller than %r given %r' % (
+ ts, other.internal, value))
+ self.assertTrue(ts < float(other),
+ '%r is not smaller than %r given %r' % (
+ ts, float(other), value))
+ self.assertTrue(ts < int(other),
+ '%r is not smaller than %r given %r' % (
+ ts, int(other), value))
+
+ def test_cmp_with_none(self):
+ self.assertGreater(timestamp.Timestamp(0), None)
+ self.assertGreater(timestamp.Timestamp(1.0), None)
+ self.assertGreater(timestamp.Timestamp(1.0, 42), None)
+
+ def test_ordering(self):
+ given = [
+ '1402444820.62590_000000000000000a',
+ '1402444820.62589_0000000000000001',
+ '1402444821.52589_0000000000000004',
+ '1402444920.62589_0000000000000004',
+ '1402444821.62589_000000000000000a',
+ '1402444821.72589_000000000000000a',
+ '1402444920.62589_0000000000000002',
+ '1402444820.62589_0000000000000002',
+ '1402444820.62589_000000000000000a',
+ '1402444820.62590_0000000000000004',
+ '1402444920.62589_000000000000000a',
+ '1402444820.62590_0000000000000002',
+ '1402444821.52589_0000000000000002',
+ '1402444821.52589_0000000000000000',
+ '1402444920.62589',
+ '1402444821.62589_0000000000000004',
+ '1402444821.72589_0000000000000001',
+ '1402444820.62590',
+ '1402444820.62590_0000000000000001',
+ '1402444820.62589_0000000000000004',
+ '1402444821.72589_0000000000000000',
+ '1402444821.52589_000000000000000a',
+ '1402444821.72589_0000000000000004',
+ '1402444821.62589',
+ '1402444821.52589_0000000000000001',
+ '1402444821.62589_0000000000000001',
+ '1402444821.62589_0000000000000002',
+ '1402444821.72589_0000000000000002',
+ '1402444820.62589',
+ '1402444920.62589_0000000000000001']
+ expected = [
+ '1402444820.62589',
+ '1402444820.62589_0000000000000001',
+ '1402444820.62589_0000000000000002',
+ '1402444820.62589_0000000000000004',
+ '1402444820.62589_000000000000000a',
+ '1402444820.62590',
+ '1402444820.62590_0000000000000001',
+ '1402444820.62590_0000000000000002',
+ '1402444820.62590_0000000000000004',
+ '1402444820.62590_000000000000000a',
+ '1402444821.52589',
+ '1402444821.52589_0000000000000001',
+ '1402444821.52589_0000000000000002',
+ '1402444821.52589_0000000000000004',
+ '1402444821.52589_000000000000000a',
+ '1402444821.62589',
+ '1402444821.62589_0000000000000001',
+ '1402444821.62589_0000000000000002',
+ '1402444821.62589_0000000000000004',
+ '1402444821.62589_000000000000000a',
+ '1402444821.72589',
+ '1402444821.72589_0000000000000001',
+ '1402444821.72589_0000000000000002',
+ '1402444821.72589_0000000000000004',
+ '1402444821.72589_000000000000000a',
+ '1402444920.62589',
+ '1402444920.62589_0000000000000001',
+ '1402444920.62589_0000000000000002',
+ '1402444920.62589_0000000000000004',
+ '1402444920.62589_000000000000000a',
+ ]
+ # less visual version
+ """
+ now = time.time()
+ given = [
+ timestamp.Timestamp(now + i, offset=offset).internal
+ for i in (0, 0.00001, 0.9, 1.0, 1.1, 100.0)
+ for offset in (0, 1, 2, 4, 10)
+ ]
+ expected = [t for t in given]
+ random.shuffle(given)
+ """
+ self.assertEqual(len(given), len(expected)) # sanity
+ timestamps = [timestamp.Timestamp(t) for t in given]
+ # our expected values don't include insignificant offsets
+ with mock.patch('swift.common.utils.timestamp.FORCE_INTERNAL',
+ new=False):
+ self.assertEqual(
+ [t.internal for t in sorted(timestamps)], expected)
+ # string sorting works as well
+ self.assertEqual(
+ sorted([t.internal for t in timestamps]), expected)
+
+ def test_hashable(self):
+ ts_0 = timestamp.Timestamp('1402444821.72589')
+ ts_0_also = timestamp.Timestamp('1402444821.72589')
+ self.assertEqual(ts_0, ts_0_also) # sanity
+ self.assertEqual(hash(ts_0), hash(ts_0_also))
+ d = {ts_0: 'whatever'}
+ self.assertIn(ts_0, d) # sanity
+ self.assertIn(ts_0_also, d)
+
+ def test_out_of_range_comparisons(self):
+ now = timestamp.Timestamp.now()
+
+ def check_is_later(val):
+ self.assertTrue(now != val)
+ self.assertFalse(now == val)
+ self.assertTrue(now <= val)
+ self.assertTrue(now < val)
+ self.assertTrue(val > now)
+ self.assertTrue(val >= now)
+
+ check_is_later(1e30)
+ check_is_later(1579753284000) # someone gave us ms instead of s!
+ check_is_later('1579753284000')
+ check_is_later(b'1e15')
+ check_is_later(u'1.e+10_f')
+
+ def check_is_earlier(val):
+ self.assertTrue(now != val)
+ self.assertFalse(now == val)
+ self.assertTrue(now >= val)
+ self.assertTrue(now > val)
+ self.assertTrue(val < now)
+ self.assertTrue(val <= now)
+
+ check_is_earlier(-1)
+ check_is_earlier(-0.1)
+ check_is_earlier('-9999999')
+ check_is_earlier(b'-9999.999')
+ check_is_earlier(u'-1234_5678')
+
+ def test_inversion(self):
+ ts = timestamp.Timestamp(0)
+ self.assertIsInstance(~ts, timestamp.Timestamp)
+ self.assertEqual((~ts).internal, '9999999999.99999')
+
+ ts = timestamp.Timestamp(123456.789)
+ self.assertIsInstance(~ts, timestamp.Timestamp)
+ self.assertEqual(ts.internal, '0000123456.78900')
+ self.assertEqual((~ts).internal, '9999876543.21099')
+
+ timestamps = sorted(timestamp.Timestamp(random.random() * 1e10)
+ for _ in range(20))
+ self.assertEqual([x.internal for x in timestamps],
+ sorted(x.internal for x in timestamps))
+ self.assertEqual([(~x).internal for x in reversed(timestamps)],
+ sorted((~x).internal for x in timestamps))
+
+ ts = timestamp.Timestamp.now()
+ self.assertGreater(~ts, ts) # NB: will break around 2128
+
+ ts = timestamp.Timestamp.now(offset=1)
+ with self.assertRaises(ValueError) as caught:
+ ~ts
+ self.assertEqual(caught.exception.args[0],
+ 'Cannot invert timestamps with offsets')
+
+
+class TestTimestampEncoding(unittest.TestCase):
+
+ def setUp(self):
+ t0 = timestamp.Timestamp(0.0)
+ t1 = timestamp.Timestamp(997.9996)
+ t2 = timestamp.Timestamp(999)
+ t3 = timestamp.Timestamp(1000, 24)
+ t4 = timestamp.Timestamp(1001)
+ t5 = timestamp.Timestamp(1002.00040)
+
+ # encodings that are expected when explicit = False
+ self.non_explicit_encodings = (
+ ('0000001000.00000_18', (t3, t3, t3)),
+ ('0000001000.00000_18', (t3, t3, None)),
+ )
+
+ # mappings that are expected when explicit = True
+ self.explicit_encodings = (
+ ('0000001000.00000_18+0+0', (t3, t3, t3)),
+ ('0000001000.00000_18+0', (t3, t3, None)),
+ )
+
+ # mappings that are expected when explicit = True or False
+ self.encodings = (
+ ('0000001000.00000_18+0+186a0', (t3, t3, t4)),
+ ('0000001000.00000_18+186a0+186c8', (t3, t4, t5)),
+ ('0000001000.00000_18-186a0+0', (t3, t2, t2)),
+ ('0000001000.00000_18+0-186a0', (t3, t3, t2)),
+ ('0000001000.00000_18-186a0-186c8', (t3, t2, t1)),
+ ('0000001000.00000_18', (t3, None, None)),
+ ('0000001000.00000_18+186a0', (t3, t4, None)),
+ ('0000001000.00000_18-186a0', (t3, t2, None)),
+ ('0000001000.00000_18', (t3, None, t1)),
+ ('0000001000.00000_18-5f5e100', (t3, t0, None)),
+ ('0000001000.00000_18+0-5f5e100', (t3, t3, t0)),
+ ('0000001000.00000_18-5f5e100+5f45a60', (t3, t0, t2)),
+ )
+
+ # decodings that are expected when explicit = False
+ self.non_explicit_decodings = (
+ ('0000001000.00000_18', (t3, t3, t3)),
+ ('0000001000.00000_18+186a0', (t3, t4, t4)),
+ ('0000001000.00000_18-186a0', (t3, t2, t2)),
+ ('0000001000.00000_18+186a0', (t3, t4, t4)),
+ ('0000001000.00000_18-186a0', (t3, t2, t2)),
+ ('0000001000.00000_18-5f5e100', (t3, t0, t0)),
+ )
+
+ # decodings that are expected when explicit = True
+ self.explicit_decodings = (
+ ('0000001000.00000_18+0+0', (t3, t3, t3)),
+ ('0000001000.00000_18+0', (t3, t3, None)),
+ ('0000001000.00000_18', (t3, None, None)),
+ ('0000001000.00000_18+186a0', (t3, t4, None)),
+ ('0000001000.00000_18-186a0', (t3, t2, None)),
+ ('0000001000.00000_18-5f5e100', (t3, t0, None)),
+ )
+
+ # decodings that are expected when explicit = True or False
+ self.decodings = (
+ ('0000001000.00000_18+0+186a0', (t3, t3, t4)),
+ ('0000001000.00000_18+186a0+186c8', (t3, t4, t5)),
+ ('0000001000.00000_18-186a0+0', (t3, t2, t2)),
+ ('0000001000.00000_18+0-186a0', (t3, t3, t2)),
+ ('0000001000.00000_18-186a0-186c8', (t3, t2, t1)),
+ ('0000001000.00000_18-5f5e100+5f45a60', (t3, t0, t2)),
+ )
+
+ def _assertEqual(self, expected, actual, test):
+ self.assertEqual(expected, actual,
+ 'Got %s but expected %s for parameters %s'
+ % (actual, expected, test))
+
+ def test_encoding(self):
+ for test in self.explicit_encodings:
+ actual = timestamp.encode_timestamps(test[1][0], test[1][1],
+ test[1][2], True)
+ self._assertEqual(test[0], actual, test[1])
+ for test in self.non_explicit_encodings:
+ actual = timestamp.encode_timestamps(test[1][0], test[1][1],
+ test[1][2], False)
+ self._assertEqual(test[0], actual, test[1])
+ for explicit in (True, False):
+ for test in self.encodings:
+ actual = timestamp.encode_timestamps(test[1][0], test[1][1],
+ test[1][2], explicit)
+ self._assertEqual(test[0], actual, test[1])
+
+ def test_decoding(self):
+ for test in self.explicit_decodings:
+ actual = timestamp.decode_timestamps(test[0], True)
+ self._assertEqual(test[1], actual, test[0])
+ for test in self.non_explicit_decodings:
+ actual = timestamp.decode_timestamps(test[0], False)
+ self._assertEqual(test[1], actual, test[0])
+ for explicit in (True, False):
+ for test in self.decodings:
+ actual = timestamp.decode_timestamps(test[0], explicit)
+ self._assertEqual(test[1], actual, test[0])
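
For reference, the fixtures above pin down the three-timestamp delta encoding: the data timestamp is rendered in internal form (with an optional '_<hex>' offset suffix), and the content-type and metadata timestamps follow as signed hex deltas counted in 10-microsecond ticks. A minimal round-trip sketch, assuming a swift checkout on the path:

    from swift.common.utils import timestamp

    t_data = timestamp.Timestamp(1000, offset=24)  # internal '0000001000.00000_18'
    t_ctype = timestamp.Timestamp(1001)            # 1.00000s after t_data
    t_meta = timestamp.Timestamp(1002.0004)        # 1.00040s after t_ctype

    # deltas are hex counts of 10us ticks: 100000 -> 186a0, 100040 -> 186c8
    encoded = timestamp.encode_timestamps(t_data, t_ctype, t_meta, explicit=True)
    assert encoded == '0000001000.00000_18+186a0+186c8'
    assert timestamp.decode_timestamps(encoded, explicit=True) == (
        t_data, t_ctype, t_meta)
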
diff --git a/test/unit/container/test_sharder.py b/test/unit/container/test_sharder.py
index 98a847699..76387d137 100644
--- a/test/unit/container/test_sharder.py
+++ b/test/unit/container/test_sharder.py
@@ -203,7 +203,6 @@ class TestSharder(BaseTestSharder):
'container-sharder', sharder.logger.logger.name)
mock_ic.assert_called_once_with(
'/etc/swift/internal-client.conf', 'Swift Container Sharder', 3,
- allow_modify_pipeline=False,
use_replication_network=True,
global_conf={'log_name': 'container-sharder-ic'})
@@ -218,7 +217,6 @@ class TestSharder(BaseTestSharder):
sharder, mock_ic = self._do_test_init(conf, expected)
mock_ic.assert_called_once_with(
'/etc/swift/internal-client.conf', 'Swift Container Sharder', 3,
- allow_modify_pipeline=False,
use_replication_network=True,
global_conf={'log_name': 'container-sharder-ic'})
@@ -280,7 +278,6 @@ class TestSharder(BaseTestSharder):
sharder, mock_ic = self._do_test_init(conf, expected)
mock_ic.assert_called_once_with(
'/etc/swift/my-sharder-ic.conf', 'Swift Container Sharder', 2,
- allow_modify_pipeline=False,
use_replication_network=True,
global_conf={'log_name': 'container-sharder-ic'})
self.assertEqual(self.logger.get_lines_for_level('warning'), [
@@ -418,7 +415,6 @@ class TestSharder(BaseTestSharder):
mock_ic.assert_called_once_with(
'/etc/swift/internal-client.conf',
'Swift Container Sharder', 3,
- allow_modify_pipeline=False,
global_conf={'log_name': exp_internal_client_log_name},
use_replication_network=True)
@@ -2470,6 +2466,16 @@ class TestSharder(BaseTestSharder):
self.assertEqual('', context.cursor)
self.assertEqual(10, context.cleave_to_row)
self.assertEqual(12, context.max_row) # note that max row increased
+ self.assertTrue(self.logger.log_dict['timing_since'])
+ self.assertEqual('sharder.sharding.move_misplaced',
+ self.logger.log_dict['timing_since'][-3][0][0])
+ self.assertGreater(self.logger.log_dict['timing_since'][-3][0][1], 0)
+ self.assertEqual('sharder.sharding.set_state',
+ self.logger.log_dict['timing_since'][-2][0][0])
+ self.assertGreater(self.logger.log_dict['timing_since'][-2][0][1], 0)
+ self.assertEqual('sharder.sharding.cleave',
+ self.logger.log_dict['timing_since'][-1][0][0])
+ self.assertGreater(self.logger.log_dict['timing_since'][-1][0][1], 0)
lines = sharder.logger.get_lines_for_level('info')
self.assertEqual(
["Kick off container cleaving, own shard range in state "
@@ -2515,6 +2521,80 @@ class TestSharder(BaseTestSharder):
'Completed cleaving, DB set to sharded state, path: a/c, db: %s'
% broker.db_file, lines[1:])
self.assertFalse(sharder.logger.get_lines_for_level('warning'))
+ self.assertTrue(self.logger.log_dict['timing_since'])
+ self.assertEqual('sharder.sharding.move_misplaced',
+ self.logger.log_dict['timing_since'][-4][0][0])
+ self.assertGreater(self.logger.log_dict['timing_since'][-4][0][1], 0)
+ self.assertEqual('sharder.sharding.cleave',
+ self.logger.log_dict['timing_since'][-3][0][0])
+ self.assertGreater(self.logger.log_dict['timing_since'][-3][0][1], 0)
+ self.assertEqual('sharder.sharding.completed',
+ self.logger.log_dict['timing_since'][-2][0][0])
+ self.assertGreater(self.logger.log_dict['timing_since'][-2][0][1], 0)
+ self.assertEqual('sharder.sharding.send_sr',
+ self.logger.log_dict['timing_since'][-1][0][0])
+ self.assertGreater(self.logger.log_dict['timing_since'][-1][0][1], 0)
+
+ def test_cleave_timing_metrics(self):
+ broker = self._make_broker()
+ objects = [{'name': 'obj_%03d' % i,
+ 'created_at': Timestamp.now().normal,
+ 'content_type': 'text/plain',
+ 'etag': 'etag_%d' % i,
+ 'size': 1024 * i,
+ 'deleted': i % 2,
+ 'storage_policy_index': 0,
+ } for i in range(1, 8)]
+ broker.merge_items([dict(obj) for obj in objects])
+ broker.enable_sharding(Timestamp.now())
+ shard_ranges = self._make_shard_ranges(
+ (('', 'obj_004'), ('obj_004', '')), state=ShardRange.CREATED)
+ expected_shard_dbs = []
+ for shard_range in shard_ranges:
+ db_hash = hash_path(shard_range.account, shard_range.container)
+ expected_shard_dbs.append(
+ os.path.join(self.tempdir, 'sda', 'containers', '0',
+ db_hash[-3:], db_hash, db_hash + '.db'))
+ broker.merge_shard_ranges(shard_ranges)
+ self.assertTrue(broker.set_sharding_state())
+ node = {'ip': '1.2.3.4', 'port': 6040, 'device': 'sda5', 'id': '2',
+ 'index': 0}
+
+ with self._mock_sharder() as sharder:
+ sharder._audit_container = mock.MagicMock()
+ sharder._process_broker(broker, node, 99)
+
+ lines = sharder.logger.get_lines_for_level('info')
+ self.assertEqual(
+ 'Starting to cleave (2 todo), path: a/c, db: %s'
+ % broker.db_file, lines[0])
+ self.assertIn(
+ 'Completed cleaving, DB set to sharded state, path: a/c, db: %s'
+ % broker.db_file, lines[1:])
+
+ self.assertTrue(self.logger.log_dict['timing_since'])
+ self.assertEqual('sharder.sharding.move_misplaced',
+ self.logger.log_dict['timing_since'][-4][0][0])
+ self.assertGreater(self.logger.log_dict['timing_since'][-4][0][1], 0)
+ self.assertEqual('sharder.sharding.cleave',
+ self.logger.log_dict['timing_since'][-3][0][0])
+ self.assertGreater(self.logger.log_dict['timing_since'][-3][0][1], 0)
+ self.assertEqual('sharder.sharding.completed',
+ self.logger.log_dict['timing_since'][-2][0][0])
+ self.assertGreater(self.logger.log_dict['timing_since'][-2][0][1], 0)
+ self.assertEqual('sharder.sharding.send_sr',
+ self.logger.log_dict['timing_since'][-1][0][0])
+ self.assertGreater(self.logger.log_dict['timing_since'][-1][0][1], 0)
+
+ # check shard ranges were updated to ACTIVE
+ self.assertEqual([ShardRange.ACTIVE] * 2,
+ [sr.state for sr in broker.get_shard_ranges()])
+ shard_broker = ContainerBroker(expected_shard_dbs[0])
+ actual_objects = shard_broker.get_objects()
+ self.assertEqual(objects[:4], actual_objects)
+ shard_broker = ContainerBroker(expected_shard_dbs[1])
+ actual_objects = shard_broker.get_objects()
+ self.assertEqual(objects[4:], actual_objects)
def test_cleave_multiple_storage_policies(self):
# verify that objects in all storage policies are cleaved
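
All of the timing assertions in these sharder tests follow one pattern: the FakeLogger records each timing_since() call as an (args, kwargs) pair, with args[0] the metric name and args[1] the start time. A hypothetical helper condensing the repeated tail checks (not part of the test suite):

    def assert_recent_timings(test, logger, expected_names):
        # newest calls are last; compare the tail against the expected names
        calls = logger.log_dict['timing_since']
        tail = calls[-len(expected_names):]
        for (args, _kwargs), name in zip(tail, expected_names):
            test.assertEqual(name, args[0])  # e.g. 'sharder.sharding.cleave'
            test.assertGreater(args[1], 0)   # a positive start time was passed

    # usage, mirroring test_cleave_timing_metrics:
    # assert_recent_timings(self, self.logger, [
    #     'sharder.sharding.move_misplaced', 'sharder.sharding.cleave',
    #     'sharder.sharding.completed', 'sharder.sharding.send_sr'])
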
diff --git a/test/unit/helpers.py b/test/unit/helpers.py
index 32035bb48..1cb89fd0d 100644
--- a/test/unit/helpers.py
+++ b/test/unit/helpers.py
@@ -96,6 +96,7 @@ def setup_servers(the_object_server=object_server, extra_conf=None):
'allow_versions': 't', 'node_timeout': 20}
if extra_conf:
conf.update(extra_conf)
+ context['conf'] = conf
prolis = listen_zero()
acc1lis = listen_zero()
acc2lis = listen_zero()
diff --git a/test/unit/obj/test_diskfile.py b/test/unit/obj/test_diskfile.py
index 59baa38ab..9f251f241 100644
--- a/test/unit/obj/test_diskfile.py
+++ b/test/unit/obj/test_diskfile.py
@@ -15,6 +15,8 @@
"""Tests for swift.obj.diskfile"""
+from __future__ import print_function
+import binascii
import six.moves.cPickle as pickle
import os
import errno
@@ -28,6 +30,7 @@ import uuid
import xattr
import re
import six
+import sys
from collections import defaultdict
from random import shuffle, randint
from shutil import rmtree
@@ -47,6 +50,7 @@ from test.unit import (mock as unit_mock, temptree, mock_check_drive,
encode_frag_archive_bodies, skip_if_no_xattrs)
from swift.obj import diskfile
from swift.common import utils
+from swift.common.utils import libc
from swift.common.utils import hash_path, mkdirs, Timestamp, \
encode_timestamps, O_TMPFILE, md5 as _md5
from swift.common import ring
@@ -553,6 +557,266 @@ class TestDiskFileModuleMethods(unittest.TestCase):
exp_dir = '/srv/node/sdb5/objects-1/123'
self.assertEqual(part_dir, exp_dir)
+ def test_can_read_old_meta(self):
+ # outputs taken from `xattr -l <diskfile>`
+ cases = {
+ 'python_2.7.18_swift_2.13_replicated': '''
+0000 80 02 7D 71 01 28 55 0E 43 6F 6E 74 65 6E 74 2D ..}q.(U.Content-
+0010 4C 65 6E 67 74 68 71 02 55 02 31 33 55 04 6E 61 Lengthq.U.13U.na
+0020 6D 65 71 03 55 12 2F 41 55 54 48 5F 74 65 73 74 meq.U./AUTH_test
+0030 2F E2 98 83 2F E2 98 83 71 04 55 13 58 2D 4F 62 /.../...q.U.X-Ob
+0040 6A 65 63 74 2D 4D 65 74 61 2D 4D 74 69 6D 65 55 ject-Meta-MtimeU
+0050 11 31 36 38 32 39 35 39 38 37 34 2E 37 35 36 32 .1682959874.7562
+0060 30 35 71 05 55 04 45 54 61 67 71 06 55 20 36 62 05q.U.ETagq.U 6b
+0070 37 64 39 61 31 63 35 64 31 36 37 63 63 35 30 30 7d9a1c5d167cc500
+0080 33 37 66 32 39 66 32 39 30 62 62 33 37 35 71 07 37f29f290bb375q.
+0090 55 0B 58 2D 54 69 6D 65 73 74 61 6D 70 71 08 55 U.X-Timestampq.U
+00A0 10 31 36 38 32 39 36 32 36 35 31 2E 39 37 34 39 .1682962651.9749
+00B0 34 55 11 58 2D 4F 62 6A 65 63 74 2D 4D 65 74 61 4U.X-Object-Meta
+00C0 2D E2 98 83 55 03 E2 98 83 71 09 55 0C 43 6F 6E -...U....q.U.Con
+00D0 74 65 6E 74 2D 54 79 70 65 71 0A 55 18 61 70 70 tent-Typeq.U.app
+00E0 6C 69 63 61 74 69 6F 6E 2F 6F 63 74 65 74 2D 73 lication/octet-s
+00F0 74 72 65 61 6D 71 0B 75 2E treamq.u.
+ ''',
+ 'python_2.7.18_swift_2.13_ec': '''
+0000 80 02 7D 71 01 28 55 0E 43 6F 6E 74 65 6E 74 2D ..}q.(U.Content-
+0010 4C 65 6E 67 74 68 71 02 55 02 38 34 55 04 6E 61 Lengthq.U.84U.na
+0020 6D 65 71 03 55 12 2F 41 55 54 48 5F 74 65 73 74 meq.U./AUTH_test
+0030 2F E2 98 83 2F E2 98 83 71 04 58 1E 00 00 00 58 /.../...q.X....X
+0040 2D 4F 62 6A 65 63 74 2D 53 79 73 6D 65 74 61 2D -Object-Sysmeta-
+0050 45 63 2D 46 72 61 67 2D 49 6E 64 65 78 71 05 55 Ec-Frag-Indexq.U
+0060 01 35 55 13 58 2D 4F 62 6A 65 63 74 2D 4D 65 74 .5U.X-Object-Met
+0070 61 2D 4D 74 69 6D 65 55 11 31 36 38 32 39 35 39 a-MtimeU.1682959
+0080 38 37 34 2E 37 35 36 32 30 35 71 06 58 22 00 00 874.756205q.X"..
+0090 00 58 2D 4F 62 6A 65 63 74 2D 53 79 73 6D 65 74 .X-Object-Sysmet
+00A0 61 2D 45 63 2D 43 6F 6E 74 65 6E 74 2D 4C 65 6E a-Ec-Content-Len
+00B0 67 74 68 71 07 55 02 31 33 71 08 58 18 00 00 00 gthq.U.13q.X....
+00C0 58 2D 4F 62 6A 65 63 74 2D 53 79 73 6D 65 74 61 X-Object-Sysmeta
+00D0 2D 45 63 2D 45 74 61 67 71 09 55 20 36 62 37 64 -Ec-Etagq.U 6b7d
+00E0 39 61 31 63 35 64 31 36 37 63 63 35 30 30 33 37 9a1c5d167cc50037
+00F0 66 32 39 66 32 39 30 62 62 33 37 35 71 0A 55 04 f29f290bb375q.U.
+0100 45 54 61 67 71 0B 55 20 65 32 66 64 34 33 30 65 ETagq.U e2fd430e
+0110 61 66 37 32 32 33 63 32 35 30 33 63 34 65 38 33 af7223c2503c4e83
+0120 30 31 63 66 66 33 37 63 71 0C 55 0B 58 2D 54 69 01cff37cq.U.X-Ti
+0130 6D 65 73 74 61 6D 70 71 0D 55 10 31 36 38 32 39 mestampq.U.16829
+0140 36 32 32 36 32 2E 31 36 31 39 39 55 11 58 2D 4F 62262.16199U.X-O
+0150 62 6A 65 63 74 2D 4D 65 74 61 2D E2 98 83 55 03 bject-Meta-...U.
+0160 E2 98 83 71 0E 58 1A 00 00 00 58 2D 4F 62 6A 65 ...q.X....X-Obje
+0170 63 74 2D 53 79 73 6D 65 74 61 2D 45 63 2D 53 63 ct-Sysmeta-Ec-Sc
+0180 68 65 6D 65 71 0F 55 1A 6C 69 62 65 72 61 73 75 hemeq.U.liberasu
+0190 72 65 63 6F 64 65 5F 72 73 5F 76 61 6E 64 20 34 recode_rs_vand 4
+01A0 2B 32 71 10 55 0C 43 6F 6E 74 65 6E 74 2D 54 79 +2q.U.Content-Ty
+01B0 70 65 71 11 55 18 61 70 70 6C 69 63 61 74 69 6F peq.U.applicatio
+01C0 6E 2F 6F 63 74 65 74 2D 73 74 72 65 61 6D 71 12 n/octet-streamq.
+01D0 58 20 00 00 00 58 2D 4F 62 6A 65 63 74 2D 53 79 X ...X-Object-Sy
+01E0 73 6D 65 74 61 2D 45 63 2D 53 65 67 6D 65 6E 74 smeta-Ec-Segment
+01F0 2D 53 69 7A 65 71 13 55 07 31 30 34 38 35 37 36 -Sizeq.U.1048576
+0200 71 14 75 2E q.u.
+ ''',
+ 'python_2.7.18_swift_2.23_replicated': '''
+0000 80 02 7D 71 01 28 55 0E 43 6F 6E 74 65 6E 74 2D ..}q.(U.Content-
+0010 4C 65 6E 67 74 68 71 02 55 02 31 33 71 03 55 04 Lengthq.U.13q.U.
+0020 6E 61 6D 65 71 04 55 12 2F 41 55 54 48 5F 74 65 nameq.U./AUTH_te
+0030 73 74 2F E2 98 83 2F E2 98 83 71 05 55 0C 43 6F st/.../...q.U.Co
+0040 6E 74 65 6E 74 2D 54 79 70 65 71 06 55 18 61 70 ntent-Typeq.U.ap
+0050 70 6C 69 63 61 74 69 6F 6E 2F 6F 63 74 65 74 2D plication/octet-
+0060 73 74 72 65 61 6D 71 07 55 04 45 54 61 67 71 08 streamq.U.ETagq.
+0070 55 20 36 62 37 64 39 61 31 63 35 64 31 36 37 63 U 6b7d9a1c5d167c
+0080 63 35 30 30 33 37 66 32 39 66 32 39 30 62 62 33 c50037f29f290bb3
+0090 37 35 71 09 55 0B 58 2D 54 69 6D 65 73 74 61 6D 75q.U.X-Timestam
+00A0 70 71 0A 55 10 31 36 38 32 39 36 33 32 30 39 2E pq.U.1682963209.
+00B0 38 32 32 37 32 71 0B 55 11 58 2D 4F 62 6A 65 63 82272q.U.X-Objec
+00C0 74 2D 4D 65 74 61 2D E2 98 83 71 0C 55 03 E2 98 t-Meta-...q.U...
+00D0 83 71 0D 55 13 58 2D 4F 62 6A 65 63 74 2D 4D 65 .q.U.X-Object-Me
+00E0 74 61 2D 4D 74 69 6D 65 71 0E 55 11 31 36 38 32 ta-Mtimeq.U.1682
+00F0 39 35 39 38 37 34 2E 37 35 36 32 30 35 71 0F 75 959874.756205q.u
+0100 2E .
+ ''',
+ 'python_3.10.6_swift_2.23_replicated': '''
+0000 80 02 7D 71 00 28 63 5F 63 6F 64 65 63 73 0A 65 ..}q.(c_codecs.e
+0010 6E 63 6F 64 65 0A 71 01 58 0B 00 00 00 58 2D 54 ncode.q.X....X-T
+0020 69 6D 65 73 74 61 6D 70 71 02 58 06 00 00 00 6C imestampq.X....l
+0030 61 74 69 6E 31 71 03 86 71 04 52 71 05 68 01 58 atin1q..q.Rq.h.X
+0040 10 00 00 00 31 36 38 32 39 36 33 30 31 37 2E 31 ....1682963017.1
+0050 30 34 37 32 71 06 68 03 86 71 07 52 71 08 68 01 0472q.h..q.Rq.h.
+0060 58 0C 00 00 00 43 6F 6E 74 65 6E 74 2D 54 79 70 X....Content-Typ
+0070 65 71 09 68 03 86 71 0A 52 71 0B 68 01 58 18 00 eq.h..q.Rq.h.X..
+0080 00 00 61 70 70 6C 69 63 61 74 69 6F 6E 2F 6F 63 ..application/oc
+0090 74 65 74 2D 73 74 72 65 61 6D 71 0C 68 03 86 71 tet-streamq.h..q
+00A0 0D 52 71 0E 68 01 58 0E 00 00 00 43 6F 6E 74 65 .Rq.h.X....Conte
+00B0 6E 74 2D 4C 65 6E 67 74 68 71 0F 68 03 86 71 10 nt-Lengthq.h..q.
+00C0 52 71 11 68 01 58 02 00 00 00 31 33 71 12 68 03 Rq.h.X....13q.h.
+00D0 86 71 13 52 71 14 68 01 58 04 00 00 00 45 54 61 .q.Rq.h.X....ETa
+00E0 67 71 15 68 03 86 71 16 52 71 17 68 01 58 20 00 gq.h..q.Rq.h.X .
+00F0 00 00 36 62 37 64 39 61 31 63 35 64 31 36 37 63 ..6b7d9a1c5d167c
+0100 63 35 30 30 33 37 66 32 39 66 32 39 30 62 62 33 c50037f29f290bb3
+0110 37 35 71 18 68 03 86 71 19 52 71 1A 68 01 58 13 75q.h..q.Rq.h.X.
+0120 00 00 00 58 2D 4F 62 6A 65 63 74 2D 4D 65 74 61 ...X-Object-Meta
+0130 2D 4D 74 69 6D 65 71 1B 68 03 86 71 1C 52 71 1D -Mtimeq.h..q.Rq.
+0140 68 01 58 11 00 00 00 31 36 38 32 39 35 39 38 37 h.X....168295987
+0150 34 2E 37 35 36 32 30 35 71 1E 68 03 86 71 1F 52 4.756205q.h..q.R
+0160 71 20 68 01 58 1A 00 00 00 58 2D 4F 62 6A 65 63 q h.X....X-Objec
+0170 74 2D 4D 65 74 61 2D C3 83 C2 A2 C3 82 C2 98 C3 t-Meta-.........
+0180 82 C2 83 71 21 68 03 86 71 22 52 71 23 68 01 58 ...q!h..q"Rq#h.X
+0190 0C 00 00 00 C3 83 C2 A2 C3 82 C2 98 C3 82 C2 83 ................
+01A0 71 24 68 03 86 71 25 52 71 26 68 01 58 04 00 00 q$h..q%Rq&h.X...
+01B0 00 6E 61 6D 65 71 27 68 03 86 71 28 52 71 29 68 .nameq'h..q(Rq)h
+01C0 01 58 18 00 00 00 2F 41 55 54 48 5F 74 65 73 74 .X..../AUTH_test
+01D0 2F C3 A2 C2 98 C2 83 2F C3 A2 C2 98 C2 83 71 2A /....../......q*
+01E0 68 03 86 71 2B 52 71 2C 75 2E h..q+Rq,u.
+ ''',
+ 'python_2.7.18_swift_2.23_ec': '''
+0000 80 02 7D 71 01 28 55 0E 43 6F 6E 74 65 6E 74 2D ..}q.(U.Content-
+0010 4C 65 6E 67 74 68 71 02 55 02 38 34 71 03 55 04 Lengthq.U.84q.U.
+0020 6E 61 6D 65 71 04 55 12 2F 41 55 54 48 5F 74 65 nameq.U./AUTH_te
+0030 73 74 2F E2 98 83 2F E2 98 83 71 05 55 1E 58 2D st/.../...q.U.X-
+0040 4F 62 6A 65 63 74 2D 53 79 73 6D 65 74 61 2D 45 Object-Sysmeta-E
+0050 63 2D 46 72 61 67 2D 49 6E 64 65 78 55 01 35 55 c-Frag-IndexU.5U
+0060 0C 43 6F 6E 74 65 6E 74 2D 54 79 70 65 71 06 55 .Content-Typeq.U
+0070 18 61 70 70 6C 69 63 61 74 69 6F 6E 2F 6F 63 74 .application/oct
+0080 65 74 2D 73 74 72 65 61 6D 71 07 55 22 58 2D 4F et-streamq.U"X-O
+0090 62 6A 65 63 74 2D 53 79 73 6D 65 74 61 2D 45 63 bject-Sysmeta-Ec
+00A0 2D 43 6F 6E 74 65 6E 74 2D 4C 65 6E 67 74 68 55 -Content-LengthU
+00B0 02 31 33 71 08 55 18 58 2D 4F 62 6A 65 63 74 2D .13q.U.X-Object-
+00C0 53 79 73 6D 65 74 61 2D 45 63 2D 45 74 61 67 55 Sysmeta-Ec-EtagU
+00D0 20 36 62 37 64 39 61 31 63 35 64 31 36 37 63 63 6b7d9a1c5d167cc
+00E0 35 30 30 33 37 66 32 39 66 32 39 30 62 62 33 37 50037f29f290bb37
+00F0 35 71 09 55 04 45 54 61 67 71 0A 55 20 65 32 66 5q.U.ETagq.U e2f
+0100 64 34 33 30 65 61 66 37 32 32 33 63 32 35 30 33 d430eaf7223c2503
+0110 63 34 65 38 33 30 31 63 66 66 33 37 63 71 0B 55 c4e8301cff37cq.U
+0120 0B 58 2D 54 69 6D 65 73 74 61 6D 70 71 0C 55 10 .X-Timestampq.U.
+0130 31 36 38 32 39 36 33 31 33 30 2E 33 35 39 38 36 1682963130.35986
+0140 71 0D 55 11 58 2D 4F 62 6A 65 63 74 2D 4D 65 74 q.U.X-Object-Met
+0150 61 2D E2 98 83 71 0E 55 03 E2 98 83 71 0F 55 1A a-...q.U....q.U.
+0160 58 2D 4F 62 6A 65 63 74 2D 53 79 73 6D 65 74 61 X-Object-Sysmeta
+0170 2D 45 63 2D 53 63 68 65 6D 65 55 1A 6C 69 62 65 -Ec-SchemeU.libe
+0180 72 61 73 75 72 65 63 6F 64 65 5F 72 73 5F 76 61 rasurecode_rs_va
+0190 6E 64 20 34 2B 32 71 10 55 13 58 2D 4F 62 6A 65 nd 4+2q.U.X-Obje
+01A0 63 74 2D 4D 65 74 61 2D 4D 74 69 6D 65 71 11 55 ct-Meta-Mtimeq.U
+01B0 11 31 36 38 32 39 35 39 38 37 34 2E 37 35 36 32 .1682959874.7562
+01C0 30 35 71 12 55 20 58 2D 4F 62 6A 65 63 74 2D 53 05q.U X-Object-S
+01D0 79 73 6D 65 74 61 2D 45 63 2D 53 65 67 6D 65 6E ysmeta-Ec-Segmen
+01E0 74 2D 53 69 7A 65 55 07 31 30 34 38 35 37 36 71 t-SizeU.1048576q
+01F0 13 75 2E .u.
+ ''',
+ 'python_3.10.6_swift_2.23_ec': '''
+0000 80 02 7D 71 00 28 63 5F 63 6F 64 65 63 73 0A 65 ..}q.(c_codecs.e
+0010 6E 63 6F 64 65 0A 71 01 58 0B 00 00 00 58 2D 54 ncode.q.X....X-T
+0020 69 6D 65 73 74 61 6D 70 71 02 58 06 00 00 00 6C imestampq.X....l
+0030 61 74 69 6E 31 71 03 86 71 04 52 71 05 68 01 58 atin1q..q.Rq.h.X
+0040 10 00 00 00 31 36 38 32 39 36 32 39 35 35 2E 33 ....1682962955.3
+0050 37 35 34 36 71 06 68 03 86 71 07 52 71 08 68 01 7546q.h..q.Rq.h.
+0060 58 0C 00 00 00 43 6F 6E 74 65 6E 74 2D 54 79 70 X....Content-Typ
+0070 65 71 09 68 03 86 71 0A 52 71 0B 68 01 58 18 00 eq.h..q.Rq.h.X..
+0080 00 00 61 70 70 6C 69 63 61 74 69 6F 6E 2F 6F 63 ..application/oc
+0090 74 65 74 2D 73 74 72 65 61 6D 71 0C 68 03 86 71 tet-streamq.h..q
+00A0 0D 52 71 0E 68 01 58 0E 00 00 00 43 6F 6E 74 65 .Rq.h.X....Conte
+00B0 6E 74 2D 4C 65 6E 67 74 68 71 0F 68 03 86 71 10 nt-Lengthq.h..q.
+00C0 52 71 11 68 01 58 02 00 00 00 38 34 71 12 68 03 Rq.h.X....84q.h.
+00D0 86 71 13 52 71 14 68 01 58 04 00 00 00 45 54 61 .q.Rq.h.X....ETa
+00E0 67 71 15 68 03 86 71 16 52 71 17 68 01 58 20 00 gq.h..q.Rq.h.X .
+00F0 00 00 65 32 66 64 34 33 30 65 61 66 37 32 32 33 ..e2fd430eaf7223
+0100 63 32 35 30 33 63 34 65 38 33 30 31 63 66 66 33 c2503c4e8301cff3
+0110 37 63 71 18 68 03 86 71 19 52 71 1A 68 01 58 13 7cq.h..q.Rq.h.X.
+0120 00 00 00 58 2D 4F 62 6A 65 63 74 2D 4D 65 74 61 ...X-Object-Meta
+0130 2D 4D 74 69 6D 65 71 1B 68 03 86 71 1C 52 71 1D -Mtimeq.h..q.Rq.
+0140 68 01 58 11 00 00 00 31 36 38 32 39 35 39 38 37 h.X....168295987
+0150 34 2E 37 35 36 32 30 35 71 1E 68 03 86 71 1F 52 4.756205q.h..q.R
+0160 71 20 68 01 58 1A 00 00 00 58 2D 4F 62 6A 65 63 q h.X....X-Objec
+0170 74 2D 4D 65 74 61 2D C3 83 C2 A2 C3 82 C2 98 C3 t-Meta-.........
+0180 82 C2 83 71 21 68 03 86 71 22 52 71 23 68 01 58 ...q!h..q"Rq#h.X
+0190 0C 00 00 00 C3 83 C2 A2 C3 82 C2 98 C3 82 C2 83 ................
+01A0 71 24 68 03 86 71 25 52 71 26 68 01 58 18 00 00 q$h..q%Rq&h.X...
+01B0 00 58 2D 4F 62 6A 65 63 74 2D 53 79 73 6D 65 74 .X-Object-Sysmet
+01C0 61 2D 45 63 2D 45 74 61 67 71 27 68 03 86 71 28 a-Ec-Etagq'h..q(
+01D0 52 71 29 68 01 58 20 00 00 00 36 62 37 64 39 61 Rq)h.X ...6b7d9a
+01E0 31 63 35 64 31 36 37 63 63 35 30 30 33 37 66 32 1c5d167cc50037f2
+01F0 39 66 32 39 30 62 62 33 37 35 71 2A 68 03 86 71 9f290bb375q*h..q
+0200 2B 52 71 2C 68 01 58 22 00 00 00 58 2D 4F 62 6A +Rq,h.X"...X-Obj
+0210 65 63 74 2D 53 79 73 6D 65 74 61 2D 45 63 2D 43 ect-Sysmeta-Ec-C
+0220 6F 6E 74 65 6E 74 2D 4C 65 6E 67 74 68 71 2D 68 ontent-Lengthq-h
+0230 03 86 71 2E 52 71 2F 68 01 58 02 00 00 00 31 33 ..q.Rq/h.X....13
+0240 71 30 68 03 86 71 31 52 71 32 68 01 58 1E 00 00 q0h..q1Rq2h.X...
+0250 00 58 2D 4F 62 6A 65 63 74 2D 53 79 73 6D 65 74 .X-Object-Sysmet
+0260 61 2D 45 63 2D 46 72 61 67 2D 49 6E 64 65 78 71 a-Ec-Frag-Indexq
+0270 33 68 03 86 71 34 52 71 35 68 01 58 01 00 00 00 3h..q4Rq5h.X....
+0280 35 71 36 68 03 86 71 37 52 71 38 68 01 58 1A 00 5q6h..q7Rq8h.X..
+0290 00 00 58 2D 4F 62 6A 65 63 74 2D 53 79 73 6D 65 ..X-Object-Sysme
+02A0 74 61 2D 45 63 2D 53 63 68 65 6D 65 71 39 68 03 ta-Ec-Schemeq9h.
+02B0 86 71 3A 52 71 3B 68 01 58 1A 00 00 00 6C 69 62 .q:Rq;h.X....lib
+02C0 65 72 61 73 75 72 65 63 6F 64 65 5F 72 73 5F 76 erasurecode_rs_v
+02D0 61 6E 64 20 34 2B 32 71 3C 68 03 86 71 3D 52 71 and 4+2q<h..q=Rq
+02E0 3E 68 01 58 20 00 00 00 58 2D 4F 62 6A 65 63 74 >h.X ...X-Object
+02F0 2D 53 79 73 6D 65 74 61 2D 45 63 2D 53 65 67 6D -Sysmeta-Ec-Segm
+0300 65 6E 74 2D 53 69 7A 65 71 3F 68 03 86 71 40 52 ent-Sizeq?h..q@R
+0310 71 41 68 01 58 07 00 00 00 31 30 34 38 35 37 36 qAh.X....1048576
+0320 71 42 68 03 86 71 43 52 71 44 68 01 58 04 00 00 qBh..qCRqDh.X...
+0330 00 6E 61 6D 65 71 45 68 03 86 71 46 52 71 47 68 .nameqEh..qFRqGh
+0340 01 58 18 00 00 00 2F 41 55 54 48 5F 74 65 73 74 .X..../AUTH_test
+0350 2F C3 A2 C2 98 C2 83 2F C3 A2 C2 98 C2 83 71 48 /....../......qH
+0360 68 03 86 71 49 52 71 4A 75 2E h..qIRqJu.
+ ''',
+ 'python3.8.10_swift_2.31.1_replicated': '''
+0000 80 02 7D 71 00 28 63 5F 63 6F 64 65 63 73 0A 65 ..}q.(c_codecs.e
+0010 6E 63 6F 64 65 0A 71 01 58 0B 00 00 00 58 2D 54 ncode.q.X....X-T
+0020 69 6D 65 73 74 61 6D 70 71 02 58 06 00 00 00 6C imestampq.X....l
+0030 61 74 69 6E 31 71 03 86 71 04 52 71 05 68 01 58 atin1q..q.Rq.h.X
+0040 10 00 00 00 31 36 38 33 30 36 35 34 37 38 2E 32 ....1683065478.2
+0050 35 30 30 34 71 06 68 03 86 71 07 52 71 08 68 01 5004q.h..q.Rq.h.
+0060 58 0C 00 00 00 43 6F 6E 74 65 6E 74 2D 54 79 70 X....Content-Typ
+0070 65 71 09 68 03 86 71 0A 52 71 0B 68 01 58 18 00 eq.h..q.Rq.h.X..
+0080 00 00 61 70 70 6C 69 63 61 74 69 6F 6E 2F 6F 63 ..application/oc
+0090 74 65 74 2D 73 74 72 65 61 6D 71 0C 68 03 86 71 tet-streamq.h..q
+00A0 0D 52 71 0E 68 01 58 0E 00 00 00 43 6F 6E 74 65 .Rq.h.X....Conte
+00B0 6E 74 2D 4C 65 6E 67 74 68 71 0F 68 03 86 71 10 nt-Lengthq.h..q.
+00C0 52 71 11 68 01 58 01 00 00 00 38 71 12 68 03 86 Rq.h.X....8q.h..
+00D0 71 13 52 71 14 68 01 58 04 00 00 00 45 54 61 67 q.Rq.h.X....ETag
+00E0 71 15 68 03 86 71 16 52 71 17 68 01 58 20 00 00 q.h..q.Rq.h.X ..
+00F0 00 37 30 63 31 64 62 35 36 66 33 30 31 63 39 65 .70c1db56f301c9e
+0100 33 33 37 62 30 30 39 39 62 64 34 31 37 34 62 32 337b0099bd4174b2
+0110 38 71 18 68 03 86 71 19 52 71 1A 68 01 58 13 00 8q.h..q.Rq.h.X..
+0120 00 00 58 2D 4F 62 6A 65 63 74 2D 4D 65 74 61 2D ..X-Object-Meta-
+0130 4D 74 69 6D 65 71 1B 68 03 86 71 1C 52 71 1D 68 Mtimeq.h..q.Rq.h
+0140 01 58 11 00 00 00 31 36 38 33 30 36 34 39 33 38 .X....1683064938
+0150 2E 36 39 39 30 32 37 71 1E 68 03 86 71 1F 52 71 .699027q.h..q.Rq
+0160 20 68 01 58 1A 00 00 00 58 2D 4F 62 6A 65 63 74 h.X....X-Object
+0170 2D 4D 65 74 61 2D C3 83 C2 A2 C3 82 C2 98 C3 82 -Meta-..........
+0180 C2 83 71 21 68 03 86 71 22 52 71 23 68 01 58 0C ..q!h..q"Rq#h.X.
+0190 00 00 00 C3 83 C2 A2 C3 82 C2 98 C3 82 C2 83 71 ...............q
+01A0 24 68 03 86 71 25 52 71 26 68 01 58 04 00 00 00 $h..q%Rq&h.X....
+01B0 6E 61 6D 65 71 27 68 03 86 71 28 52 71 29 68 01 nameq'h..q(Rq)h.
+01C0 58 18 00 00 00 2F 41 55 54 48 5F 74 65 73 74 2F X..../AUTH_test/
+01D0 C3 A2 C2 98 C2 83 2F C3 A2 C2 98 C2 83 71 2A 68 ....../......q*h
+01E0 03 86 71 2B 52 71 2C 75 2E ..q+Rq,u.
+ ''',
+ }
+
+ def raw_xattr(output):
+ return binascii.unhexlify(''.join(
+ line[7:55] for line in output.split('\n')
+ ).replace(' ', ''))
+
+ path = os.path.join(self.testdir, str(uuid.uuid4()))
+ for case, xattr_output in cases.items():
+ try:
+ to_write = raw_xattr(xattr_output)
+ with open(path, 'wb') as fp:
+ xattr.setxattr(
+ fp.fileno(), 'user.swift.metadata', to_write)
+ with open(path, 'rb') as fd:
+ actual = diskfile.read_metadata(fd)
+ # name should come out as native strings
+ expected_name = b'/AUTH_test/\xe2\x98\x83/\xe2\x98\x83'
+ if not six.PY2:
+ expected_name = expected_name.decode('utf8')
+ self.assertEqual(actual['name'], expected_name)
+ # other meta will be WSGI strings, though
+ self.assertEqual(
+ actual['X-Object-Meta-\xe2\x98\x83'], '\xe2\x98\x83')
+ except Exception:
+ print('Failure in %s' % case, file=sys.stderr)
+ raise
+
def test_write_read_metadata(self):
path = os.path.join(self.testdir, str(uuid.uuid4()))
metadata = {'name': '/a/c/o',
@@ -596,29 +860,6 @@ class TestDiskFileModuleMethods(unittest.TestCase):
diskfile.write_metadata(fd, metadata)
check_metadata(as_native, str)
- # mock the read path to check the write path encoded persisted metadata
- with mock.patch.object(diskfile, '_decode_metadata', lambda x: x):
- check_metadata(as_bytes, bytes)
-
- # simulate a legacy diskfile that might have persisted
- # (some) unicode metadata
- with mock.patch.object(diskfile, '_encode_metadata', lambda x: x):
- with open(path, 'wb') as fd:
- diskfile.write_metadata(fd, metadata)
- # sanity check: mock read path again to see that we did persist unicode
- with mock.patch.object(diskfile, '_decode_metadata', lambda x: x):
- with open(path, 'rb') as fd:
- actual = diskfile.read_metadata(fd)
- for k, v in actual.items():
- if isinstance(k, six.text_type) and \
- k == u'X-Object-Meta-Strange':
- self.assertIsInstance(v, six.text_type)
- break
- else:
- self.fail('Did not find X-Object-Meta-Strange')
- # check that read_metadata converts binary_type
- check_metadata(as_native, str)
-
@patch_policies
class TestObjectAuditLocationGenerator(unittest.TestCase):
@@ -2809,7 +3050,7 @@ class TestECDiskFileManager(DiskFileManagerMixin, BaseTestCase):
def test_cleanup_ondisk_files_commit_window(self):
# verify that non-durable files are not reclaimed regardless of
# timestamp if written to disk within commit_window
- much_older = Timestamp(time() - 1001).internal
+ much_older = Timestamp(time() - 2000).internal
older = Timestamp(time() - 1001).internal
newer = Timestamp(time() - 900).internal
scenarios = [
@@ -4748,7 +4989,7 @@ class DiskFileMixin(BaseDiskFileTestMixin):
# This is a horrible hack so you can run this test in isolation.
# Some of the ctypes machinery calls os.close(), and that runs afoul
# of our mock.
- with mock.patch.object(utils, '_sys_fallocate', None):
+ with mock.patch.object(libc, '_sys_fallocate', None):
utils.disable_fallocate()
df = self.df_mgr.get_diskfile(self.existing_device, '0', 'abc',
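
The raw_xattr() helper in test_can_read_old_meta turns those captured `xattr -l` dumps back into bytes: each dump line is an offset, a fixed-width run of hex columns, then an ASCII gloss, and the slice bounds select just the hex columns. A standalone sketch of the same idea (the column positions assume xattr's fixed-width layout):

    import binascii

    def hexdump_to_bytes(output, start=7, end=55):
        # keep only the hex-byte columns of each line, drop the spaces,
        # and unhexlify back into the original pickled metadata blob
        hex_digits = ''.join(line[start:end] for line in output.split('\n'))
        return binascii.unhexlify(hex_digits.replace(' ', ''))

The test then writes the recovered blob under user.swift.metadata and checks that diskfile.read_metadata() still returns the object name as a native string, whichever Python and Swift version produced the pickle.
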
diff --git a/test/unit/obj/test_ssync.py b/test/unit/obj/test_ssync.py
index 5db107461..d5da5a6e4 100644
--- a/test/unit/obj/test_ssync.py
+++ b/test/unit/obj/test_ssync.py
@@ -1437,19 +1437,21 @@ class TestSsyncReplication(TestBaseSsync):
rx_objs['o2'] = self._create_ondisk_files(rx_df_mgr, 'o2', policy, t2)
expected_subreqs['POST'].append('o2')
- # o3 is on tx with meta, rx has newer data but no meta
+ # o3 is on tx with meta, rx has newer data but no meta,
+ # meta timestamp has an offset
t3a = next(self.ts_iter)
tx_objs['o3'] = self._create_ondisk_files(tx_df_mgr, 'o3', policy, t3a)
t3b = next(self.ts_iter)
rx_objs['o3'] = self._create_ondisk_files(rx_df_mgr, 'o3', policy, t3b)
t3_meta = next(self.ts_iter)
+ t3_meta = utils.Timestamp(t3_meta, offset=2)
metadata = {'X-Timestamp': t3_meta.internal,
'X-Object-Meta-Test': 'o3',
'X-Object-Sysmeta-Test': 'sys_o3'}
tx_objs['o3'][0].write_metadata(metadata)
expected_subreqs['POST'].append('o3')
- # o4 is on tx with meta, rx has older data and up to date meta
+ # o4 is on tx with meta, rx has older data and up-to-date meta
t4a = next(self.ts_iter)
rx_objs['o4'] = self._create_ondisk_files(rx_df_mgr, 'o4', policy, t4a)
t4b = next(self.ts_iter)
@@ -1464,6 +1466,7 @@ class TestSsyncReplication(TestBaseSsync):
# o5 is on tx with meta, rx is in sync with data and meta
t5 = next(self.ts_iter)
+ t5 = utils.Timestamp(t5, offset=1) # note: use an offset for this test
rx_objs['o5'] = self._create_ondisk_files(rx_df_mgr, 'o5', policy, t5)
tx_objs['o5'] = self._create_ondisk_files(tx_df_mgr, 'o5', policy, t5)
t5_meta = next(self.ts_iter)
@@ -1498,6 +1501,25 @@ class TestSsyncReplication(TestBaseSsync):
tx_objs['o7'][0].write_metadata(metadata)
rx_tombstones['o7'][0].delete(next(self.ts_iter))
+ # o8 is on tx with meta, rx has in-sync data but its meta has a
+ # different offset
+ t8 = next(self.ts_iter)
+ rx_objs['o8'] = self._create_ondisk_files(rx_df_mgr, 'o8', policy, t8)
+ tx_objs['o8'] = self._create_ondisk_files(tx_df_mgr, 'o8', policy, t8)
+ t8_meta = next(self.ts_iter)
+ t8_meta_offset = utils.Timestamp(t8_meta, offset=4)
+ metadata = {'X-Timestamp': t8_meta_offset.internal,
+ 'X-Object-Meta-Test': 'o8',
+ 'X-Object-Sysmeta-Test': 'sys_o8'}
+ tx_objs['o8'][0].write_metadata(metadata)
+ # different ts_meta offset on rx
+ t8_meta_offset = utils.Timestamp(t8_meta, offset=3)
+ metadata = {'X-Timestamp': t8_meta_offset.internal,
+ 'X-Object-Meta-Test': 'o8',
+ 'X-Object-Sysmeta-Test': 'sys_o8'}
+ rx_objs['o8'][0].write_metadata(metadata)
+ expected_subreqs['POST'].append('o8')
+
suffixes = set()
for diskfiles in list(tx_objs.values()) + list(tx_tombstones.values()):
for df in diskfiles:
@@ -1515,13 +1537,13 @@ class TestSsyncReplication(TestBaseSsync):
# run the sync protocol...
success, in_sync_objs = sender()
- self.assertEqual(7, len(in_sync_objs))
+ self.assertEqual(8, len(in_sync_objs))
self.assertTrue(success)
# verify protocol
results = self._analyze_trace(trace)
- self.assertEqual(7, len(results['tx_missing']))
- self.assertEqual(5, len(results['rx_missing']))
+ self.assertEqual(8, len(results['tx_missing']))
+ self.assertEqual(6, len(results['rx_missing']))
for subreq in results.get('tx_updates'):
obj = subreq['path'].split('/')[3]
method = subreq['method']
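
The new o8 case relies on offsets acting as tie-breakers: two timestamps with the same wall-clock value but different offsets compare unequal, so the receiver's meta is strictly older and a POST subrequest is expected. A quick check, assuming a swift checkout:

    from swift.common.utils import Timestamp

    t = 1683065478.25004  # any wall-clock value
    assert Timestamp(t, offset=4) != Timestamp(t, offset=3)
    assert Timestamp(t, offset=4) > Timestamp(t, offset=3)
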
diff --git a/test/unit/obj/test_ssync_receiver.py b/test/unit/obj/test_ssync_receiver.py
index b943e9ac6..709392744 100644
--- a/test/unit/obj/test_ssync_receiver.py
+++ b/test/unit/obj/test_ssync_receiver.py
@@ -2524,6 +2524,24 @@ class TestModuleMethods(unittest.TestCase):
self.assertEqual(
expected, ssync_receiver.decode_missing(msg.encode('ascii')))
+ # timestamps have offsets
+ t_data_offset = utils.Timestamp(t_data, offset=99)
+ t_meta_offset = utils.Timestamp(t_meta, offset=1)
+ t_ctype_offset = utils.Timestamp(t_ctype, offset=2)
+ expected = dict(object_hash=object_hash,
+ ts_data=t_data_offset,
+ ts_meta=t_meta_offset,
+ ts_ctype=t_ctype_offset,
+ durable=True)
+ msg = ('%s %s_0000000000000063 m:%x__1,t:%x__2'
+ % (object_hash, t_data.internal, d_meta_data,
+ d_ctype_data))
+ self.assertEqual(
+ expected, ssync_receiver.decode_missing(msg.encode('ascii')))
+
# hex content type delta may be zero
msg = '%s %s t:0,m:%x' % (object_hash, t_data.internal, d_meta_data)
expected = dict(object_hash=object_hash,
diff --git a/test/unit/obj/test_ssync_sender.py b/test/unit/obj/test_ssync_sender.py
index a5eb203c2..d577f6867 100644
--- a/test/unit/obj/test_ssync_sender.py
+++ b/test/unit/obj/test_ssync_sender.py
@@ -2040,6 +2040,19 @@ class TestModuleMethods(unittest.TestCase):
ssync_sender.encode_missing(object_hash, t_data, t_meta, t_type,
durable=True))
+ # timestamps have offsets
+ t_data_offset = utils.Timestamp(t_data, offset=99)
+ t_meta_offset = utils.Timestamp(t_meta, offset=1)
+ t_type_offset = utils.Timestamp(t_type, offset=2)
+ expected = ('%s %s m:%x__1,t:%x__2'
+ % (object_hash, t_data_offset.internal, d_meta_data,
+ d_type_data))
+ self.assertEqual(
+ expected.encode('ascii'),
+ ssync_sender.encode_missing(
+ object_hash, t_data_offset, t_meta_offset, t_type_offset,
+ durable=True))
+
# test encode and decode functions invert
expected = {'object_hash': object_hash, 'ts_meta': t_meta,
'ts_data': t_data, 'ts_ctype': t_type, 'durable': False}
@@ -2053,6 +2066,38 @@ class TestModuleMethods(unittest.TestCase):
actual = ssync_receiver.decode_missing(msg)
self.assertEqual(expected, actual)
+ # test encode and decode functions invert with offset
+ t_data_offset = utils.Timestamp(t_data, offset=1)
+ expected = {'object_hash': object_hash, 'ts_meta': t_meta,
+ 'ts_data': t_data_offset, 'ts_ctype': t_type,
+ 'durable': False}
+ msg = ssync_sender.encode_missing(**expected)
+ actual = ssync_receiver.decode_missing(msg)
+ self.assertEqual(expected, actual)
+
+ t_meta_offset = utils.Timestamp(t_data, offset=2)
+ expected = {'object_hash': object_hash, 'ts_meta': t_meta_offset,
+ 'ts_data': t_data, 'ts_ctype': t_type,
+ 'durable': False}
+ msg = ssync_sender.encode_missing(**expected)
+ actual = ssync_receiver.decode_missing(msg)
+ self.assertEqual(expected, actual)
+
+ t_type_offset = utils.Timestamp(t_type, offset=3)
+ expected = {'object_hash': object_hash, 'ts_meta': t_meta,
+ 'ts_data': t_data, 'ts_ctype': t_type_offset,
+ 'durable': False}
+ msg = ssync_sender.encode_missing(**expected)
+ actual = ssync_receiver.decode_missing(msg)
+ self.assertEqual(expected, actual)
+
+ expected = {'object_hash': object_hash, 'ts_meta': t_meta_offset,
+ 'ts_data': t_data_offset, 'ts_ctype': t_type_offset,
+ 'durable': False}
+ msg = ssync_sender.encode_missing(**expected)
+ actual = ssync_receiver.decode_missing(msg)
+ self.assertEqual(expected, actual)
+
def test_decode_wanted(self):
parts = ['d']
expected = {'data': True}
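
Both modules' tests converge on the same invariant: encode_missing() and decode_missing() must invert each other even when any of the three timestamps carries an offset, which now rides along as a '__<hex>' suffix on the hex delta. A minimal round-trip in the same pattern as the tests, with a hypothetical object hash:

    from swift.common.utils import Timestamp
    from swift.obj import ssync_receiver, ssync_sender

    object_hash = '9d41d8cd98f00b204e9800998ecf0abc'  # hypothetical value
    expected = {'object_hash': object_hash,
                'ts_data': Timestamp(1683065478.25004, offset=1),
                'ts_ctype': Timestamp(1683065479.0),
                'ts_meta': Timestamp(1683065480.0),
                'durable': False}
    msg = ssync_sender.encode_missing(**expected)
    assert ssync_receiver.decode_missing(msg) == expected
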
diff --git a/test/unit/obj/test_updater.py b/test/unit/obj/test_updater.py
index 941ec3dc3..1629e0ac2 100644
--- a/test/unit/obj/test_updater.py
+++ b/test/unit/obj/test_updater.py
@@ -12,10 +12,9 @@
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-from queue import PriorityQueue
-
import eventlet
import six.moves.cPickle as pickle
+from six.moves.queue import PriorityQueue
import mock
import os
import unittest
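
The import swap is a straight py2/py3 compatibility fix: the stdlib module is named Queue on Python 2 and queue on Python 3, and six.moves.queue resolves to whichever is present. For example:

    from six.moves.queue import PriorityQueue  # Queue.PriorityQueue on py2

    q = PriorityQueue()
    q.put((2, 'later'))
    q.put((1, 'sooner'))
    assert q.get() == (1, 'sooner')  # lowest-sorting item comes out first
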
diff --git a/test/unit/proxy/controllers/test_base.py b/test/unit/proxy/controllers/test_base.py
index 73d61c6ef..c5004bc12 100644
--- a/test/unit/proxy/controllers/test_base.py
+++ b/test/unit/proxy/controllers/test_base.py
@@ -499,7 +499,7 @@ class TestFuncs(BaseTest):
expected)
self.assertEqual(get_cache_key("account", "cont", shard="listing"),
- 'shard-listing/account/cont')
+ 'shard-listing-v2/account/cont')
self.assertEqual(get_cache_key("account", "cont", shard="updating"),
'shard-updating-v2/account/cont')
self.assertRaises(ValueError,
@@ -1155,17 +1155,74 @@ class TestFuncs(BaseTest):
base = Controller(self.app)
src_headers = {'x-remove-base-meta-owner': 'x',
'x-base-meta-size': '151M',
+ 'x-base-sysmeta-mysysmeta': 'myvalue',
+ 'x-Backend-No-Timestamp-Update': 'true',
+ 'X-Backend-Storage-Policy-Index': '3',
+ 'x-backendoftheworld': 'ignored',
'new-owner': 'Kun'}
req = Request.blank('/v1/a/c/o', headers=src_headers)
+ dst_headers = base.generate_request_headers(req)
+ expected_headers = {'x-backend-no-timestamp-update': 'true',
+ 'x-backend-storage-policy-index': '3',
+ 'x-timestamp': mock.ANY,
+ 'x-trans-id': '-',
+ 'Referer': 'GET http://localhost/v1/a/c/o',
+ 'connection': 'close',
+ 'user-agent': 'proxy-server %d' % os.getpid()}
+ for k, v in expected_headers.items():
+ self.assertIn(k, dst_headers)
+ self.assertEqual(v, dst_headers[k])
+ for k, v in expected_headers.items():
+ dst_headers.pop(k)
+ self.assertFalse(dst_headers)
+
+ # with transfer=True
+ req = Request.blank('/v1/a/c/o', headers=src_headers)
dst_headers = base.generate_request_headers(req, transfer=True)
- expected_headers = {'x-base-meta-owner': '',
- 'x-base-meta-size': '151M',
+ expected_headers.update({'x-base-meta-owner': '',
+ 'x-base-meta-size': '151M',
+ 'x-base-sysmeta-mysysmeta': 'myvalue'})
+ for k, v in expected_headers.items():
+ self.assertIn(k, dst_headers)
+ self.assertEqual(v, dst_headers[k])
+ for k, v in expected_headers.items():
+ dst_headers.pop(k)
+ self.assertFalse(dst_headers)
+
+ # with additional
+ req = Request.blank('/v1/a/c/o', headers=src_headers)
+ dst_headers = base.generate_request_headers(
+ req, transfer=True,
+ additional=src_headers)
+ expected_headers.update({'x-remove-base-meta-owner': 'x',
+ 'x-backendoftheworld': 'ignored',
+ 'new-owner': 'Kun'})
+ for k, v in expected_headers.items():
+ self.assertIn(k, dst_headers)
+ self.assertEqual(v, dst_headers[k])
+ for k, v in expected_headers.items():
+ dst_headers.pop(k)
+ self.assertFalse(dst_headers)
+
+ # with additional, verify precedence
+ req = Request.blank('/v1/a/c/o', headers=src_headers)
+ dst_headers = base.generate_request_headers(
+ req, transfer=False,
+ additional={'X-Backend-Storage-Policy-Index': '2',
+ 'X-Timestamp': '1234.56789'})
+ expected_headers = {'x-backend-no-timestamp-update': 'true',
+ 'x-backend-storage-policy-index': '2',
+ 'x-timestamp': '1234.56789',
+ 'x-trans-id': '-',
+ 'Referer': 'GET http://localhost/v1/a/c/o',
'connection': 'close',
'user-agent': 'proxy-server %d' % os.getpid()}
for k, v in expected_headers.items():
self.assertIn(k, dst_headers)
self.assertEqual(v, dst_headers[k])
- self.assertNotIn('new-owner', dst_headers)
+ for k, v in expected_headers.items():
+ dst_headers.pop(k)
+ self.assertFalse(dst_headers)
def test_generate_request_headers_change_backend_user_agent(self):
base = Controller(self.app)
@@ -1205,7 +1262,8 @@ class TestFuncs(BaseTest):
'x-base-meta-size': '151M',
'new-owner': 'Kun'}
dst_headers = base.generate_request_headers(None,
- additional=src_headers)
+ additional=src_headers,
+ transfer=True)
expected_headers = {'x-base-meta-size': '151M',
'connection': 'close'}
for k, v in expected_headers.items():
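
Taken together, the four scenarios pin down the contract of generate_request_headers(): x-backend-* headers always pass through, transfer=True additionally copies metadata and sysmeta from the source request, and anything passed via 'additional' wins a conflict. A simplified model of that merge order (a hypothetical helper, not the controller code):

    def merged_headers(defaults, backend_and_transfer, additional):
        headers = dict(defaults)              # x-timestamp, x-trans-id, ...
        headers.update(backend_and_transfer)  # x-backend-*, meta/sysmeta
        headers.update(additional)            # explicit overrides win
        return headers

    print(merged_headers(
        {'x-timestamp': 'now'},
        {'x-backend-storage-policy-index': '3'},
        {'x-backend-storage-policy-index': '2', 'x-timestamp': '1234.56789'}))
    # {'x-timestamp': '1234.56789', 'x-backend-storage-policy-index': '2'}
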
diff --git a/test/unit/proxy/controllers/test_container.py b/test/unit/proxy/controllers/test_container.py
index c010c7227..d8b136757 100644
--- a/test/unit/proxy/controllers/test_container.py
+++ b/test/unit/proxy/controllers/test_container.py
@@ -24,7 +24,8 @@ from six.moves import urllib
from swift.common.constraints import CONTAINER_LISTING_LIMIT
from swift.common.swob import Request, bytes_to_wsgi, str_to_wsgi, wsgi_quote
-from swift.common.utils import ShardRange, Timestamp
+from swift.common.utils import ShardRange, Timestamp, Namespace, \
+ NamespaceBoundList
from swift.proxy import server as proxy_server
from swift.proxy.controllers.base import headers_to_container_info, \
Controller, get_container_info, get_cache_key
@@ -1970,6 +1971,7 @@ class TestContainerController(TestRingBase):
(200, sr_objs[2], shard_resp_hdrs[2])
]
# NB marker always advances to last object name
+ # NB end_markers are the upper bound of the current available shard range
expected_requests = [
# path, headers, params
('a/c', {'X-Backend-Record-Type': 'auto'},
@@ -1991,7 +1993,7 @@ class TestContainerController(TestRingBase):
self.check_response(resp, root_resp_hdrs,
exp_sharding_state='sharding')
self.assertIn('swift.cache', resp.request.environ)
- self.assertNotIn('shard-listing/a/c',
+ self.assertNotIn('shard-listing-v2/a/c',
resp.request.environ['swift.cache'].store)
def test_GET_sharded_container_gap_in_shards_memcache(self):
@@ -2035,15 +2037,17 @@ class TestContainerController(TestRingBase):
(200, sr_objs[2], shard_resp_hdrs[2])
]
# NB marker always advances to last object name
+ # NB compaction of shard range data to cached bounds loses the gaps, so
+ # end_markers are the lower bound of the next available shard range
expected_requests = [
# path, headers, params
('a/c', {'X-Backend-Record-Type': 'auto'},
dict(states='listing')), # 200
(shard_ranges[0].name, {'X-Backend-Record-Type': 'auto'},
- dict(marker='', end_marker='ham\x00', states='listing',
+ dict(marker='', end_marker='onion\x00', states='listing',
limit=str(limit))), # 200
(shard_ranges[1].name, {'X-Backend-Record-Type': 'auto'},
- dict(marker='h', end_marker='pie\x00', states='listing',
+ dict(marker='h', end_marker='rhubarb\x00', states='listing',
limit=str(limit - len(sr_objs[0])))), # 200
(shard_ranges[2].name, {'X-Backend-Record-Type': 'auto'},
dict(marker='p', end_marker='', states='listing',
@@ -2055,11 +2059,14 @@ class TestContainerController(TestRingBase):
# root object count will be overridden by actual length of listing
self.check_response(resp, root_resp_hdrs)
self.assertIn('swift.cache', resp.request.environ)
- self.assertIn('shard-listing/a/c',
+ self.assertIn('shard-listing-v2/a/c',
resp.request.environ['swift.cache'].store)
+ # NB compact bounds in cache do not reveal the gap in shard ranges
self.assertEqual(
- sr_dicts,
- resp.request.environ['swift.cache'].store['shard-listing/a/c'])
+ [['', '.shards_a/c_ham'],
+ ['onion', '.shards_a/c_pie'],
+ ['rhubarb', '.shards_a/c_']],
+ resp.request.environ['swift.cache'].store['shard-listing-v2/a/c'])
def test_GET_sharded_container_empty_shard(self):
# verify ordered listing when a shard is empty
@@ -2699,10 +2706,14 @@ class TestContainerController(TestRingBase):
def _setup_shard_range_stubs(self):
self.memcache = FakeMemcache()
shard_bounds = (('', 'ham'), ('ham', 'pie'), ('pie', ''))
- shard_ranges = [
- ShardRange('.shards_a/c_%s' % upper, Timestamp.now(), lower, upper)
- for lower, upper in shard_bounds]
- self.sr_dicts = [dict(sr) for sr in shard_ranges]
+ self.ns_dicts = [{'name': '.shards_a/c_%s' % upper,
+ 'lower': lower,
+ 'upper': upper}
+ for lower, upper in shard_bounds]
+ self.namespaces = [Namespace(**ns) for ns in self.ns_dicts]
+ self.ns_bound_list = NamespaceBoundList.parse(self.namespaces)
+ self.sr_dicts = [dict(ShardRange(timestamp=Timestamp.now(), **ns))
+ for ns in self.ns_dicts]
self._stub_shards_dump = json.dumps(self.sr_dicts).encode('ascii')
self.root_resp_hdrs = {
'Accept-Ranges': 'bytes',
@@ -2737,22 +2748,24 @@ class TestContainerController(TestRingBase):
req, backend_req,
extra_hdrs={'X-Backend-Record-Type': record_type,
'X-Backend-Override-Shard-Name-Filter': 'sharded'})
- self._check_response(resp, self.sr_dicts, {
+ self._check_response(resp, self.ns_dicts, {
'X-Backend-Recheck-Container-Existence': '60',
'X-Backend-Record-Type': 'shard',
'X-Backend-Sharding-State': sharding_state})
+
+ cache_key = 'shard-listing-v2/a/c'
self.assertEqual(
[mock.call.get('container/a/c'),
- mock.call.set('shard-listing/a/c', self.sr_dicts,
+ mock.call.set(cache_key, self.ns_bound_list.bounds,
time=exp_recheck_listing),
mock.call.set('container/a/c', mock.ANY, time=60)],
self.memcache.calls)
self.assertEqual(sharding_state,
self.memcache.calls[2][1][1]['sharding_state'])
self.assertIn('swift.infocache', req.environ)
- self.assertIn('shard-listing/a/c', req.environ['swift.infocache'])
- self.assertEqual(tuple(self.sr_dicts),
- req.environ['swift.infocache']['shard-listing/a/c'])
+ self.assertIn(cache_key, req.environ['swift.infocache'])
+ self.assertEqual(self.ns_bound_list,
+ req.environ['swift.infocache'][cache_key])
self.assertEqual(
[x[0][0] for x in self.logger.logger.log_dict['increment']],
['container.info.cache.miss',
@@ -2760,7 +2773,7 @@ class TestContainerController(TestRingBase):
# container is sharded and proxy has that state cached, but
# no shard ranges cached; expect a cache miss and write-back
- self.memcache.delete('shard-listing/a/c')
+ self.memcache.delete(cache_key)
self.memcache.clear_calls()
self.logger.clear()
req = self._build_request({'X-Backend-Record-Type': record_type},
@@ -2774,23 +2787,23 @@ class TestContainerController(TestRingBase):
req, backend_req,
extra_hdrs={'X-Backend-Record-Type': record_type,
'X-Backend-Override-Shard-Name-Filter': 'sharded'})
- self._check_response(resp, self.sr_dicts, {
+ self._check_response(resp, self.ns_dicts, {
'X-Backend-Recheck-Container-Existence': '60',
'X-Backend-Record-Type': 'shard',
'X-Backend-Sharding-State': sharding_state})
self.assertEqual(
[mock.call.get('container/a/c'),
- mock.call.get('shard-listing/a/c', raise_on_error=True),
- mock.call.set('shard-listing/a/c', self.sr_dicts,
+ mock.call.get(cache_key, raise_on_error=True),
+ mock.call.set(cache_key, self.ns_bound_list.bounds,
time=exp_recheck_listing),
# Since there was a backend request, we go ahead and cache
# container info, too
mock.call.set('container/a/c', mock.ANY, time=60)],
self.memcache.calls)
self.assertIn('swift.infocache', req.environ)
- self.assertIn('shard-listing/a/c', req.environ['swift.infocache'])
- self.assertEqual(tuple(self.sr_dicts),
- req.environ['swift.infocache']['shard-listing/a/c'])
+ self.assertIn(cache_key, req.environ['swift.infocache'])
+ self.assertEqual(self.ns_bound_list,
+ req.environ['swift.infocache'][cache_key])
self.assertEqual(
[x[0][0] for x in self.logger.logger.log_dict['increment']],
['container.info.cache.hit',
@@ -2803,18 +2816,18 @@ class TestContainerController(TestRingBase):
req = self._build_request({'X-Backend-Record-Type': record_type},
{'states': 'listing'}, {})
resp = req.get_response(self.app)
- self._check_response(resp, self.sr_dicts, {
+ self._check_response(resp, self.ns_dicts, {
'X-Backend-Cached-Results': 'true',
'X-Backend-Record-Type': 'shard',
'X-Backend-Sharding-State': sharding_state})
self.assertEqual(
[mock.call.get('container/a/c'),
- mock.call.get('shard-listing/a/c', raise_on_error=True)],
+ mock.call.get(cache_key, raise_on_error=True)],
self.memcache.calls)
self.assertIn('swift.infocache', req.environ)
- self.assertIn('shard-listing/a/c', req.environ['swift.infocache'])
- self.assertEqual(tuple(self.sr_dicts),
- req.environ['swift.infocache']['shard-listing/a/c'])
+ self.assertIn(cache_key, req.environ['swift.infocache'])
+ self.assertEqual(self.ns_bound_list,
+ req.environ['swift.infocache'][cache_key])
self.assertEqual(
[x[0][0] for x in self.logger.logger.log_dict['increment']],
['container.info.cache.hit',
@@ -2836,22 +2849,22 @@ class TestContainerController(TestRingBase):
req, backend_req,
extra_hdrs={'X-Backend-Record-Type': record_type,
'X-Backend-Override-Shard-Name-Filter': 'sharded'})
- self._check_response(resp, self.sr_dicts, {
+ self._check_response(resp, self.ns_dicts, {
'X-Backend-Recheck-Container-Existence': '60',
'X-Backend-Record-Type': 'shard',
'X-Backend-Sharding-State': sharding_state})
self.assertEqual(
[mock.call.get('container/a/c'),
- mock.call.set('shard-listing/a/c', self.sr_dicts,
+ mock.call.set(cache_key, self.ns_bound_list.bounds,
time=exp_recheck_listing),
# Since there was a backend request, we go ahead and cache
# container info, too
mock.call.set('container/a/c', mock.ANY, time=60)],
self.memcache.calls)
self.assertIn('swift.infocache', req.environ)
- self.assertIn('shard-listing/a/c', req.environ['swift.infocache'])
- self.assertEqual(tuple(self.sr_dicts),
- req.environ['swift.infocache']['shard-listing/a/c'])
+ self.assertIn(cache_key, req.environ['swift.infocache'])
+ self.assertEqual(self.ns_bound_list,
+ req.environ['swift.infocache'][cache_key])
self.assertEqual(
[x[0][0] for x in self.logger.logger.log_dict['increment']],
['container.info.cache.hit',
@@ -2864,18 +2877,18 @@ class TestContainerController(TestRingBase):
{'states': 'listing'}, {})
with mock.patch('random.random', return_value=0.11):
resp = req.get_response(self.app)
- self._check_response(resp, self.sr_dicts, {
+ self._check_response(resp, self.ns_dicts, {
'X-Backend-Cached-Results': 'true',
'X-Backend-Record-Type': 'shard',
'X-Backend-Sharding-State': sharding_state})
self.assertEqual(
[mock.call.get('container/a/c'),
- mock.call.get('shard-listing/a/c', raise_on_error=True)],
+ mock.call.get(cache_key, raise_on_error=True)],
self.memcache.calls)
self.assertIn('swift.infocache', req.environ)
- self.assertIn('shard-listing/a/c', req.environ['swift.infocache'])
- self.assertEqual(tuple(self.sr_dicts),
- req.environ['swift.infocache']['shard-listing/a/c'])
+ self.assertIn(cache_key, req.environ['swift.infocache'])
+ self.assertEqual(self.ns_bound_list,
+ req.environ['swift.infocache'][cache_key])
self.assertEqual(
[x[0][0] for x in self.logger.logger.log_dict['increment']],
['container.info.cache.hit',
@@ -2890,15 +2903,15 @@ class TestContainerController(TestRingBase):
infocache=req.environ['swift.infocache'])
with mock.patch('random.random', return_value=0.11):
resp = req.get_response(self.app)
- self._check_response(resp, self.sr_dicts, {
+ self._check_response(resp, self.ns_dicts, {
'X-Backend-Cached-Results': 'true',
'X-Backend-Record-Type': 'shard',
'X-Backend-Sharding-State': sharding_state})
self.assertEqual([], self.memcache.calls)
self.assertIn('swift.infocache', req.environ)
- self.assertIn('shard-listing/a/c', req.environ['swift.infocache'])
- self.assertEqual(tuple(self.sr_dicts),
- req.environ['swift.infocache']['shard-listing/a/c'])
+ self.assertIn(cache_key, req.environ['swift.infocache'])
+ self.assertEqual(self.ns_bound_list,
+ req.environ['swift.infocache'][cache_key])
self.assertEqual(
[x[0][0] for x in self.logger.logger.log_dict['increment']],
['container.shard_listing.infocache.hit'])
@@ -2916,7 +2929,7 @@ class TestContainerController(TestRingBase):
num_resp=self.CONTAINER_REPLICAS)
self.assertEqual(
[mock.call.delete('container/a/c'),
- mock.call.delete('shard-listing/a/c')],
+ mock.call.delete(cache_key)],
self.memcache.calls)
def test_get_from_shards_add_root_spi(self):
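
The shard-listing-v2 key caches a much smaller payload than the old full shard-range dicts: just [lower, name] pairs, with each entry's upper bound implied by the next entry's lower. With the contiguous fixture bounds this looks like the following (assuming a swift checkout):

    from swift.common.utils import Namespace, NamespaceBoundList

    namespaces = [Namespace(name='.shards_a/c_ham', lower='', upper='ham'),
                  Namespace(name='.shards_a/c_pie', lower='ham', upper='pie'),
                  Namespace(name='.shards_a/c_', lower='pie', upper='')]
    bounds = NamespaceBoundList.parse(namespaces).bounds
    assert bounds == [['', '.shards_a/c_ham'],
                      ['ham', '.shards_a/c_pie'],
                      ['pie', '.shards_a/c_']]
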
@@ -3046,7 +3059,7 @@ class TestContainerController(TestRingBase):
# deleted from cache
self.assertEqual(
[mock.call.get('container/a/c'),
- mock.call.get('shard-listing/a/c', raise_on_error=True),
+ mock.call.get('shard-listing-v2/a/c', raise_on_error=True),
mock.call.set('container/a/c', mock.ANY, time=6.0)],
self.memcache.calls)
self.assertEqual(404, self.memcache.calls[2][1][1]['status'])
@@ -3079,7 +3092,7 @@ class TestContainerController(TestRingBase):
self.assertNotIn('X-Backend-Cached-Results', resp.headers)
self.assertEqual(
[mock.call.get('container/a/c'),
- mock.call.get('shard-listing/a/c', raise_on_error=True),
+ mock.call.get('shard-listing-v2/a/c', raise_on_error=True),
mock.call.set('container/a/c', mock.ANY, time=6.0)],
self.memcache.calls)
self.assertEqual(404, self.memcache.calls[2][1][1]['status'])
@@ -3098,7 +3111,7 @@ class TestContainerController(TestRingBase):
info['status'] = 200
info['sharding_state'] = 'sharded'
self.memcache.set('container/a/c', info)
- self.memcache.set('shard-listing/a/c', self.sr_dicts)
+ self.memcache.set('shard-listing-v2/a/c', self.ns_bound_list.bounds)
self.memcache.clear_calls()
req_hdrs = {'X-Backend-Record-Type': record_type}
@@ -3106,7 +3119,7 @@ class TestContainerController(TestRingBase):
resp = req.get_response(self.app)
self.assertEqual(
[mock.call.get('container/a/c'),
- mock.call.get('shard-listing/a/c', raise_on_error=True)],
+ mock.call.get('shard-listing-v2/a/c', raise_on_error=True)],
self.memcache.calls)
self.assertEqual({'container.info.cache.hit': 1,
'container.shard_listing.cache.hit': 1},
@@ -3122,26 +3135,26 @@ class TestContainerController(TestRingBase):
resp = self._do_test_GET_shard_ranges_read_from_cache(
{'states': 'listing'}, 'shard')
- self._check_response(resp, self.sr_dicts, exp_hdrs)
+ self._check_response(resp, self.ns_dicts, exp_hdrs)
resp = self._do_test_GET_shard_ranges_read_from_cache(
{'states': 'listing', 'reverse': 'true'}, 'shard')
- exp_shards = list(self.sr_dicts)
+ exp_shards = list(self.ns_dicts)
exp_shards.reverse()
self._check_response(resp, exp_shards, exp_hdrs)
resp = self._do_test_GET_shard_ranges_read_from_cache(
{'states': 'listing', 'marker': 'jam'}, 'shard')
- self._check_response(resp, self.sr_dicts[1:], exp_hdrs)
+ self._check_response(resp, self.ns_dicts[1:], exp_hdrs)
resp = self._do_test_GET_shard_ranges_read_from_cache(
{'states': 'listing', 'marker': 'jam', 'end_marker': 'kale'},
'shard')
- self._check_response(resp, self.sr_dicts[1:2], exp_hdrs)
+ self._check_response(resp, self.ns_dicts[1:2], exp_hdrs)
resp = self._do_test_GET_shard_ranges_read_from_cache(
{'states': 'listing', 'includes': 'egg'}, 'shard')
- self._check_response(resp, self.sr_dicts[:1], exp_hdrs)
+ self._check_response(resp, self.ns_dicts[:1], exp_hdrs)
# override _get_from_shards so that the response contains the shard
# listing that we want to verify even though the record_type is 'auto'
@@ -3153,22 +3166,22 @@ class TestContainerController(TestRingBase):
mock_get_from_shards):
resp = self._do_test_GET_shard_ranges_read_from_cache(
{'states': 'listing', 'reverse': 'true'}, 'auto')
- exp_shards = list(self.sr_dicts)
+ exp_shards = list(self.ns_dicts)
exp_shards.reverse()
self._check_response(resp, exp_shards, exp_hdrs)
resp = self._do_test_GET_shard_ranges_read_from_cache(
{'states': 'listing', 'marker': 'jam'}, 'auto')
- self._check_response(resp, self.sr_dicts[1:], exp_hdrs)
+ self._check_response(resp, self.ns_dicts[1:], exp_hdrs)
resp = self._do_test_GET_shard_ranges_read_from_cache(
{'states': 'listing', 'marker': 'jam', 'end_marker': 'kale'},
'auto')
- self._check_response(resp, self.sr_dicts[1:2], exp_hdrs)
+ self._check_response(resp, self.ns_dicts[1:2], exp_hdrs)
resp = self._do_test_GET_shard_ranges_read_from_cache(
{'states': 'listing', 'includes': 'egg'}, 'auto')
- self._check_response(resp, self.sr_dicts[:1], exp_hdrs)
+ self._check_response(resp, self.ns_dicts[:1], exp_hdrs)
def _do_test_GET_shard_ranges_write_to_cache(self, params, record_type):
# verify that shard range listings are written to cache when appropriate
@@ -3193,7 +3206,8 @@ class TestContainerController(TestRingBase):
expected_hdrs.update(resp_hdrs)
self.assertEqual(
[mock.call.get('container/a/c'),
- mock.call.set('shard-listing/a/c', self.sr_dicts, time=600),
+ mock.call.set(
+ 'shard-listing-v2/a/c', self.ns_bound_list.bounds, time=600),
mock.call.set('container/a/c', mock.ANY, time=60)],
self.memcache.calls)
# shards were cached
@@ -3213,26 +3227,26 @@ class TestContainerController(TestRingBase):
resp = self._do_test_GET_shard_ranges_write_to_cache(
{'states': 'listing'}, 'shard')
- self._check_response(resp, self.sr_dicts, exp_hdrs)
+ self._check_response(resp, self.ns_dicts, exp_hdrs)
resp = self._do_test_GET_shard_ranges_write_to_cache(
{'states': 'listing', 'reverse': 'true'}, 'shard')
- exp_shards = list(self.sr_dicts)
+ exp_shards = list(self.ns_dicts)
exp_shards.reverse()
self._check_response(resp, exp_shards, exp_hdrs)
resp = self._do_test_GET_shard_ranges_write_to_cache(
{'states': 'listing', 'marker': 'jam'}, 'shard')
- self._check_response(resp, self.sr_dicts[1:], exp_hdrs)
+ self._check_response(resp, self.ns_dicts[1:], exp_hdrs)
resp = self._do_test_GET_shard_ranges_write_to_cache(
{'states': 'listing', 'marker': 'jam', 'end_marker': 'kale'},
'shard')
- self._check_response(resp, self.sr_dicts[1:2], exp_hdrs)
+ self._check_response(resp, self.ns_dicts[1:2], exp_hdrs)
resp = self._do_test_GET_shard_ranges_write_to_cache(
{'states': 'listing', 'includes': 'egg'}, 'shard')
- self._check_response(resp, self.sr_dicts[:1], exp_hdrs)
+ self._check_response(resp, self.ns_dicts[:1], exp_hdrs)
# override _get_from_shards so that the response contains the shard
# listing that we want to verify even though the record_type is 'auto'
@@ -3244,22 +3258,22 @@ class TestContainerController(TestRingBase):
mock_get_from_shards):
resp = self._do_test_GET_shard_ranges_write_to_cache(
{'states': 'listing', 'reverse': 'true'}, 'auto')
- exp_shards = list(self.sr_dicts)
+ exp_shards = list(self.ns_dicts)
exp_shards.reverse()
self._check_response(resp, exp_shards, exp_hdrs)
resp = self._do_test_GET_shard_ranges_write_to_cache(
{'states': 'listing', 'marker': 'jam'}, 'auto')
- self._check_response(resp, self.sr_dicts[1:], exp_hdrs)
+ self._check_response(resp, self.ns_dicts[1:], exp_hdrs)
resp = self._do_test_GET_shard_ranges_write_to_cache(
{'states': 'listing', 'marker': 'jam', 'end_marker': 'kale'},
'auto')
- self._check_response(resp, self.sr_dicts[1:2], exp_hdrs)
+ self._check_response(resp, self.ns_dicts[1:2], exp_hdrs)
resp = self._do_test_GET_shard_ranges_write_to_cache(
{'states': 'listing', 'includes': 'egg'}, 'auto')
- self._check_response(resp, self.sr_dicts[:1], exp_hdrs)
+ self._check_response(resp, self.ns_dicts[:1], exp_hdrs)
def test_GET_shard_ranges_write_to_cache_with_x_newest(self):
# when x-newest is sent, verify that there is no cache lookup to check
@@ -3285,10 +3299,11 @@ class TestContainerController(TestRingBase):
'X-Backend-Override-Shard-Name-Filter': 'sharded'})
expected_hdrs = {'X-Backend-Recheck-Container-Existence': '60'}
expected_hdrs.update(resp_hdrs)
- self._check_response(resp, self.sr_dicts, expected_hdrs)
+ self._check_response(resp, self.ns_dicts, expected_hdrs)
self.assertEqual(
[mock.call.get('container/a/c'),
- mock.call.set('shard-listing/a/c', self.sr_dicts, time=600),
+ mock.call.set(
+ 'shard-listing-v2/a/c', self.ns_bound_list.bounds, time=600),
mock.call.set('container/a/c', mock.ANY, time=60)],
self.memcache.calls)
self.assertEqual('sharded',
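The container-controller hunks above all stem from one change: shard listings are now cached under a 'shard-listing-v2' key as a compact list of namespace bounds (self.ns_bound_list.bounds) rather than full shard range dicts, and responses are checked against self.ns_dicts. A minimal sketch of the compact form, assuming each bound is a [lower_bound, shard_name] pair; the class and shard names here are illustrative, not Swift's real API:

    import bisect

    class BoundList(object):
        # illustrative stand-in for the v2 cached shard listing: a sorted
        # list of [lower_bound, shard_name] pairs is all memcache holds
        def __init__(self, bounds):
            self.bounds = bounds

        def find(self, item):
            # right-most namespace whose lower bound <= item
            lowers = [lower for lower, _name in self.bounds]
            return self.bounds[max(bisect.bisect_right(lowers, item) - 1, 0)]

    cached = BoundList([['', '.shards_a/c_egg'],
                        ['h', '.shards_a/c_jam'],
                        ['k', '.shards_a/c_kale']])
    assert cached.find('egg') == ['', '.shards_a/c_egg']
    assert cached.find('jam') == ['h', '.shards_a/c_jam']

Storing only bounds shrinks the cache entry and still lets a marker such as 'jam' be mapped to its shard, which is why the assertions above slice self.ns_dicts by marker.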
diff --git a/test/unit/proxy/controllers/test_obj.py b/test/unit/proxy/controllers/test_obj.py
index bf32a059a..b268e008e 100644
--- a/test/unit/proxy/controllers/test_obj.py
+++ b/test/unit/proxy/controllers/test_obj.py
@@ -39,8 +39,9 @@ else:
import swift
from swift.common import utils, swob, exceptions
-from swift.common.exceptions import ChunkWriteTimeout
-from swift.common.utils import Timestamp, list_from_csv, md5
+from swift.common.exceptions import ChunkWriteTimeout, ShortReadError, \
+ ChunkReadTimeout
+from swift.common.utils import Timestamp, list_from_csv, md5, FileLikeIter
from swift.proxy import server as proxy_server
from swift.proxy.controllers import obj
from swift.proxy.controllers.base import \
@@ -4926,7 +4927,7 @@ class TestECObjController(ECObjectControllerMixin, unittest.TestCase):
for line in error_lines[:nparity]:
self.assertIn('retrying', line)
for line in error_lines[nparity:]:
- self.assertIn('ChunkReadTimeout (0.01s)', line)
+ self.assertIn('ChunkReadTimeout (0.01s', line)
for line in self.logger.logger.records['ERROR']:
self.assertIn(req.headers['x-trans-id'], line)
@@ -4959,8 +4960,9 @@ class TestECObjController(ECObjectControllerMixin, unittest.TestCase):
resp_body += b''.join(resp.app_iter)
# we log errors
log_lines = self.app.logger.get_lines_for_level('error')
+ self.assertTrue(log_lines)
for line in log_lines:
- self.assertIn('ChunkWriteTimeout fetching fragments', line)
+ self.assertIn('ChunkWriteTimeout feeding fragments', line)
# client gets a short read
self.assertEqual(16051, len(test_data))
self.assertEqual(8192, len(resp_body))
@@ -5010,7 +5012,7 @@ class TestECObjController(ECObjectControllerMixin, unittest.TestCase):
error_lines = self.logger.get_lines_for_level('error')
self.assertEqual(ndata, len(error_lines))
for line in error_lines:
- self.assertIn('ChunkReadTimeout (0.01s)', line)
+ self.assertIn('ChunkReadTimeout (0.01s', line)
for line in self.logger.logger.records['ERROR']:
self.assertIn(req.headers['x-trans-id'], line)
@@ -6675,5 +6677,54 @@ class TestNumContainerUpdates(unittest.TestCase):
c_replica, o_replica, o_quorum))
+@patch_policies(with_ec_default=True)
+class TestECFragGetter(BaseObjectControllerMixin, unittest.TestCase):
+ def setUp(self):
+ super(TestECFragGetter, self).setUp()
+ req = Request.blank(path='/a/c/o')
+ self.getter = obj.ECFragGetter(
+ self.app, req, None, None, self.policy, 'a/c/o',
+ {}, None, self.logger.thread_locals,
+ self.logger)
+
+ def test_iter_bytes_from_response_part(self):
+ part = FileLikeIter([b'some', b'thing'])
+ it = self.getter.iter_bytes_from_response_part(part, nbytes=None)
+ self.assertEqual(b'something', b''.join(it))
+
+ def test_iter_bytes_from_response_part_insufficient_bytes(self):
+ part = FileLikeIter([b'some', b'thing'])
+ it = self.getter.iter_bytes_from_response_part(part, nbytes=100)
+ with mock.patch.object(self.getter, '_dig_for_source_and_node',
+ return_value=(None, None)):
+ with self.assertRaises(ShortReadError) as cm:
+ b''.join(it)
+ self.assertEqual('Too few bytes; read 9, expecting 100',
+ str(cm.exception))
+
+ def test_iter_bytes_from_response_part_read_timeout(self):
+ part = FileLikeIter([b'some', b'thing'])
+ self.app.recoverable_node_timeout = 0.05
+ self.app.client_timeout = 0.8
+ it = self.getter.iter_bytes_from_response_part(part, nbytes=9)
+ with mock.patch.object(self.getter, '_dig_for_source_and_node',
+ return_value=(None, None)):
+ with mock.patch.object(part, 'read',
+ side_effect=[b'some', ChunkReadTimeout(9)]):
+ with self.assertRaises(ChunkReadTimeout) as cm:
+ b''.join(it)
+ self.assertEqual('9 seconds', str(cm.exception))
+
+ def test_iter_bytes_from_response_part_small_fragment_size(self):
+ self.getter.fragment_size = 4
+ part = FileLikeIter([b'some', b'thing', b''])
+ it = self.getter.iter_bytes_from_response_part(part, nbytes=None)
+ self.assertEqual([b'some', b'thin', b'g'], [ch for ch in it])
+ self.getter.fragment_size = 1
+ part = FileLikeIter([b'some', b'thing', b''])
+ it = self.getter.iter_bytes_from_response_part(part, nbytes=None)
+ self.assertEqual([c.encode() for c in 'something'], [ch for ch in it])
+
+
if __name__ == '__main__':
unittest.main()
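The small_fragment_size cases pin down the getter's re-chunking contract: however the underlying reads arrive, yielded chunks are sliced to fragment_size, with a short final piece allowed. A standalone sketch of that slicing loop (illustrative only, not the ECFragGetter implementation):

    def rechunk(chunks, fragment_size):
        # re-slice an iterable of byte strings into fragment_size pieces;
        # a short trailing piece matches the test expectations above
        buf = b''
        for chunk in chunks:
            buf += chunk
            while len(buf) >= fragment_size:
                yield buf[:fragment_size]
                buf = buf[fragment_size:]
        if buf:
            yield buf

    assert list(rechunk([b'some', b'thing'], 4)) == [b'some', b'thin', b'g']
    assert list(rechunk([b'some', b'thing'], 1)) == [
        c.encode() for c in 'something']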
diff --git a/test/unit/proxy/test_server.py b/test/unit/proxy/test_server.py
index 50b1fcd1d..2e4da0754 100644
--- a/test/unit/proxy/test_server.py
+++ b/test/unit/proxy/test_server.py
@@ -22,6 +22,7 @@ import math
import os
import posix
import socket
+import errno
import sys
import traceback
import unittest
@@ -54,9 +55,10 @@ from six.moves.urllib.parse import quote, parse_qsl
from test import listen_zero
from test.debug_logger import debug_logger
from test.unit import (
- connect_tcp, readuntil2crlfs, fake_http_connect, FakeRing, FakeMemcache,
- patch_policies, write_fake_ring, mocked_http_conn, DEFAULT_TEST_EC_TYPE,
- make_timestamp_iter, skip_if_no_xattrs, FakeHTTPResponse)
+ connect_tcp, readuntil2crlfs, fake_http_connect, FakeRing,
+ FakeMemcache, patch_policies, write_fake_ring, mocked_http_conn,
+ DEFAULT_TEST_EC_TYPE, make_timestamp_iter, skip_if_no_xattrs,
+ FakeHTTPResponse)
from test.unit.helpers import setup_servers, teardown_servers
from swift.proxy import server as proxy_server
from swift.proxy.controllers.obj import ReplicatedObjectController
@@ -2412,9 +2414,9 @@ class BaseTestObjectController(object):
if condition():
break
- def put_container(self, policy_name, container_name):
+ def put_container(self, policy_name, container_name, prolis=None):
# Note: only works if called with unpatched policies
- prolis = _test_sockets[0]
+ prolis = prolis or _test_sockets[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile('rwb')
fd.write(('PUT /v1/a/%s HTTP/1.1\r\n'
@@ -7321,6 +7323,22 @@ class TestReplicatedObjectController(
])
+@contextmanager
+def in_process_proxy(prosrv, **extra_server_kwargs):
+ server_kwargs = {
+ 'protocol': SwiftHttpProtocol,
+ 'capitalize_response_headers': False,
+ }
+ server_kwargs.update(extra_server_kwargs)
+ prolis = listen_zero()
+ try:
+ proxy_thread = spawn(wsgi.server, prolis, prosrv,
+ prosrv.logger, **server_kwargs)
+ yield prolis
+ finally:
+ proxy_thread.kill()
+
+
class BaseTestECObjectController(BaseTestObjectController):
def test_PUT_ec(self):
self.put_container(self.ec_policy.name, self.ec_policy.name)
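The in_process_proxy helper added above wraps eventlet's wsgi.server in a context manager so a test can exercise the proxy over a real socket and still have the green thread killed on exit, pass or fail. A usage sketch, assuming prosrv, _test_context and connect_tcp from this module (this mirrors test_GET_ec_pipeline below):

    conf = dict(_test_context['conf'], client_timeout=0.1)
    prosrv = proxy_server.Application(conf, logger=debug_logger('proxy'))
    with in_process_proxy(
            prosrv, socket_timeout=conf['client_timeout']) as prolis:
        sock = connect_tcp(('localhost', prolis.getsockname()[1]))
        # speak raw HTTP over sock; the listener dies with the context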
@@ -8177,6 +8195,73 @@ class BaseTestECObjectController(BaseTestObjectController):
os.rename(self.ec_policy.object_ring.serialized_path + '.bak',
self.ec_policy.object_ring.serialized_path)
+ def test_GET_ec_pipeline(self):
+ conf = _test_context['conf']
+ conf['client_timeout'] = 0.1
+ prosrv = proxy_server.Application(conf, logger=debug_logger('proxy'))
+ with in_process_proxy(
+ prosrv, socket_timeout=conf['client_timeout']) as prolis:
+ self.put_container(self.ec_policy.name, self.ec_policy.name,
+ prolis=prolis)
+
+ obj = b'0123456' * 11 * 17
+
+ sock = connect_tcp(('localhost', prolis.getsockname()[1]))
+ fd = sock.makefile('rwb')
+ fd.write(('PUT /v1/a/%s/go-get-it HTTP/1.1\r\n'
+ 'Host: localhost\r\n'
+ 'Content-Length: %d\r\n'
+ 'X-Storage-Token: t\r\n'
+ 'X-Object-Meta-Color: chartreuse\r\n'
+ 'Content-Type: application/octet-stream\r\n'
+ '\r\n' % (
+ self.ec_policy.name,
+ len(obj),
+ )).encode('ascii'))
+ fd.write(obj)
+ fd.flush()
+ headers = readuntil2crlfs(fd)
+ exp = b'HTTP/1.1 201'
+ self.assertEqual(headers[:len(exp)], exp)
+
+ fd.write(('GET /v1/a/%s/go-get-it HTTP/1.1\r\n'
+ 'Host: localhost\r\n'
+ 'X-Storage-Token: t\r\n'
+ '\r\n' % self.ec_policy.name).encode('ascii'))
+ fd.flush()
+ headers = readuntil2crlfs(fd)
+ exp = b'HTTP/1.1 200'
+ self.assertEqual(headers[:len(exp)], exp)
+ for line in headers.splitlines():
+ if b'Content-Length' in line:
+ h, v = line.split()
+ content_length = int(v.strip())
+ break
+ else:
+ self.fail("Didn't find content-length in %r" % (headers,))
+
+ gotten_obj = fd.read(content_length)
+ self.assertEqual(gotten_obj, obj)
+
+ sleep(0.3) # client_timeout should kick us off
+
+ fd.write(('GET /v1/a/%s/go-get-it HTTP/1.1\r\n'
+ 'Host: localhost\r\n'
+ 'X-Storage-Token: t\r\n'
+ '\r\n' % self.ec_policy.name).encode('ascii'))
+ fd.flush()
+        # makefile buffering is a little odd, but this empty read shows
+        # the server has disconnected us
+        self.assertEqual(b'', fd.read())
+        # one might expect this recv to raise a socket error ...
+        self.assertEqual(b'', sock.recv(1024))
+        # ... but the disconnect only surfaces as EOF; sending proves the
+        # connection really is gone
+ with self.assertRaises(socket.error) as caught:
+ sock.send(b'test')
+ self.assertEqual(caught.exception.errno, errno.EPIPE)
+ # and logging confirms we've timed out
+ last_debug_msg = prosrv.logger.get_lines_for_level('debug')[-1]
+ self.assertIn('timed out', last_debug_msg)
+
def test_ec_client_disconnect(self):
prolis = _test_sockets[0]
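The tail of test_GET_ec_pipeline above documents a subtlety: once the server drops an idle keep-alive connection, the client sees an orderly EOF (recv returns b'') rather than an error, and only a later send raises EPIPE. A self-contained sketch of the same symptom, using an AF_UNIX socketpair so the broken pipe shows up immediately (over TCP it may take a round trip first):

    import errno
    import socket

    a, b = socket.socketpair()
    b.close()                    # peer goes away, like a timed-out proxy
    assert a.recv(1024) == b''   # disconnect surfaces as EOF, not an error
    try:
        a.send(b'test')          # ... but writing to the dead peer raises
    except OSError as err:       # BrokenPipeError
        assert err.errno == errno.EPIPE
    a.close()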
diff --git a/tox.ini b/tox.ini
index 86f81ee28..69353b888 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,9 +1,14 @@
[tox]
envlist = py37,py27,pep8
minversion = 3.18.0
+requires =
+ # required to support py27/py36 envs
+ virtualenv<20.22
+ # project-wide requirement; see .zuul.yaml
+ tox<4
[pytest]
-addopts = --verbose
+addopts = --verbose -p no:requests_mock
[testenv]
usedevelop = True