Diffstat (limited to 'test')
-rw-r--r--  test/probe/test_object_versioning.py            8
-rw-r--r--  test/unit/__init__.py                           33
-rw-r--r--  test/unit/common/ring/test_ring.py              16
-rw-r--r--  test/unit/common/test_daemon.py                126
-rw-r--r--  test/unit/common/test_utils.py                 110
-rw-r--r--  test/unit/common/test_wsgi.py                  110
-rw-r--r--  test/unit/obj/test_diskfile.py                   2
-rw-r--r--  test/unit/proxy/controllers/test_base.py        68
-rw-r--r--  test/unit/proxy/controllers/test_container.py  151
-rw-r--r--  test/unit/proxy/controllers/test_obj.py         61
10 files changed, 564 insertions, 121 deletions
diff --git a/test/probe/test_object_versioning.py b/test/probe/test_object_versioning.py
index 60ecae9a1..09a209f54 100644
--- a/test/probe/test_object_versioning.py
+++ b/test/probe/test_object_versioning.py
@@ -273,18 +273,18 @@ class TestECObjectVersioning(ECProbeTest):
             self.fail('unable to find object on handoffs')
         # we want to repair the fault, but avoid doing the handoff revert
         self.revive_drive(failed_primary_device_path)
-        handoff_config = (handoff['id'] + 1) % 4
-        failed_config = (failed_primary['id'] + 1) % 4
+        handoff_config = self.config_number(handoff)
+        failed_config = self.config_number(failed_primary)
         partner_nodes = reconstructor._get_partners(
             failed_primary['index'], self.nodes)
         random.shuffle(partner_nodes)
         for partner in partner_nodes:
-            fix_config = (partner['id'] + 1) % 4
+            fix_config = self.config_number(partner)
             if fix_config not in (handoff_config, failed_config):
                 break
         else:
             self.fail('unable to find fix_config in %r excluding %r & %r' % (
-                [(d['device'], (d['id'] + 1) % 4) for d in partner_nodes],
+                [(d['device'], self.config_number(d)) for d in partner_nodes],
                 handoff_config, failed_config))
         self.reconstructor.once(number=fix_config)
diff --git a/test/unit/__init__.py b/test/unit/__init__.py
index 6f731b70a..f9847a10a 100644
--- a/test/unit/__init__.py
+++ b/test/unit/__init__.py
@@ -1408,3 +1408,36 @@ def generate_db_path(tempdir, server_type):
     return os.path.join(
         tempdir, '%ss' % server_type, 'part', 'suffix', 'hash',
         '%s-%s.db' % (server_type, uuid4()))
+
+
+class ConfigAssertMixin(object):
+    """
+    Use this with a TestCase to get py2/3 compatible assert for DuplicateOption
+    """
+    def assertDuplicateOption(self, app_config, option_name, option_value):
+        """
+        PY3 added a DuplicateOptionError, PY2 didn't seem to care
+        """
+        if six.PY3:
+            self.assertDuplicateOptionError(app_config, option_name)
+        else:
+            self.assertDuplicateOptionOK(app_config, option_name, option_value)
+
+    def assertDuplicateOptionError(self, app_config, option_name):
+        with self.assertRaises(
+                utils.configparser.DuplicateOptionError) as ctx:
+            app_config()
+        msg = str(ctx.exception)
+        self.assertIn(option_name, msg)
+        self.assertIn('already exists', msg)
+
+    def assertDuplicateOptionOK(self, app_config, option_name, option_value):
+        app = app_config()
+        if hasattr(app, 'conf'):
+            found_value = app.conf[option_name]
+        else:
+            if hasattr(app, '_pipeline_final_app'):
+                # special case for proxy app!
+                app = app._pipeline_final_app
+            found_value = getattr(app, option_name)
+        self.assertEqual(found_value, option_value)
diff --git a/test/unit/common/ring/test_ring.py b/test/unit/common/ring/test_ring.py
index 0f7e58e0c..55f45862e 100644
--- a/test/unit/common/ring/test_ring.py
+++ b/test/unit/common/ring/test_ring.py
@@ -68,8 +68,10 @@ class TestRingData(unittest.TestCase):

     def test_attrs(self):
         r2p2d = [[0, 1, 0, 1], [0, 1, 0, 1]]
-        d = [{'id': 0, 'zone': 0, 'region': 0, 'ip': '10.1.1.0', 'port': 7000},
-             {'id': 1, 'zone': 1, 'region': 1, 'ip': '10.1.1.1', 'port': 7000}]
+        d = [{'id': 0, 'zone': 0, 'region': 0, 'ip': '10.1.1.0', 'port': 7000,
+              'replication_ip': '10.1.1.0', 'replication_port': 7000},
+             {'id': 1, 'zone': 1, 'region': 1, 'ip': '10.1.1.1', 'port': 7000,
+              'replication_ip': '10.1.1.1', 'replication_port': 7000}]
         s = 30
         rd = ring.RingData(r2p2d, d, s)
         self.assertEqual(rd._replica2part2dev_id, r2p2d)
@@ -88,10 +90,12 @@ class TestRingData(unittest.TestCase):
                 pickle.dump(rd, f, protocol=p)
             meta_only = ring.RingData.load(ring_fname, metadata_only=True)
             self.assertEqual([
-                {'id': 0, 'zone': 0, 'region': 1, 'ip': '10.1.1.0',
-                 'port': 7000},
-                {'id': 1, 'zone': 1, 'region': 1, 'ip': '10.1.1.1',
-                 'port': 7000},
+                {'id': 0, 'zone': 0, 'region': 1,
+                 'ip': '10.1.1.0', 'port': 7000,
+                 'replication_ip': '10.1.1.0', 'replication_port': 7000},
+                {'id': 1, 'zone': 1, 'region': 1,
+                 'ip': '10.1.1.1', 'port': 7000,
+                 'replication_ip': '10.1.1.1', 'replication_port': 7000},
             ], meta_only.devs)
             # Pickled rings can't load only metadata, so you get it all
             self.assert_ring_data_equal(rd, meta_only)
diff --git a/test/unit/common/test_daemon.py b/test/unit/common/test_daemon.py
index d53d15f10..fc49fd4e4 100644
--- a/test/unit/common/test_daemon.py
+++ b/test/unit/common/test_daemon.py
@@ -14,18 +14,19 @@
 # limitations under the License.

 import os
-from six import StringIO
+import six
 import time
 import unittest
 from getpass import getuser
 import logging
-from test.unit import tmpfile
+from test.unit import tmpfile, with_tempdir, ConfigAssertMixin
 import mock
 import signal
 from contextlib import contextmanager
 import itertools
 from collections import defaultdict
 import errno
+from textwrap import dedent

 from swift.common import daemon, utils
 from test.debug_logger import debug_logger
@@ -106,7 +107,7 @@ class TestWorkerDaemon(unittest.TestCase):
         self.assertTrue(d.is_healthy())


-class TestRunDaemon(unittest.TestCase):
+class TestRunDaemon(unittest.TestCase, ConfigAssertMixin):

     def setUp(self):
         for patcher in [
@@ -147,9 +148,12 @@ class TestRunDaemon(unittest.TestCase):
             ])

     def test_run_daemon(self):
+        logging.logThreads = 1  # reset to default
         sample_conf = "[my-daemon]\nuser = %s\n" % getuser()
         with tmpfile(sample_conf) as conf_file, \
-                mock.patch('swift.common.daemon.use_hub') as mock_use_hub:
+                mock.patch('swift.common.utils.eventlet') as _utils_evt, \
+                mock.patch('eventlet.hubs.use_hub') as mock_use_hub, \
+                mock.patch('eventlet.debug') as _debug_evt:
             with mock.patch.dict('os.environ', {'TZ': ''}), \
                     mock.patch('time.tzset') as mock_tzset:
                 daemon.run_daemon(MyDaemon, conf_file)
@@ -159,6 +163,12 @@ class TestRunDaemon(unittest.TestCase):
             self.assertEqual(mock_use_hub.mock_calls,
                              [mock.call(utils.get_hub())])
             daemon.run_daemon(MyDaemon, conf_file, once=True)
+            _utils_evt.patcher.monkey_patch.assert_called_with(all=False,
                                                                socket=True,
                                                                select=True,
                                                                thread=True)
+            self.assertEqual(0, logging.logThreads)  # fixed in monkey_patch
+            _debug_evt.hub_exceptions.assert_called_with(False)
             self.assertEqual(MyDaemon.once_called, True)

             # test raise in daemon code
@@ -167,7 +177,7 @@ class TestRunDaemon(unittest.TestCase):
                               conf_file, once=True)

             # test user quit
-            sio = StringIO()
+            sio = six.StringIO()
             logger = logging.getLogger('server')
             logger.addHandler(logging.StreamHandler(sio))
             logger = utils.get_logger(None, 'server', log_route='server')
@@ -195,7 +205,9 @@ class TestRunDaemon(unittest.TestCase):

             sample_conf = "[my-daemon]\nuser = %s\n" % getuser()
             with tmpfile(sample_conf) as conf_file, \
-                    mock.patch('swift.common.daemon.use_hub'):
+                    mock.patch('swift.common.utils.eventlet'), \
+                    mock.patch('eventlet.hubs.use_hub'), \
+                    mock.patch('eventlet.debug'):
                 daemon.run_daemon(MyDaemon, conf_file)
                 self.assertFalse(MyDaemon.once_called)
                 self.assertTrue(MyDaemon.forever_called)
@@ -207,6 +219,107 @@ class TestRunDaemon(unittest.TestCase):
                 os.environ['TZ'] = old_tz
                 time.tzset()

+    @with_tempdir
+    def test_run_deamon_from_conf_file(self, tempdir):
+        conf_path = os.path.join(tempdir, 'test-daemon.conf')
+        conf_body = """
+        [DEFAULT]
+        conn_timeout = 5
+        client_timeout = 1
+        [my-daemon]
+        CONN_timeout = 10
+        client_timeout = 2
+        """
+        contents = dedent(conf_body)
+        with open(conf_path, 'w') as f:
+            f.write(contents)
+        with mock.patch('swift.common.utils.eventlet'), \
+                mock.patch('eventlet.hubs.use_hub'), \
+                mock.patch('eventlet.debug'):
+            d = daemon.run_daemon(MyDaemon, conf_path)
+        # my-daemon section takes priority (!?)
+        self.assertEqual('2', d.conf['client_timeout'])
+        self.assertEqual('10', d.conf['CONN_timeout'])
+        self.assertEqual('5', d.conf['conn_timeout'])
+
+    @with_tempdir
+    def test_run_daemon_from_conf_file_with_duplicate_var(self, tempdir):
+        conf_path = os.path.join(tempdir, 'test-daemon.conf')
+        conf_body = """
+        [DEFAULT]
+        client_timeout = 3
+        [my-daemon]
+        CLIENT_TIMEOUT = 2
+        client_timeout = 1
+        conn_timeout = 1.1
+        conn_timeout = 1.2
+        """
+        contents = dedent(conf_body)
+        with open(conf_path, 'w') as f:
+            f.write(contents)
+        with mock.patch('swift.common.utils.eventlet'), \
+                mock.patch('eventlet.hubs.use_hub'), \
+                mock.patch('eventlet.debug'):
+            app_config = lambda: daemon.run_daemon(MyDaemon, tempdir)
+            # N.B. CLIENT_TIMEOUT/client_timeout are unique options
+            self.assertDuplicateOption(app_config, 'conn_timeout', '1.2')
+
+    @with_tempdir
+    def test_run_deamon_from_conf_dir(self, tempdir):
+        conf_files = {
+            'default': """
+            [DEFAULT]
+            conn_timeout = 5
+            client_timeout = 1
+            """,
+            'daemon': """
+            [DEFAULT]
+            CONN_timeout = 3
+            CLIENT_TIMEOUT = 4
+            [my-daemon]
+            CONN_timeout = 10
+            client_timeout = 2
+            """,
+        }
+        for filename, conf_body in conf_files.items():
+            path = os.path.join(tempdir, filename + '.conf')
+            with open(path, 'wt') as fd:
+                fd.write(dedent(conf_body))
+        with mock.patch('swift.common.utils.eventlet'), \
+                mock.patch('eventlet.hubs.use_hub'), \
+                mock.patch('eventlet.debug'):
+            d = daemon.run_daemon(MyDaemon, tempdir)
+        # my-daemon section takes priority (!?)
+        self.assertEqual('2', d.conf['client_timeout'])
+        self.assertEqual('10', d.conf['CONN_timeout'])
+        self.assertEqual('5', d.conf['conn_timeout'])
+
+    @with_tempdir
+    def test_run_daemon_from_conf_dir_with_duplicate_var(self, tempdir):
+        conf_files = {
+            'default': """
+            [DEFAULT]
+            client_timeout = 3
+            """,
+            'daemon': """
+            [my-daemon]
+            client_timeout = 2
+            CLIENT_TIMEOUT = 4
+            conn_timeout = 1.1
+            conn_timeout = 1.2
+            """,
+        }
+        for filename, conf_body in conf_files.items():
+            path = os.path.join(tempdir, filename + '.conf')
+            with open(path, 'wt') as fd:
+                fd.write(dedent(conf_body))
+        with mock.patch('swift.common.utils.eventlet'), \
+                mock.patch('eventlet.hubs.use_hub'), \
+                mock.patch('eventlet.debug'):
+            app_config = lambda: daemon.run_daemon(MyDaemon, tempdir)
+            # N.B. CLIENT_TIMEOUT/client_timeout are unique options
+            self.assertDuplicateOption(app_config, 'conn_timeout', '1.2')
+
     @contextmanager
     def mock_os(self, child_worker_cycles=3):
         self.waitpid_calls = defaultdict(int)
@@ -228,6 +341,7 @@ class TestRunDaemon(unittest.TestCase):
             yield

     def test_fork_workers(self):
+        utils.logging_monkey_patch()  # needed to log at notice
         d = MyWorkerDaemon({'workers': 3})
         strategy = daemon.DaemonStrategy(d, d.logger)
         with self.mock_os():
diff --git a/test/unit/common/test_utils.py b/test/unit/common/test_utils.py
index e477ee85d..e66508c6d 100644
--- a/test/unit/common/test_utils.py
+++ b/test/unit/common/test_utils.py
@@ -210,6 +210,61 @@ class TestUtils(unittest.TestCase):
         self.md5_digest = '0d6dc3c588ae71a04ce9a6beebbbba06'
         self.fips_enabled = True

+    def test_monkey_patch(self):
+        def take_and_release(lock):
+            try:
+                lock.acquire()
+            finally:
+                lock.release()
+
+        def do_test():
+            res = 0
+            try:
+                # this module imports eventlet original threading, so re-import
+                # locally...
+                import threading
+                import traceback
+                logging_lock_before = logging._lock
+                my_lock_before = threading.RLock()
+                self.assertIsInstance(logging_lock_before,
+                                      type(my_lock_before))
+
+                utils.monkey_patch()
+
+                logging_lock_after = logging._lock
+                my_lock_after = threading.RLock()
+                self.assertIsInstance(logging_lock_after,
+                                      type(my_lock_after))
+
+                self.assertTrue(logging_lock_after.acquire())
+                thread = threading.Thread(target=take_and_release,
+                                          args=(logging_lock_after,))
+                thread.start()
+                self.assertTrue(thread.isAlive())
+                # we should timeout while the thread is still blocking on lock
+                eventlet.sleep()
+                thread.join(timeout=0.1)
+                self.assertTrue(thread.isAlive())
+
+                logging._lock.release()
+                thread.join(timeout=0.1)
+                self.assertFalse(thread.isAlive())
+            except AssertionError:
+                traceback.print_exc()
+                res = 1
+            finally:
+                os._exit(res)
+
+        pid = os.fork()
+        if pid == 0:
+            # run the test in an isolated environment to avoid monkey patching
+            # in this one
+            do_test()
+        else:
+            child_pid, errcode = os.waitpid(pid, 0)
+            self.assertEqual(0, os.WEXITSTATUS(errcode),
+                             'Forked do_test failed')
+
     def test_get_zero_indexed_base_string(self):
         self.assertEqual(utils.get_zero_indexed_base_string('something', 0),
                          'something')
@@ -1144,11 +1199,15 @@ class TestUtils(unittest.TestCase):
         # test eventlet.Timeout
         with ConnectionTimeout(42, 'my error message') \
                 as connection_timeout:
-            log_exception(connection_timeout)
+            now = time.time()
+            connection_timeout.created_at = now - 123.456
+            with mock.patch('swift.common.utils.time.time',
+                            return_value=now):
+                log_exception(connection_timeout)
             log_msg = strip_value(sio)
             self.assertNotIn('Traceback', log_msg)
             self.assertTrue('ConnectionTimeout' in log_msg)
-            self.assertTrue('(42s)' in log_msg)
+            self.assertTrue('(42s after 123.46s)' in log_msg)
             self.assertNotIn('my error message', log_msg)

         with MessageTimeout(42, 'my error message') as message_timeout:
@@ -3403,7 +3462,7 @@ cluster_dfw1 = http://dfw1.host/v1/
             if tempdir:
                 shutil.rmtree(tempdir)

-    def test_find_shard_range(self):
+    def test_find_namespace(self):
         ts = utils.Timestamp.now().internal
         start = utils.ShardRange('a/-a', ts, '', 'a')
         atof = utils.ShardRange('a/a-f', ts, 'a', 'f')
@@ -3413,29 +3472,29 @@ cluster_dfw1 = http://dfw1.host/v1/
         end = utils.ShardRange('a/z-', ts, 'z', '')
         ranges = [start, atof, ftol, ltor, rtoz, end]

-        found = utils.find_shard_range('', ranges)
+        found = utils.find_namespace('', ranges)
         self.assertEqual(found, None)
-        found = utils.find_shard_range(' ', ranges)
+        found = utils.find_namespace(' ', ranges)
         self.assertEqual(found, start)
-        found = utils.find_shard_range(' ', ranges[1:])
+        found = utils.find_namespace(' ', ranges[1:])
         self.assertEqual(found, None)
-        found = utils.find_shard_range('b', ranges)
+        found = utils.find_namespace('b', ranges)
         self.assertEqual(found, atof)
-        found = utils.find_shard_range('f', ranges)
+        found = utils.find_namespace('f', ranges)
         self.assertEqual(found, atof)
-        found = utils.find_shard_range('f\x00', ranges)
+        found = utils.find_namespace('f\x00', ranges)
         self.assertEqual(found, ftol)
-        found = utils.find_shard_range('x', ranges)
+        found = utils.find_namespace('x', ranges)
         self.assertEqual(found, rtoz)
-        found = utils.find_shard_range('r', ranges)
+        found = utils.find_namespace('r', ranges)
         self.assertEqual(found, ltor)
-        found = utils.find_shard_range('}', ranges)
+        found = utils.find_namespace('}', ranges)
         self.assertEqual(found, end)
-        found = utils.find_shard_range('}', ranges[:-1])
+        found = utils.find_namespace('}', ranges[:-1])
         self.assertEqual(found, None)

         # remove l-r from list of ranges and try and find a shard range for an
         # item in that range.
-        found = utils.find_shard_range('p', ranges[:-3] + ranges[-2:])
+        found = utils.find_namespace('p', ranges[:-3] + ranges[-2:])
         self.assertEqual(found, None)

         # add some sub-shards; a sub-shard's state is less than its parent
@@ -3445,20 +3504,20 @@ cluster_dfw1 = http://dfw1.host/v1/
         htok = utils.ShardRange('a/h-k', ts, 'h', 'k')

         overlapping_ranges = ranges[:2] + [ftoh, htok] + ranges[2:]
-        found = utils.find_shard_range('g', overlapping_ranges)
+        found = utils.find_namespace('g', overlapping_ranges)
         self.assertEqual(found, ftoh)
-        found = utils.find_shard_range('h', overlapping_ranges)
+        found = utils.find_namespace('h', overlapping_ranges)
         self.assertEqual(found, ftoh)
-        found = utils.find_shard_range('k', overlapping_ranges)
+        found = utils.find_namespace('k', overlapping_ranges)
         self.assertEqual(found, htok)
-        found = utils.find_shard_range('l', overlapping_ranges)
+        found = utils.find_namespace('l', overlapping_ranges)
         self.assertEqual(found, ftol)
-        found = utils.find_shard_range('m', overlapping_ranges)
+        found = utils.find_namespace('m', overlapping_ranges)
         self.assertEqual(found, ltor)

         ktol = utils.ShardRange('a/k-l', ts, 'k', 'l')
         overlapping_ranges = ranges[:2] + [ftoh, htok, ktol] + ranges[2:]
-        found = utils.find_shard_range('l', overlapping_ranges)
+        found = utils.find_namespace('l', overlapping_ranges)
         self.assertEqual(found, ktol)

     def test_parse_db_filename(self):
@@ -7960,7 +8019,7 @@ class TestShardRange(unittest.TestCase):
             with self.assertRaises(KeyError):
                 utils.ShardRange.from_dict(bad_dict)
             # But __init__ still (generally) works!
-            if key not in ('name', 'timestamp'):
+            if key != 'name':
                 utils.ShardRange(**bad_dict)
             else:
                 with self.assertRaises(TypeError):
@@ -8744,13 +8803,16 @@ class TestWatchdog(unittest.TestCase):
         w._evt.send = mock.Mock(side_effect=w._evt.send)
         gth = object()

+        now = time.time()
+        timeout_value = 1.0
         with patch('eventlet.greenthread.getcurrent', return_value=gth),\
-                patch('time.time', return_value=10.0):
+                patch('time.time', return_value=now):
             # On first call, _next_expiration is None, it should unblock
             # greenthread that is blocked for ever
-            key = w.start(1.0, Timeout)
+            key = w.start(timeout_value, Timeout)
             self.assertIn(key, w._timeouts)
-            self.assertEqual(w._timeouts[key], (1.0, 11.0, gth, Timeout))
+            self.assertEqual(w._timeouts[key], (
+                timeout_value, now + timeout_value, gth, Timeout, now))
             w._evt.send.assert_called_once()

             w.stop(key)
diff --git a/test/unit/common/test_wsgi.py b/test/unit/common/test_wsgi.py
index e3f988452..d2f13b205 100644
--- a/test/unit/common/test_wsgi.py
+++ b/test/unit/common/test_wsgi.py
@@ -43,7 +43,7 @@ from swift.common.storage_policy import POLICIES
 from test import listen_zero
 from test.debug_logger import debug_logger
 from test.unit import (
-    temptree, with_tempdir, write_fake_ring, patch_policies)
+    temptree, with_tempdir, write_fake_ring, patch_policies, ConfigAssertMixin)

 from paste.deploy import loadwsgi
@@ -60,7 +60,7 @@ def _fake_rings(tmpdir):


 @patch_policies
-class TestWSGI(unittest.TestCase):
+class TestWSGI(unittest.TestCase, ConfigAssertMixin):
     """Tests for swift.common.wsgi"""

     def test_init_request_processor(self):
@@ -133,14 +133,40 @@ class TestWSGI(unittest.TestCase):
     def test_loadapp_from_file(self, tempdir):
         conf_path = os.path.join(tempdir, 'object-server.conf')
         conf_body = """
+        [DEFAULT]
+        CONN_timeout = 10
+        client_timeout = 1
         [app:main]
         use = egg:swift#object
+        conn_timeout = 5
+        client_timeout = 2
+        CLIENT_TIMEOUT = 3
         """
         contents = dedent(conf_body)
         with open(conf_path, 'w') as f:
             f.write(contents)
         app = wsgi.loadapp(conf_path)
         self.assertIsInstance(app, obj_server.ObjectController)
+        self.assertTrue(isinstance(app, obj_server.ObjectController))
+        # N.B. paste config loading from *file* is already case-sensitive,
+        # so, CLIENT_TIMEOUT/client_timeout are unique options
+        self.assertEqual(1, app.client_timeout)
+        self.assertEqual(5, app.conn_timeout)
+
+    @with_tempdir
+    def test_loadapp_from_file_with_duplicate_var(self, tempdir):
+        conf_path = os.path.join(tempdir, 'object-server.conf')
+        conf_body = """
+        [app:main]
+        use = egg:swift#object
+        client_timeout = 2
+        client_timeout = 3
+        """
+        contents = dedent(conf_body)
+        with open(conf_path, 'w') as f:
+            f.write(contents)
+        app_config = lambda: wsgi.loadapp(conf_path)
+        self.assertDuplicateOption(app_config, 'client_timeout', 3.0)

     @with_tempdir
     def test_loadapp_from_file_with_global_conf(self, tempdir):
@@ -204,11 +230,89 @@ class TestWSGI(unittest.TestCase):

     def test_loadapp_from_string(self):
         conf_body = """
+        [DEFAULT]
+        CONN_timeout = 10
+        client_timeout = 1
         [app:main]
         use = egg:swift#object
+        conn_timeout = 5
+        client_timeout = 2
         """
         app = wsgi.loadapp(wsgi.ConfigString(conf_body))
         self.assertTrue(isinstance(app, obj_server.ObjectController))
+        self.assertEqual(1, app.client_timeout)
+        self.assertEqual(5, app.conn_timeout)
+
+    @with_tempdir
+    def test_loadapp_from_dir(self, tempdir):
+        conf_files = {
+            'pipeline': """
+            [pipeline:main]
+            pipeline = tempauth proxy-server
+            """,
+            'tempauth': """
+            [DEFAULT]
+            swift_dir = %s
+            random_VAR = foo
+            [filter:tempauth]
+            use = egg:swift#tempauth
+            random_var = bar
+            """ % tempdir,
+            'proxy': """
+            [DEFAULT]
+            conn_timeout = 5
+            client_timeout = 1
+            [app:proxy-server]
+            use = egg:swift#proxy
+            CONN_timeout = 10
+            client_timeout = 2
+            """,
+        }
+        _fake_rings(tempdir)
+        for filename, conf_body in conf_files.items():
+            path = os.path.join(tempdir, filename + '.conf')
+            with open(path, 'wt') as fd:
+                fd.write(dedent(conf_body))
+        app = wsgi.loadapp(tempdir)
+        # DEFAULT takes priority (!?)
+        self.assertEqual(5, app._pipeline_final_app.conn_timeout)
+        self.assertEqual(1, app._pipeline_final_app.client_timeout)
+        self.assertEqual('foo', app.app.app.app.conf['random_VAR'])
+        self.assertEqual('bar', app.app.app.app.conf['random_var'])
+
+    @with_tempdir
+    def test_loadapp_from_dir_with_duplicate_var(self, tempdir):
+        conf_files = {
+            'pipeline': """
+            [pipeline:main]
+            pipeline = tempauth proxy-server
+            """,
+            'tempauth': """
+            [DEFAULT]
+            swift_dir = %s
+            random_VAR = foo
+            [filter:tempauth]
+            use = egg:swift#tempauth
+            random_var = bar
+            """ % tempdir,
+            'proxy': """
+            [app:proxy-server]
+            use = egg:swift#proxy
+            client_timeout = 2
+            CLIENT_TIMEOUT = 1
+            conn_timeout = 3
+            conn_timeout = 4
+            """,
+        }
+        _fake_rings(tempdir)
+        for filename, conf_body in conf_files.items():
+            path = os.path.join(tempdir, filename + '.conf')
+            with open(path, 'wt') as fd:
+                fd.write(dedent(conf_body))
+        app_config = lambda: wsgi.loadapp(tempdir)
+        # N.B. our paste conf.d parsing re-uses readconf,
+        # so, CLIENT_TIMEOUT/client_timeout are unique options
+        self.assertDuplicateOption(app_config, 'conn_timeout', 4.0)

     @with_tempdir
     def test_load_app_config(self, tempdir):
@@ -896,6 +1000,7 @@ class TestWSGI(unittest.TestCase):
         def _loadapp(uri, name=None, **kwargs):
             calls['_loadapp'] += 1

+        logging.logThreads = 1  # reset to default
         with mock.patch.object(wsgi, '_initrp', _initrp), \
                 mock.patch.object(wsgi, 'get_socket'), \
                 mock.patch.object(wsgi, 'drop_privileges') as _d_privs, \
@@ -916,6 +1021,7 @@ class TestWSGI(unittest.TestCase):
             # just clean_up_deemon_hygene()
             self.assertEqual([], _d_privs.mock_calls)
             self.assertEqual([mock.call()], _c_hyg.mock_calls)
+        self.assertEqual(0, logging.logThreads)  # fixed in our monkey_patch

 @mock.patch('swift.common.wsgi.run_server')
 @mock.patch('swift.common.wsgi.WorkersStrategy')
diff --git a/test/unit/obj/test_diskfile.py b/test/unit/obj/test_diskfile.py
index 8d3a484b7..327d860e5 100644
--- a/test/unit/obj/test_diskfile.py
+++ b/test/unit/obj/test_diskfile.py
@@ -2810,7 +2810,7 @@ class TestECDiskFileManager(DiskFileManagerMixin, BaseTestCase):
     def test_cleanup_ondisk_files_commit_window(self):
         # verify that non-durable files are not reclaimed regardless of
         # timestamp if written to disk within commit_window
-        much_older = Timestamp(time() - 1001).internal
+        much_older = Timestamp(time() - 2000).internal
         older = Timestamp(time() - 1001).internal
         newer = Timestamp(time() - 900).internal
         scenarios = [
diff --git a/test/unit/proxy/controllers/test_base.py b/test/unit/proxy/controllers/test_base.py
index 73d61c6ef..c5004bc12 100644
--- a/test/unit/proxy/controllers/test_base.py
+++ b/test/unit/proxy/controllers/test_base.py
@@ -499,7 +499,7 @@ class TestFuncs(BaseTest):
                          expected)

         self.assertEqual(get_cache_key("account", "cont", shard="listing"),
-                         'shard-listing/account/cont')
+                         'shard-listing-v2/account/cont')
         self.assertEqual(get_cache_key("account", "cont", shard="updating"),
                          'shard-updating-v2/account/cont')
         self.assertRaises(ValueError,
@@ -1155,17 +1155,74 @@ class TestFuncs(BaseTest):
         base = Controller(self.app)
         src_headers = {'x-remove-base-meta-owner': 'x',
                        'x-base-meta-size': '151M',
+                       'x-base-sysmeta-mysysmeta': 'myvalue',
+                       'x-Backend-No-Timestamp-Update': 'true',
+                       'X-Backend-Storage-Policy-Index': '3',
+                       'x-backendoftheworld': 'ignored',
                        'new-owner': 'Kun'}
         req = Request.blank('/v1/a/c/o', headers=src_headers)
+        dst_headers = base.generate_request_headers(req)
+        expected_headers = {'x-backend-no-timestamp-update': 'true',
+                            'x-backend-storage-policy-index': '3',
+                            'x-timestamp': mock.ANY,
+                            'x-trans-id': '-',
+                            'Referer': 'GET http://localhost/v1/a/c/o',
+                            'connection': 'close',
+                            'user-agent': 'proxy-server %d' % os.getpid()}
+        for k, v in expected_headers.items():
+            self.assertIn(k, dst_headers)
+            self.assertEqual(v, dst_headers[k])
+        for k, v in expected_headers.items():
+            dst_headers.pop(k)
+        self.assertFalse(dst_headers)
+
+        # with transfer=True
+        req = Request.blank('/v1/a/c/o', headers=src_headers)
         dst_headers = base.generate_request_headers(req, transfer=True)
-        expected_headers = {'x-base-meta-owner': '',
-                            'x-base-meta-size': '151M',
+        expected_headers.update({'x-base-meta-owner': '',
+                                 'x-base-meta-size': '151M',
+                                 'x-base-sysmeta-mysysmeta': 'myvalue'})
+        for k, v in expected_headers.items():
+            self.assertIn(k, dst_headers)
+            self.assertEqual(v, dst_headers[k])
+        for k, v in expected_headers.items():
+            dst_headers.pop(k)
+        self.assertFalse(dst_headers)
+
+        # with additional
+        req = Request.blank('/v1/a/c/o', headers=src_headers)
+        dst_headers = base.generate_request_headers(
+            req, transfer=True,
+            additional=src_headers)
+        expected_headers.update({'x-remove-base-meta-owner': 'x',
+                                 'x-backendoftheworld': 'ignored',
+                                 'new-owner': 'Kun'})
+        for k, v in expected_headers.items():
+            self.assertIn(k, dst_headers)
+            self.assertEqual(v, dst_headers[k])
+        for k, v in expected_headers.items():
+            dst_headers.pop(k)
+        self.assertFalse(dst_headers)
+
+        # with additional, verify precedence
+        req = Request.blank('/v1/a/c/o', headers=src_headers)
+        dst_headers = base.generate_request_headers(
+            req, transfer=False,
+            additional={'X-Backend-Storage-Policy-Index': '2',
+                        'X-Timestamp': '1234.56789'})
+        expected_headers = {'x-backend-no-timestamp-update': 'true',
+                            'x-backend-storage-policy-index': '2',
+                            'x-timestamp': '1234.56789',
+                            'x-trans-id': '-',
+                            'Referer': 'GET http://localhost/v1/a/c/o',
                             'connection': 'close',
                             'user-agent': 'proxy-server %d' % os.getpid()}
         for k, v in expected_headers.items():
             self.assertIn(k, dst_headers)
             self.assertEqual(v, dst_headers[k])
-        self.assertNotIn('new-owner', dst_headers)
+        for k, v in expected_headers.items():
+            dst_headers.pop(k)
+        self.assertFalse(dst_headers)

     def test_generate_request_headers_change_backend_user_agent(self):
         base = Controller(self.app)
@@ -1205,7 +1262,8 @@ class TestFuncs(BaseTest):
                        'x-base-meta-size': '151M',
                        'new-owner': 'Kun'}
         dst_headers = base.generate_request_headers(None,
-                                                    additional=src_headers)
+                                                    additional=src_headers,
+                                                    transfer=True)
         expected_headers = {'x-base-meta-size': '151M',
                             'connection': 'close'}
         for k, v in expected_headers.items():
diff --git a/test/unit/proxy/controllers/test_container.py b/test/unit/proxy/controllers/test_container.py
index c010c7227..d8b136757 100644
--- a/test/unit/proxy/controllers/test_container.py
+++ b/test/unit/proxy/controllers/test_container.py
@@ -24,7 +24,8 @@ from six.moves import urllib

 from swift.common.constraints import CONTAINER_LISTING_LIMIT
 from swift.common.swob import Request, bytes_to_wsgi, str_to_wsgi, wsgi_quote
-from swift.common.utils import ShardRange, Timestamp
+from swift.common.utils import ShardRange, Timestamp, Namespace, \
+    NamespaceBoundList
 from swift.proxy import server as proxy_server
 from swift.proxy.controllers.base import headers_to_container_info, \
     Controller, get_container_info, get_cache_key
@@ -1970,6 +1971,7 @@ class TestContainerController(TestRingBase):
             (200, sr_objs[2], shard_resp_hdrs[2])
         ]
         # NB marker always advances to last object name
+        # NB end_markers are upper of the current available shard range
         expected_requests = [
             # path, headers, params
             ('a/c', {'X-Backend-Record-Type': 'auto'},
@@ -1991,7 +1993,7 @@ class TestContainerController(TestRingBase):
         self.check_response(resp, root_resp_hdrs,
                             exp_sharding_state='sharding')
         self.assertIn('swift.cache', resp.request.environ)
-        self.assertNotIn('shard-listing/a/c',
+        self.assertNotIn('shard-listing-v2/a/c',
                          resp.request.environ['swift.cache'].store)

     def test_GET_sharded_container_gap_in_shards_memcache(self):
@@ -2035,15 +2037,17 @@ class TestContainerController(TestRingBase):
             (200, sr_objs[2], shard_resp_hdrs[2])
         ]
         # NB marker always advances to last object name
+        # NB compaction of shard range data to cached bounds loses the gaps, so
+        # end_markers are lower of the next available shard range
         expected_requests = [
             # path, headers, params
             ('a/c', {'X-Backend-Record-Type': 'auto'},
              dict(states='listing')),  # 200
             (shard_ranges[0].name, {'X-Backend-Record-Type': 'auto'},
-             dict(marker='', end_marker='ham\x00', states='listing',
+             dict(marker='', end_marker='onion\x00', states='listing',
                   limit=str(limit))),  # 200
             (shard_ranges[1].name, {'X-Backend-Record-Type': 'auto'},
-             dict(marker='h', end_marker='pie\x00', states='listing',
+             dict(marker='h', end_marker='rhubarb\x00', states='listing',
                   limit=str(limit - len(sr_objs[0])))),  # 200
             (shard_ranges[2].name, {'X-Backend-Record-Type': 'auto'},
              dict(marker='p', end_marker='', states='listing',
@@ -2055,11 +2059,14 @@ class TestContainerController(TestRingBase):
         # root object count will be overridden by actual length of listing
         self.check_response(resp, root_resp_hdrs)
         self.assertIn('swift.cache', resp.request.environ)
-        self.assertIn('shard-listing/a/c',
+        self.assertIn('shard-listing-v2/a/c',
                       resp.request.environ['swift.cache'].store)
+        # NB compact bounds in cache do not reveal the gap in shard ranges
         self.assertEqual(
-            sr_dicts,
-            resp.request.environ['swift.cache'].store['shard-listing/a/c'])
+            [['', '.shards_a/c_ham'],
+             ['onion', '.shards_a/c_pie'],
+             ['rhubarb', '.shards_a/c_']],
+            resp.request.environ['swift.cache'].store['shard-listing-v2/a/c'])

     def test_GET_sharded_container_empty_shard(self):
         # verify ordered listing when a shard is empty
@@ -2699,10 +2706,14 @@ class TestContainerController(TestRingBase):
     def _setup_shard_range_stubs(self):
         self.memcache = FakeMemcache()
         shard_bounds = (('', 'ham'), ('ham', 'pie'), ('pie', ''))
-        shard_ranges = [
-            ShardRange('.shards_a/c_%s' % upper, Timestamp.now(), lower, upper)
-            for lower, upper in shard_bounds]
-        self.sr_dicts = [dict(sr) for sr in shard_ranges]
+        self.ns_dicts = [{'name': '.shards_a/c_%s' % upper,
+                          'lower': lower,
+                          'upper': upper}
+                         for lower, upper in shard_bounds]
+        self.namespaces = [Namespace(**ns) for ns in self.ns_dicts]
+        self.ns_bound_list = NamespaceBoundList.parse(self.namespaces)
+        self.sr_dicts = [dict(ShardRange(timestamp=Timestamp.now(), **ns))
+                         for ns in self.ns_dicts]
         self._stub_shards_dump = json.dumps(self.sr_dicts).encode('ascii')
         self.root_resp_hdrs = {
             'Accept-Ranges': 'bytes',
@@ -2737,22 +2748,24 @@ class TestContainerController(TestRingBase):
             req, backend_req,
             extra_hdrs={'X-Backend-Record-Type': record_type,
                         'X-Backend-Override-Shard-Name-Filter': 'sharded'})
-        self._check_response(resp, self.sr_dicts, {
+        self._check_response(resp, self.ns_dicts, {
             'X-Backend-Recheck-Container-Existence': '60',
             'X-Backend-Record-Type': 'shard',
             'X-Backend-Sharding-State': sharding_state})
+
+        cache_key = 'shard-listing-v2/a/c'
         self.assertEqual(
             [mock.call.get('container/a/c'),
-             mock.call.set('shard-listing/a/c', self.sr_dicts,
+             mock.call.set(cache_key, self.ns_bound_list.bounds,
                            time=exp_recheck_listing),
              mock.call.set('container/a/c', mock.ANY, time=60)],
            self.memcache.calls)
         self.assertEqual(sharding_state,
                          self.memcache.calls[2][1][1]['sharding_state'])
         self.assertIn('swift.infocache', req.environ)
-        self.assertIn('shard-listing/a/c', req.environ['swift.infocache'])
-        self.assertEqual(tuple(self.sr_dicts),
-                         req.environ['swift.infocache']['shard-listing/a/c'])
+        self.assertIn(cache_key, req.environ['swift.infocache'])
+        self.assertEqual(self.ns_bound_list,
+                         req.environ['swift.infocache'][cache_key])
         self.assertEqual(
             [x[0][0] for x in self.logger.logger.log_dict['increment']],
             ['container.info.cache.miss',
@@ -2760,7 +2773,7 @@ class TestContainerController(TestRingBase):

         # container is sharded and proxy has that state cached, but
         # no shard ranges cached; expect a cache miss and write-back
-        self.memcache.delete('shard-listing/a/c')
+        self.memcache.delete(cache_key)
         self.memcache.clear_calls()
         self.logger.clear()
         req = self._build_request({'X-Backend-Record-Type': record_type},
@@ -2774,23 +2787,23 @@ class TestContainerController(TestRingBase):
             req, backend_req,
             extra_hdrs={'X-Backend-Record-Type': record_type,
                         'X-Backend-Override-Shard-Name-Filter': 'sharded'})
-        self._check_response(resp, self.sr_dicts, {
+        self._check_response(resp, self.ns_dicts, {
             'X-Backend-Recheck-Container-Existence': '60',
             'X-Backend-Record-Type': 'shard',
             'X-Backend-Sharding-State': sharding_state})
         self.assertEqual(
             [mock.call.get('container/a/c'),
-             mock.call.get('shard-listing/a/c', raise_on_error=True),
-             mock.call.set('shard-listing/a/c', self.sr_dicts,
+             mock.call.get(cache_key, raise_on_error=True),
+             mock.call.set(cache_key, self.ns_bound_list.bounds,
                            time=exp_recheck_listing),
              # Since there was a backend request, we go ahead and cache
              # container info, too
              mock.call.set('container/a/c', mock.ANY, time=60)],
            self.memcache.calls)
         self.assertIn('swift.infocache', req.environ)
-        self.assertIn('shard-listing/a/c', req.environ['swift.infocache'])
-        self.assertEqual(tuple(self.sr_dicts),
-                         req.environ['swift.infocache']['shard-listing/a/c'])
+        self.assertIn(cache_key, req.environ['swift.infocache'])
+        self.assertEqual(self.ns_bound_list,
+                         req.environ['swift.infocache'][cache_key])
         self.assertEqual(
             [x[0][0] for x in self.logger.logger.log_dict['increment']],
             ['container.info.cache.hit',
@@ -2803,18 +2816,18 @@ class TestContainerController(TestRingBase):
         req = self._build_request({'X-Backend-Record-Type': record_type},
                                   {'states': 'listing'}, {})
         resp = req.get_response(self.app)
-        self._check_response(resp, self.sr_dicts, {
+        self._check_response(resp, self.ns_dicts, {
             'X-Backend-Cached-Results': 'true',
             'X-Backend-Record-Type': 'shard',
             'X-Backend-Sharding-State': sharding_state})
         self.assertEqual(
             [mock.call.get('container/a/c'),
-             mock.call.get('shard-listing/a/c', raise_on_error=True)],
+             mock.call.get(cache_key, raise_on_error=True)],
            self.memcache.calls)
         self.assertIn('swift.infocache', req.environ)
-        self.assertIn('shard-listing/a/c', req.environ['swift.infocache'])
-        self.assertEqual(tuple(self.sr_dicts),
-                         req.environ['swift.infocache']['shard-listing/a/c'])
+        self.assertIn(cache_key, req.environ['swift.infocache'])
+        self.assertEqual(self.ns_bound_list,
+                         req.environ['swift.infocache'][cache_key])
         self.assertEqual(
             [x[0][0] for x in self.logger.logger.log_dict['increment']],
             ['container.info.cache.hit',
@@ -2836,22 +2849,22 @@ class TestContainerController(TestRingBase):
             req, backend_req,
             extra_hdrs={'X-Backend-Record-Type': record_type,
                         'X-Backend-Override-Shard-Name-Filter': 'sharded'})
-        self._check_response(resp, self.sr_dicts, {
+        self._check_response(resp, self.ns_dicts, {
             'X-Backend-Recheck-Container-Existence': '60',
             'X-Backend-Record-Type': 'shard',
             'X-Backend-Sharding-State': sharding_state})
         self.assertEqual(
             [mock.call.get('container/a/c'),
-             mock.call.set('shard-listing/a/c', self.sr_dicts,
+             mock.call.set(cache_key, self.ns_bound_list.bounds,
                            time=exp_recheck_listing),
              # Since there was a backend request, we go ahead and cache
              # container info, too
              mock.call.set('container/a/c', mock.ANY, time=60)],
            self.memcache.calls)
         self.assertIn('swift.infocache', req.environ)
-        self.assertIn('shard-listing/a/c', req.environ['swift.infocache'])
-        self.assertEqual(tuple(self.sr_dicts),
-                         req.environ['swift.infocache']['shard-listing/a/c'])
+        self.assertIn(cache_key, req.environ['swift.infocache'])
+        self.assertEqual(self.ns_bound_list,
+                         req.environ['swift.infocache'][cache_key])
         self.assertEqual(
             [x[0][0] for x in self.logger.logger.log_dict['increment']],
             ['container.info.cache.hit',
@@ -2864,18 +2877,18 @@ class TestContainerController(TestRingBase):
                                   {'states': 'listing'}, {})
         with mock.patch('random.random', return_value=0.11):
             resp = req.get_response(self.app)
-        self._check_response(resp, self.sr_dicts, {
+        self._check_response(resp, self.ns_dicts, {
             'X-Backend-Cached-Results': 'true',
             'X-Backend-Record-Type': 'shard',
             'X-Backend-Sharding-State': sharding_state})
         self.assertEqual(
             [mock.call.get('container/a/c'),
-             mock.call.get('shard-listing/a/c', raise_on_error=True)],
+             mock.call.get(cache_key, raise_on_error=True)],
            self.memcache.calls)
         self.assertIn('swift.infocache', req.environ)
-        self.assertIn('shard-listing/a/c', req.environ['swift.infocache'])
-        self.assertEqual(tuple(self.sr_dicts),
-                         req.environ['swift.infocache']['shard-listing/a/c'])
+        self.assertIn(cache_key, req.environ['swift.infocache'])
+        self.assertEqual(self.ns_bound_list,
+                         req.environ['swift.infocache'][cache_key])
         self.assertEqual(
             [x[0][0] for x in self.logger.logger.log_dict['increment']],
             ['container.info.cache.hit',
@@ -2890,15 +2903,15 @@ class TestContainerController(TestRingBase):
                                   infocache=req.environ['swift.infocache'])
         with mock.patch('random.random', return_value=0.11):
             resp = req.get_response(self.app)
-        self._check_response(resp, self.sr_dicts, {
+        self._check_response(resp, self.ns_dicts, {
             'X-Backend-Cached-Results': 'true',
             'X-Backend-Record-Type': 'shard',
             'X-Backend-Sharding-State': sharding_state})
         self.assertEqual([], self.memcache.calls)
         self.assertIn('swift.infocache', req.environ)
-        self.assertIn('shard-listing/a/c', req.environ['swift.infocache'])
-        self.assertEqual(tuple(self.sr_dicts),
-                         req.environ['swift.infocache']['shard-listing/a/c'])
+        self.assertIn(cache_key, req.environ['swift.infocache'])
+        self.assertEqual(self.ns_bound_list,
+                         req.environ['swift.infocache'][cache_key])
         self.assertEqual(
             [x[0][0] for x in self.logger.logger.log_dict['increment']],
             ['container.shard_listing.infocache.hit'])
@@ -2916,7 +2929,7 @@ class TestContainerController(TestRingBase):
                                   num_resp=self.CONTAINER_REPLICAS)
         self.assertEqual(
             [mock.call.delete('container/a/c'),
-             mock.call.delete('shard-listing/a/c')],
+             mock.call.delete(cache_key)],
            self.memcache.calls)

     def test_get_from_shards_add_root_spi(self):
@@ -3046,7 +3059,7 @@ class TestContainerController(TestRingBase):
         # deleted from cache
         self.assertEqual(
             [mock.call.get('container/a/c'),
-             mock.call.get('shard-listing/a/c', raise_on_error=True),
+             mock.call.get('shard-listing-v2/a/c', raise_on_error=True),
              mock.call.set('container/a/c', mock.ANY, time=6.0)],
            self.memcache.calls)
         self.assertEqual(404, self.memcache.calls[2][1][1]['status'])
@@ -3079,7 +3092,7 @@ class TestContainerController(TestRingBase):
         self.assertNotIn('X-Backend-Cached-Results', resp.headers)
         self.assertEqual(
             [mock.call.get('container/a/c'),
-             mock.call.get('shard-listing/a/c', raise_on_error=True),
+             mock.call.get('shard-listing-v2/a/c', raise_on_error=True),
              mock.call.set('container/a/c', mock.ANY, time=6.0)],
            self.memcache.calls)
         self.assertEqual(404, self.memcache.calls[2][1][1]['status'])
@@ -3098,7 +3111,7 @@ class TestContainerController(TestRingBase):
         info['status'] = 200
         info['sharding_state'] = 'sharded'
         self.memcache.set('container/a/c', info)
-        self.memcache.set('shard-listing/a/c', self.sr_dicts)
+        self.memcache.set('shard-listing-v2/a/c', self.ns_bound_list.bounds)
         self.memcache.clear_calls()

         req_hdrs = {'X-Backend-Record-Type': record_type}
@@ -3106,7 +3119,7 @@ class TestContainerController(TestRingBase):
         resp = req.get_response(self.app)
         self.assertEqual(
             [mock.call.get('container/a/c'),
-             mock.call.get('shard-listing/a/c', raise_on_error=True)],
+             mock.call.get('shard-listing-v2/a/c', raise_on_error=True)],
            self.memcache.calls)
         self.assertEqual({'container.info.cache.hit': 1,
                           'container.shard_listing.cache.hit': 1},
@@ -3122,26 +3135,26 @@ class TestContainerController(TestRingBase):

         resp = self._do_test_GET_shard_ranges_read_from_cache(
             {'states': 'listing'}, 'shard')
-        self._check_response(resp, self.sr_dicts, exp_hdrs)
+        self._check_response(resp, self.ns_dicts, exp_hdrs)

         resp = self._do_test_GET_shard_ranges_read_from_cache(
             {'states': 'listing', 'reverse': 'true'}, 'shard')
-        exp_shards = list(self.sr_dicts)
+        exp_shards = list(self.ns_dicts)
         exp_shards.reverse()
         self._check_response(resp, exp_shards, exp_hdrs)

         resp = self._do_test_GET_shard_ranges_read_from_cache(
             {'states': 'listing', 'marker': 'jam'}, 'shard')
-        self._check_response(resp, self.sr_dicts[1:], exp_hdrs)
+        self._check_response(resp, self.ns_dicts[1:], exp_hdrs)

         resp = self._do_test_GET_shard_ranges_read_from_cache(
             {'states': 'listing', 'marker': 'jam', 'end_marker': 'kale'},
             'shard')
-        self._check_response(resp, self.sr_dicts[1:2], exp_hdrs)
+        self._check_response(resp, self.ns_dicts[1:2], exp_hdrs)

         resp = self._do_test_GET_shard_ranges_read_from_cache(
             {'states': 'listing', 'includes': 'egg'}, 'shard')
-        self._check_response(resp, self.sr_dicts[:1], exp_hdrs)
+        self._check_response(resp, self.ns_dicts[:1], exp_hdrs)

         # override _get_from_shards so that the response contains the shard
         # listing that we want to verify even though the record_type is 'auto'
@@ -3153,22 +3166,22 @@ class TestContainerController(TestRingBase):
                                mock_get_from_shards):
             resp = self._do_test_GET_shard_ranges_read_from_cache(
                 {'states': 'listing', 'reverse': 'true'}, 'auto')
-            exp_shards = list(self.sr_dicts)
+            exp_shards = list(self.ns_dicts)
             exp_shards.reverse()
             self._check_response(resp, exp_shards, exp_hdrs)

             resp = self._do_test_GET_shard_ranges_read_from_cache(
                 {'states': 'listing', 'marker': 'jam'}, 'auto')
-            self._check_response(resp, self.sr_dicts[1:], exp_hdrs)
+            self._check_response(resp, self.ns_dicts[1:], exp_hdrs)

             resp = self._do_test_GET_shard_ranges_read_from_cache(
                 {'states': 'listing', 'marker': 'jam', 'end_marker': 'kale'},
                 'auto')
-            self._check_response(resp, self.sr_dicts[1:2], exp_hdrs)
+            self._check_response(resp, self.ns_dicts[1:2], exp_hdrs)

             resp = self._do_test_GET_shard_ranges_read_from_cache(
                 {'states': 'listing', 'includes': 'egg'}, 'auto')
-            self._check_response(resp, self.sr_dicts[:1], exp_hdrs)
+            self._check_response(resp, self.ns_dicts[:1], exp_hdrs)

     def _do_test_GET_shard_ranges_write_to_cache(self, params, record_type):
         # verify that shard range listing are written to cache when appropriate
@@ -3193,7 +3206,8 @@ class TestContainerController(TestRingBase):
         expected_hdrs.update(resp_hdrs)
         self.assertEqual(
             [mock.call.get('container/a/c'),
-             mock.call.set('shard-listing/a/c', self.sr_dicts, time=600),
+             mock.call.set(
+                 'shard-listing-v2/a/c', self.ns_bound_list.bounds, time=600),
              mock.call.set('container/a/c', mock.ANY, time=60)],
            self.memcache.calls)
         # shards were cached
@@ -3213,26 +3227,26 @@ class TestContainerController(TestRingBase):

         resp = self._do_test_GET_shard_ranges_write_to_cache(
             {'states': 'listing'}, 'shard')
-        self._check_response(resp, self.sr_dicts, exp_hdrs)
+        self._check_response(resp, self.ns_dicts, exp_hdrs)

         resp = self._do_test_GET_shard_ranges_write_to_cache(
             {'states': 'listing', 'reverse': 'true'}, 'shard')
-        exp_shards = list(self.sr_dicts)
+        exp_shards = list(self.ns_dicts)
         exp_shards.reverse()
         self._check_response(resp, exp_shards, exp_hdrs)

         resp = self._do_test_GET_shard_ranges_write_to_cache(
             {'states': 'listing', 'marker': 'jam'}, 'shard')
-        self._check_response(resp, self.sr_dicts[1:], exp_hdrs)
+        self._check_response(resp, self.ns_dicts[1:], exp_hdrs)

         resp = self._do_test_GET_shard_ranges_write_to_cache(
             {'states': 'listing', 'marker': 'jam', 'end_marker': 'kale'},
             'shard')
-        self._check_response(resp, self.sr_dicts[1:2], exp_hdrs)
+        self._check_response(resp, self.ns_dicts[1:2], exp_hdrs)

         resp = self._do_test_GET_shard_ranges_write_to_cache(
             {'states': 'listing', 'includes': 'egg'}, 'shard')
-        self._check_response(resp, self.sr_dicts[:1], exp_hdrs)
+        self._check_response(resp, self.ns_dicts[:1], exp_hdrs)

         # override _get_from_shards so that the response contains the shard
         # listing that we want to verify even though the record_type is 'auto'
@@ -3244,22 +3258,22 @@ class TestContainerController(TestRingBase):
                                mock_get_from_shards):
             resp = self._do_test_GET_shard_ranges_write_to_cache(
                 {'states': 'listing', 'reverse': 'true'}, 'auto')
-            exp_shards = list(self.sr_dicts)
+            exp_shards = list(self.ns_dicts)
             exp_shards.reverse()
             self._check_response(resp, exp_shards, exp_hdrs)

             resp = self._do_test_GET_shard_ranges_write_to_cache(
                 {'states': 'listing', 'marker': 'jam'}, 'auto')
-            self._check_response(resp, self.sr_dicts[1:], exp_hdrs)
+            self._check_response(resp, self.ns_dicts[1:], exp_hdrs)

             resp = self._do_test_GET_shard_ranges_write_to_cache(
                 {'states': 'listing', 'marker': 'jam', 'end_marker': 'kale'},
                 'auto')
-            self._check_response(resp, self.sr_dicts[1:2], exp_hdrs)
+            self._check_response(resp, self.ns_dicts[1:2], exp_hdrs)

             resp = self._do_test_GET_shard_ranges_write_to_cache(
                 {'states': 'listing', 'includes': 'egg'}, 'auto')
-            self._check_response(resp, self.sr_dicts[:1], exp_hdrs)
+            self._check_response(resp, self.ns_dicts[:1], exp_hdrs)

     def test_GET_shard_ranges_write_to_cache_with_x_newest(self):
         # when x-newest is sent, verify that there is no cache lookup to check
@@ -3285,10 +3299,11 @@ class TestContainerController(TestRingBase):
                         'X-Backend-Override-Shard-Name-Filter': 'sharded'})
         expected_hdrs = {'X-Backend-Recheck-Container-Existence': '60'}
         expected_hdrs.update(resp_hdrs)
-        self._check_response(resp, self.sr_dicts, expected_hdrs)
+        self._check_response(resp, self.ns_dicts, expected_hdrs)
         self.assertEqual(
             [mock.call.get('container/a/c'),
-             mock.call.set('shard-listing/a/c', self.sr_dicts, time=600),
+             mock.call.set(
+                 'shard-listing-v2/a/c', self.ns_bound_list.bounds, time=600),
              mock.call.set('container/a/c', mock.ANY, time=60)],
            self.memcache.calls)
         self.assertEqual('sharded',
diff --git a/test/unit/proxy/controllers/test_obj.py b/test/unit/proxy/controllers/test_obj.py
index bf32a059a..b268e008e 100644
--- a/test/unit/proxy/controllers/test_obj.py
+++ b/test/unit/proxy/controllers/test_obj.py
@@ -39,8 +39,9 @@ else:

 import swift
 from swift.common import utils, swob, exceptions
-from swift.common.exceptions import ChunkWriteTimeout
-from swift.common.utils import Timestamp, list_from_csv, md5
+from swift.common.exceptions import ChunkWriteTimeout, ShortReadError, \
+    ChunkReadTimeout
+from swift.common.utils import Timestamp, list_from_csv, md5, FileLikeIter
 from swift.proxy import server as proxy_server
 from swift.proxy.controllers import obj
 from swift.proxy.controllers.base import \
@@ -4926,7 +4927,7 @@ class TestECObjController(ECObjectControllerMixin, unittest.TestCase):
         for line in error_lines[:nparity]:
             self.assertIn('retrying', line)
         for line in error_lines[nparity:]:
-            self.assertIn('ChunkReadTimeout (0.01s)', line)
+            self.assertIn('ChunkReadTimeout (0.01s', line)
         for line in self.logger.logger.records['ERROR']:
             self.assertIn(req.headers['x-trans-id'], line)

@@ -4959,8 +4960,9 @@ class TestECObjController(ECObjectControllerMixin, unittest.TestCase):
         resp_body += b''.join(resp.app_iter)
         # we log errors
         log_lines = self.app.logger.get_lines_for_level('error')
+        self.assertTrue(log_lines)
         for line in log_lines:
-            self.assertIn('ChunkWriteTimeout fetching fragments', line)
+            self.assertIn('ChunkWriteTimeout feeding fragments', line)
         # client gets a short read
         self.assertEqual(16051, len(test_data))
         self.assertEqual(8192, len(resp_body))
@@ -5010,7 +5012,7 @@ class TestECObjController(ECObjectControllerMixin, unittest.TestCase):
         error_lines = self.logger.get_lines_for_level('error')
         self.assertEqual(ndata, len(error_lines))
         for line in error_lines:
-            self.assertIn('ChunkReadTimeout (0.01s)', line)
+            self.assertIn('ChunkReadTimeout (0.01s', line)
         for line in self.logger.logger.records['ERROR']:
             self.assertIn(req.headers['x-trans-id'], line)

@@ -6675,5 +6677,54 @@ class TestNumContainerUpdates(unittest.TestCase):
                          c_replica, o_replica, o_quorum))


+@patch_policies(with_ec_default=True)
+class TestECFragGetter(BaseObjectControllerMixin, unittest.TestCase):
+    def setUp(self):
+        super(TestECFragGetter, self).setUp()
+        req = Request.blank(path='/a/c/o')
+        self.getter = obj.ECFragGetter(
+            self.app, req, None, None, self.policy, 'a/c/o',
+            {}, None, self.logger.thread_locals,
+            self.logger)
+
+    def test_iter_bytes_from_response_part(self):
+        part = FileLikeIter([b'some', b'thing'])
+        it = self.getter.iter_bytes_from_response_part(part, nbytes=None)
+        self.assertEqual(b'something', b''.join(it))
+
+    def test_iter_bytes_from_response_part_insufficient_bytes(self):
+        part = FileLikeIter([b'some', b'thing'])
+        it = self.getter.iter_bytes_from_response_part(part, nbytes=100)
+        with mock.patch.object(self.getter, '_dig_for_source_and_node',
+                               return_value=(None, None)):
+            with self.assertRaises(ShortReadError) as cm:
+                b''.join(it)
+        self.assertEqual('Too few bytes; read 9, expecting 100',
+                         str(cm.exception))
+
+    def test_iter_bytes_from_response_part_read_timeout(self):
+        part = FileLikeIter([b'some', b'thing'])
+        self.app.recoverable_node_timeout = 0.05
+        self.app.client_timeout = 0.8
+        it = self.getter.iter_bytes_from_response_part(part, nbytes=9)
+        with mock.patch.object(self.getter, '_dig_for_source_and_node',
+                               return_value=(None, None)):
+            with mock.patch.object(part, 'read',
+                                   side_effect=[b'some', ChunkReadTimeout(9)]):
+                with self.assertRaises(ChunkReadTimeout) as cm:
+                    b''.join(it)
+        self.assertEqual('9 seconds', str(cm.exception))
+
+    def test_iter_bytes_from_response_part_small_fragment_size(self):
+        self.getter.fragment_size = 4
+        part = FileLikeIter([b'some', b'thing', b''])
+        it = self.getter.iter_bytes_from_response_part(part, nbytes=None)
+        self.assertEqual([b'some', b'thin', b'g'], [ch for ch in it])
+        self.getter.fragment_size = 1
+        part = FileLikeIter([b'some', b'thing', b''])
+        it = self.getter.iter_bytes_from_response_part(part, nbytes=None)
+        self.assertEqual([c.encode() for c in 'something'],
+                         [ch for ch in it])
+
+
 if __name__ == '__main__':
     unittest.main()
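
Note on the shard-listing-v2 cache format exercised above: the bumped cache key pairs with a more compact cache value. Instead of full shard range dicts, the proxy caches [lower_bound, shard_name] pairs (see the expected memcache values in test_container.py, e.g. [['', '.shards_a/c_ham'], ['onion', '.shards_a/c_pie'], ['rhubarb', '.shards_a/c_']]). The following is a minimal standalone sketch of that bound-list idea, assuming Swift's lower-exclusive/upper-inclusive namespace semantics; it is illustrative only, not Swift's actual NamespaceBoundList implementation:

    from bisect import bisect_left

    # Each entry is [lower_bound, shard_name]; a shard owns names strictly
    # greater than its lower bound, up to and including the next entry's
    # lower bound (illustrative names, not Swift's API).
    BOUNDS = [['', '.shards_a/c_ham'],
              ['ham', '.shards_a/c_pie'],
              ['pie', '.shards_a/c_']]

    def find_shard(bounds, item):
        # pick the last entry whose (exclusive) lower bound is < item
        lowers = [lower for lower, _name in bounds]
        pos = bisect_left(lowers, item) - 1
        return bounds[pos][1] if pos >= 0 else None

    assert find_shard(BOUNDS, 'jam') == '.shards_a/c_pie'
    assert find_shard(BOUNDS, 'ham') == '.shards_a/c_ham'   # upper-inclusive
    assert find_shard(BOUNDS, 'zebra') == '.shards_a/c_'
    assert find_shard(BOUNDS, '') is None                   # nothing sorts below ''

Because only lower bounds survive the compaction, a gap between shard ranges disappears after a round-trip through the cache, which is exactly what the gap_in_shards test above now expects (end_markers become the lower of the next available shard range).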