-rw-r--r--  .zuul.yaml                                                                             |   9
-rw-r--r--  designate/central/service.py                                                           |   8
-rw-r--r--  designate/dnsutils.py                                                                  |   1
-rw-r--r--  designate/exceptions.py                                                                |   1
-rw-r--r--  designate/mdns/service.py                                                              |   3
-rw-r--r--  designate/rpc.py                                                                       |  68
-rw-r--r--  designate/sink/service.py                                                              |   4
-rw-r--r--  designate/storage/impl_sqlalchemy/migrate_repo/versions/101_support_naptr_records.py   |  15
-rw-r--r--  designate/storage/impl_sqlalchemy/migrate_repo/versions/102_support_caa_records.py     |  15
-rw-r--r--  designate/tests/test_pool_manager/test_service.py                                      |   9
-rw-r--r--  designate/tests/test_producer/test_tasks.py                                            | 116
-rw-r--r--  designate/tests/unit/test_central/test_basic.py                                        | 181
-rw-r--r--  designate/tests/unit/test_mdns/test_service.py                                         |  79
-rw-r--r--  designate/worker/__init__.py                                                           |   6
-rw-r--r--  devstack/designate_plugins/backend-powerdns                                            |   2
-rwxr-xr-x  devstack/plugin.sh                                                                     |  16
-rw-r--r--  devstack/settings                                                                      |  13
-rw-r--r--  releasenotes/notes/CAA_NAPTR_records-5e2e466a5bc18a31.yaml                             |   6
-rw-r--r--  releasenotes/notes/pool-manager-disabled-ff8582b5f86d2360.yaml                         |   6
19 files changed, 320 insertions(+), 238 deletions(-)
diff --git a/.zuul.yaml b/.zuul.yaml
index cf76c575..25d12cd2 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -41,7 +41,7 @@
designate-zone-manager: true
- job:
- name: designate-bind9-py35
+ name: designate-bind9-py36
parent: designate-bind9
vars:
devstack_localrc:
@@ -54,11 +54,6 @@
c-bak: false
- job:
- name: designate-bind9-py36
- parent: designate-bind9-py35
- nodeset: openstack-bionic-node
-
-- job:
name: designate-devstack-base
parent: legacy-dsvm-base
vars:
@@ -158,7 +153,6 @@
jobs:
- designate-bind9
- designate-bind9-manager-model
- - designate-bind9-py35
- designate-bind9-py36
- designate-devstack-pdns4
- designate-devstack-pdns4-postgres
@@ -168,7 +162,6 @@
jobs:
- designate-bind9
- designate-bind9-manager-model
- - designate-bind9-py35
- designate-bind9-py36
- designate-devstack-pdns4
- designate-devstack-pdns4-postgres
diff --git a/designate/central/service.py b/designate/central/service.py
index 3407a1c0..ffd12c65 100644
--- a/designate/central/service.py
+++ b/designate/central/service.py
@@ -1094,7 +1094,9 @@ class Service(service.RPCService, service.Service):
# Prevent deletion of a zone which has child zones
criterion = {'parent_zone_id': zone_id}
- if self.storage.count_zones(context, criterion) > 0:
+ # Look for child zones across all tenants with elevated context
+ if self.storage.count_zones(context.elevated(all_tenants=True),
+ criterion) > 0:
raise exceptions.ZoneHasSubZone('Please delete any subzones '
'before deleting this zone')
@@ -1937,7 +1939,9 @@ class Service(service.RPCService, service.Service):
data = self.network_api.list_floatingips(context, region=region)
return self._list_to_dict(data, keys=['region', 'id'])
- def _list_to_dict(self, data, keys=['id']):
+ def _list_to_dict(self, data, keys=None):
+ if keys is None:
+ keys = ['id']
new = {}
for i in data:
key = tuple([i[key] for key in keys])
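Note on the `keys=None` change above: it replaces a mutable default argument, which Python creates once at function definition time and shares across every call. Here the default `['id']` was never mutated, so the change is defensive, but the pattern it guards against is real. A minimal sketch of the hazard (function names are illustrative, not from Designate):

    def buggy(item, seen=[]):
        # the default list is built once and shared by all calls
        seen.append(item)
        return seen

    def fixed(item, seen=None):
        # a fresh list per call, as in _list_to_dict() above
        if seen is None:
            seen = []
        seen.append(item)
        return seen

    buggy('a')
    print(buggy('b'))  # ['a', 'b'] -- state leaked across calls
    fixed('a')
    print(fixed('b'))  # ['b']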
diff --git a/designate/dnsutils.py b/designate/dnsutils.py
index b0342a6b..9b00ed57 100644
--- a/designate/dnsutils.py
+++ b/designate/dnsutils.py
@@ -148,7 +148,6 @@ class TsigInfoMiddleware(DNSMiddleware):
def __init__(self, application, storage):
super(TsigInfoMiddleware, self).__init__(application)
-
self.storage = storage
def process_request(self, request):
diff --git a/designate/exceptions.py b/designate/exceptions.py
index da7fd5ec..c5a91a19 100644
--- a/designate/exceptions.py
+++ b/designate/exceptions.py
@@ -21,6 +21,7 @@ class Base(Exception):
error_type = None
error_message = None
errors = None
+ expected = False
def __init__(self, *args, **kwargs):
self.errors = kwargs.pop('errors', None)
diff --git a/designate/mdns/service.py b/designate/mdns/service.py
index b7332582..42bc7157 100644
--- a/designate/mdns/service.py
+++ b/designate/mdns/service.py
@@ -58,6 +58,7 @@ class Service(service.DNSService, service.RPCService, service.Service):
application = handler.RequestHandler(self.storage, self.tg)
application = dnsutils.TsigInfoMiddleware(application, self.storage)
application = dnsutils.SerializationMiddleware(
- application, dnsutils.TsigKeyring(self.storage))
+ application, dnsutils.TsigKeyring(self.storage)
+ )
return application
diff --git a/designate/rpc.py b/designate/rpc.py
index 8702f442..c531661f 100644
--- a/designate/rpc.py
+++ b/designate/rpc.py
@@ -25,18 +25,16 @@ __all__ = [
'get_notifier',
]
-
from oslo_config import cfg
import oslo_messaging as messaging
-from oslo_messaging.rpc import server as rpc_server
from oslo_messaging.rpc import dispatcher as rpc_dispatcher
+from oslo_messaging.rpc import server as rpc_server
from oslo_serialization import jsonutils
import designate.context
import designate.exceptions
from designate import objects
-
CONF = cfg.CONF
TRANSPORT = None
NOTIFIER = None
@@ -74,11 +72,15 @@ def initialized():
def cleanup():
global TRANSPORT, NOTIFIER, NOTIFICATION_TRANSPORT
- assert TRANSPORT is not None
- assert NOTIFICATION_TRANSPORT is not None
- assert NOTIFIER is not None
+ if TRANSPORT is None:
+ raise AssertionError("'TRANSPORT' must not be None")
+ if NOTIFICATION_TRANSPORT is None:
+ raise AssertionError("'NOTIFICATION_TRANSPORT' must not be None")
+ if NOTIFIER is None:
+ raise AssertionError("'NOTIFIER' must not be None")
TRANSPORT.cleanup()
- TRANSPORT = NOTIFIER = None
+ NOTIFICATION_TRANSPORT.cleanup()
+ TRANSPORT = NOTIFICATION_TRANSPORT = NOTIFIER = None
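The assert-to-raise conversions in this file are not cosmetic: CPython strips `assert` statements entirely when run with the -O flag, so the guards would silently vanish in optimized deployments, while an explicit raise always executes. The reworked cleanup() also stops leaking NOTIFICATION_TRANSPORT, which the old code neither cleaned up nor reset. A quick illustration of the -O behaviour:

    # $ python -c "assert False"     -> AssertionError
    # $ python -O -c "assert False"  -> exits silently; the assert is compiled away
    if TRANSPORT is None:            # an explicit check survives -O
        raise AssertionError("'TRANSPORT' must not be None")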
def set_defaults(control_exchange):
@@ -161,15 +163,13 @@ class RequestContextSerializer(messaging.Serializer):
class RPCDispatcher(rpc_dispatcher.RPCDispatcher):
-
def dispatch(self, *args, **kwds):
try:
return super(RPCDispatcher, self).dispatch(*args, **kwds)
- except Exception as e:
- if getattr(e, 'expected', False):
+ except designate.exceptions.Base as e:
+ if e.expected:
raise rpc_dispatcher.ExpectedException()
- else:
- raise
+ raise
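This dispatcher rewrite pairs with the `expected = False` attribute added to `designate.exceptions.Base` above: instead of getattr-probing arbitrary exceptions, only Designate's own exceptions are inspected, and those that opt in are wrapped in oslo.messaging's ExpectedException so the RPC server relays them to the caller without logging a server-side traceback. A sketch of how a subclass would opt in (the class body here is illustrative, not the real definition):

    import designate.exceptions

    class ZoneNotFound(designate.exceptions.Base):
        # "expected" marks client-side errors; the dispatcher converts
        # them to ExpectedException instead of logging them as failures
        expected = True
        error_type = 'zone_not_found'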
def get_transport_url(url_str=None):
@@ -177,41 +177,53 @@ def get_transport_url(url_str=None):
def get_client(target, version_cap=None, serializer=None):
- assert TRANSPORT is not None
+ if TRANSPORT is None:
+ raise AssertionError("'TRANSPORT' must not be None")
if serializer is None:
serializer = DesignateObjectSerializer()
serializer = RequestContextSerializer(serializer)
- return messaging.RPCClient(TRANSPORT,
- target,
- version_cap=version_cap,
- serializer=serializer)
+ return messaging.RPCClient(
+ TRANSPORT,
+ target,
+ version_cap=version_cap,
+ serializer=serializer
+ )
def get_server(target, endpoints, serializer=None):
- assert TRANSPORT is not None
+ if TRANSPORT is None:
+ raise AssertionError("'TRANSPORT' must not be None")
if serializer is None:
serializer = DesignateObjectSerializer()
serializer = RequestContextSerializer(serializer)
access_policy = rpc_dispatcher.DefaultRPCAccessPolicy
dispatcher = RPCDispatcher(endpoints, serializer, access_policy)
return rpc_server.RPCServer(
- TRANSPORT, target, dispatcher, 'eventlet')
+ TRANSPORT,
+ target,
+ dispatcher=dispatcher,
+ executor='eventlet',
+ )
-def get_listener(targets, endpoints, serializer=None, pool=None):
- assert TRANSPORT is not None
+def get_notification_listener(targets, endpoints, serializer=None, pool=None):
+ if NOTIFICATION_TRANSPORT is None:
+ raise AssertionError("'NOTIFICATION_TRANSPORT' must not be None")
if serializer is None:
serializer = JsonPayloadSerializer()
- return messaging.get_notification_listener(TRANSPORT,
- targets,
- endpoints,
- executor='eventlet',
- pool=pool,
- serializer=serializer)
+ return messaging.get_notification_listener(
+ NOTIFICATION_TRANSPORT,
+ targets,
+ endpoints,
+ executor='eventlet',
+ pool=pool,
+ serializer=serializer
+ )
def get_notifier(service=None, host=None, publisher_id=None):
- assert NOTIFIER is not None
+ if NOTIFIER is None:
+ raise AssertionError("'NOTIFIER' must not be None")
if not publisher_id:
publisher_id = "%s.%s" % (service, host or CONF.host)
return NOTIFIER.prepare(publisher_id=publisher_id)
diff --git a/designate/sink/service.py b/designate/sink/service.py
index 90db96db..11f3e156 100644
--- a/designate/sink/service.py
+++ b/designate/sink/service.py
@@ -68,11 +68,11 @@ class Service(service.Service):
# TODO(ekarlso): Change this to endpoint objects rather than
# ourselves?
- self._server = rpc.get_listener(
+ self._server = rpc.get_notification_listener(
targets, [self],
pool=cfg.CONF['service:sink'].listener_pool_name)
- if len(targets) > 0:
+ if targets:
self._server.start()
def stop(self):
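Besides the clearer name, get_notification_listener() fixes a real bug visible in the rpc.py hunk above: the old get_listener() built the listener on the RPC TRANSPORT rather than NOTIFICATION_TRANSPORT, so sink could read from the wrong bus when separate messaging backends are configured. A rough usage sketch in the style of the sink service (the handler class is illustrative):

    import oslo_messaging as messaging
    from designate import rpc

    class Handler(object):
        # oslo.messaging dispatches notifications to methods named
        # after their priority (info, warn, error, ...)
        def info(self, ctxt, publisher_id, event_type, payload, metadata):
            print(event_type)

    targets = [messaging.Target(topic='notifications')]
    listener = rpc.get_notification_listener(
        targets, [Handler()], pool='designate-sink')
    listener.start()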
diff --git a/designate/storage/impl_sqlalchemy/migrate_repo/versions/101_support_naptr_records.py b/designate/storage/impl_sqlalchemy/migrate_repo/versions/101_support_naptr_records.py
index 4c03e6e8..7cd91ea7 100644
--- a/designate/storage/impl_sqlalchemy/migrate_repo/versions/101_support_naptr_records.py
+++ b/designate/storage/impl_sqlalchemy/migrate_repo/versions/101_support_naptr_records.py
@@ -27,18 +27,3 @@ def upgrade(migrate_engine):
records_table = Table('recordsets', meta, autoload=True)
records_table.columns.type.alter(name='type', type=Enum(*RECORD_TYPES))
-
-
-def downgrade(migrate_engine):
- meta.bind = migrate_engine
-
- RECORD_TYPES = ['A', 'AAAA', 'CNAME', 'MX', 'SRV', 'TXT', 'SPF', 'NS',
- 'PTR', 'SSHFP', 'SOA']
-
- records_table = Table('recordsets', meta, autoload=True)
-
- # Delete all NAPTR records
- records_table.filter_by(name='type', type='NAPTR').delete()
-
- # Remove CAA from the ENUM
- records_table.columns.type.alter(type=Enum(*RECORD_TYPES))
diff --git a/designate/storage/impl_sqlalchemy/migrate_repo/versions/102_support_caa_records.py b/designate/storage/impl_sqlalchemy/migrate_repo/versions/102_support_caa_records.py
index 1bf61572..32ec4fcd 100644
--- a/designate/storage/impl_sqlalchemy/migrate_repo/versions/102_support_caa_records.py
+++ b/designate/storage/impl_sqlalchemy/migrate_repo/versions/102_support_caa_records.py
@@ -27,18 +27,3 @@ def upgrade(migrate_engine):
records_table = Table('recordsets', meta, autoload=True)
records_table.columns.type.alter(name='type', type=Enum(*RECORD_TYPES))
-
-
-def downgrade(migrate_engine):
- meta.bind = migrate_engine
-
- RECORD_TYPES = ['A', 'AAAA', 'CNAME', 'MX', 'SRV', 'TXT', 'SPF', 'NS',
- 'PTR', 'SSHFP', 'SOA', 'NAPTR']
-
- records_table = Table('recordsets', meta, autoload=True)
-
- # Delete all CAA records
- records_table.filter_by(name='type', type='CAA').delete()
-
- # Remove CAA from the ENUM
- records_table.columns.type.alter(type=Enum(*RECORD_TYPES))
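Both migrations drop their downgrade() functions, in line with the OpenStack-wide removal of database downgrade support. Nothing functional is lost: the deleted bodies were never runnable, since filter_by() is an ORM query method that does not exist on a SQLAlchemy Table. For reference, a working Core-style purge would have looked roughly like this (a hypothetical reconstruction, not part of the commit):

    query = records_table.delete().where(records_table.c.type == 'CAA')
    migrate_engine.execute(query)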
diff --git a/designate/tests/test_pool_manager/test_service.py b/designate/tests/test_pool_manager/test_service.py
index 4afbdcb9..2610ebb8 100644
--- a/designate/tests/test_pool_manager/test_service.py
+++ b/designate/tests/test_pool_manager/test_service.py
@@ -42,7 +42,7 @@ class PoolManagerServiceNoopTest(PoolManagerTestCase):
threshold_percentage=100,
enable_recovery_timer=False,
enable_sync_timer=False,
- poll_retry_interval=1,
+ poll_retry_interval=0,
poll_max_retries=1,
cache_driver='noop',
group='service:pool_manager')
@@ -89,7 +89,6 @@ class PoolManagerServiceNoopTest(PoolManagerTestCase):
def test_create_zone(
self, mock_update_status, mock_notify_zone_changed,
mock_poll_for_serial_number, _):
-
zone = self._build_zone('example.org.', 'CREATE', 'PENDING')
self.service.create_zone(self.admin_context, zone)
@@ -104,9 +103,9 @@ class PoolManagerServiceNoopTest(PoolManagerTestCase):
self.assertEqual(2, mock_poll_for_serial_number.call_count)
self.assertEqual(
[call(self.admin_context, zone,
- self.service.pool.nameservers[0], 30, 1, 1, 5),
+ self.service.pool.nameservers[0], 30, 0, 1, 5),
call(self.admin_context, zone,
- self.service.pool.nameservers[1], 30, 1, 1, 5)],
+ self.service.pool.nameservers[1], 30, 0, 1, 5)],
mock_poll_for_serial_number.call_args_list)
# Pool manager needs to call into mdns to calculate consensus as
@@ -148,7 +147,7 @@ class PoolManagerServiceNoopTest(PoolManagerTestCase):
zone = self._build_zone('example.org.', 'CREATE', 'PENDING')
- mock_create_zone.side_effect = [None, exceptions.Backend]
+ mock_create_zone.side_effect = [exceptions.Backend, None]
self.service.create_zone(self.admin_context, zone)
diff --git a/designate/tests/test_producer/test_tasks.py b/designate/tests/test_producer/test_tasks.py
index 4f9135bf..1d870aaa 100644
--- a/designate/tests/test_producer/test_tasks.py
+++ b/designate/tests/test_producer/test_tasks.py
@@ -28,27 +28,18 @@ from designate.producer import tasks
LOG = logging.getLogger(__name__)
-class TaskTest(TestCase):
- def setUp(self):
- super(TaskTest, self).setUp()
-
- def _enable_tasks(self, tasks):
- self.config(
- enabled_tasks=tasks,
- group="service:producer")
-
+class DeletedZonePurgeTest(TestCase):
+ number_of_zones = 20
+ batch_size = 5
+ time_threshold = 24 * 60 * 60
-class DeletedzonePurgeTest(TaskTest):
def setUp(self):
- super(DeletedzonePurgeTest, self).setUp()
-
+ super(DeletedZonePurgeTest, self).setUp()
self.config(
- interval=3600,
- time_threshold=604800,
- batch_size=100,
+ time_threshold=self.time_threshold,
+ batch_size=self.batch_size,
group="producer_task:zone_purge"
)
-
self.purge_task_fixture = self.useFixture(
fixtures.ZoneManagerTaskFixture(tasks.DeletedZonePurgeTask)
)
@@ -57,19 +48,16 @@ class DeletedzonePurgeTest(TaskTest):
# Create a zone and set it as deleted
zone = self.create_zone(name=name)
self._delete_zone(zone, mock_deletion_time)
- return zone
def _fetch_all_zones(self):
- # Fetch all zones including deleted ones
+ # Fetch all zones including deleted ones.
query = tables.zones.select()
return self.central_service.storage.session.execute(query).fetchall()
def _delete_zone(self, zone, mock_deletion_time):
# Set a zone as deleted
zid = zone.id.replace('-', '')
- query = tables.zones.update().\
- where(tables.zones.c.id == zid).\
- values(
+ query = tables.zones.update().where(tables.zones.c.id == zid).values(
action='NONE',
deleted=zid,
deleted_at=mock_deletion_time,
@@ -78,62 +66,63 @@ class DeletedzonePurgeTest(TaskTest):
pxy = self.central_service.storage.session.execute(query)
self.assertEqual(1, pxy.rowcount)
- return zone
def _create_deleted_zones(self):
- # Create a number of deleted zones in the past days
- zones = []
+ # Create a number of deleted zones in the past days.
now = timeutils.utcnow()
- for age in range(18):
- age *= (24 * 60 * 60) # seconds
+ for index in range(self.number_of_zones):
+ age = index * (self.time_threshold // self.number_of_zones * 2)
delta = datetime.timedelta(seconds=age)
deletion_time = now - delta
- name = "example%d.org." % len(zones)
- z = self._create_deleted_zone(name, deletion_time)
- zones.append(z)
-
- return zones
+ name = "example%d.org." % index
+ self._create_deleted_zone(name, deletion_time)
def test_purge_zones(self):
- # Create 18 zones, run producer, check if 7 zones are remaining
- self.config(quota_zones=1000)
+ # Create X zones, run producer, check if half of the zones
+ # are remaining.
+ self.config(quota_zones=self.number_of_zones)
self._create_deleted_zones()
- self.purge_task_fixture.task()
+ for remaining in reversed(range(self.number_of_zones // 2,
+ self.number_of_zones,
+ self.batch_size)):
+ self.purge_task_fixture.task()
+
+ zones = self._fetch_all_zones()
+ LOG.info("Number of zones: %d", len(zones))
+ self.assertEqual(remaining, len(zones))
- zones = self._fetch_all_zones()
- LOG.info("Number of zones: %d", len(zones))
- self.assertEqual(7, len(zones))
+ remaining_zones = self._fetch_all_zones()
+ self.assertEqual(self.number_of_zones // 2, len(remaining_zones))
-class PeriodicGenerateDelayedNotifyTaskTest(TaskTest):
+class PeriodicGenerateDelayedNotifyTaskTest(TestCase):
+ number_of_zones = 20
+ batch_size = 5
def setUp(self):
super(PeriodicGenerateDelayedNotifyTaskTest, self).setUp()
-
+ self.config(quota_zones=self.number_of_zones)
self.config(
- interval=5,
- batch_size=100,
+ interval=1,
+ batch_size=self.batch_size,
group="producer_task:delayed_notify"
)
-
self.generate_delayed_notify_task_fixture = self.useFixture(
fixtures.ZoneManagerTaskFixture(
tasks.PeriodicGenerateDelayedNotifyTask
)
)
- def _fetch_zones(self, query=None):
- # Fetch zones including deleted ones
- if query is None:
- query = tables.zones.select()
+ def _fetch_zones(self, query):
+ # Fetch zones including deleted ones.
return self.central_service.storage.session.execute(query).fetchall()
def _create_zones(self):
- # Create a number of zones; half of them with delayed_notify set
- for age in range(20):
- name = "example%d.org." % age
- delayed_notify = (age % 2 == 0)
+ # Create a number of zones; half of them with delayed_notify set.
+ for index in range(self.number_of_zones):
+ name = "example%d.org." % index
+ delayed_notify = (index % 2 == 0)
self.create_zone(
name=name,
delayed_notify=delayed_notify,
@@ -141,27 +130,14 @@ class PeriodicGenerateDelayedNotifyTaskTest(TaskTest):
def test_generate_delayed_notify_zones(self):
# Create zones and set some of them as pending update.
- self.generate_delayed_notify_task_fixture.task()
- self.config(quota_zones=1000)
- self.config(
- interval=1,
- batch_size=5,
- group="producer_task:delayed_notify"
- )
self._create_zones()
- zones = self._fetch_zones(tables.zones.select().where(
- tables.zones.c.delayed_notify == True)) # nopep8
- self.assertEqual(10, len(zones))
-
- self.generate_delayed_notify_task_fixture.task()
- zones = self._fetch_zones(tables.zones.select().where(
- tables.zones.c.delayed_notify == True)) # nopep8
- self.assertEqual(5, len(zones))
+ for remaining in reversed(range(0,
+ self.number_of_zones // 2,
+ self.batch_size)):
+ self.generate_delayed_notify_task_fixture.task()
- # Run the task and check if it reset the delayed_notify flag
- self.generate_delayed_notify_task_fixture.task()
+ zones = self._fetch_zones(tables.zones.select().where(
+ tables.zones.c.delayed_notify))
- zones = self._fetch_zones(tables.zones.select().where(
- tables.zones.c.delayed_notify == True)) # nopep8
- self.assertEqual(0, len(zones))
+ self.assertEqual(remaining, len(zones))
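The reworked tests replace hard-coded expectations (7, 10, 5, 0 zones) with counts derived from `number_of_zones` and `batch_size`, so each `task()` invocation is checked against exactly one batch of work. The loop arithmetic from the delayed-notify test, extracted as a standalone sketch:

    number_of_zones, batch_size = 20, 5
    # 10 zones start with delayed_notify set; every task() run clears
    # one batch of 5, so the expected leftovers are 5, then 0
    for remaining in reversed(range(0, number_of_zones // 2, batch_size)):
        print(remaining)   # -> 5, 0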
diff --git a/designate/tests/unit/test_central/test_basic.py b/designate/tests/unit/test_central/test_basic.py
index a1e2e0bd..5b882f4b 100644
--- a/designate/tests/unit/test_central/test_basic.py
+++ b/designate/tests/unit/test_central/test_basic.py
@@ -30,6 +30,7 @@ import testtools
from designate import exceptions
from designate import objects
from designate.central.service import Service
+from designate.tests import TestCase
from designate.tests.fixtures import random_seed
import designate.central.service
@@ -37,7 +38,6 @@ LOG = logging.getLogger(__name__)
# TODO(Federico): move this
-
def unwrap(f):
"""Unwrap a decorated function
Requires __wrapped_function and __wrapper_name to be set
@@ -218,6 +218,15 @@ fx_pool_manager = fixtures.MockPatch(
])
)
+fx_worker = fixtures.MockPatch(
+ 'designate.central.service.worker_rpcapi.WorkerAPI.get_instance',
+ mock.MagicMock(spec_set=[
+ 'create_zone',
+ 'update_zone',
+ 'delete_zone'
+ ])
+)
+
fx_disable_notification = fixtures.MockPatch('designate.central.notification')
@@ -227,7 +236,7 @@ class NotMockedError(NotImplementedError):
@patch('designate.central.service.storage',
mock.NonCallableMock(side_effect=NotMockedError))
-class CentralBasic(base.BaseTestCase):
+class CentralBasic(TestCase):
def setUp(self):
super(CentralBasic, self).setUp()
@@ -270,6 +279,7 @@ class CentralBasic(base.BaseTestCase):
])
designate.central.service.rpcapi = mock.Mock()
designate.central.service.pool_manager_rpcapi = mock.Mock()
+ designate.central.service.worker_rpcapi = mock.Mock()
self.context = mock.NonCallableMock(spec_set=[
'elevated',
'sudo',
@@ -971,10 +981,10 @@ class CentralZoneTestCase(CentralBasic):
out = self.service.delete_zone(self.context,
CentralZoneTestCase.zone__id)
assert not self.service.storage.delete_zone.called
- assert self.service.pool_manager_api.delete_zone.called
+ assert self.service.zone_api.delete_zone.called
assert designate.central.service.policy.check.called
ctx, deleted_dom = \
- self.service.pool_manager_api.delete_zone.call_args[0]
+ self.service.zone_api.delete_zone.call_args[0]
self.assertEqual('foo', deleted_dom.name)
self.assertEqual('foo', out.name)
pcheck, ctx, target = \
@@ -1069,14 +1079,27 @@ class CentralZoneTestCase(CentralBasic):
criterion='bogus'
)
- def test_touch_zone(self):
+ def _test_touch_zone(self, worker_enabled=True):
+ if not worker_enabled:
+ self.config(
+ enabled="False",
+ group="service:worker"
+ )
+
self.service._touch_zone_in_storage = Mock()
self.service.storage.get_zone.return_value = RoObject(
name='example.org.',
tenant_id='2',
)
- with fx_pool_manager:
- self.service.touch_zone(self.context, CentralZoneTestCase.zone__id)
+
+ if worker_enabled:
+ with fx_worker:
+ self.service.touch_zone(self.context,
+ CentralZoneTestCase.zone__id)
+ else:
+ with fx_pool_manager:
+ self.service.touch_zone(self.context,
+ CentralZoneTestCase.zone__id)
assert designate.central.service.policy.check.called
self.assertEqual(
@@ -1084,6 +1107,12 @@ class CentralZoneTestCase(CentralBasic):
designate.central.service.policy.check.call_args[0][0]
)
+ def test_touch_zone_with_worker_model(self):
+ self._test_touch_zone(worker_enabled=True)
+
+ def test_touch_zone_with_pool_manager_model(self):
+ self._test_touch_zone(worker_enabled=False)
+
def test_get_recordset_not_found(self):
self.service.storage.get_zone.return_value = RoObject(
id=CentralZoneTestCase.zone__id,
@@ -1185,7 +1214,12 @@ class CentralZoneTestCase(CentralBasic):
with testtools.ExpectedException(exceptions.BadRequest):
self.service.update_recordset(self.context, recordset)
- def test_update_recordset(self):
+ def _test_update_recordset(self, worker_enabled=True):
+ if not worker_enabled:
+ self.config(
+ enabled="False",
+ group="service:worker"
+ )
self.service.storage.get_zone.return_value = RoObject(
type='foo',
name='example.org.',
@@ -1200,10 +1234,13 @@ class CentralZoneTestCase(CentralBasic):
self.service._update_recordset_in_storage = Mock(
return_value=('x', 'y')
)
-
- with fx_pool_manager:
- self.service.update_recordset(self.context, recordset)
- assert self.service._update_recordset_in_storage.called
+ if worker_enabled:
+ with fx_worker:
+ self.service.update_recordset(self.context, recordset)
+ else:
+ with fx_pool_manager:
+ self.service.update_recordset(self.context, recordset)
+ assert self.service._update_recordset_in_storage.called
n, ctx, target = designate.central.service.policy.check.call_args[0]
self.assertEqual('update_recordset', n)
@@ -1214,6 +1251,12 @@ class CentralZoneTestCase(CentralBasic):
'recordset_id': '9c85d9b0-1e9d-4e99-aede-a06664f1af2e',
'tenant_id': '2'}, target)
+ def test_update_recordset_worker_model(self):
+ self._test_update_recordset(worker_enabled=True)
+
+ def test_update_recordset_pool_manager_model(self):
+ self._test_update_recordset(worker_enabled=False)
+
def test__update_recordset_in_storage(self):
recordset = Mock()
recordset.name = 'n'
@@ -1356,7 +1399,12 @@ class CentralZoneTestCase(CentralBasic):
CentralZoneTestCase.zone__id_2,
CentralZoneTestCase.recordset__id)
- def test_delete_recordset(self):
+ def _test_delete_recordset(self, worker_enabled=True):
+ if not worker_enabled:
+ self.config(
+ enabled="False",
+ group="service:worker"
+ )
mock_zone = RoObject(
action='foo',
id=CentralZoneTestCase.zone__id_2,
@@ -1378,14 +1426,27 @@ class CentralZoneTestCase(CentralBasic):
self.service._delete_recordset_in_storage = Mock(
return_value=(mock_rs, mock_zone)
)
- with fx_pool_manager:
- self.service.delete_recordset(self.context,
- CentralZoneTestCase.zone__id_2,
- CentralZoneTestCase.recordset__id)
- assert self.service.pool_manager_api.update_zone.called
+ if worker_enabled:
+ with fx_worker:
+ self.service.delete_recordset(self.context,
+ CentralZoneTestCase.zone__id_2,
+ CentralZoneTestCase.recordset__id)
+ assert self.service.zone_api.update_zone.called
+ else:
+ with fx_pool_manager:
+ self.service.delete_recordset(self.context,
+ CentralZoneTestCase.zone__id_2,
+ CentralZoneTestCase.recordset__id)
+ assert self.service.zone_api.update_zone.called
assert self.service._delete_recordset_in_storage.called
+ def test_delete_recordset_worker(self):
+ self._test_delete_recordset(worker_enabled=True)
+
+ def test_delete_recordset_pool_manager(self):
+ self._test_delete_recordset(worker_enabled=False)
+
def test__delete_recordset_in_storage(self):
def mock_uds(c, zone, inc):
return zone
@@ -1453,7 +1514,9 @@ class CentralZoneTestCase(CentralBasic):
RoObject(),
)
- def test_create_record(self):
+ def _test_create_record(self, worker_enabled=True):
+ if not worker_enabled:
+ self.config(enabled="False", group="service:worker")
self.service._create_record_in_storage = Mock(
return_value=(None, None)
)
@@ -1467,14 +1530,23 @@ class CentralZoneTestCase(CentralBasic):
self.service.storage.get_recordset.return_value = RoObject(
name='rs',
)
- with fx_pool_manager:
- self.service.create_record(
- self.context,
- CentralZoneTestCase.zone__id,
- CentralZoneTestCase.recordset__id,
- RoObject(),
- )
- assert self.service.pool_manager_api.update_zone.called
+
+ if worker_enabled:
+ with fx_worker:
+ self.service.create_record(
+ self.context,
+ CentralZoneTestCase.zone__id,
+ CentralZoneTestCase.recordset__id,
+ RoObject())
+ assert self.service.zone_api.update_zone.called
+ else:
+ with fx_pool_manager:
+ self.service.create_record(
+ self.context,
+ CentralZoneTestCase.zone__id,
+ CentralZoneTestCase.recordset__id,
+ RoObject())
+ assert self.service.zone_api.update_zone.called
n, ctx, target = designate.central.service.policy.check.call_args[0]
self.assertEqual('create_record', n)
@@ -1486,6 +1558,12 @@ class CentralZoneTestCase(CentralBasic):
'recordset_name': 'rs',
'tenant_id': '2'}, target)
+ def test_create_record_worker(self):
+ self._test_create_record(worker_enabled=True)
+
+ def test_create_record_pool_manager(self):
+ self._test_create_record(worker_enabled=False)
+
def test__create_record_in_storage(self):
self.service._enforce_record_quota = Mock()
self.service._create_record_in_storage(
@@ -1623,7 +1701,9 @@ class CentralZoneTestCase(CentralBasic):
with testtools.ExpectedException(exceptions.BadRequest):
self.service.update_record(self.context, record)
- def test_update_record(self):
+ def _test_update_record(self, worker_enabled=True):
+ if not worker_enabled:
+ self.config(enabled="False", group="service:worker")
self.service.storage.get_zone.return_value = RoObject(
action='a',
name='n',
@@ -1643,9 +1723,13 @@ class CentralZoneTestCase(CentralBasic):
return_value=('x', 'y')
)
- with fx_pool_manager:
- self.service.update_record(self.context, record)
- assert self.service._update_record_in_storage.called
+ if worker_enabled:
+ with fx_worker:
+ self.service.update_record(self.context, record)
+ else:
+ with fx_pool_manager:
+ self.service.update_record(self.context, record)
+ assert self.service._update_record_in_storage.called
n, ctx, target = designate.central.service.policy.check.call_args[0]
self.assertEqual('update_record', n)
@@ -1658,6 +1742,12 @@ class CentralZoneTestCase(CentralBasic):
'recordset_name': 'rsn',
'tenant_id': 'tid'}, target)
+ def test_update_record_worker(self):
+ self._test_update_record(worker_enabled=True)
+
+ def test_update_record_pool_manager(self):
+ self._test_update_record(worker_enabled=False)
+
def test__update_record_in_storage(self):
self.service._update_zone_in_storage = Mock()
self.service._update_record_in_storage(
@@ -1712,7 +1802,9 @@ class CentralZoneTestCase(CentralBasic):
CentralZoneTestCase.recordset__id,
CentralZoneTestCase.record__id)
- def test_delete_record(self):
+ def _test_delete_record(self, worker_enabled=True):
+ if not worker_enabled:
+ self.config(enabled="False", group="service:worker")
self.service._delete_record_in_storage = Mock(
return_value=(None, None)
)
@@ -1734,11 +1826,18 @@ class CentralZoneTestCase(CentralBasic):
managed=False,
)
- with fx_pool_manager:
- self.service.delete_record(self.context,
- CentralZoneTestCase.zone__id_2,
- CentralZoneTestCase.recordset__id_2,
- CentralZoneTestCase.record__id)
+ if worker_enabled:
+ with fx_worker:
+ self.service.delete_record(self.context,
+ CentralZoneTestCase.zone__id_2,
+ CentralZoneTestCase.recordset__id_2,
+ CentralZoneTestCase.record__id)
+ else:
+ with fx_pool_manager:
+ self.service.delete_record(self.context,
+ CentralZoneTestCase.zone__id_2,
+ CentralZoneTestCase.recordset__id_2,
+ CentralZoneTestCase.record__id)
t, ctx, target = designate.central.service.policy.check.call_args[0]
self.assertEqual('delete_record', t)
@@ -1751,6 +1850,12 @@ class CentralZoneTestCase(CentralBasic):
'recordset_name': 'rsn',
'tenant_id': 'tid'}, target)
+ def test_delete_record_worker(self):
+ self._test_delete_record(worker_enabled=True)
+
+ def test_delete_record_pool_manager(self):
+ self._test_delete_record(worker_enabled=False)
+
def test_delete_record_fail_on_managed(self):
self.service._delete_record_in_storage = Mock(
return_value=(None, None)
@@ -1968,7 +2073,7 @@ class CentralZoneExportTests(CentralBasic):
)
)
- self.service.worker_api.start_zone_export = Mock()
+ self.service.zone_api.start_zone_export = Mock()
out = self.service.create_zone_export(
self.context,
diff --git a/designate/tests/unit/test_mdns/test_service.py b/designate/tests/unit/test_mdns/test_service.py
index 10fb460f..9b4c4425 100644
--- a/designate/tests/unit/test_mdns/test_service.py
+++ b/designate/tests/unit/test_mdns/test_service.py
@@ -17,60 +17,61 @@
"""Unit-test MiniDNS service
"""
-import unittest
-
from oslotest import base
import mock
-from designate.tests.unit import RoObject
+import designate.rpc
import designate.mdns.service as mdns
-
-# TODO(Federico): fix skipped tests
+import designate.storage.base as storage
-@mock.patch.object(mdns.utils, 'cache_result')
-@mock.patch.object(mdns.notify, 'NotifyEndpoint')
-@mock.patch.object(mdns.xfr, 'XfrEndpoint')
class MdnsServiceTest(base.BaseTestCase):
+ @mock.patch.object(mdns.service.DNSService, '_start')
+ @mock.patch.object(designate.rpc, 'get_server')
+ def test_service_start(self, mock_rpc_server, mock_service_start):
+ self.mdns = mdns.Service()
+ self.mdns.start()
- @mock.patch.object(mdns.storage, 'get_storage', name='get_storage')
- @mock.patch.object(mdns.Service, '_rpc_endpoints')
- def setUp(self, *mocks):
- super(MdnsServiceTest, self).setUp()
- mdns.CONF = RoObject({
- 'service:mdns': RoObject(storage_driver=None)
- })
- # _rpc_endpoints is a property
- mock_rpc_endpoints = mocks[0]
- mock_rpc_endpoints.__get__ = mock.Mock(
- return_value=[mock.MagicMock(), mock.MagicMock()]
- )
+ self.assertTrue(mock_service_start.called)
+ self.assertTrue(mock_rpc_server.called)
+ def test_service_name(self):
self.mdns = mdns.Service()
- self.mdns.tg = mock.Mock(name='tg')
- @unittest.skip("Fails with new oslo.messaging release")
- def test_service_name(self, mc, mn, mx):
self.assertEqual('mdns', self.mdns.service_name)
- @unittest.skip("Fails when run together with designate/tests/test_mdns/")
- def test_rpc_endpoints(self, _, mock_notify, mock_xfr):
- out = self.mdns._rpc_endpoints
- self.assertEqual(2, len(out))
- assert isinstance(out[0], mock.MagicMock), out
- assert isinstance(out[1], mock.MagicMock), out
+ def test_rpc_endpoints(self):
+ self.mdns = mdns.Service()
+
+ endpoints = self.mdns._rpc_endpoints
+
+ self.assertIsInstance(endpoints[0], mdns.notify.NotifyEndpoint)
+ self.assertIsInstance(endpoints[1], mdns.xfr.XfrEndpoint)
+
+ @mock.patch.object(storage.Storage, 'get_driver')
+ def test_storage_driver(self, mock_get_driver):
+ mock_driver = mock.MagicMock()
+ mock_driver.name = 'noop_driver'
+ mock_get_driver.return_value = mock_driver
+
+ self.mdns = mdns.Service()
+
+ self.assertIsInstance(self.mdns.storage, mock.MagicMock)
+
+ self.assertTrue(mock_get_driver.called)
- @unittest.skip("Fails when run together with designate/tests/test_mdns/")
@mock.patch.object(mdns.handler, 'RequestHandler', name='reqh')
- @mock.patch.object(mdns.dnsutils, 'TsigInfoMiddleware', name='tsig')
- @mock.patch.object(mdns.dnsutils, 'SerializationMiddleware')
- def test_dns_application(self, *mocks):
- mock_serialization, mock_tsiginf, mock_req_handler = mocks[:3]
- mock_req_handler.return_value = mock.Mock(name='app')
+ @mock.patch.object(mdns.service.DNSService, '_start')
+ @mock.patch.object(mdns.utils, 'cache_result')
+ @mock.patch.object(storage.Storage, 'get_driver')
+ def test_dns_application(self, mock_get_driver, mock_cache_result,
+ mock_service_start, mock_req_handler):
+ mock_driver = mock.MagicMock()
+ mock_driver.name = 'noop_driver'
+ mock_get_driver.return_value = mock_driver
+
+ self.mdns = mdns.Service()
app = self.mdns._dns_application
- assert isinstance(app, mock.MagicMock), repr(app)
- assert mock_req_handler.called
- assert mock_tsiginf.called
- assert mock_serialization.called
+ self.assertIsInstance(app, mdns.dnsutils.DNSMiddleware)
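When reading the stacked `mock.patch.object` decorators in the rewritten tests above, remember that they inject their mocks bottom-up: the decorator closest to the function supplies the first mock argument. A minimal illustration:

    import mock

    @mock.patch('os.getcwd')   # outermost: bound to the *last* argument
    @mock.patch('os.getpid')   # innermost: bound to the *first* argument
    def check(mock_getpid, mock_getcwd):
        assert mock_getpid is not mock_getcwd

    check()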
diff --git a/designate/worker/__init__.py b/designate/worker/__init__.py
index 9d1b311a..4e6c9e23 100644
--- a/designate/worker/__init__.py
+++ b/designate/worker/__init__.py
@@ -22,12 +22,12 @@ worker_group = cfg.OptGroup(
)
OPTS = [
- cfg.BoolOpt('enabled', default=False,
+ cfg.BoolOpt('enabled', default=True,
help='Whether to send events to worker instead of '
'Pool Manager',
deprecated_for_removal=True,
- deprecated_reason='In Rocky, this option will disappear '
- 'because worker will be enabled by default'),
+ deprecated_reason='In Train, this option will disappear '
+ 'because pool manager will be removed'),
cfg.IntOpt('workers',
help='Number of Worker worker processes to spawn'),
cfg.IntOpt('threads', default=200,
diff --git a/devstack/designate_plugins/backend-powerdns b/devstack/designate_plugins/backend-powerdns
index f8db9f23..295ceccd 100644
--- a/devstack/designate_plugins/backend-powerdns
+++ b/devstack/designate_plugins/backend-powerdns
@@ -148,7 +148,7 @@ function init_designate_backend {
# create_designate_pool_configuration_backend - Perform post-pool config tasks
function create_designate_pool_configuration_backend {
# Init and migrate designate_pdns database
- designate-manage powerdns sync $DESIGNATE_POOL_ID
+ $DESIGNATE_BIN_DIR/designate-manage powerdns sync $DESIGNATE_POOL_ID
}
# start_designate_backend - start any external services
diff --git a/devstack/plugin.sh b/devstack/plugin.sh
index 2264a937..cb81df9a 100755
--- a/devstack/plugin.sh
+++ b/devstack/plugin.sh
@@ -34,7 +34,6 @@ function configure_designate {
# General Configuration
iniset_rpc_backend designate $DESIGNATE_CONF DEFAULT
- iniset $DESIGNATE_CONF DEFAULT rpc_response_timeout 60
iniset $DESIGNATE_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
iniset $DESIGNATE_CONF DEFAULT state_path $DESIGNATE_STATE_PATH
@@ -78,11 +77,12 @@ function configure_designate {
iniset $DESIGNATE_CONF service:mdns listen ${DESIGNATE_SERVICE_HOST}:${DESIGNATE_SERVICE_PORT_MDNS}
# Worker Configuration
- if ! is_service_enabled designate-pool-manager; then
- iniset $DESIGNATE_CONF service:worker enabled True
+ if is_service_enabled designate-worker; then
iniset $DESIGNATE_CONF service:worker notify True
iniset $DESIGNATE_CONF service:worker poll_max_retries $DESIGNATE_POLL_RETRIES
iniset $DESIGNATE_CONF service:worker poll_retry_interval $DESIGNATE_POLL_INTERVAL
+ else
+ iniset $DESIGNATE_CONF service:worker enabled False
fi
# Set up Notifications/Ceilometer Integration
@@ -194,6 +194,8 @@ function create_designate_accounts {
get_or_create_service "designate" "dns" "Designate DNS Service"
get_or_create_endpoint "dns" \
"$REGION_NAME" \
+ "$DESIGNATE_SERVICE_PROTOCOL://$DESIGNATE_SERVICE_HOST:$DESIGNATE_SERVICE_PORT/" \
+ "$DESIGNATE_SERVICE_PROTOCOL://$DESIGNATE_SERVICE_HOST:$DESIGNATE_SERVICE_PORT/" \
"$DESIGNATE_SERVICE_PROTOCOL://$DESIGNATE_SERVICE_HOST:$DESIGNATE_SERVICE_PORT/"
fi
}
@@ -201,7 +203,7 @@ function create_designate_accounts {
# create_designate_pool_configuration - Create Pool Configuration
function create_designate_pool_configuration {
# Sync Pools Config
- designate-manage pool update --file $DESIGNATE_CONF_DIR/pools.yaml
+ $DESIGNATE_BIN_DIR/designate-manage pool update --file $DESIGNATE_CONF_DIR/pools.yaml
# Allow Backends to do backend specific tasks
if function_exists create_designate_pool_configuration_backend; then
@@ -224,14 +226,14 @@ function init_designate {
recreate_database designate utf8
# Init and migrate designate database
- designate-manage database sync
+ $DESIGNATE_BIN_DIR/designate-manage database sync
if [ "$DESIGNATE_POOL_MANAGER_CACHE_DRIVER" == "sqlalchemy" ]; then
# (Re)create designate_pool_manager cache
recreate_database designate_pool_manager utf8
# Init and migrate designate pool-manager-cache
- designate-manage pool-manager-cache sync
+ $DESIGNATE_BIN_DIR/designate-manage pool-manager-cache sync
fi
init_designate_backend
@@ -338,7 +340,7 @@ if is_service_enabled designate; then
install_designateclient
echo_summary "Installing Designate"
- install_designate
+ stack_install_service designate
if is_service_enabled horizon; then
echo_summary "Installing Designate dashboard"
diff --git a/devstack/settings b/devstack/settings
index d64faf77..68adfc22 100644
--- a/devstack/settings
+++ b/devstack/settings
@@ -13,7 +13,7 @@ DESIGNATE_POLL_INTERVAL=${DESIGNATE_POLL_INTERVAL:-5}
DESIGNATE_POLL_RETRIES=${DESIGNATE_POLL_RETRIES:-6}
# Quota Options
-DESIGNATE_QUOTA_ZONES=${DESIGNATE_QUOTA_ZONES:-100}
+DESIGNATE_QUOTA_ZONES=${DESIGNATE_QUOTA_ZONES:-10}
DESIGNATE_QUOTA_ZONE_RECORDSETS=${DESIGNATE_QUOTA_ZONE_RECORDSETS:-500}
DESIGNATE_QUOTA_ZONE_RECORDS=${DESIGNATE_QUOTA_ZONE_RECORDS:-500}
DESIGNATE_QUOTA_RECORDSET_RECORDS=${DESIGNATE_QUOTA_RECORDSET_RECORDS:-20}
@@ -39,9 +39,16 @@ DESIGNATE_SERVICE_PORT_DNS=${DESIGNATE_SERVICE_PORT_DNS:-53}
DESIGNATE_SERVICE_PORT_MDNS=${DESIGNATE_SERVICE_PORT_MDNS:-5354}
DESIGNATE_SERVICE_PORT_AGENT=${DESIGNATE_SERVICE_PORT_AGENT:-5358}
-# Default directories
-DESIGNATE_BIN_DIR=$(get_python_exec_prefix)
DESIGNATE_DIR=$DEST/designate
+# Default directories
+# Support potential entry-points console scripts in VENV or not
+if [[ ${USE_VENV} = True ]]; then
+ PROJECT_VENV["designate"]=${DESIGNATE_DIR}.venv
+ DESIGNATE_BIN_DIR=${PROJECT_VENV["designate"]}/bin
+else
+ DESIGNATE_BIN_DIR=$(get_python_exec_prefix)
+fi
+
DESIGNATEDASHBOARD_DIR=$DEST/designate-dashboard
DESIGNATE_CONF_DIR=/etc/designate
DESIGNATE_STATE_PATH=${DESIGNATE_STATE_PATH:=$DATA_DIR/designate}
diff --git a/releasenotes/notes/CAA_NAPTR_records-5e2e466a5bc18a31.yaml b/releasenotes/notes/CAA_NAPTR_records-5e2e466a5bc18a31.yaml
new file mode 100644
index 00000000..5dd4667a
--- /dev/null
+++ b/releasenotes/notes/CAA_NAPTR_records-5e2e466a5bc18a31.yaml
@@ -0,0 +1,6 @@
+---
+features:
+ - |
+ CAA and NAPTR recordset types have been added. All users should be able to use these types
+ from the API and openstack client. These can be disabled (like other record types) by
+ setting the `[DEFAULT].supported-record-type` config variable in all designate services.
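Since the option named in this note is a list of permitted types, disabling CAA or NAPTR again means restating the list without them. A sketch of the configuration (the exact default type list is an assumption; check your release's defaults):

    [DEFAULT]
    # drop CAA and/or NAPTR from this list to disable them again
    supported-record-type = A, AAAA, CNAME, MX, SRV, TXT, SPF, NS, PTR, SSHFP, SOA, NAPTR, CAA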
diff --git a/releasenotes/notes/pool-manager-disabled-ff8582b5f86d2360.yaml b/releasenotes/notes/pool-manager-disabled-ff8582b5f86d2360.yaml
new file mode 100644
index 00000000..ec990c1d
--- /dev/null
+++ b/releasenotes/notes/pool-manager-disabled-ff8582b5f86d2360.yaml
@@ -0,0 +1,6 @@
+---
+upgrade:
+ - New installs will now have pool manager disabled by
+ default and will use the worker and producer services.
+ To continue to use pool manager set ``enabled=False``
+ in the ``[service:worker]`` section of your config.
\ No newline at end of file