summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--.zuul.yaml20
-rw-r--r--designate/api/middleware.py6
-rw-r--r--designate/api/v2/controllers/zones/tasks/exports.py9
-rw-r--r--designate/backend/agent.py40
-rw-r--r--designate/backend/impl_bind9.py31
-rw-r--r--designate/central/service.py1223
-rw-r--r--designate/common/constants.py4
-rw-r--r--designate/common/policies/base.py70
-rw-r--r--designate/common/policies/context.py50
-rw-r--r--designate/common/policies/diagnostics.py51
-rw-r--r--designate/common/policies/quota.py2
-rw-r--r--designate/common/policies/recordset.py64
-rw-r--r--designate/common/policies/tsigkey.py3
-rw-r--r--designate/common/policies/zone.py25
-rw-r--r--designate/common/policies/zone_export.py24
-rw-r--r--designate/common/policies/zone_import.py2
-rw-r--r--designate/common/policies/zone_transfer_accept.py6
-rw-r--r--designate/common/policies/zone_transfer_request.py25
-rw-r--r--designate/context.py5
-rw-r--r--designate/dnsutils.py152
-rw-r--r--designate/exceptions.py13
-rw-r--r--designate/mdns/notify.py24
-rw-r--r--designate/objects/adapters/api_v2/zone_transfer_request.py12
-rw-r--r--designate/objects/blacklist.py4
-rw-r--r--designate/objects/fields.py51
-rw-r--r--designate/policy.py15
-rw-r--r--designate/quota/base.py17
-rw-r--r--designate/quota/impl_storage.py2
-rw-r--r--designate/storage/impl_sqlalchemy/__init__.py9
-rw-r--r--designate/tests/__init__.py33
-rw-r--r--designate/tests/test_api/test_middleware.py3
-rw-r--r--designate/tests/test_api/test_v2/test_blacklists.py45
-rw-r--r--designate/tests/test_api/test_v2/test_floatingips.py19
-rw-r--r--designate/tests/test_api/test_v2/test_import_export.py6
-rw-r--r--designate/tests/test_central/test_service.py89
-rw-r--r--designate/tests/test_quota/test_quota.py156
-rw-r--r--designate/tests/unit/backend/test_agent.py56
-rw-r--r--designate/tests/unit/backend/test_bind9.py9
-rw-r--r--designate/tests/unit/mdns/test_notify.py76
-rw-r--r--designate/tests/unit/objects/test_caa_object.py10
-rw-r--r--designate/tests/unit/test_central/test_basic.py70
-rw-r--r--designate/tests/unit/test_dnsutils.py19
-rw-r--r--designate/tests/unit/workers/test_base_task.py88
-rw-r--r--designate/tests/unit/workers/test_zone_tasks.py20
-rw-r--r--designate/worker/README.md2
-rw-r--r--designate/worker/tasks/base.py48
-rw-r--r--designate/worker/tasks/zone.py38
-rw-r--r--designate/worker/utils.py82
-rw-r--r--releasenotes/notes/Fix-to-address-denylist-invalid-patterns-not-being-checked-ec1f1316ccc6cb1d.yaml16
-rw-r--r--releasenotes/notes/Fix-update-zone-create-zone-ada1fd81de479492.yaml4
-rw-r--r--releasenotes/notes/Support-scoped-tokens-6b7d6052a258cd11.yaml4
51 files changed, 1825 insertions, 1027 deletions
diff --git a/.zuul.yaml b/.zuul.yaml
index 5fb90d98..09f694b6 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -54,6 +54,24 @@
pre-run: playbooks/enable-fips.yaml
- job:
+ name: designate-bind9-scoped-tokens
+ post-run: playbooks/designate-bind9/post.yaml
+ parent: designate-base
+ vars:
+ devstack_local_conf:
+ post-config:
+ $DESIGNATE_CONF:
+ oslo_policy:
+ enforce_scope: True
+ enforce_new_defaults: True
+ test-config:
+ "$TEMPEST_CONFIG":
+ enforce_scope:
+ designate: True
+ dns_feature_enabled:
+ enforce_new_defaults: True
+
+- job:
name: designate-pdns4
post-run: playbooks/designate-pdns4/post.yaml
parent: designate-base
@@ -135,6 +153,7 @@
- designate-bind9
- designate-bind9-centos8stream-fips:
voting: false
+ - designate-bind9-scoped-tokens
- designate-pdns4
- designate-grenade-pdns4
- designate-ipv6-only-pdns4
@@ -143,6 +162,7 @@
gate:
jobs:
- designate-bind9
+ - designate-bind9-scoped-tokens
- designate-pdns4
- designate-grenade-pdns4
- designate-ipv6-only-pdns4
diff --git a/designate/api/middleware.py b/designate/api/middleware.py
index 07fd46b1..2a451606 100644
--- a/designate/api/middleware.py
+++ b/designate/api/middleware.py
@@ -128,14 +128,13 @@ class KeystoneContextMiddleware(ContextMiddleware):
pass
tenant_id = headers.get('X-Tenant-ID')
- if tenant_id is None:
- return flask.Response(status=401)
catalog = None
if headers.get('X-Service-Catalog'):
catalog = jsonutils.loads(headers.get('X-Service-Catalog'))
roles = headers.get('X-Roles').split(',')
+ system_scope = headers.get('Openstack-System-Scope')
try:
self.make_context(
@@ -144,7 +143,8 @@ class KeystoneContextMiddleware(ContextMiddleware):
user_id=headers.get('X-User-ID'),
project_id=tenant_id,
roles=roles,
- service_catalog=catalog
+ service_catalog=catalog,
+ system_scope=system_scope
)
except exceptions.Forbidden:
return flask.Response(status=403)
diff --git a/designate/api/v2/controllers/zones/tasks/exports.py b/designate/api/v2/controllers/zones/tasks/exports.py
index 60a852b9..29d97c78 100644
--- a/designate/api/v2/controllers/zones/tasks/exports.py
+++ b/designate/api/v2/controllers/zones/tasks/exports.py
@@ -16,10 +16,11 @@
import pecan
from oslo_log import log as logging
+from designate.api.v2.controllers import rest
+from designate.common import constants
from designate import exceptions
from designate import policy
from designate import utils
-from designate.api.v2.controllers import rest
from designate.objects.adapters import DesignateAdapter
LOG = logging.getLogger(__name__)
@@ -31,7 +32,11 @@ class ZoneExportController(rest.RestController):
@utils.validate_uuid('export_id')
def get_all(self, export_id):
context = pecan.request.environ['context']
- target = {'tenant_id': context.project_id}
+ if policy.enforce_new_defaults():
+ target = {constants.RBAC_PROJECT_ID: context.project_id}
+ else:
+ target = {'tenant_id': context.project_id}
+
policy.check('zone_export', context, target)
export = self.central_api.get_zone_export(context, export_id)
diff --git a/designate/backend/agent.py b/designate/backend/agent.py
index 8ddc1818..67f8c80c 100644
--- a/designate/backend/agent.py
+++ b/designate/backend/agent.py
@@ -24,25 +24,23 @@
Configured in the [service:pool_manager] section
"""
-import eventlet
import dns
-import dns.rdataclass
-import dns.rdatatype
import dns.exception
import dns.flags
-import dns.rcode
import dns.message
import dns.opcode
+import dns.rcode
+import dns.rdataclass
+import dns.rdatatype
from oslo_config import cfg
from oslo_log import log as logging
from designate.backend import base
-from designate import exceptions
+from designate.backend import private_codes
from designate.conf.agent import DEFAULT_AGENT_PORT
+from designate import dnsutils
+from designate import exceptions
from designate.mdns import rpcapi as mdns_api
-import designate.backend.private_codes as pcodes
-
-dns_query = eventlet.import_patched('dns.query')
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
@@ -72,9 +70,9 @@ class AgentPoolBackend(base.Backend):
response, retry = self._make_and_send_dns_message(
zone.name,
self.timeout,
- pcodes.CC,
- pcodes.CREATE,
- pcodes.CLASSCC,
+ private_codes.CC,
+ private_codes.CREATE,
+ private_codes.CLASSCC,
self.host,
self.port
)
@@ -100,9 +98,9 @@ class AgentPoolBackend(base.Backend):
response, retry = self._make_and_send_dns_message(
zone.name,
self.timeout,
- pcodes.CC,
- pcodes.DELETE,
- pcodes.CLASSCC,
+ private_codes.CC,
+ private_codes.DELETE,
+ private_codes.CLASSCC,
self.host,
self.port
)
@@ -134,7 +132,7 @@ class AgentPoolBackend(base.Backend):
'port': dest_port, 'timeout': timeout,
'retry': retry})
response = None
- elif isinstance(response, dns_query.BadResponse):
+ elif isinstance(response, dns.query.BadResponse):
LOG.warning("Got BadResponse while trying to send '%(msg)s' for "
"'%(zone)s' to '%(server)s:%(port)d'. "
"Timeout='%(timeout)d' seconds. Retry='%(retry)d'",
@@ -173,14 +171,10 @@ class AgentPoolBackend(base.Backend):
def _send_dns_message(self, dns_message, dest_ip, dest_port, timeout):
try:
- if not CONF['service:mdns'].all_tcp:
- response = dns_query.udp(
- dns_message, dest_ip, port=dest_port, timeout=timeout)
- else:
- response = dns_query.tcp(
- dns_message, dest_ip, port=dest_port, timeout=timeout)
- return response
+ return dnsutils.send_dns_message(
+ dns_message, dest_ip, port=dest_port, timeout=timeout
+ )
except dns.exception.Timeout as timeout:
return timeout
- except dns_query.BadResponse as badResponse:
+ except dns.query.BadResponse as badResponse:
return badResponse
diff --git a/designate/backend/impl_bind9.py b/designate/backend/impl_bind9.py
index 8f04bfdc..060c8f9c 100644
--- a/designate/backend/impl_bind9.py
+++ b/designate/backend/impl_bind9.py
@@ -108,6 +108,28 @@ class Bind9Backend(base.Backend):
context, zone, self._host, self._port, self.timeout,
self.retry_interval, self.max_retries, self.delay)
+ def get_zone(self, context, zone):
+ """Returns True if zone exists and False if not"""
+ LOG.debug('Get Zone')
+
+ view = 'in %s' % self._view if self._view else ''
+
+ rndc_op = [
+ 'showzone',
+ '%s %s' % (zone['name'].rstrip('.'), view),
+ ]
+ try:
+ self._execute_rndc(rndc_op)
+ except exceptions.Backend as e:
+ if "not found" in str(e):
+ LOG.debug('Zone %s not found on the backend', zone['name'])
+ return False
+ else:
+ LOG.warning('RNDC call failure: %s', e)
+ raise e
+
+ return True
+
def delete_zone(self, context, zone):
"""Delete a new Zone by executin rndc
Do not raise exceptions if the zone does not exist.
@@ -135,14 +157,21 @@ class Bind9Backend(base.Backend):
"""
Update a DNS zone.
- This will execute a rndc modzone as the zone
+ This will execute a rndc modzone if the zone
already exists but masters might need to be refreshed.
+ Or, will create the zone if it does not exist.
:param context: Security context information.
:param zone: the DNS zone.
"""
LOG.debug('Update Zone')
+ if not self.get_zone(context, zone):
+ # If zone does not exist yet, create it
+ self.create_zone(context, zone)
+ # Newly created zone won't require an update
+ return
+
masters = []
for master in self.masters:
host = master['host']
diff --git a/designate/central/service.py b/designate/central/service.py
index 35cd0e3c..fbe880c2 100644
--- a/designate/central/service.py
+++ b/designate/central/service.py
@@ -26,13 +26,13 @@ import random
from random import SystemRandom
import time
-from eventlet import tpool
-from dns import zone as dnszone
from dns import exception as dnsexception
+from dns import zone as dnszone
from oslo_config import cfg
import oslo_messaging as messaging
from oslo_log import log as logging
+from designate.common import constants
from designate import context as dcontext
from designate import coordination
from designate import exceptions
@@ -483,6 +483,12 @@ class Service(service.RPCService):
raise exceptions.InvalidTTL('TTL is below the minimum: %s'
% min_ttl)
+ def _is_valid_project_id(self, project_id):
+ if project_id is None:
+ raise exceptions.MissingProjectID(
+ "A project ID must be specified when not using a project "
+ "scoped token.")
+
def _increment_zone_serial(self, context, zone, set_delayed_notify=False):
"""Update the zone serial and the SOA record
Optionally set delayed_notify to have PM issue delayed notify
@@ -554,51 +560,55 @@ class Service(service.RPCService):
objects.Record(data=r, managed=True) for r in ns_records])
values = {
'name': zone['name'],
- 'type': "NS",
+ 'type': 'NS',
'records': recordlist
}
ns, zone = self._create_recordset_in_storage(
context, zone, objects.RecordSet(**values),
- increment_serial=False)
+ increment_serial=False
+ )
return ns
def _add_ns(self, context, zone, ns_record):
# Get NS recordset
# If the zone doesn't have an NS recordset yet, create one
- recordsets = self.find_recordsets(
- context, criterion={'zone_id': zone['id'], 'type': "NS"}
- )
-
- managed = []
- for rs in recordsets:
- if rs.managed:
- managed.append(rs)
-
- if len(managed) == 0:
+ try:
+ recordset = self.find_recordset(
+ context,
+ criterion={
+ 'zone_id': zone['id'],
+ 'name': zone['name'],
+ 'type': 'NS'
+ }
+ )
+ except exceptions.RecordSetNotFound:
self._create_ns(context, zone, [ns_record])
return
- elif len(managed) != 1:
- raise exceptions.RecordSetNotFound("No valid recordset found")
-
- ns_recordset = managed[0]
# Add new record to recordset based on the new nameserver
- ns_recordset.records.append(
- objects.Record(data=ns_record, managed=True))
+ recordset.records.append(
+ objects.Record(data=ns_record, managed=True)
+ )
- self._update_recordset_in_storage(context, zone, ns_recordset,
+ self._update_recordset_in_storage(context, zone, recordset,
set_delayed_notify=True)
def _delete_ns(self, context, zone, ns_record):
- ns_recordset = self.find_recordset(
- context, criterion={'zone_id': zone['id'], 'type': "NS"})
+ recordset = self.find_recordset(
+ context,
+ criterion={
+ 'zone_id': zone['id'],
+ 'name': zone['name'],
+ 'type': 'NS'
+ }
+ )
- for record in copy.deepcopy(ns_recordset.records):
+ for record in list(recordset.records):
if record.data == ns_record:
- ns_recordset.records.remove(record)
+ recordset.records.remove(record)
- self._update_recordset_in_storage(context, zone, ns_recordset,
+ self._update_recordset_in_storage(context, zone, recordset,
set_delayed_notify=True)
# Quota Enforcement Methods
@@ -660,17 +670,30 @@ class Service(service.RPCService):
# Quota Methods
@rpc.expected_exceptions()
def get_quotas(self, context, tenant_id):
- target = {'tenant_id': tenant_id}
+ if policy.enforce_new_defaults():
+ target = {constants.RBAC_PROJECT_ID: tenant_id,
+ 'all_tenants': context.all_tenants}
+ else:
+ target = {'tenant_id': tenant_id}
policy.check('get_quotas', context, target)
- if tenant_id != context.project_id and not context.all_tenants:
+ # TODO(johnsom) Deprecated since Wallaby, remove with legacy default
+ # policies. System scoped admin doesn't have a project_id
+ if (tenant_id != context.project_id and not context.all_tenants and not
+ policy.enforce_new_defaults()):
raise exceptions.Forbidden()
return self.quota.get_quotas(context, tenant_id)
@rpc.expected_exceptions()
def get_quota(self, context, tenant_id, resource):
- target = {'tenant_id': tenant_id, 'resource': resource}
+ if policy.enforce_new_defaults():
+ target = {
+ constants.RBAC_PROJECT_ID: tenant_id,
+ 'resource': resource
+ }
+ else:
+ target = {'tenant_id': tenant_id, 'resource': resource}
policy.check('get_quota', context, target)
return self.quota.get_quota(context, tenant_id, resource)
@@ -678,21 +701,34 @@ class Service(service.RPCService):
@rpc.expected_exceptions()
@transaction
def set_quota(self, context, tenant_id, resource, hard_limit):
- target = {
- 'tenant_id': tenant_id,
- 'resource': resource,
- 'hard_limit': hard_limit,
- }
+ if policy.enforce_new_defaults():
+ target = {
+ constants.RBAC_PROJECT_ID: tenant_id,
+ 'resource': resource,
+ 'hard_limit': hard_limit,
+ }
+ else:
+ target = {
+ 'tenant_id': tenant_id,
+ 'resource': resource,
+ 'hard_limit': hard_limit,
+ }
policy.check('set_quota', context, target)
- if tenant_id != context.project_id and not context.all_tenants:
+ # TODO(johnsom) Deprecated since Wallaby, remove with legacy default
+ # policies. System scoped admin doesn't have a project_id
+ if (tenant_id != context.project_id and not context.all_tenants and not
+ policy.enforce_new_defaults()):
raise exceptions.Forbidden()
return self.quota.set_quota(context, tenant_id, resource, hard_limit)
@transaction
def reset_quotas(self, context, tenant_id):
- target = {'tenant_id': tenant_id}
+ if policy.enforce_new_defaults():
+ target = {constants.RBAC_PROJECT_ID: tenant_id}
+ else:
+ target = {'tenant_id': tenant_id}
policy.check('reset_quotas', context, target)
self.quota.reset_quotas(context, tenant_id)
@@ -808,9 +844,10 @@ class Service(service.RPCService):
@rpc.expected_exceptions()
def get_tenant(self, context, tenant_id):
- target = {
- 'tenant_id': tenant_id
- }
+ if policy.enforce_new_defaults():
+ target = {constants.RBAC_PROJECT_ID: tenant_id}
+ else:
+ target = {'tenant_id': tenant_id}
policy.check('get_tenant', context, target)
@@ -857,13 +894,21 @@ class Service(service.RPCService):
# Default to creating in the current users tenant
zone.tenant_id = zone.tenant_id or context.project_id
- target = {
- 'tenant_id': zone.tenant_id,
- 'zone_name': zone.name
- }
+ if policy.enforce_new_defaults():
+ target = {
+ constants.RBAC_PROJECT_ID: zone.tenant_id,
+ 'zone_name': zone.name
+ }
+ else:
+ target = {
+ 'tenant_id': zone.tenant_id,
+ 'zone_name': zone.name
+ }
policy.check('create_zone', context, target)
+ self._is_valid_project_id(zone.tenant_id)
+
# Ensure the tenant has enough quota to continue
self._enforce_zone_quota(context, zone.tenant_id)
@@ -971,11 +1016,19 @@ class Service(service.RPCService):
"""
zone = self.storage.get_zone(context, zone_id)
- target = {
- 'zone_id': zone_id,
- 'zone_name': zone.name,
- 'tenant_id': zone.tenant_id
- }
+ if policy.enforce_new_defaults():
+ target = {
+ 'zone_id': zone_id,
+ 'zone_name': zone.name,
+ constants.RBAC_PROJECT_ID: zone.tenant_id
+ }
+ else:
+ target = {
+ 'zone_id': zone_id,
+ 'zone_name': zone.name,
+ 'tenant_id': zone.tenant_id
+ }
+
policy.check('get_zone', context, target)
return zone
@@ -988,11 +1041,19 @@ class Service(service.RPCService):
pool_id = cfg.CONF['service:central'].default_pool_id
else:
zone = self.storage.get_zone(context, zone_id)
- target = {
- 'zone_id': zone_id,
- 'zone_name': zone.name,
- 'tenant_id': zone.tenant_id
- }
+
+ if policy.enforce_new_defaults():
+ target = {
+ 'zone_id': zone_id,
+ 'zone_name': zone.name,
+ constants.RBAC_PROJECT_ID: zone.tenant_id
+ }
+ else:
+ target = {
+ 'zone_id': zone_id,
+ 'zone_name': zone.name,
+ 'tenant_id': zone.tenant_id
+ }
pool_id = zone.pool_id
policy.check('get_zone_ns_records', context, target)
@@ -1010,7 +1071,11 @@ class Service(service.RPCService):
sort_key=None, sort_dir=None):
"""List existing zones including the ones flagged for deletion.
"""
- target = {'tenant_id': context.project_id}
+ if policy.enforce_new_defaults():
+ target = {constants.RBAC_PROJECT_ID: context.project_id}
+ else:
+ target = {'tenant_id': context.project_id}
+
policy.check('find_zones', context, target)
return self.storage.find_zones(context, criterion, marker, limit,
@@ -1018,7 +1083,11 @@ class Service(service.RPCService):
@rpc.expected_exceptions()
def find_zone(self, context, criterion=None):
- target = {'tenant_id': context.project_id}
+ if policy.enforce_new_defaults():
+ target = {constants.RBAC_PROJECT_ID: context.project_id}
+ else:
+ target = {'tenant_id': context.project_id}
+
policy.check('find_zone', context, target)
return self.storage.find_zone(context, criterion)
@@ -1032,11 +1101,19 @@ class Service(service.RPCService):
:returns: updated zone
"""
- target = {
- 'zone_id': zone.obj_get_original_value('id'),
- 'zone_name': zone.obj_get_original_value('name'),
- 'tenant_id': zone.obj_get_original_value('tenant_id'),
- }
+ if policy.enforce_new_defaults():
+ target = {
+ 'zone_id': zone.obj_get_original_value('id'),
+ 'zone_name': zone.obj_get_original_value('name'),
+ constants.RBAC_PROJECT_ID: (
+ zone.obj_get_original_value('tenant_id')),
+ }
+ else:
+ target = {
+ 'zone_id': zone.obj_get_original_value('id'),
+ 'zone_name': zone.obj_get_original_value('name'),
+ 'tenant_id': zone.obj_get_original_value('tenant_id'),
+ }
policy.check('update_zone', context, target)
@@ -1102,11 +1179,18 @@ class Service(service.RPCService):
"""
zone = self.storage.get_zone(context, zone_id)
- target = {
- 'zone_id': zone_id,
- 'zone_name': zone.name,
- 'tenant_id': zone.tenant_id
- }
+ if policy.enforce_new_defaults():
+ target = {
+ 'zone_id': zone_id,
+ 'zone_name': zone.name,
+ constants.RBAC_PROJECT_ID: zone.tenant_id
+ }
+ else:
+ target = {
+ 'zone_id': zone_id,
+ 'zone_name': zone.name,
+ 'tenant_id': zone.tenant_id
+ }
if hasattr(context, 'abandon') and context.abandon:
policy.check('abandon_zone', context, target)
@@ -1161,11 +1245,18 @@ class Service(service.RPCService):
def xfr_zone(self, context, zone_id):
zone = self.storage.get_zone(context, zone_id)
- target = {
- 'zone_id': zone_id,
- 'zone_name': zone.name,
- 'tenant_id': zone.tenant_id
- }
+ if policy.enforce_new_defaults():
+ target = {
+ 'zone_id': zone_id,
+ 'zone_name': zone.name,
+ constants.RBAC_PROJECT_ID: zone.tenant_id
+ }
+ else:
+ target = {
+ 'zone_id': zone_id,
+ 'zone_name': zone.name,
+ 'tenant_id': zone.tenant_id
+ }
policy.check('xfr_zone', context, target)
@@ -1191,9 +1282,14 @@ class Service(service.RPCService):
if criterion is None:
criterion = {}
- target = {
- 'tenant_id': criterion.get('tenant_id', None)
- }
+ if policy.enforce_new_defaults():
+ target = {
+ constants.RBAC_PROJECT_ID: criterion.get('tenant_id', None)
+ }
+ else:
+ target = {
+ 'tenant_id': criterion.get('tenant_id', None)
+ }
policy.check('count_zones', context, target)
@@ -1235,11 +1331,18 @@ class Service(service.RPCService):
def touch_zone(self, context, zone_id):
zone = self.storage.get_zone(context, zone_id)
- target = {
- 'zone_id': zone_id,
- 'zone_name': zone.name,
- 'tenant_id': zone.tenant_id
- }
+ if policy.enforce_new_defaults():
+ target = {
+ 'zone_id': zone_id,
+ 'zone_name': zone.name,
+ constants.RBAC_PROJECT_ID: zone.tenant_id
+ }
+ else:
+ target = {
+ 'zone_id': zone_id,
+ 'zone_name': zone.name,
+ 'tenant_id': zone.tenant_id
+ }
policy.check('touch_zone', context, target)
@@ -1268,13 +1371,22 @@ class Service(service.RPCService):
if zone.action == 'DELETE':
raise exceptions.BadRequest('Can not update a deleting zone')
- target = {
- 'zone_id': zone_id,
- 'zone_name': zone.name,
- 'zone_type': zone.type,
- 'recordset_name': recordset.name,
- 'tenant_id': zone.tenant_id,
- }
+ if policy.enforce_new_defaults():
+ target = {
+ 'zone_id': zone_id,
+ 'zone_name': zone.name,
+ 'zone_type': zone.type,
+ 'recordset_name': recordset.name,
+ constants.RBAC_PROJECT_ID: zone.tenant_id,
+ }
+ else:
+ target = {
+ 'zone_id': zone_id,
+ 'zone_name': zone.name,
+ 'zone_type': zone.type,
+ 'recordset_name': recordset.name,
+ 'tenant_id': zone.tenant_id,
+ }
policy.check('create_recordset', context, target)
@@ -1359,12 +1471,20 @@ class Service(service.RPCService):
else:
zone = self.storage.get_zone(context, recordset.zone_id)
- target = {
- 'zone_id': zone.id,
- 'zone_name': zone.name,
- 'recordset_id': recordset.id,
- 'tenant_id': zone.tenant_id,
- }
+ if policy.enforce_new_defaults():
+ target = {
+ 'zone_id': zone.id,
+ 'zone_name': zone.name,
+ 'recordset_id': recordset.id,
+ constants.RBAC_PROJECT_ID: zone.tenant_id,
+ }
+ else:
+ target = {
+ 'zone_id': zone.id,
+ 'zone_name': zone.name,
+ 'recordset_id': recordset.id,
+ 'tenant_id': zone.tenant_id,
+ }
policy.check('get_recordset', context, target)
@@ -1377,7 +1497,11 @@ class Service(service.RPCService):
@rpc.expected_exceptions()
def find_recordsets(self, context, criterion=None, marker=None, limit=None,
sort_key=None, sort_dir=None, force_index=False):
- target = {'tenant_id': context.project_id}
+ if policy.enforce_new_defaults():
+ target = {constants.RBAC_PROJECT_ID: context.project_id}
+ else:
+ target = {'tenant_id': context.project_id}
+
policy.check('find_recordsets', context, target)
recordsets = self.storage.find_recordsets(context, criterion, marker,
@@ -1388,7 +1512,10 @@ class Service(service.RPCService):
@rpc.expected_exceptions()
def find_recordset(self, context, criterion=None):
- target = {'tenant_id': context.project_id}
+ if policy.enforce_new_defaults():
+ target = {constants.RBAC_PROJECT_ID: context.project_id}
+ else:
+ target = {'tenant_id': context.project_id}
policy.check('find_recordset', context, target)
recordset = self.storage.find_recordset(context, criterion)
@@ -1432,13 +1559,22 @@ class Service(service.RPCService):
if zone.action == 'DELETE':
raise exceptions.BadRequest('Can not update a deleting zone')
- target = {
- 'zone_id': recordset.obj_get_original_value('zone_id'),
- 'zone_type': zone.type,
- 'recordset_id': recordset.obj_get_original_value('id'),
- 'zone_name': zone.name,
- 'tenant_id': zone.tenant_id
- }
+ if policy.enforce_new_defaults():
+ target = {
+ 'zone_id': recordset.obj_get_original_value('zone_id'),
+ 'zone_type': zone.type,
+ 'recordset_id': recordset.obj_get_original_value('id'),
+ 'zone_name': zone.name,
+ constants.RBAC_PROJECT_ID: zone.tenant_id
+ }
+ else:
+ target = {
+ 'zone_id': recordset.obj_get_original_value('zone_id'),
+ 'zone_type': zone.type,
+ 'recordset_id': recordset.obj_get_original_value('id'),
+ 'zone_name': zone.name,
+ 'tenant_id': zone.tenant_id
+ }
policy.check('update_recordset', context, target)
@@ -1478,7 +1614,7 @@ class Service(service.RPCService):
# Update the recordset
recordset = self.storage.update_recordset(context, recordset)
- return (recordset, zone)
+ return recordset, zone
@rpc.expected_exceptions()
@notification('dns.recordset.delete')
@@ -1496,13 +1632,22 @@ class Service(service.RPCService):
if zone.action == 'DELETE':
raise exceptions.BadRequest('Can not update a deleting zone')
- target = {
- 'zone_id': zone_id,
- 'zone_name': zone.name,
- 'zone_type': zone.type,
- 'recordset_id': recordset.id,
- 'tenant_id': zone.tenant_id
- }
+ if policy.enforce_new_defaults():
+ target = {
+ 'zone_id': zone_id,
+ 'zone_name': zone.name,
+ 'zone_type': zone.type,
+ 'recordset_id': recordset.id,
+ constants.RBAC_PROJECT_ID: zone.tenant_id
+ }
+ else:
+ target = {
+ 'zone_id': zone_id,
+ 'zone_name': zone.name,
+ 'zone_type': zone.type,
+ 'recordset_id': recordset.id,
+ 'tenant_id': zone.tenant_id
+ }
policy.check('delete_recordset', context, target)
@@ -1545,9 +1690,12 @@ class Service(service.RPCService):
if criterion is None:
criterion = {}
- target = {
- 'tenant_id': criterion.get('tenant_id', None)
- }
+ if policy.enforce_new_defaults():
+ target = {
+ constants.RBAC_PROJECT_ID: criterion.get('tenant_id', None)
+ }
+ else:
+ target = {'tenant_id': criterion.get('tenant_id', None)}
policy.check('count_recordsets', context, target)
@@ -1567,14 +1715,24 @@ class Service(service.RPCService):
recordset = self.storage.get_recordset(context, recordset_id)
- target = {
- 'zone_id': zone_id,
- 'zone_name': zone.name,
- 'zone_type': zone.type,
- 'recordset_id': recordset_id,
- 'recordset_name': recordset.name,
- 'tenant_id': zone.tenant_id
- }
+ if policy.enforce_new_defaults():
+ target = {
+ 'zone_id': zone_id,
+ 'zone_name': zone.name,
+ 'zone_type': zone.type,
+ 'recordset_id': recordset_id,
+ 'recordset_name': recordset.name,
+ constants.RBAC_PROJECT_ID: zone.tenant_id
+ }
+ else:
+ target = {
+ 'zone_id': zone_id,
+ 'zone_name': zone.name,
+ 'zone_type': zone.type,
+ 'recordset_id': recordset_id,
+ 'recordset_name': recordset.name,
+ 'tenant_id': zone.tenant_id
+ }
policy.check('create_record', context, target)
@@ -1605,7 +1763,7 @@ class Service(service.RPCService):
record = self.storage.create_record(context, zone.id, recordset.id,
record)
- return (record, zone)
+ return record, zone
@rpc.expected_exceptions()
def get_record(self, context, zone_id, recordset_id, record_id):
@@ -1621,14 +1779,24 @@ class Service(service.RPCService):
if recordset.id != record.recordset_id:
raise exceptions.RecordNotFound()
- target = {
- 'zone_id': zone_id,
- 'zone_name': zone.name,
- 'recordset_id': recordset_id,
- 'recordset_name': recordset.name,
- 'record_id': record.id,
- 'tenant_id': zone.tenant_id
- }
+ if policy.enforce_new_defaults():
+ target = {
+ 'zone_id': zone_id,
+ 'zone_name': zone.name,
+ 'recordset_id': recordset_id,
+ 'recordset_name': recordset.name,
+ 'record_id': record.id,
+ constants.RBAC_PROJECT_ID: zone.tenant_id
+ }
+ else:
+ target = {
+ 'zone_id': zone_id,
+ 'zone_name': zone.name,
+ 'recordset_id': recordset_id,
+ 'recordset_name': recordset.name,
+ 'record_id': record.id,
+ 'tenant_id': zone.tenant_id
+ }
policy.check('get_record', context, target)
@@ -1637,7 +1805,11 @@ class Service(service.RPCService):
@rpc.expected_exceptions()
def find_records(self, context, criterion=None, marker=None, limit=None,
sort_key=None, sort_dir=None):
- target = {'tenant_id': context.project_id}
+
+ if policy.enforce_new_defaults():
+ target = {constants.RBAC_PROJECT_ID: context.project_id}
+ else:
+ target = {'tenant_id': context.project_id}
policy.check('find_records', context, target)
return self.storage.find_records(context, criterion, marker, limit,
@@ -1645,7 +1817,11 @@ class Service(service.RPCService):
@rpc.expected_exceptions()
def find_record(self, context, criterion=None):
- target = {'tenant_id': context.project_id}
+
+ if policy.enforce_new_defaults():
+ target = {constants.RBAC_PROJECT_ID: context.project_id}
+ else:
+ target = {'tenant_id': context.project_id}
policy.check('find_record', context, target)
return self.storage.find_record(context, criterion)
@@ -1679,15 +1855,26 @@ class Service(service.RPCService):
raise exceptions.BadRequest('Moving a recordset between '
'recordsets is not allowed')
- target = {
- 'zone_id': record.obj_get_original_value('zone_id'),
- 'zone_name': zone.name,
- 'zone_type': zone.type,
- 'recordset_id': record.obj_get_original_value('recordset_id'),
- 'recordset_name': recordset.name,
- 'record_id': record.obj_get_original_value('id'),
- 'tenant_id': zone.tenant_id
- }
+ if policy.enforce_new_defaults():
+ target = {
+ 'zone_id': record.obj_get_original_value('zone_id'),
+ 'zone_name': zone.name,
+ 'zone_type': zone.type,
+ 'recordset_id': record.obj_get_original_value('recordset_id'),
+ 'recordset_name': recordset.name,
+ 'record_id': record.obj_get_original_value('id'),
+ constants.RBAC_PROJECT_ID: zone.tenant_id
+ }
+ else:
+ target = {
+ 'zone_id': record.obj_get_original_value('zone_id'),
+ 'zone_name': zone.name,
+ 'zone_type': zone.type,
+ 'recordset_id': record.obj_get_original_value('recordset_id'),
+ 'recordset_name': recordset.name,
+ 'record_id': record.obj_get_original_value('id'),
+ 'tenant_id': zone.tenant_id
+ }
policy.check('update_record', context, target)
@@ -1717,7 +1904,7 @@ class Service(service.RPCService):
# Update the record
record = self.storage.update_record(context, record)
- return (record, zone)
+ return record, zone
@rpc.expected_exceptions()
@notification('dns.record.delete')
@@ -1741,15 +1928,26 @@ class Service(service.RPCService):
if recordset.id != record.recordset_id:
raise exceptions.RecordNotFound()
- target = {
- 'zone_id': zone_id,
- 'zone_name': zone.name,
- 'zone_type': zone.type,
- 'recordset_id': recordset_id,
- 'recordset_name': recordset.name,
- 'record_id': record.id,
- 'tenant_id': zone.tenant_id
- }
+ if policy.enforce_new_defaults():
+ target = {
+ 'zone_id': zone_id,
+ 'zone_name': zone.name,
+ 'zone_type': zone.type,
+ 'recordset_id': recordset_id,
+ 'recordset_name': recordset.name,
+ 'record_id': record.id,
+ constants.RBAC_PROJECT_ID: zone.tenant_id
+ }
+ else:
+ target = {
+ 'zone_id': zone_id,
+ 'zone_name': zone.name,
+ 'zone_type': zone.type,
+ 'recordset_id': recordset_id,
+ 'recordset_name': recordset.name,
+ 'record_id': record.id,
+ 'tenant_id': zone.tenant_id
+ }
policy.check('delete_record', context, target)
@@ -1778,16 +1976,19 @@ class Service(service.RPCService):
record = self.storage.update_record(context, record)
- return (record, zone)
+ return record, zone
@rpc.expected_exceptions()
def count_records(self, context, criterion=None):
if criterion is None:
criterion = {}
- target = {
- 'tenant_id': criterion.get('tenant_id', None)
- }
+ if policy.enforce_new_defaults():
+ target = {
+ constants.RBAC_PROJECT_ID: criterion.get('tenant_id', None)
+ }
+ else:
+ target = {'tenant_id': criterion.get('tenant_id', None)}
policy.check('count_records', context, target)
return self.storage.count_records(context, criterion)
@@ -1814,11 +2015,18 @@ class Service(service.RPCService):
def sync_zone(self, context, zone_id):
zone = self.storage.get_zone(context, zone_id)
- target = {
- 'zone_id': zone_id,
- 'zone_name': zone.name,
- 'tenant_id': zone.tenant_id
- }
+ if policy.enforce_new_defaults():
+ target = {
+ 'zone_id': zone_id,
+ 'zone_name': zone.name,
+ constants.RBAC_PROJECT_ID: zone.tenant_id
+ }
+ else:
+ target = {
+ 'zone_id': zone_id,
+ 'zone_name': zone.name,
+ 'tenant_id': zone.tenant_id
+ }
policy.check('diagnostics_sync_zone', context, target)
@@ -1830,14 +2038,24 @@ class Service(service.RPCService):
zone = self.storage.get_zone(context, zone_id)
recordset = self.storage.get_recordset(context, recordset_id)
- target = {
- 'zone_id': zone_id,
- 'zone_name': zone.name,
- 'recordset_id': recordset_id,
- 'recordset_name': recordset.name,
- 'record_id': record_id,
- 'tenant_id': zone.tenant_id
- }
+ if policy.enforce_new_defaults():
+ target = {
+ 'zone_id': zone_id,
+ 'zone_name': zone.name,
+ 'recordset_id': recordset_id,
+ 'recordset_name': recordset.name,
+ 'record_id': record_id,
+ constants.RBAC_PROJECT_ID: zone.tenant_id
+ }
+ else:
+ target = {
+ 'zone_id': zone_id,
+ 'zone_name': zone.name,
+ 'recordset_id': recordset_id,
+ 'recordset_name': recordset.name,
+ 'record_id': record_id,
+ 'tenant_id': zone.tenant_id
+ }
policy.check('diagnostics_sync_record', context, target)
@@ -1870,16 +2088,15 @@ class Service(service.RPCService):
'storage': storage_status
}
- def _determine_floatingips(self, context, fips, records=None,
- tenant_id=None):
+ def _determine_floatingips(self, context, fips, project_id=None):
"""
- Given the context or tenant, records and fips it returns the valid
- floatingips either with a associated record or not. Deletes invalid
+ Given the context or project, and fips it returns the valid
+ floating ips either with an associated record or not. Deletes invalid
records also.
- Returns a list of tuples with FloatingIPs and it's Record.
+ Returns a list of tuples with FloatingIPs and its Record.
"""
- tenant_id = tenant_id or context.project_id
+ project_id = project_id or context.project_id
elevated_context = context.elevated(all_tenants=True,
edit_managed_records=True)
@@ -1893,23 +2110,24 @@ class Service(service.RPCService):
invalid = []
data = {}
- # First populate the list of FIPS
+ # First populate the list of FIPS.
for fip_key, fip_values in fips.items():
# Check if the FIP has a record
record = records.get(fip_values['address'])
- # NOTE: Now check if it's owned by the tenant that actually has the
- # FIP in the external service and if not invalidate it (delete it)
- # thus not returning it with in the tuple with the FIP, but None..
+ # NOTE: Now check if it's owned by the project that actually has
+ # the FIP in the external service and if not invalidate it
+ # (delete it) thus not returning it with in the tuple with the FIP,
+ # but None.
if record:
- record_tenant = record['managed_tenant_id']
-
- if record_tenant != tenant_id:
- msg = "Invalid FloatingIP %s belongs to %s but record " \
- "owner %s"
- LOG.debug(msg, fip_key, tenant_id, record_tenant)
+ record_project = record['managed_tenant_id']
+ if record_project != project_id:
+ LOG.debug(
+ 'Invalid FloatingIP %s belongs to %s but record '
+ 'project %s', fip_key, project_id, record_project
+ )
invalid.append(record)
record = None
data[fip_key] = (fip_values, record)
@@ -1920,66 +2138,17 @@ class Service(service.RPCService):
"""
Utility method to delete a list of records.
"""
+ if not records:
+ return
+
elevated_context = context.elevated(all_tenants=True,
edit_managed_records=True)
- if len(records) > 0:
- for r in records:
- msg = 'Deleting record %s for FIP %s'
- LOG.debug(msg, r['id'], r['managed_resource_id'])
- self.delete_record(elevated_context, r['zone_id'],
- r['recordset_id'], r['id'])
-
- def _format_floatingips(self, context, data, recordsets=None):
- """
- Given a list of FloatingIP and Record tuples we look through creating
- a new dict of FloatingIPs
- """
- elevated_context = context.elevated(all_tenants=True)
-
- fips = objects.FloatingIPList()
- for key, value in data.items():
- fip, record = value
-
- fip_ptr = objects.FloatingIP().from_dict({
- 'address': fip['address'],
- 'id': fip['id'],
- 'region': fip['region'],
- 'ptrdname': None,
- 'ttl': None,
- 'description': None,
- 'action': None,
- 'status': 'ACTIVE'
- })
-
- # TTL population requires a present record in order to find the
- # RS or Zone
- if record:
- fip_ptr['action'] = record.action
- fip_ptr['status'] = record.status
-
- # We can have a recordset dict passed in
- if (recordsets is not None and
- record['recordset_id'] in recordsets):
- recordset = recordsets[record['recordset_id']]
- else:
- recordset = self.storage.get_recordset(
- elevated_context, record['recordset_id'])
-
- if recordset['ttl'] is not None:
- fip_ptr['ttl'] = recordset['ttl']
- else:
- zone = self.get_zone(
- elevated_context, record['zone_id'])
- fip_ptr['ttl'] = zone['ttl']
-
- fip_ptr['ptrdname'] = record['data']
- fip_ptr['description'] = record['description']
- else:
- LOG.debug("No record information found for %s", value[0]['id'])
-
- # Store the "fip_record" with the region and it's id as key
- fips.append(fip_ptr)
- return fips
+ for record in records:
+ LOG.debug('Deleting record %s for FIP %s',
+ record['id'], record['managed_resource_id'])
+ self._delete_ptr_record(
+ elevated_context, record
+ )
def _list_floatingips(self, context, region=None):
data = self.network_api.list_floatingips(context, region=region)
@@ -1996,9 +2165,11 @@ class Service(service.RPCService):
def _get_floatingip(self, context, region, floatingip_id, fips):
if (region, floatingip_id) not in fips:
- msg = 'FloatingIP %s in %s is not associated for tenant "%s"' % \
- (floatingip_id, region, context.project_id)
- raise exceptions.NotFound(msg)
+ raise exceptions.NotFound(
+ 'FloatingIP %s in %s is not associated for project "%s"' % (
+ floatingip_id, region, context.project_id
+ )
+ )
return fips[region, floatingip_id]
# PTR ops
@@ -2014,14 +2185,15 @@ class Service(service.RPCService):
elevated_context = context.elevated(all_tenants=True,
edit_managed_records=True)
- tenant_fips = self._list_floatingips(context)
+ project_floatingips = self._list_floatingips(context)
valid, invalid = self._determine_floatingips(
- elevated_context, tenant_fips)
+ elevated_context, project_floatingips
+ )
self._invalidate_floatingips(context, invalid)
- return self._format_floatingips(context, valid)
+ return self._create_floating_ip_list(context, valid)
@rpc.expected_exceptions()
def get_floatingip(self, context, region, floatingip_id):
@@ -2037,11 +2209,12 @@ class Service(service.RPCService):
result = self._list_to_dict([fip], keys=['region', 'id'])
valid, invalid = self._determine_floatingips(
- elevated_context, result)
+ elevated_context, result
+ )
self._invalidate_floatingips(context, invalid)
- return self._format_floatingips(context, valid)[0]
+ return self._create_floating_ip_list(context, valid)[0]
def _set_floatingip_reverse(self, context, region, floatingip_id, values):
"""
@@ -2051,16 +2224,18 @@ class Service(service.RPCService):
elevated_context = context.elevated(all_tenants=True,
edit_managed_records=True)
- tenant_fips = self._list_floatingips(context, region=region)
+ project_fips = self._list_floatingips(context, region=region)
- fip = self._get_floatingip(context, region, floatingip_id, tenant_fips)
+ fip = self._get_floatingip(
+ context, region, floatingip_id, project_fips
+ )
zone_name = self.network_api.address_zone(fip['address'])
- # NOTE: Find existing zone or create it..
try:
zone = self.storage.find_zone(
- elevated_context, {'name': zone_name})
+ elevated_context, {'name': zone_name}
+ )
except exceptions.ZoneNotFound:
LOG.info(
'Creating zone for %(fip_id)s:%(region)s - %(fip_addr)s '
@@ -2083,44 +2258,16 @@ class Service(service.RPCService):
}
zone = self.create_zone(
- elevated_context, objects.Zone(**zone_values))
+ elevated_context, objects.Zone(**zone_values)
+ )
record_name = self.network_api.address_name(fip['address'])
-
recordset_values = {
'name': record_name,
+ 'zone_id': zone['id'],
'type': 'PTR',
- 'ttl': values.get('ttl', None)
+ 'ttl': values.get('ttl')
}
-
- try:
- recordset = self.find_recordset(
- elevated_context, {'name': record_name, 'type': 'PTR'})
-
- # Update the recordset values
- recordset.name = recordset_values['name']
- recordset.type = recordset_values['type']
- recordset.ttl = recordset_values['ttl']
- recordset.zone_id = zone['id']
- recordset = self.update_recordset(
- elevated_context,
- recordset=recordset)
-
- # Delete the current records for the recordset
- LOG.debug("Removing old Record")
- for record in recordset.records:
- self.delete_record(
- elevated_context,
- zone_id=recordset['zone_id'],
- recordset_id=recordset['id'],
- record_id=record['id'])
-
- except exceptions.RecordSetNotFound:
- recordset = self.create_recordset(
- elevated_context,
- zone_id=zone['id'],
- recordset=objects.RecordSet(**recordset_values))
-
record_values = {
'data': values['ptrdname'],
'description': values['description'],
@@ -2131,16 +2278,14 @@ class Service(service.RPCService):
'managed_resource_type': 'ptr:floatingip',
'managed_tenant_id': context.project_id
}
-
- record = self.create_record(
- elevated_context,
- zone_id=zone['id'],
- recordset_id=recordset['id'],
- record=objects.Record(**record_values))
-
- return self._format_floatingips(
- context, {(region, floatingip_id): (fip, record)},
- {recordset['id']: recordset})[0]
+ record = objects.Record(**record_values)
+ recordset = self._replace_or_create_ptr_recordset(
+ elevated_context, record,
+ **recordset_values
+ )
+ return self._create_floating_ip(
+ context, fip, record, zone=zone, recordset=recordset
+ )
def _unset_floatingip_reverse(self, context, region, floatingip_id):
"""
@@ -2160,16 +2305,121 @@ class Service(service.RPCService):
try:
record = self.storage.find_record(
- elevated_context, criterion=criterion)
+ elevated_context, criterion=criterion
+ )
except exceptions.RecordNotFound:
msg = 'No such FloatingIP %s:%s' % (region, floatingip_id)
raise exceptions.NotFound(msg)
- self.delete_record(
- elevated_context,
- zone_id=record['zone_id'],
- recordset_id=record['recordset_id'],
- record_id=record['id'])
+ self._delete_ptr_record(
+ elevated_context, record
+ )
+
+ def _create_floating_ip(self, context, fip, record,
+ zone=None, recordset=None):
+ """
+ Creates a FloatingIP based on floating ip and record data.
+ """
+ elevated_context = context.elevated(all_tenants=True)
+ fip_ptr = objects.FloatingIP().from_dict({
+ 'address': fip['address'],
+ 'id': fip['id'],
+ 'region': fip['region'],
+ 'ptrdname': None,
+ 'ttl': None,
+ 'description': None,
+ 'action': None,
+ 'status': 'ACTIVE'
+ })
+ # TTL population requires a present record in order to find the
+ # RS or Zone.
+ if record and record.action != 'DELETE':
+ if not recordset:
+ recordset = self.storage.get_recordset(
+ elevated_context, record.recordset_id)
+
+ fip_ptr['action'] = recordset.action
+ fip_ptr['status'] = recordset.status
+
+ if recordset.ttl is not None:
+ fip_ptr['ttl'] = recordset.ttl
+ else:
+ if not zone:
+ zone = self.get_zone(elevated_context,
+ record.zone_id)
+ fip_ptr['ttl'] = zone.ttl
+
+ fip_ptr['ptrdname'] = record.data
+ fip_ptr['description'] = record.description
+ else:
+ LOG.debug('No record information found for %s', fip['id'])
+
+ return fip_ptr
+
+ def _create_floating_ip_list(self, context, data):
+ """
+ Creates a FloatingIPList based on floating ips and records data.
+ """
+ fips = objects.FloatingIPList()
+ for key, value in data.items():
+ fip, record = value
+ fip_ptr = self._create_floating_ip(context, fip, record)
+ fips.append(fip_ptr)
+ return fips
+
+ def _delete_ptr_record(self, context, record):
+ try:
+ recordset = self.get_recordset(
+ context, record.zone_id, record.recordset_id
+ )
+
+ if record not in recordset.records:
+ LOG.debug(
+ 'PTR Record %s not found in recordset %s',
+ record.id, record.recordset_id
+ )
+ return
+
+ recordset.records.remove(record)
+
+ if not recordset.records:
+ self.delete_recordset(
+ context, record.zone_id, record.recordset_id
+ )
+ return
+
+ recordset.validate()
+ self.update_recordset(context, recordset)
+ except exceptions.RecordSetNotFound:
+ pass
+
+ def _replace_or_create_ptr_recordset(self, context, record, zone_id,
+ name, type, ttl=None):
+ try:
+ recordset = self.find_recordset(context, {
+ 'zone_id': zone_id,
+ 'name': name,
+ 'type': type,
+ })
+ recordset.ttl = ttl
+ recordset.records = objects.RecordList(objects=[record])
+ recordset.validate()
+ recordset = self.update_recordset(
+ context, recordset
+ )
+ except exceptions.RecordSetNotFound:
+ values = {
+ 'name': name,
+ 'type': type,
+ 'ttl': ttl,
+ }
+ recordset = objects.RecordSet(**values)
+ recordset.records = objects.RecordList(objects=[record])
+ recordset.validate()
+ recordset = self.create_recordset(
+ context, zone_id, recordset
+ )
+ return recordset
@rpc.expected_exceptions()
@transaction
@@ -2178,12 +2428,15 @@ class Service(service.RPCService):
We strictly see if values['ptrdname'] is str or None and set / unset
the requested FloatingIP's PTR record based on that.
"""
- if 'ptrdname' in values.obj_what_changed() and\
- values['ptrdname'] is None:
- self._unset_floatingip_reverse(context, region, floatingip_id)
+ if ('ptrdname' in values.obj_what_changed() and
+ values['ptrdname'] is None):
+ self._unset_floatingip_reverse(
+ context, region, floatingip_id
+ )
elif isinstance(values['ptrdname'], str):
return self._set_floatingip_reverse(
- context, region, floatingip_id, values)
+ context, region, floatingip_id, values
+ )
# Blacklisted zones
@rpc.expected_exceptions()
@@ -2257,6 +2510,8 @@ class Service(service.RPCService):
policy.check('create_pool', context)
+ self._is_valid_project_id(pool.tenant_id)
+
created_pool = self.storage.create_pool(context, pool)
return created_pool
@@ -2288,46 +2543,49 @@ class Service(service.RPCService):
@notification('dns.pool.update')
@transaction
def update_pool(self, context, pool):
-
policy.check('update_pool', context)
# If there is a nameserver, then additional steps need to be done
# Since these are treated as mutable objects, we're only going to
# be comparing the nameserver.value which is the FQDN
- if pool.obj_attr_is_set('ns_records'):
- elevated_context = context.elevated(all_tenants=True)
+ elevated_context = context.elevated(all_tenants=True)
+
+ # TODO(kiall): ListObjects should be able to give you their
+ # original set of values.
+ original_pool_ns_records = self._get_pool_ns_records(
+ context, pool.id
+ )
+
+ updated_pool = self.storage.update_pool(context, pool)
- # TODO(kiall): ListObjects should be able to give you their
- # original set of values.
- original_pool_ns_records = self._get_pool_ns_records(context,
- pool.id)
- # Find the current NS hostnames
- existing_ns = set([n.hostname for n in original_pool_ns_records])
+ if not pool.obj_attr_is_set('ns_records'):
+ return updated_pool
- # Find the desired NS hostnames
- request_ns = set([n.hostname for n in pool.ns_records])
+ # Find the current NS hostnames
+ existing_ns = set([n.hostname for n in original_pool_ns_records])
- # Get the NS's to be created and deleted, ignoring the ones that
- # are in both sets, as those haven't changed.
- # TODO(kiall): Factor in priority
- create_ns = request_ns.difference(existing_ns)
- delete_ns = existing_ns.difference(request_ns)
+ # Find the desired NS hostnames
+ request_ns = set([n.hostname for n in pool.ns_records])
- updated_pool = self.storage.update_pool(context, pool)
+ # Get the NS's to be created and deleted, ignoring the ones that
+ # are in both sets, as those haven't changed.
+ # TODO(kiall): Factor in priority
+ create_ns = request_ns.difference(existing_ns)
+ delete_ns = existing_ns.difference(request_ns)
# After the update, handle new ns_records
- for ns in create_ns:
+ for ns_record in create_ns:
# Create new NS recordsets for every zone
zones = self.find_zones(
context=elevated_context,
criterion={'pool_id': pool.id, 'action': '!DELETE'})
- for z in zones:
- self._add_ns(elevated_context, z, ns)
+ for zone in zones:
+ self._add_ns(elevated_context, zone, ns_record)
# Then handle the ns_records to delete
- for ns in delete_ns:
+ for ns_record in delete_ns:
# Cannot delete the last nameserver, so verify that first.
- if len(pool.ns_records) == 0:
+ if not pool.ns_records:
raise exceptions.LastServerDeleteNotAllowed(
"Not allowed to delete last of servers"
)
@@ -2335,9 +2593,10 @@ class Service(service.RPCService):
# Delete the NS record for every zone
zones = self.find_zones(
context=elevated_context,
- criterion={'pool_id': pool.id})
- for z in zones:
- self._delete_ns(elevated_context, z, ns)
+ criterion={'pool_id': pool.id}
+ )
+ for zone in zones:
+ self._delete_ns(elevated_context, zone, ns_record)
return updated_pool
@@ -2508,9 +2767,11 @@ class Service(service.RPCService):
if zone.action == 'DELETE':
raise exceptions.BadRequest('Can not transfer a deleting zone')
- target = {
- 'tenant_id': zone.tenant_id,
- }
+ if policy.enforce_new_defaults():
+ target = {constants.RBAC_PROJECT_ID: zone.tenant_id}
+ else:
+ target = {'tenant_id': zone.tenant_id}
+
policy.check('create_zone_transfer_request', context, target)
zone_transfer_request.key = self._transfer_key_generator()
@@ -2518,6 +2779,8 @@ class Service(service.RPCService):
if zone_transfer_request.tenant_id is None:
zone_transfer_request.tenant_id = context.project_id
+ self._is_valid_project_id(zone_transfer_request.tenant_id)
+
created_zone_transfer_request = \
self.storage.create_zone_transfer_request(
context, zone_transfer_request)
@@ -2534,10 +2797,18 @@ class Service(service.RPCService):
elevated_context, zone_transfer_request_id)
LOG.info('Target Tenant ID found - using scoped policy')
- target = {
- 'target_tenant_id': zone_transfer_request.target_tenant_id,
- 'tenant_id': zone_transfer_request.tenant_id,
- }
+ if policy.enforce_new_defaults():
+ target = {
+ constants.RBAC_TARGET_PROJECT_ID: (zone_transfer_request.
+ target_tenant_id),
+ constants.RBAC_PROJECT_ID: zone_transfer_request.tenant_id,
+ }
+ else:
+ target = {
+ 'target_tenant_id': zone_transfer_request.target_tenant_id,
+ 'tenant_id': zone_transfer_request.tenant_id,
+ }
+
policy.check('get_zone_transfer_request', context, target)
return zone_transfer_request
@@ -2557,9 +2828,15 @@ class Service(service.RPCService):
@rpc.expected_exceptions()
def find_zone_transfer_request(self, context, criterion):
- target = {
- 'tenant_id': context.project_id,
- }
+ if policy.enforce_new_defaults():
+ target = {
+ constants.RBAC_PROJECT_ID: context.project_id,
+ }
+ else:
+ target = {
+ 'tenant_id': context.project_id,
+ }
+
policy.check('find_zone_transfer_request', context, target)
return self.storage.find_zone_transfer_requests(context, criterion)
@@ -2571,9 +2848,14 @@ class Service(service.RPCService):
if 'zone_id' in zone_transfer_request.obj_what_changed():
raise exceptions.InvalidOperation('Zone cannot be changed')
- target = {
- 'tenant_id': zone_transfer_request.tenant_id,
- }
+ if policy.enforce_new_defaults():
+ target = {
+ constants.RBAC_PROJECT_ID: zone_transfer_request.tenant_id,
+ }
+ else:
+ target = {
+ 'tenant_id': zone_transfer_request.tenant_id,
+ }
policy.check('update_zone_transfer_request', context, target)
request = self.storage.update_zone_transfer_request(
context, zone_transfer_request)
@@ -2587,9 +2869,14 @@ class Service(service.RPCService):
# Get zone transfer request
zone_transfer_request = self.storage.get_zone_transfer_request(
context, zone_transfer_request_id)
- target = {
- 'tenant_id': zone_transfer_request.tenant_id,
- }
+
+ if policy.enforce_new_defaults():
+ target = {
+ constants.RBAC_PROJECT_ID: zone_transfer_request.tenant_id
+ }
+ else:
+ target = {'tenant_id': zone_transfer_request.tenant_id}
+
policy.check('delete_zone_transfer_request', context, target)
return self.storage.delete_zone_transfer_request(
context,
@@ -2616,14 +2903,23 @@ class Service(service.RPCService):
raise exceptions.IncorrectZoneTransferKey(
'Key does not match stored key for request')
- target = {
- 'target_tenant_id': zone_transfer_request.target_tenant_id
- }
+ if policy.enforce_new_defaults():
+ target = {
+ constants.RBAC_TARGET_PROJECT_ID: (zone_transfer_request.
+ target_tenant_id)
+ }
+ else:
+ target = {
+ 'target_tenant_id': zone_transfer_request.target_tenant_id
+ }
+
policy.check('create_zone_transfer_accept', context, target)
if zone_transfer_accept.tenant_id is None:
zone_transfer_accept.tenant_id = context.project_id
+ self._is_valid_project_id(zone_transfer_accept.tenant_id)
+
created_zone_transfer_accept = \
self.storage.create_zone_transfer_accept(
context, zone_transfer_accept)
@@ -2666,9 +2962,15 @@ class Service(service.RPCService):
zone_transfer_accept = self.storage.get_zone_transfer_accept(
context, zone_transfer_accept_id)
- target = {
- 'tenant_id': zone_transfer_accept.tenant_id
- }
+ if policy.enforce_new_defaults():
+ target = {
+ constants.RBAC_PROJECT_ID: zone_transfer_accept.tenant_id
+ }
+ else:
+ target = {
+ 'tenant_id': zone_transfer_accept.tenant_id
+ }
+
policy.check('get_zone_transfer_accept', context, target)
return zone_transfer_accept
@@ -2690,9 +2992,14 @@ class Service(service.RPCService):
@notification('dns.zone_transfer_accept.update')
@transaction
def update_zone_transfer_accept(self, context, zone_transfer_accept):
- target = {
- 'tenant_id': zone_transfer_accept.tenant_id
- }
+ if policy.enforce_new_defaults():
+ target = {
+ constants.RBAC_PROJECT_ID: zone_transfer_accept.tenant_id
+ }
+ else:
+ target = {
+ 'tenant_id': zone_transfer_accept.tenant_id
+ }
policy.check('update_zone_transfer_accept', context, target)
accept = self.storage.update_zone_transfer_accept(
context, zone_transfer_accept)
@@ -2707,9 +3014,15 @@ class Service(service.RPCService):
zt_accept = self.storage.get_zone_transfer_accept(
context, zone_transfer_accept_id)
- target = {
- 'tenant_id': zt_accept.tenant_id
- }
+ if policy.enforce_new_defaults():
+ target = {
+ constants.RBAC_PROJECT_ID: zt_accept.tenant_id
+ }
+ else:
+ target = {
+ 'tenant_id': zt_accept.tenant_id
+ }
+
policy.check('delete_zone_transfer_accept', context, target)
return self.storage.delete_zone_transfer_accept(
context,
@@ -2719,9 +3032,15 @@ class Service(service.RPCService):
@rpc.expected_exceptions()
@notification('dns.zone_import.create')
def create_zone_import(self, context, request_body):
- target = {'tenant_id': context.project_id}
+ if policy.enforce_new_defaults():
+ target = {constants.RBAC_PROJECT_ID: context.project_id}
+ else:
+ target = {'tenant_id': context.project_id}
+
policy.check('create_zone_import', context, target)
+ self._is_valid_project_id(context.project_id)
+
values = {
'status': 'PENDING',
'message': None,
@@ -2732,59 +3051,49 @@ class Service(service.RPCService):
zone_import = objects.ZoneImport(**values)
created_zone_import = self.storage.create_zone_import(context,
- zone_import)
+ zone_import)
self.tg.add_thread(self._import_zone, context, created_zone_import,
- request_body)
+ request_body)
return created_zone_import
def _import_zone(self, context, zone_import, request_body):
-
- def _import(self, context, zone_import, request_body):
- # Dnspython needs a str instead of a unicode object
- zone = None
- try:
- dnspython_zone = dnszone.from_text(
- request_body,
- # Don't relativize, or we end up with '@' record names.
- relativize=False,
- # Don't check origin, we allow missing NS records
- # (missing SOA records are taken care of in _create_zone).
- check_origin=False)
- zone = dnsutils.from_dnspython_zone(dnspython_zone)
- zone.type = 'PRIMARY'
-
- for rrset in list(zone.recordsets):
- if rrset.type == 'SOA':
- zone.recordsets.remove(rrset)
- # subdomain NS records should be kept
- elif rrset.type == 'NS' and rrset.name == zone.name:
- zone.recordsets.remove(rrset)
-
- except dnszone.UnknownOrigin:
- zone_import.message = ('The $ORIGIN statement is required and'
- ' must be the first statement in the'
- ' zonefile.')
- zone_import.status = 'ERROR'
- except dnsexception.SyntaxError:
- zone_import.message = 'Malformed zonefile.'
- zone_import.status = 'ERROR'
- except exceptions.BadRequest:
- zone_import.message = 'An SOA record is required.'
- zone_import.status = 'ERROR'
- except Exception as e:
- LOG.exception('An undefined error occurred during zone import')
- msg = 'An undefined error occurred. %s'\
- % str(e)[:130]
- zone_import.message = msg
- zone_import.status = 'ERROR'
-
- return zone, zone_import
-
- # Execute the import in a real Python thread
- zone, zone_import = tpool.execute(_import, self, context,
- zone_import, request_body)
+ zone = None
+ try:
+ dnspython_zone = dnszone.from_text(
+ request_body,
+ # Don't relativize, or we end up with '@' record names.
+ relativize=False,
+ # Don't check origin, we allow missing NS records
+ # (missing SOA records are taken care of in _create_zone).
+ check_origin=False)
+ zone = dnsutils.from_dnspython_zone(dnspython_zone)
+ zone.type = 'PRIMARY'
+ for rrset in list(zone.recordsets):
+ if rrset.type == 'SOA':
+ zone.recordsets.remove(rrset)
+ # subdomain NS records should be kept
+ elif rrset.type == 'NS' and rrset.name == zone.name:
+ zone.recordsets.remove(rrset)
+ except dnszone.UnknownOrigin:
+ zone_import.message = (
+ 'The $ORIGIN statement is required and must be the first '
+ 'statement in the zonefile.'
+ )
+ zone_import.status = 'ERROR'
+ except dnsexception.SyntaxError:
+ zone_import.message = 'Malformed zonefile.'
+ zone_import.status = 'ERROR'
+ except exceptions.BadRequest:
+ zone_import.message = 'An SOA record is required.'
+ zone_import.status = 'ERROR'
+ except Exception as e:
+ LOG.exception('An undefined error occurred during zone import')
+ zone_import.message = (
+ 'An undefined error occurred. %s' % str(e)[:130]
+ )
+ zone_import.status = 'ERROR'
# If the zone import was valid, create the zone
if zone_import.status != 'ERROR':
@@ -2792,28 +3101,38 @@ class Service(service.RPCService):
zone = self.create_zone(context, zone)
zone_import.status = 'COMPLETE'
zone_import.zone_id = zone.id
- zone_import.message = '%(name)s imported' % {'name':
- zone.name}
+ zone_import.message = (
+ '%(name)s imported' % {'name': zone.name}
+ )
except exceptions.DuplicateZone:
zone_import.status = 'ERROR'
zone_import.message = 'Duplicate zone.'
except exceptions.InvalidTTL as e:
zone_import.status = 'ERROR'
zone_import.message = str(e)
+ except exceptions.OverQuota:
+ zone_import.status = 'ERROR'
+ zone_import.message = 'Quota exceeded during zone import.'
except Exception as e:
- LOG.exception('An undefined error occurred during zone '
- 'import creation')
- msg = 'An undefined error occurred. %s'\
- % str(e)[:130]
- zone_import.message = msg
+ LOG.exception(
+ 'An undefined error occurred during zone import creation'
+ )
+ zone_import.message = (
+ 'An undefined error occurred. %s' % str(e)[:130]
+ )
zone_import.status = 'ERROR'
self.update_zone_import(context, zone_import)
@rpc.expected_exceptions()
def find_zone_imports(self, context, criterion=None, marker=None,
- limit=None, sort_key=None, sort_dir=None):
- target = {'tenant_id': context.project_id}
+ limit=None, sort_key=None, sort_dir=None):
+
+ if policy.enforce_new_defaults():
+ target = {constants.RBAC_PROJECT_ID: context.project_id}
+ else:
+ target = {'tenant_id': context.project_id}
+
policy.check('find_zone_imports', context, target)
if not criterion:
@@ -2828,16 +3147,22 @@ class Service(service.RPCService):
@rpc.expected_exceptions()
def get_zone_import(self, context, zone_import_id):
- target = {'tenant_id': context.project_id}
+
+ if policy.enforce_new_defaults():
+ target = {constants.RBAC_PROJECT_ID: context.project_id}
+ else:
+ target = {'tenant_id': context.project_id}
+
policy.check('get_zone_import', context, target)
return self.storage.get_zone_import(context, zone_import_id)
@rpc.expected_exceptions()
@notification('dns.zone_import.update')
def update_zone_import(self, context, zone_import):
- target = {
- 'tenant_id': zone_import.tenant_id,
- }
+ if policy.enforce_new_defaults():
+ target = {constants.RBAC_PROJECT_ID: zone_import.tenant_id}
+ else:
+ target = {'tenant_id': zone_import.tenant_id}
policy.check('update_zone_import', context, target)
return self.storage.update_zone_import(context, zone_import)
@@ -2846,10 +3171,18 @@ class Service(service.RPCService):
@notification('dns.zone_import.delete')
@transaction
def delete_zone_import(self, context, zone_import_id):
- target = {
- 'zone_import_id': zone_import_id,
- 'tenant_id': context.project_id
- }
+
+ if policy.enforce_new_defaults():
+ target = {
+ 'zone_import_id': zone_import_id,
+ constants.RBAC_PROJECT_ID: context.project_id
+ }
+ else:
+ target = {
+ 'zone_import_id': zone_import_id,
+ 'tenant_id': context.project_id
+ }
+
policy.check('delete_zone_import', context, target)
zone_import = self.storage.delete_zone_import(context, zone_import_id)
@@ -2863,9 +3196,15 @@ class Service(service.RPCService):
# Try getting the zone to ensure it exists
zone = self.storage.get_zone(context, zone_id)
- target = {'tenant_id': context.project_id}
+ if policy.enforce_new_defaults():
+ target = {constants.RBAC_PROJECT_ID: context.project_id}
+ else:
+ target = {'tenant_id': context.project_id}
+
policy.check('create_zone_export', context, target)
+ self._is_valid_project_id(context.project_id)
+
values = {
'status': 'PENDING',
'message': None,
@@ -2886,7 +3225,11 @@ class Service(service.RPCService):
@rpc.expected_exceptions()
def find_zone_exports(self, context, criterion=None, marker=None,
limit=None, sort_key=None, sort_dir=None):
- target = {'tenant_id': context.project_id}
+
+ if policy.enforce_new_defaults():
+ target = {constants.RBAC_PROJECT_ID: context.project_id}
+ else:
+ target = {'tenant_id': context.project_id}
policy.check('find_zone_exports', context, target)
if not criterion:
@@ -2901,7 +3244,12 @@ class Service(service.RPCService):
@rpc.expected_exceptions()
def get_zone_export(self, context, zone_export_id):
- target = {'tenant_id': context.project_id}
+
+ if policy.enforce_new_defaults():
+ target = {constants.RBAC_PROJECT_ID: context.project_id}
+ else:
+ target = {'tenant_id': context.project_id}
+
policy.check('get_zone_export', context, target)
return self.storage.get_zone_export(context, zone_export_id)
@@ -2909,9 +3257,12 @@ class Service(service.RPCService):
@rpc.expected_exceptions()
@notification('dns.zone_export.update')
def update_zone_export(self, context, zone_export):
- target = {
- 'tenant_id': zone_export.tenant_id,
- }
+
+ if policy.enforce_new_defaults():
+ target = {constants.RBAC_PROJECT_ID: zone_export.tenant_id}
+ else:
+ target = {'tenant_id': zone_export.tenant_id}
+
policy.check('update_zone_export', context, target)
return self.storage.update_zone_export(context, zone_export)
@@ -2920,10 +3271,18 @@ class Service(service.RPCService):
@notification('dns.zone_export.delete')
@transaction
def delete_zone_export(self, context, zone_export_id):
- target = {
- 'zone_export_id': zone_export_id,
- 'tenant_id': context.project_id
- }
+
+ if policy.enforce_new_defaults():
+ target = {
+ 'zone_export_id': zone_export_id,
+ constants.RBAC_PROJECT_ID: context.project_id
+ }
+ else:
+ target = {
+ 'zone_export_id': zone_export_id,
+ 'tenant_id': context.project_id
+ }
+
policy.check('delete_zone_export', context, target)
zone_export = self.storage.delete_zone_export(context, zone_export_id)
diff --git a/designate/common/constants.py b/designate/common/constants.py
index 295ee8b7..3857e02f 100644
--- a/designate/common/constants.py
+++ b/designate/common/constants.py
@@ -22,3 +22,7 @@ QUOTA_ZONE_RECORDSETS = 'zone_recordsets'
QUOTA_ZONES = 'zones'
VALID_QUOTAS = [QUOTA_API_EXPORT_SIZE, QUOTA_RECORDSET_RECORDS,
QUOTA_ZONE_RECORDS, QUOTA_ZONE_RECORDSETS, QUOTA_ZONES]
+
+# RBAC related constants
+RBAC_PROJECT_ID = 'project_id'
+RBAC_TARGET_PROJECT_ID = 'target_project_id'
diff --git a/designate/common/policies/base.py b/designate/common/policies/base.py
index adb2a6c6..c09298db 100644
--- a/designate/common/policies/base.py
+++ b/designate/common/policies/base.py
@@ -12,17 +12,14 @@
# License for the specific language governing permissions and limitations
# under the License.
-
+from oslo_log import versionutils
from oslo_policy import policy
-RULE_ADMIN_OR_OWNER = 'rule:admin_or_owner'
-RULE_ADMIN = 'rule:admin'
-RULE_ZONE_PRIMARY_OR_ADMIN = \
- "('PRIMARY':%(zone_type)s and rule:admin_or_owner) "\
- "OR ('SECONDARY':%(zone_type)s AND is_admin:True)"
-RULE_ZONE_TRANSFER = "rule:admin_or_owner OR tenant:%(target_tenant_id)s " \
- "OR None:%(target_tenant_id)s"
+DEPRECATED_REASON = """
+The designate API now supports system scope and default roles.
+"""
+
RULE_ANY = "@"
# Generic policy check string for system administrators. These are the people
@@ -59,37 +56,56 @@ SYSTEM_OR_PROJECT_READER = (
'(' + SYSTEM_READER + ') or (' + PROJECT_READER + ')'
)
+# Designate specific "secure RBAC" rules
+ALL_TENANTS = 'True:%(all_tenants)s'
+
+ALL_TENANTS_READER = ALL_TENANTS + ' and role:reader'
+
+SYSTEM_OR_PROJECT_READER_OR_ALL_TENANTS_READER = (
+ '(' + SYSTEM_READER + ') or (' + PROJECT_READER + ') or (' +
+ ALL_TENANTS_READER + ')'
+)
+
+RULE_ZONE_TRANSFER = (
+ '(' + SYSTEM_ADMIN_OR_PROJECT_MEMBER + ') or '
+ 'project_id:%(target_project_id)s or '
+ 'None:%(target_project_id)s')
+
+
+# Deprecated in Wallaby as part of the "secure RBAC" work.
+# TODO(johnsom) remove when the deprecated RBAC rules are removed.
+RULE_ADMIN = 'rule:admin'
+RULE_ADMIN_OR_OWNER = 'rule:admin_or_owner'
+LEGACY_RULE_ZONE_TRANSFER = "rule:admin_or_owner OR " \
+ "tenant:%(target_tenant_id)s " \
+ "OR None:%(target_tenant_id)s"
+
+deprecated_default = policy.DeprecatedRule(
+ name="default",
+ check_str=RULE_ADMIN_OR_OWNER,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.WALLABY
+)
+
rules = [
+ # TODO(johnsom) remove when the deprecated RBAC rules are removed.
policy.RuleDefault(
name="admin",
check_str="role:admin or is_admin:True"),
- policy.RuleDefault(
- name="primary_zone",
- check_str="target.zone_type:SECONDARY"),
+ # TODO(johnsom) remove when the deprecated RBAC rules are removed.
policy.RuleDefault(
name="owner",
check_str="tenant:%(tenant_id)s"),
+ # TODO(johnsom) remove when the deprecated RBAC rules are removed.
policy.RuleDefault(
name="admin_or_owner",
check_str="rule:admin or rule:owner"),
+
+ # Default policy
policy.RuleDefault(
name="default",
- check_str="rule:admin_or_owner"),
- policy.RuleDefault(
- name="target",
- check_str="tenant:%(target_tenant_id)s"),
- policy.RuleDefault(
- name="owner_or_target",
- check_str="rule:target or rule:owner"),
- policy.RuleDefault(
- name="admin_or_owner_or_target",
- check_str="rule:owner_or_target or rule:admin"),
- policy.RuleDefault(
- name="admin_or_target",
- check_str="rule:admin or rule:target"),
- policy.RuleDefault(
- name="zone_primary_or_admin",
- check_str=RULE_ZONE_PRIMARY_OR_ADMIN)
+ check_str=SYSTEM_ADMIN_OR_PROJECT_MEMBER,
+ deprecated_rule=deprecated_default),
]
diff --git a/designate/common/policies/context.py b/designate/common/policies/context.py
index 08a528f3..81ab54d5 100644
--- a/designate/common/policies/context.py
+++ b/designate/common/policies/context.py
@@ -13,28 +13,62 @@
# under the License.
+from oslo_log import versionutils
from oslo_policy import policy
from designate.common.policies import base
+deprecated_all_tenants = policy.DeprecatedRule(
+ name="all_tenants",
+ check_str=base.RULE_ADMIN,
+ deprecated_reason=base.DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.WALLABY
+)
+deprecated_edit_managed_records = policy.DeprecatedRule(
+ name="edit_managed_records",
+ check_str=base.RULE_ADMIN,
+ deprecated_reason=base.DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.WALLABY
+)
+deprecated_use_low_ttl = policy.DeprecatedRule(
+ name="use_low_ttl",
+ check_str=base.RULE_ADMIN,
+ deprecated_reason=base.DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.WALLABY
+)
+deprecated_use_sudo = policy.DeprecatedRule(
+ name="use_sudo",
+ check_str=base.RULE_ADMIN,
+ deprecated_reason=base.DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.WALLABY
+)
+
rules = [
policy.RuleDefault(
name="all_tenants",
- check_str=base.RULE_ADMIN,
- description='Action on all tenants.'),
+ check_str=base.SYSTEM_ADMIN,
+ scope_types=['system'],
+ description='Action on all tenants.',
+ deprecated_rule=deprecated_all_tenants),
policy.RuleDefault(
name="edit_managed_records",
- check_str=base.RULE_ADMIN,
- description='Edit managed records.'),
+ check_str=base.SYSTEM_ADMIN,
+ scope_types=['system'],
+ description='Edit managed records.',
+ deprecated_rule=deprecated_edit_managed_records),
policy.RuleDefault(
name="use_low_ttl",
- check_str=base.RULE_ADMIN,
- description='Use low TTL.'),
+ check_str=base.SYSTEM_ADMIN,
+ scope_types=['system'],
+ description='Use low TTL.',
+ deprecated_rule=deprecated_use_low_ttl),
policy.RuleDefault(
name="use_sudo",
- check_str=base.RULE_ADMIN,
- description='Accept sudo from user to tenant.')
+ check_str=base.SYSTEM_ADMIN,
+ scope_types=['system'],
+ description='Accept sudo from user to tenant.',
+ deprecated_rule=deprecated_use_sudo)
]
diff --git a/designate/common/policies/diagnostics.py b/designate/common/policies/diagnostics.py
index 9b903231..55574bf5 100644
--- a/designate/common/policies/diagnostics.py
+++ b/designate/common/policies/diagnostics.py
@@ -12,29 +12,62 @@
# License for the specific language governing permissions and limitations
# under the License.
-
+from oslo_log import versionutils
from oslo_policy import policy
from designate.common.policies import base
+deprecated_diagnostics_ping = policy.DeprecatedRule(
+ name="diagnostics_ping",
+ check_str=base.RULE_ADMIN,
+ deprecated_reason=base.DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.WALLABY
+)
+deprecated_diagnostics_sync_zones = policy.DeprecatedRule(
+ name="diagnostics_sync_zones",
+ check_str=base.RULE_ADMIN,
+ deprecated_reason=base.DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.WALLABY
+)
+deprecated_diagnostics_sync_zone = policy.DeprecatedRule(
+ name="diagnostics_sync_zone",
+ check_str=base.RULE_ADMIN,
+ deprecated_reason=base.DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.WALLABY
+)
+deprecated_diagnostics_sync_record = policy.DeprecatedRule(
+ name="diagnostics_sync_record",
+ check_str=base.RULE_ADMIN,
+ deprecated_reason=base.DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.WALLABY
+)
+
rules = [
policy.RuleDefault(
name="diagnostics_ping",
- check_str=base.RULE_ADMIN,
- description='Diagnose ping.'),
+ check_str=base.SYSTEM_ADMIN,
+ scope_types=['system'],
+ description='Diagnose ping.',
+ deprecated_rule=deprecated_diagnostics_ping),
policy.RuleDefault(
name="diagnostics_sync_zones",
- check_str=base.RULE_ADMIN,
- description='Diagnose sync zones.'),
+ check_str=base.SYSTEM_ADMIN,
+ scope_types=['system'],
+ description='Diagnose sync zones.',
+ deprecated_rule=deprecated_diagnostics_sync_zones),
policy.RuleDefault(
name="diagnostics_sync_zone",
- check_str=base.RULE_ADMIN,
- description='Diagnose sync zone.'),
+ check_str=base.SYSTEM_ADMIN,
+ scope_types=['system'],
+ description='Diagnose sync zone.',
+ deprecated_rule=deprecated_diagnostics_sync_zone),
policy.RuleDefault(
name="diagnostics_sync_record",
- check_str=base.RULE_ADMIN,
- description='Diagnose sync record.')
+ check_str=base.SYSTEM_ADMIN,
+ scope_types=['system'],
+ description='Diagnose sync record.',
+ deprecated_rule=deprecated_diagnostics_sync_record)
]
diff --git a/designate/common/policies/quota.py b/designate/common/policies/quota.py
index f430d9a7..0ddb4459 100644
--- a/designate/common/policies/quota.py
+++ b/designate/common/policies/quota.py
@@ -50,7 +50,7 @@ deprecated_reset_quotas = policy.DeprecatedRule(
rules = [
policy.DocumentedRuleDefault(
name="get_quotas",
- check_str=base.SYSTEM_OR_PROJECT_READER,
+ check_str=base.SYSTEM_OR_PROJECT_READER_OR_ALL_TENANTS_READER,
scope_types=['system', 'project'],
description="View Current Project's Quotas.",
operations=[
diff --git a/designate/common/policies/recordset.py b/designate/common/policies/recordset.py
index e77025eb..6dad34fc 100644
--- a/designate/common/policies/recordset.py
+++ b/designate/common/policies/recordset.py
@@ -22,9 +22,15 @@ DEPRECATED_REASON = """
The record set API now supports system scope and default roles.
"""
+# Deprecated in Wallaby as part of the "secure RBAC" work.
+# TODO(johnsom) remove when the deprecated RBAC rules are removed.
+RULE_ZONE_PRIMARY_OR_ADMIN = (
+ "('PRIMARY':%(zone_type)s and rule:admin_or_owner) "
+ "OR ('SECONDARY':%(zone_type)s AND is_admin:True)")
+
deprecated_create_recordset = policy.DeprecatedRule(
name="create_recordset",
- check_str=base.RULE_ZONE_PRIMARY_OR_ADMIN,
+ check_str=RULE_ZONE_PRIMARY_OR_ADMIN,
deprecated_reason=DEPRECATED_REASON,
deprecated_since=versionutils.deprecated.WALLABY
)
@@ -40,15 +46,27 @@ deprecated_get_recordset = policy.DeprecatedRule(
deprecated_reason=DEPRECATED_REASON,
deprecated_since=versionutils.deprecated.WALLABY
)
+deprecated_find_recordset = policy.DeprecatedRule(
+ name="find_recordset",
+ check_str=base.RULE_ADMIN_OR_OWNER,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.WALLABY
+)
+deprecated_find_recordsets = policy.DeprecatedRule(
+ name="find_recordsets",
+ check_str=base.RULE_ADMIN_OR_OWNER,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.WALLABY
+)
deprecated_update_recordset = policy.DeprecatedRule(
name="update_recordset",
- check_str=base.RULE_ZONE_PRIMARY_OR_ADMIN,
+ check_str=RULE_ZONE_PRIMARY_OR_ADMIN,
deprecated_reason=DEPRECATED_REASON,
deprecated_since=versionutils.deprecated.WALLABY
)
deprecated_delete_recordset = policy.DeprecatedRule(
name="delete_recordset",
- check_str=base.RULE_ZONE_PRIMARY_OR_ADMIN,
+ check_str=RULE_ZONE_PRIMARY_OR_ADMIN,
deprecated_reason=DEPRECATED_REASON,
deprecated_since=versionutils.deprecated.WALLABY
)
@@ -69,7 +87,7 @@ SYSTEM_ADMIN_AND_SECONDARY_ZONE = (
'(' + base.SYSTEM_ADMIN + ') and (\'SECONDARY\':%(zone_type)s)'
)
-SYSTEM_ADMIN_OR_PROJECT_MEMBER = ''.join(
+SYSTEM_ADMIN_OR_PROJECT_MEMBER_ZONE_TYPE = ' or '.join(
[PROJECT_MEMBER_AND_PRIMARY_ZONE,
SYSTEM_ADMIN_AND_PRIMARY_ZONE,
SYSTEM_ADMIN_AND_SECONDARY_ZONE]
@@ -79,16 +97,13 @@ SYSTEM_ADMIN_OR_PROJECT_MEMBER = ''.join(
rules = [
policy.DocumentedRuleDefault(
name="create_recordset",
- check_str=SYSTEM_ADMIN_AND_SECONDARY_ZONE,
+ check_str=SYSTEM_ADMIN_OR_PROJECT_MEMBER_ZONE_TYPE,
scope_types=['system', 'project'],
description="Create Recordset",
operations=[
{
'path': '/v2/zones/{zone_id}/recordsets',
'method': 'POST'
- }, {
- 'path': '/v2/reverse/floatingips/{region}:{floatingip_id}',
- 'method': 'PATCH'
}
],
deprecated_rule=deprecated_create_recordset
@@ -108,35 +123,46 @@ rules = [
{
'path': '/v2/zones/{zone_id}/recordsets/{recordset_id}',
'method': 'GET'
- }, {
- 'path': '/v2/zones/{zone_id}/recordsets/{recordset_id}',
- 'method': 'DELETE'
- }, {
- 'path': '/v2/zones/{zone_id}/recordsets/{recordset_id}',
- 'method': 'PUT'
}
],
deprecated_rule=deprecated_get_recordset
),
+ policy.RuleDefault(
+ name="find_recordset",
+ check_str=base.SYSTEM_OR_PROJECT_READER,
+ scope_types=['system', 'project'],
+ description="List a Recordset in a Zone",
+ deprecated_rule=deprecated_find_recordset
+ ),
+ policy.DocumentedRuleDefault(
+ name="find_recordsets",
+ check_str=base.SYSTEM_OR_PROJECT_READER,
+ scope_types=['system', 'project'],
+ description="List Recordsets in a Zone",
+ operations=[
+ {
+ 'path': '/v2/zones/{zone_id}/recordsets',
+ 'method': 'GET'
+ },
+ ],
+ deprecated_rule=deprecated_find_recordsets
+ ),
policy.DocumentedRuleDefault(
name="update_recordset",
- check_str=SYSTEM_ADMIN_AND_SECONDARY_ZONE,
+ check_str=SYSTEM_ADMIN_OR_PROJECT_MEMBER_ZONE_TYPE,
scope_types=['system', 'project'],
description="Update recordset",
operations=[
{
'path': '/v2/zones/{zone_id}/recordsets/{recordset_id}',
'method': 'PUT'
- }, {
- 'path': '/v2/reverse/floatingips/{region}:{floatingip_id}',
- 'method': 'PATCH'
}
],
deprecated_rule=deprecated_update_recordset
),
policy.DocumentedRuleDefault(
name="delete_recordset",
- check_str=SYSTEM_ADMIN_AND_SECONDARY_ZONE,
+ check_str=SYSTEM_ADMIN_OR_PROJECT_MEMBER_ZONE_TYPE,
scope_types=['system', 'project'],
description="Delete RecordSet",
operations=[
diff --git a/designate/common/policies/tsigkey.py b/designate/common/policies/tsigkey.py
index 89bbb0f1..b8d59311 100644
--- a/designate/common/policies/tsigkey.py
+++ b/designate/common/policies/tsigkey.py
@@ -89,9 +89,6 @@ rules = [
operations=[
{
'path': '/v2/tsigkeys/{tsigkey_id}',
- 'method': 'PATCH'
- }, {
- 'path': '/v2/tsigkeys/{tsigkey_id}',
'method': 'GET'
}
],
diff --git a/designate/common/policies/zone.py b/designate/common/policies/zone.py
index 74552597..028dcff5 100644
--- a/designate/common/policies/zone.py
+++ b/designate/common/policies/zone.py
@@ -46,6 +46,12 @@ deprecated_get_zone_servers = policy.DeprecatedRule(
deprecated_reason=DEPRECATED_REASON,
deprecated_since=versionutils.deprecated.WALLABY
)
+deprecated_get_zone_ns_records = policy.DeprecatedRule(
+ name="get_zone_ns_records",
+ check_str=base.RULE_ADMIN_OR_OWNER,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.WALLABY
+)
deprecated_find_zones = policy.DeprecatedRule(
name="find_zones",
check_str=base.RULE_ADMIN_OR_OWNER,
@@ -131,12 +137,6 @@ rules = [
{
'path': '/v2/zones/{zone_id}',
'method': 'GET'
- }, {
- 'path': '/v2/zones/{zone_id}',
- 'method': 'PATCH'
- }, {
- 'path': '/v2/zones/{zone_id}/recordsets/{recordset_id}',
- 'method': 'PUT'
}
],
deprecated_rule=deprecated_get_zone
@@ -148,6 +148,19 @@ rules = [
deprecated_rule=deprecated_get_zone_servers
),
policy.DocumentedRuleDefault(
+ name="get_zone_ns_records",
+ check_str=base.SYSTEM_OR_PROJECT_READER,
+ scope_types=['system', 'project'],
+ description="Get the Name Servers for a Zone",
+ operations=[
+ {
+ 'path': '/v2/zones/{zone_id}/nameservers',
+ 'method': 'GET'
+ }
+ ],
+ deprecated_rule=deprecated_get_zone_ns_records
+ ),
+ policy.DocumentedRuleDefault(
name="find_zones",
check_str=base.SYSTEM_OR_PROJECT_READER,
scope_types=['system', 'project'],
diff --git a/designate/common/policies/zone_export.py b/designate/common/policies/zone_export.py
index c3f02443..ca45971b 100644
--- a/designate/common/policies/zone_export.py
+++ b/designate/common/policies/zone_export.py
@@ -52,6 +52,12 @@ deprecated_update_zone_export = policy.DeprecatedRule(
deprecated_reason=DEPRECATED_REASON,
deprecated_since=versionutils.deprecated.WALLABY
)
+deprecated_delete_zone_export = policy.DeprecatedRule(
+ name="delete_zone_export",
+ check_str=base.RULE_ADMIN_OR_OWNER,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.WALLABY
+)
rules = [
@@ -103,9 +109,6 @@ rules = [
{
'path': '/v2/zones/tasks/exports/{zone_export_id}',
'method': 'GET'
- }, {
- 'path': '/v2/zones/tasks/exports/{zone_export_id}/export',
- 'method': 'GET'
}
],
deprecated_rule=deprecated_get_zone_export
@@ -122,7 +125,20 @@ rules = [
}
],
deprecated_rule=deprecated_update_zone_export
- )
+ ),
+ policy.DocumentedRuleDefault(
+ name="delete_zone_export",
+ check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER,
+ scope_types=['system', 'project'],
+ description="Delete a zone export",
+ operations=[
+ {
+ 'path': '/v2/zones/tasks/exports/{zone_export_id}',
+ 'method': 'DELETE'
+ }
+ ],
+ deprecated_rule=deprecated_delete_zone_export
+ ),
]
diff --git a/designate/common/policies/zone_import.py b/designate/common/policies/zone_import.py
index 02a383e1..8d4f2b17 100644
--- a/designate/common/policies/zone_import.py
+++ b/designate/common/policies/zone_import.py
@@ -115,7 +115,7 @@ rules = [
operations=[
{
'path': '/v2/zones/tasks/imports/{zone_import_id}',
- 'method': 'GET'
+ 'method': 'DELETE'
}
],
deprecated_rule=deprecated_delete_zone_import
diff --git a/designate/common/policies/zone_transfer_accept.py b/designate/common/policies/zone_transfer_accept.py
index eeca7435..05e18a64 100644
--- a/designate/common/policies/zone_transfer_accept.py
+++ b/designate/common/policies/zone_transfer_accept.py
@@ -24,7 +24,7 @@ The zone transfer accept API now supports system scope and default roles.
deprecated_create_zone_transfer_accept = policy.DeprecatedRule(
name="create_zone_transfer_accept",
- check_str=base.RULE_ZONE_TRANSFER,
+ check_str=base.LEGACY_RULE_ZONE_TRANSFER,
deprecated_reason=DEPRECATED_REASON,
deprecated_since=versionutils.deprecated.WALLABY
)
@@ -64,13 +64,15 @@ rules = [
policy.DocumentedRuleDefault(
name="create_zone_transfer_accept",
check_str=base.RULE_ZONE_TRANSFER,
+ scope_types=['system', 'project'],
description="Create Zone Transfer Accept",
operations=[
{
'path': '/v2/zones/tasks/transfer_accepts',
'method': 'POST'
}
- ]
+ ],
+ deprecated_rule=deprecated_create_zone_transfer_accept
),
policy.DocumentedRuleDefault(
name="get_zone_transfer_accept",
diff --git a/designate/common/policies/zone_transfer_request.py b/designate/common/policies/zone_transfer_request.py
index 0ed2c8d3..5178aaf6 100644
--- a/designate/common/policies/zone_transfer_request.py
+++ b/designate/common/policies/zone_transfer_request.py
@@ -23,14 +23,14 @@ The zone transfer request API now supports system scope and default roles.
"""
deprecated_create_zone_transfer_request = policy.DeprecatedRule(
- name="create_zone_transfer_request",
- check_str=base.RULE_ADMIN_OR_OWNER,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.WALLABY
+ name="create_zone_transfer_request",
+ check_str=base.RULE_ADMIN_OR_OWNER,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.WALLABY
)
deprecated_get_zone_transfer_request = policy.DeprecatedRule(
name="get_zone_transfer_request",
- check_str=base.RULE_ZONE_TRANSFER,
+ check_str=base.LEGACY_RULE_ZONE_TRANSFER,
deprecated_reason=DEPRECATED_REASON,
deprecated_since=versionutils.deprecated.WALLABY
)
@@ -40,12 +40,6 @@ deprecated_get_zone_transfer_request_detailed = policy.DeprecatedRule(
deprecated_reason=DEPRECATED_REASON,
deprecated_since=versionutils.deprecated.WALLABY
)
-deprecated_find_zone_transfer_requests = policy.DeprecatedRule(
- name="find_zone_transfer_requests",
- check_str=base.RULE_ANY,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.WALLABY
-)
deprecated_update_zone_transfer_request = policy.DeprecatedRule(
name="update_zone_transfer_request",
check_str=base.RULE_ADMIN_OR_OWNER,
@@ -77,16 +71,15 @@ rules = [
policy.DocumentedRuleDefault(
name="get_zone_transfer_request",
check_str=base.RULE_ZONE_TRANSFER,
+ scope_types=['system', 'project'],
description="Show a Zone Transfer Request",
operations=[
{
'path': '/v2/zones/tasks/transfer_requests/{zone_transfer_request_id}', # noqa
'method': 'GET'
- }, {
- 'path': '/v2/zones/tasks/transfer_requests/{zone_transfer_request_id}', # noqa
- 'method': 'PATCH'
}
- ]
+ ],
+ deprecated_rule=deprecated_get_zone_transfer_request
),
policy.RuleDefault(
name="get_zone_transfer_request_detailed",
@@ -103,7 +96,7 @@ rules = [
'path': '/v2/zones/tasks/transfer_requests',
'method': 'GET'
}
- ]
+ ],
),
policy.RuleDefault(
name="find_zone_transfer_request",
diff --git a/designate/context.py b/designate/context.py
index 13ccda13..b5c2c1e8 100644
--- a/designate/context.py
+++ b/designate/context.py
@@ -107,6 +107,8 @@ class DesignateContext(context.RequestContext):
# NOTE(kiall): Ugly - required to match http://tinyurl.com/o3y8qmw
context.roles.append('admin')
+ if policy.enforce_new_defaults():
+ context.system_scope = 'all'
if show_deleted is not None:
context.show_deleted = show_deleted
@@ -132,7 +134,8 @@ class DesignateContext(context.RequestContext):
def get_admin_context(cls, **kwargs):
# TODO(kiall): Remove Me
kwargs['is_admin'] = True
- kwargs['roles'] = ['admin']
+ kwargs['roles'] = ['admin', 'reader']
+ kwargs['system_scope'] = 'all'
return cls(None, **kwargs)
diff --git a/designate/dnsutils.py b/designate/dnsutils.py
index 5875bc1c..1bf46300 100644
--- a/designate/dnsutils.py
+++ b/designate/dnsutils.py
@@ -20,9 +20,10 @@ from threading import Lock
import dns
import dns.exception
+import dns.query
+import dns.rdatatype
import dns.zone
import eventlet
-from dns import rdatatype
from oslo_serialization import base64
from oslo_log import log as logging
@@ -312,7 +313,7 @@ def dnspyrecords_to_recordsetlist(dnspython_records):
def dnspythonrecord_to_recordset(rname, rdataset):
- record_type = rdatatype.to_text(rdataset.rdtype)
+ record_type = dns.rdatatype.to_text(rdataset.rdtype)
name = rname.to_text()
if isinstance(name, bytes):
@@ -346,39 +347,122 @@ def do_axfr(zone_name, servers, timeout=None, source=None):
timeout = timeout or CONF["service:mdns"].xfr_timeout
xfr = None
-
for srv in servers:
- to = eventlet.Timeout(timeout)
- log_info = {'name': zone_name, 'host': srv}
- try:
- LOG.info("Doing AXFR for %(name)s from %(host)s", log_info)
-
- xfr = dns.query.xfr(srv['host'], zone_name, relativize=False,
- timeout=1, port=srv['port'], source=source)
- raw_zone = dns.zone.from_xfr(xfr, relativize=False)
- break
- except eventlet.Timeout as t:
- if t == to:
- LOG.error("AXFR timed out for %(name)s from %(host)s",
- log_info)
- continue
- except dns.exception.FormError:
- LOG.error("Zone %(name)s is not present on %(host)s."
- "Trying next server.", log_info)
- except socket.error:
- LOG.error("Connection error when doing AXFR for %(name)s from "
- "%(host)s", log_info)
- except Exception:
- LOG.exception("Problem doing AXFR %(name)s from %(host)s. "
+ for address in get_ip_addresses(srv['host']):
+ to = eventlet.Timeout(timeout)
+ log_info = {'name': zone_name, 'host': srv, 'address': address}
+ try:
+ LOG.info(
+ 'Doing AXFR for %(name)s from %(host)s %(address)s',
+ log_info
+ )
+ xfr = dns.query.xfr(
+ address, zone_name, relativize=False, timeout=1,
+ port=srv['port'], source=source
+ )
+ raw_zone = dns.zone.from_xfr(xfr, relativize=False)
+ LOG.debug("AXFR Successful for %s", raw_zone.origin.to_text())
+ return raw_zone
+ except eventlet.Timeout as t:
+ if t == to:
+ LOG.error("AXFR timed out for %(name)s from %(host)s",
+ log_info)
+ continue
+ except dns.exception.FormError:
+                LOG.error("Zone %(name)s is not present on %(host)s. "
+                          "Trying next server.", log_info)
- finally:
- to.cancel()
- continue
- else:
- raise exceptions.XFRFailure(
- "XFR failed for %(name)s. No servers in %(servers)s was reached." %
- {"name": zone_name, "servers": servers})
+ except socket.error:
+ LOG.error("Connection error when doing AXFR for %(name)s from "
+ "%(host)s", log_info)
+ except Exception:
+ LOG.exception("Problem doing AXFR %(name)s from %(host)s. "
+ "Trying next server.", log_info)
+ finally:
+ to.cancel()
+
+ raise exceptions.XFRFailure(
+        "XFR failed for %(name)s. No servers in %(servers)s were reached." %
+ {"name": zone_name, "servers": servers}
+ )
+
+
+def prepare_msg(zone_name, rdatatype=dns.rdatatype.SOA,
+ dns_opcode=dns.opcode.QUERY):
+ """
+    Build a DNS query message with dnspython and set its opcode.
+ """
+ dns_message = dns.message.make_query(zone_name, rdatatype)
+ dns_message.set_opcode(dns_opcode)
+
+ return dns_message
+
+
+def dig(zone_name, host, rdatatype, port=53):
+ """
+ Set up and send a regular dns query, datatype configurable
+ """
+ query = prepare_msg(zone_name, rdatatype=rdatatype)
+
+ return send_dns_message(query, host, port=port)
+
+
+def notify(zone_name, host, port=53):
+ """
+ Set up a notify packet and send it
+ """
+ msg = prepare_msg(zone_name, dns_opcode=dns.opcode.NOTIFY)
+
+ return send_dns_message(msg, host, port=port)
- LOG.debug("AXFR Successful for %s", raw_zone.origin.to_text())
- return raw_zone
+def send_dns_message(dns_message, host, port=53, timeout=10):
+ """
+ Send the dns message and return the response
+
+ :return: dns.Message of the response to the dns query
+ """
+ ip_address = get_ip_address(host)
+ # This can raise some exceptions, but we'll catch them elsewhere
+ if not CONF['service:mdns'].all_tcp:
+ return dns.query.udp(
+ dns_message, ip_address, port=port, timeout=timeout)
+ return dns.query.tcp(
+ dns_message, ip_address, port=port, timeout=timeout)
+
+
+def get_serial(zone_name, host, port=53):
+ """
+ Possibly raises dns.exception.Timeout or dns.query.BadResponse.
+ Possibly returns 0 if, e.g., the answer section is empty.
+ """
+ resp = dig(zone_name, host, dns.rdatatype.SOA, port=port)
+ if not resp.answer:
+ return 0
+ rdataset = resp.answer[0].to_rdataset()
+ if not rdataset:
+ return 0
+ return rdataset[0].serial
+
+
+def get_ip_address(ip_address_or_hostname):
+ """
+    Provide an IP address or hostname and return a valid IPv4 or IPv6 address.
+
+ :return: ip address
+ """
+ addresses = get_ip_addresses(ip_address_or_hostname)
+ if not addresses:
+ return None
+ return addresses[0]
+
+
+def get_ip_addresses(ip_address_or_hostname):
+ """
+    Provide an IP address or hostname and return all valid IPv4 or IPv6
+    addresses.
+
+ :return: ip addresses
+ """
+ addresses = []
+ for res in socket.getaddrinfo(ip_address_or_hostname, 0):
+ addresses.append(res[4][0])
+ return list(set(addresses))
diff --git a/designate/exceptions.py b/designate/exceptions.py
index 2aa73127..071addf5 100644
--- a/designate/exceptions.py
+++ b/designate/exceptions.py
@@ -256,6 +256,10 @@ class IncorrectZoneTransferKey(Forbidden):
error_type = 'invalid_key'
+class InvalidTokenScope(Forbidden):
+ error_type = 'invalid_token_scope'
+
+
class Duplicate(DesignateException):
expected = True
error_code = 409
@@ -473,3 +477,12 @@ class LastServerDeleteNotAllowed(BadRequest):
class ResourceNotFound(NotFound):
# TODO(kiall): Should this be extending NotFound??
pass
+
+
+class MissingProjectID(BadRequest):
+ # Note: This should be 400, but is 401 for compatibility with
+ # previous versions of the API.
+ # https://github.com/openstack/designate/blob/stable/wallaby/ \
+ # designate/api/middleware.py#L132
+ error_code = 401
+ error_type = 'missing_project_id'
diff --git a/designate/mdns/notify.py b/designate/mdns/notify.py
index 0d66970d..0e464960 100644
--- a/designate/mdns/notify.py
+++ b/designate/mdns/notify.py
@@ -28,6 +28,7 @@ import dns.opcode
from oslo_config import cfg
from oslo_log import log as logging
+from designate import dnsutils
from designate.mdns import base
from designate.metrics import metrics
@@ -186,8 +187,9 @@ class NotifyEndpoint(base.BaseEndpoint):
'zone': zone.name, 'server': host,
'port': port})
try:
- response = self._send_dns_message(dns_message, host, port,
- timeout)
+ response = dnsutils.send_dns_message(
+ dns_message, host, port, timeout=timeout
+ )
except socket.error as e:
if e.errno != socket.errno.EAGAIN:
@@ -285,21 +287,3 @@ class NotifyEndpoint(base.BaseEndpoint):
dns_message.flags |= dns.flags.RD
return dns_message
-
- def _send_dns_message(self, dns_message, host, port, timeout):
- """
- Send DNS Message over TCP or UDP, return response.
-
- :param dns_message: The dns message that needs to be sent.
- :param host: The destination ip of dns_message.
- :param port: The destination port of dns_message.
- :param timeout: The timeout in seconds to wait for a response.
- :return: response
- """
- send = dns_query.tcp if CONF['service:mdns'].all_tcp else dns_query.udp
- return send(
- dns_message,
- socket.gethostbyname(host),
- port=port,
- timeout=timeout
- )
diff --git a/designate/objects/adapters/api_v2/zone_transfer_request.py b/designate/objects/adapters/api_v2/zone_transfer_request.py
index 56b9790f..5e430d9a 100644
--- a/designate/objects/adapters/api_v2/zone_transfer_request.py
+++ b/designate/objects/adapters/api_v2/zone_transfer_request.py
@@ -12,10 +12,11 @@
# License for the specific language governing permissions and limitations
# under the License.
-from designate.objects.adapters.api_v2 import base
+from designate.common import constants
+from designate import exceptions
from designate import objects
+from designate.objects.adapters.api_v2 import base
from designate import policy
-from designate import exceptions
class ZoneTransferRequestAPIv2Adapter(base.APIv2Adapter):
@@ -66,9 +67,10 @@ class ZoneTransferRequestAPIv2Adapter(base.APIv2Adapter):
object, *args, **kwargs)
try:
- target = {
- 'tenant_id': object.tenant_id,
- }
+ if policy.enforce_new_defaults():
+ target = {constants.RBAC_PROJECT_ID: object.tenant_id}
+ else:
+ target = {'tenant_id': object.tenant_id}
policy.check(
'get_zone_transfer_request_detailed',
diff --git a/designate/objects/blacklist.py b/designate/objects/blacklist.py
index 1a5eb388..a0dd4fcf 100644
--- a/designate/objects/blacklist.py
+++ b/designate/objects/blacklist.py
@@ -20,8 +20,8 @@ from designate.objects import fields
class Blacklist(base.DictObjectMixin, base.PersistentObjectMixin,
base.DesignateObject):
fields = {
- 'pattern': fields.StringFields(maxLength=255),
- 'description': fields.StringFields(maxLength=160, nullable=True),
+ 'pattern': fields.DenylistFields(maxLength=255),
+ 'description': fields.DenylistFields(maxLength=160, nullable=True),
}
STRING_KEYS = [
diff --git a/designate/objects/fields.py b/designate/objects/fields.py
index 9b533438..cda9efcc 100644
--- a/designate/objects/fields.py
+++ b/designate/objects/fields.py
@@ -206,9 +206,9 @@ class DomainField(StringFields):
if len(host) > 63:
raise ValueError("Host %s is too long" % host)
if not value.endswith('.'):
- raise ValueError("Domain %s is not end with a dot" % value)
+ raise ValueError("Domain %s does not end with a dot" % value)
if not re.match(self.RE_ZONENAME, value):
- raise ValueError("Domain %s is not match" % value)
+ raise ValueError("Domain %s is invalid" % value)
return value
@@ -222,7 +222,7 @@ class EmailField(StringFields):
raise ValueError("%s is not an email" % value)
email = value.replace('@', '.')
if not re.match(self.RE_ZONENAME, "%s." % email):
- raise ValueError("Email %s is not match" % value)
+ raise ValueError("Email %s is invalid" % value)
return value
@@ -239,9 +239,9 @@ class HostField(StringFields):
if len(host) > 63:
raise ValueError("Host %s is too long" % host)
if value.endswith('.') is False:
- raise ValueError("Host name %s is not end with a dot" % value)
+ raise ValueError("Host name %s does not end with a dot" % value)
if not re.match(self.RE_HOSTNAME, value):
- raise ValueError("Host name %s is not match" % value)
+ raise ValueError("Host name %s is invalid" % value)
return value
@@ -258,7 +258,7 @@ class SRVField(StringFields):
if len(host) > 63:
raise ValueError("Host %s is too long" % host)
if value.endswith('.') is False:
- raise ValueError("Host name %s is not end with a dot" % value)
+ raise ValueError("Host name %s does not end with a dot" % value)
if not re.match(self.RE_SRV_HOST_NAME, value):
raise ValueError("Host name %s is not a SRV record" % value)
return value
@@ -293,7 +293,7 @@ class TldField(StringFields):
def coerce(self, obj, attr, value):
value = super(TldField, self).coerce(obj, attr, value)
if not re.match(self.RE_TLDNAME, value):
- raise ValueError("%s is not an TLD" % value)
+ raise ValueError("%s is not a TLD" % value)
return value
@@ -321,7 +321,7 @@ class NaptrServiceField(StringFields):
raise ValueError("NAPTR record service field cannot be longer than"
" 255 characters" % value)
if not re.match(self.RE_NAPTR_SERVICE, "%s" % value):
- raise ValueError("%s NAPTR record service does not match" % value)
+ raise ValueError("%s NAPTR record service is invalid" % value)
return value
@@ -336,7 +336,7 @@ class NaptrRegexpField(StringFields):
" 255 characters" % value)
if value:
if not re.match(self.RE_NAPTR_REGEXP, "%s" % value):
- raise ValueError("%s is not a NAPTR record regexp" % value)
+            raise ValueError("%s NAPTR record regexp is invalid" % value)
return value
@@ -358,10 +358,11 @@ class CaaPropertyField(StringFields):
raise ValueError("Host %s is too long" % host)
idn_with_dot = idn + '.'
if not re.match(self.RE_ZONENAME, idn_with_dot):
- raise ValueError("Domain %s does not match" % idn)
+ raise ValueError("Domain %s is invalid" % idn)
for entry in entries:
if not re.match(self.RE_KVP, entry):
- raise ValueError("%s is not valid key-value pair" % entry)
+ raise ValueError("%s is not a valid key-value pair" %
+ entry)
elif tag == 'iodef':
if re.match(self.RE_URL_MAIL, val):
parts = val.split('@')
@@ -372,7 +373,7 @@ class CaaPropertyField(StringFields):
raise ValueError("Host %s is too long" % host)
idn_with_dot = idn + '.'
if not re.match(self.RE_ZONENAME, idn_with_dot):
- raise ValueError("Domain %s does not match" % idn)
+ raise ValueError("Domain %s is invalid" % idn)
elif re.match(self.RE_URL_HTTP, val):
parts = val.split('/')
idn = parts[2]
@@ -382,9 +383,9 @@ class CaaPropertyField(StringFields):
raise ValueError("Host %s is too long" % host)
idn_with_dot = idn + '.'
if not re.match(self.RE_ZONENAME, idn_with_dot):
- raise ValueError("Domain %s does not match" % idn)
+ raise ValueError("Domain %s is invalid" % idn)
else:
- raise ValueError("%s is not valid URL" % val)
+ raise ValueError("%s is not a valid URL" % val)
else:
raise ValueError("Property tag %s must be 'issue', 'issuewild'"
" or 'iodef'" % value)
@@ -451,3 +452,25 @@ class IPOrHost(IPV4AndV6AddressField):
if not re.match(StringFields.RE_ZONENAME, value):
raise ValueError("%s is not IP address or host name" % value)
return value
+
+
+class DenylistFields(StringFields):
+ def __init__(self, **kwargs):
+ super(DenylistFields, self).__init__(**kwargs)
+
+ def coerce(self, obj, attr, value):
+ value = super(DenylistFields, self).coerce(obj, attr, value)
+
+ if value is None:
+ return self._null(obj, attr)
+
+        # Determine whether the supplied pattern is a valid regular expression.
+ msg = ("%s is not a valid regular expression" % value)
+ if not len(value):
+ raise ValueError(msg)
+ try:
+ re.compile(value)
+ except Exception:
+ raise ValueError(msg)
+
+ return value
diff --git a/designate/policy.py b/designate/policy.py
index 863f0ddc..041e7f41 100644
--- a/designate/policy.py
+++ b/designate/policy.py
@@ -73,10 +73,17 @@ def init(default_rule=None, policy_file=None):
def check(rule, ctxt, target=None, do_raise=True, exc=exceptions.Forbidden):
- creds = ctxt.to_dict()
+ if enforce_new_defaults():
+ creds = ctxt.to_policy_values()
+ else:
+ creds = ctxt.to_dict()
target = target or {}
try:
result = _ENFORCER.enforce(rule, target, creds, do_raise, exc)
+ except policy.InvalidScope:
+ result = False
+ if do_raise:
+ raise exceptions.InvalidTokenScope
except Exception:
result = False
raise
@@ -93,3 +100,9 @@ def check(rule, ctxt, target=None, do_raise=True, exc=exceptions.Forbidden):
LOG.info("Policy check failed for rule '%(rule)s' "
"on target %(target)s",
{'rule': rule, 'target': repr(target)}, extra=extra)
+
+
+def enforce_new_defaults():
+ if CONF.get('oslo_policy'):
+ return CONF['oslo_policy'].get('enforce_new_defaults', False)
+ return False
diff --git a/designate/quota/base.py b/designate/quota/base.py
index dc38c3a4..6082123c 100644
--- a/designate/quota/base.py
+++ b/designate/quota/base.py
@@ -27,6 +27,7 @@ class Quota(DriverPlugin, metaclass=abc.ABCMeta):
__plugin_type__ = 'quota'
def limit_check(self, context, tenant_id, **values):
+ resources_exceeded = []
quotas = self.get_quotas(context, tenant_id)
for resource, value in values.items():
@@ -34,10 +35,18 @@ class Quota(DriverPlugin, metaclass=abc.ABCMeta):
# Setting the resource quota to a negative value will make
# the resource unlimited
if quotas[resource] >= 0 and value > quotas[resource]:
- raise exceptions.OverQuota()
+ resources_exceeded.append(resource)
else:
- raise exceptions.QuotaResourceUnknown("%s is not a valid quota"
- " resource", resource)
+ raise exceptions.QuotaResourceUnknown(
+ "'%s' is not a valid quota resource." % resource
+ )
+
+ if resources_exceeded:
+ resources_exceeded.sort(key=len)
+ raise exceptions.OverQuota(
+ 'Quota exceeded for %s.' %
+ ', '.join(resources_exceeded)
+ )
def get_quotas(self, context, tenant_id):
quotas = self.get_default_quotas(context)
@@ -64,7 +73,7 @@ class Quota(DriverPlugin, metaclass=abc.ABCMeta):
if resource not in quotas:
raise exceptions.QuotaResourceUnknown("%s is not a valid quota "
- "resource", resource)
+ "resource" % resource)
return quotas[resource]
diff --git a/designate/quota/impl_storage.py b/designate/quota/impl_storage.py
index 4631b569..4262ade3 100644
--- a/designate/quota/impl_storage.py
+++ b/designate/quota/impl_storage.py
@@ -72,7 +72,7 @@ class StorageQuota(base.Quota):
if resource not in list(self.get_default_quotas(context).keys()):
raise exceptions.QuotaResourceUnknown("%s is not a valid quota "
- "resource", resource)
+ "resource" % resource)
try:
create_quota()
diff --git a/designate/storage/impl_sqlalchemy/__init__.py b/designate/storage/impl_sqlalchemy/__init__.py
index 7449f9e2..fe46a88b 100644
--- a/designate/storage/impl_sqlalchemy/__init__.py
+++ b/designate/storage/impl_sqlalchemy/__init__.py
@@ -1488,6 +1488,12 @@ class SQLAlchemyStorage(sqlalchemy_base.SQLAlchemy, storage_base.Storage):
).select_from(ljoin)
if not context.all_tenants:
+ # If we have a system scoped token with no project_id and
+ # all_tenants was not used, we don't know what records to return,
+ # so return an empty list.
+ if not context.project_id:
+ return objects.ZoneTransferRequestList()
+
query = query.where(or_(
table.c.tenant_id == context.project_id,
table.c.target_tenant_id == context.project_id))
@@ -1498,7 +1504,8 @@ class SQLAlchemyStorage(sqlalchemy_base.SQLAlchemy, storage_base.Storage):
exceptions.ZoneTransferRequestNotFound,
criterion,
one=one, marker=marker, limit=limit, sort_dir=sort_dir,
- sort_key=sort_key, query=query, apply_tenant_criteria=False
+ sort_key=sort_key, query=query,
+ apply_tenant_criteria=False
)
def create_zone_transfer_request(self, context, zone_transfer_request):
diff --git a/designate/tests/__init__.py b/designate/tests/__init__.py
index 364fad82..24c7beaa 100644
--- a/designate/tests/__init__.py
+++ b/designate/tests/__init__.py
@@ -388,6 +388,8 @@ class TestCase(base.BaseTestCase):
self.central_service = self.start_service('central')
self.admin_context = self.get_admin_context()
+ self.admin_context_all_tenants = self.get_admin_context(
+ all_tenants=True)
storage_driver = CONF['service:central'].storage_driver
self.storage = storage.get_storage(storage_driver)
@@ -437,10 +439,11 @@ class TestCase(base.BaseTestCase):
def get_context(self, **kwargs):
return DesignateContext(**kwargs)
- def get_admin_context(self):
+ def get_admin_context(self, **kwargs):
return DesignateContext.get_admin_context(
project_id=utils.generate_uuid(),
- user_id=utils.generate_uuid())
+ user_id=utils.generate_uuid(),
+ **kwargs)
# Fixture methods
def get_quota_fixture(self, fixture=0, values=None):
@@ -783,34 +786,36 @@ class TestCase(base.BaseTestCase):
return self.storage.create_zone_export(
context, objects.ZoneExport.from_dict(zone_export))
- def wait_for_import(self, zone_import_id, errorok=False):
+ def wait_for_import(self, zone_import_id, error_is_ok=False, max_wait=10):
"""
Zone imports spawn a thread to parse the zone file and
insert the data. This waits for this process before continuing
"""
- attempts = 0
- while attempts < 20:
- # Give the import a half second to complete
- time.sleep(.5)
-
+ start_time = time.time()
+ while True:
# Retrieve it, and ensure it's the same
zone_import = self.central_service.get_zone_import(
- self.admin_context, zone_import_id)
+ self.admin_context_all_tenants, zone_import_id
+ )
# If the import is done, we're done
if zone_import.status == 'COMPLETE':
break
# If errors are allowed, just make sure that something completed
- if errorok:
- if zone_import.status != 'PENDING':
- break
+ if error_is_ok and zone_import.status != 'PENDING':
+ break
- attempts += 1
+ if (time.time() - start_time) > max_wait:
+ break
- if not errorok:
+ time.sleep(0.5)
+
+ if not error_is_ok:
self.assertEqual('COMPLETE', zone_import.status)
+ return zone_import
+
def _ensure_interface(self, interface, implementation):
for name in interface.__abstractmethods__:
in_arginfo = inspect.getfullargspec(getattr(interface, name))
diff --git a/designate/tests/test_api/test_middleware.py b/designate/tests/test_api/test_middleware.py
index 3425c1c6..6bb8be79 100644
--- a/designate/tests/test_api/test_middleware.py
+++ b/designate/tests/test_api/test_middleware.py
@@ -102,7 +102,8 @@ class KeystoneContextMiddlewareTest(ApiTestCase):
# Process the request
response = app(request)
- self.assertEqual(401, response.status_code)
+ # Ensure request was not blocked
+ self.assertEqual(response, 'FakeResponse')
class NoAuthContextMiddlewareTest(ApiTestCase):
diff --git a/designate/tests/test_api/test_v2/test_blacklists.py b/designate/tests/test_api/test_v2/test_blacklists.py
index 2dff9b8c..0677b7e1 100644
--- a/designate/tests/test_api/test_v2/test_blacklists.py
+++ b/designate/tests/test_api/test_v2/test_blacklists.py
@@ -165,3 +165,48 @@ class ApiV2BlacklistsTest(ApiV2TestCase):
url = '/blacklists?description=test'
self.policy({'find_blacklists': '@'})
self._assert_exception('bad_request', 400, self.client.get, url)
+
+ def test_create_invalid_denylist_pattern(self):
+ self.policy({'create_blacklist': '@'})
+ body = {
+ 'description': u'This is the description.'
+ }
+
+ url = '/blacklists/'
+
+ # doing each pattern individually so upon error one can trace
+ # back to the exact line number
+ body['pattern'] = ''
+ self._assert_exception(
+ 'invalid_object', 400, self.client.post_json, url, body)
+
+ body['pattern'] = '#(*&^%$%$#@$'
+ self._assert_exception(
+ 'invalid_object', 400, self.client.post_json, url, body)
+
+ body['pattern'] = 'a' * 1000
+ self._assert_exception(
+ 'invalid_object', 400, self.client.post_json, url, body)
+
+ def test_update_invalid_denylist_pattern(self):
+ blacklist = self.create_blacklist(fixture=0)
+ self.policy({'update_blacklist': '@'})
+
+ url = ('/blacklists/%s' % blacklist['id'])
+
+ # doing each pattern individually so upon error one can trace
+ # back to the exact line number
+ body = {'pattern': ''}
+ self._assert_exception(
+ 'invalid_object', 400, self.client.patch_json, url, body,
+ status=400)
+
+ body = {'pattern': '#(*&^%$%$#@$'}
+ self._assert_exception(
+ 'invalid_object', 400, self.client.patch_json, url, body,
+ status=400)
+
+ body = {'pattern': 'a' * 1000}
+ self._assert_exception(
+ 'invalid_object', 400, self.client.patch_json, url, body,
+ status=400)
diff --git a/designate/tests/test_api/test_v2/test_floatingips.py b/designate/tests/test_api/test_v2/test_floatingips.py
index 2810029d..4667da55 100644
--- a/designate/tests/test_api/test_v2/test_floatingips.py
+++ b/designate/tests/test_api/test_v2/test_floatingips.py
@@ -197,19 +197,6 @@ class ApiV2ReverseFloatingIPTest(ApiV2TestCase):
self.central_service.update_floatingip(
context, fip['region'], fip['id'], fixture)
- criterion = {
- 'managed_resource_id': fip['id'],
- 'managed_tenant_id': context.project_id
- }
- zone_id = self.central_service.find_record(
- elevated_context, criterion=criterion).zone_id
-
- # Simulate the update on the backend
- zone_serial = self.central_service.get_zone(
- elevated_context, zone_id).serial
- self.central_service.update_status(
- elevated_context, zone_id, "SUCCESS", zone_serial)
-
# Unset PTR ('ptrdname' is None aka null in JSON)
response = self.client.patch_json(
'/reverse/floatingips/%s' % ":".join([fip['region'], fip['id']]),
@@ -218,12 +205,6 @@ class ApiV2ReverseFloatingIPTest(ApiV2TestCase):
self.assertIsNone(response.json)
self.assertEqual(202, response.status_int)
- # Simulate the unset on the backend
- zone_serial = self.central_service.get_zone(
- elevated_context, zone_id).serial
- self.central_service.update_status(
- elevated_context, zone_id, "SUCCESS", zone_serial)
-
fip = self.central_service.get_floatingip(
context, fip['region'], fip['id'])
self.assertIsNone(fip['ptrdname'])
diff --git a/designate/tests/test_api/test_v2/test_import_export.py b/designate/tests/test_api/test_v2/test_import_export.py
index aefc6e1d..b9e491e0 100644
--- a/designate/tests/test_api/test_v2/test_import_export.py
+++ b/designate/tests/test_api/test_v2/test_import_export.py
@@ -53,7 +53,7 @@ class APIV2ZoneImportExportTest(ApiV2TestCase):
headers={'Content-type': 'text/dns'})
import_id = response.json_body['id']
- self.wait_for_import(import_id, errorok=True)
+ self.wait_for_import(import_id, error_is_ok=True)
url = '/zones/tasks/imports/%s' % import_id
@@ -70,7 +70,7 @@ class APIV2ZoneImportExportTest(ApiV2TestCase):
headers={'Content-type': 'text/dns'})
import_id = response.json_body['id']
- self.wait_for_import(import_id, errorok=True)
+ self.wait_for_import(import_id, error_is_ok=True)
url = '/zones/tasks/imports/%s' % import_id
@@ -86,7 +86,7 @@ class APIV2ZoneImportExportTest(ApiV2TestCase):
headers={'Content-type': 'text/dns'})
import_id = response.json_body['id']
- self.wait_for_import(import_id, errorok=True)
+ self.wait_for_import(import_id, error_is_ok=True)
url = '/zones/tasks/imports/%s' % import_id
diff --git a/designate/tests/test_central/test_service.py b/designate/tests/test_central/test_service.py
index 2c6f8cf5..8307f348 100644
--- a/designate/tests/test_central/test_service.py
+++ b/designate/tests/test_central/test_service.py
@@ -33,9 +33,10 @@ from oslo_messaging.rpc import dispatcher as rpc_dispatcher
from designate import exceptions
from designate import objects
from designate.mdns import rpcapi as mdns_api
+from designate.storage.impl_sqlalchemy import tables
from designate.tests import fixtures
from designate.tests.test_central import CentralTestCase
-from designate.storage.impl_sqlalchemy import tables
+from designate import utils
LOG = logging.getLogger(__name__)
@@ -2421,8 +2422,10 @@ class CentralServiceTest(CentralTestCase):
actual = self.central_service.get_floatingip(
context, fip['region'], fip['id'])
- self.assertEqual(expected, actual)
+ self.assertEqual(expected.address, actual.address)
+ self.assertEqual(expected.ptrdname, actual.ptrdname)
+ self.assertEqual(expected.ttl, actual.ttl)
self.assertEqual(expected, actual)
def test_get_floatingip_not_allocated(self):
@@ -2455,14 +2458,6 @@ class CentralServiceTest(CentralTestCase):
criterion = {
'managed_resource_id': fip['id'],
'managed_tenant_id': context_a.project_id}
- zone_id = self.central_service.find_record(
- elevated_a, criterion).zone_id
-
- # Simulate the update on the backend
- zone_serial = self.central_service.get_zone(
- elevated_a, zone_id).serial
- self.central_service.update_status(
- elevated_a, zone_id, "SUCCESS", zone_serial)
self.network_api.fake.deallocate_floatingip(fip['id'])
@@ -2484,19 +2479,9 @@ class CentralServiceTest(CentralTestCase):
context_b, fip['region'], fip['id'])
self.assertIsNone(fip_ptr['ptrdname'])
- # Simulate the invalidation on the backend
- zone_serial = self.central_service.get_zone(
- elevated_a, zone_id).serial
- self.central_service.update_status(
- elevated_a, zone_id, "SUCCESS", zone_serial)
-
- # Ensure that the old record for tenant a for the fip now owned by
- # tenant b is gone
- exc = self.assertRaises(rpc_dispatcher.ExpectedException,
- self.central_service.find_record,
- elevated_a, criterion)
-
- self.assertEqual(exceptions.RecordNotFound, exc.exc_info[0])
+ record = self.central_service.find_record(elevated_a, criterion)
+ self.assertEqual('DELETE', record.action)
+ self.assertEqual('PENDING', record.status)
def test_list_floatingips_no_allocations(self):
context = self.get_context(project_id='a')
@@ -2582,19 +2567,9 @@ class CentralServiceTest(CentralTestCase):
self.assertEqual(1, len(fips))
self.assertIsNone(fips[0]['ptrdname'])
- # Simulate the invalidation on the backend
- zone_serial = self.central_service.get_zone(
- elevated_a, zone_id).serial
- self.central_service.update_status(
- elevated_a, zone_id, "SUCCESS", zone_serial)
-
- # Ensure that the old record for tenant a for the fip now owned by
- # tenant b is gone
- exc = self.assertRaises(rpc_dispatcher.ExpectedException,
- self.central_service.find_record,
- elevated_a, criterion)
-
- self.assertEqual(exceptions.RecordNotFound, exc.exc_info[0])
+ record = self.central_service.find_record(elevated_a, criterion)
+ self.assertEqual('DELETE', record.action)
+ self.assertEqual('PENDING', record.status)
def test_set_floatingip(self):
context = self.get_context(project_id='a')
@@ -3534,7 +3509,7 @@ class CentralServiceTest(CentralTestCase):
# Zone Import Tests
def test_create_zone_import(self):
# Create a Zone Import
- context = self.get_context()
+ context = self.get_context(project_id=utils.generate_uuid())
request_body = self.get_zonefile_fixture()
zone_import = self.central_service.create_zone_import(context,
request_body)
@@ -3547,8 +3522,32 @@ class CentralServiceTest(CentralTestCase):
self.wait_for_import(zone_import.id)
+ def test_create_zone_import_overquota(self):
+ self.config(
+ quota_zone_records=5,
+ quota_zone_recordsets=5,
+ )
+
+ # Create a Zone Import
+ context = self.get_context(project_id=utils.generate_uuid())
+ request_body = self.get_zonefile_fixture()
+ zone_import = self.central_service.create_zone_import(context,
+ request_body)
+
+ # Ensure all values have been set correctly
+ self.assertIsNotNone(zone_import['id'])
+ self.assertEqual('PENDING', zone_import.status)
+ self.assertIsNone(zone_import.message)
+ self.assertIsNone(zone_import.zone_id)
+
+ zone_import = self.wait_for_import(zone_import.id, error_is_ok=True)
+
+ self.assertEqual('Quota exceeded during zone import.',
+ zone_import.message)
+ self.assertEqual('ERROR', zone_import.status)
+
def test_find_zone_imports(self):
- context = self.get_context()
+ context = self.get_context(project_id=utils.generate_uuid())
# Ensure we have no zone_imports to start with.
zone_imports = self.central_service.find_zone_imports(
@@ -3565,7 +3564,7 @@ class CentralServiceTest(CentralTestCase):
# Ensure we can retrieve the newly created zone_import
zone_imports = self.central_service.find_zone_imports(
- self.admin_context)
+ self.admin_context_all_tenants)
self.assertEqual(1, len(zone_imports))
# Create a second zone_import
@@ -3578,14 +3577,14 @@ class CentralServiceTest(CentralTestCase):
# Ensure we can retrieve both zone_imports
zone_imports = self.central_service.find_zone_imports(
- self.admin_context)
+ self.admin_context_all_tenants)
self.assertEqual(2, len(zone_imports))
self.assertEqual('COMPLETE', zone_imports[0].status)
self.assertEqual('COMPLETE', zone_imports[1].status)
def test_get_zone_import(self):
# Create a Zone Import
- context = self.get_context()
+ context = self.get_context(project_id=utils.generate_uuid())
request_body = self.get_zonefile_fixture()
zone_import = self.central_service.create_zone_import(
context, request_body)
@@ -3595,7 +3594,7 @@ class CentralServiceTest(CentralTestCase):
# Retrieve it, and ensure it's the same
zone_import = self.central_service.get_zone_import(
- self.admin_context, zone_import.id)
+ self.admin_context_all_tenants, zone_import.id)
self.assertEqual(zone_import.id, zone_import['id'])
self.assertEqual(zone_import.status, zone_import['status'])
@@ -3603,7 +3602,7 @@ class CentralServiceTest(CentralTestCase):
def test_update_zone_import(self):
# Create a Zone Import
- context = self.get_context()
+ context = self.get_context(project_id=utils.generate_uuid())
request_body = self.get_zonefile_fixture()
zone_import = self.central_service.create_zone_import(
context, request_body)
@@ -3615,7 +3614,7 @@ class CentralServiceTest(CentralTestCase):
# Perform the update
zone_import = self.central_service.update_zone_import(
- self.admin_context, zone_import)
+ self.admin_context_all_tenants, zone_import)
# Fetch the zone_import again
zone_import = self.central_service.get_zone_import(context,
@@ -3626,7 +3625,7 @@ class CentralServiceTest(CentralTestCase):
def test_delete_zone_import(self):
# Create a Zone Import
- context = self.get_context()
+ context = self.get_context(project_id=utils.generate_uuid())
request_body = self.get_zonefile_fixture()
zone_import = self.central_service.create_zone_import(
context, request_body)
diff --git a/designate/tests/test_quota/test_quota.py b/designate/tests/test_quota/test_quota.py
index 56074239..0596ec65 100644
--- a/designate/tests/test_quota/test_quota.py
+++ b/designate/tests/test_quota/test_quota.py
@@ -15,15 +15,13 @@
# under the License.
from unittest import mock
-from testscenarios import load_tests_apply_scenarios as load_tests # noqa
-import testtools
from oslo_config import cfg
from oslo_log import log as logging
+from testscenarios import load_tests_apply_scenarios as load_tests # noqa
+from designate import exceptions
from designate import quota
from designate import tests
-from designate import exceptions
-
LOG = logging.getLogger(__name__)
@@ -56,11 +54,19 @@ class QuotaTestCase(tests.TestCase):
def test_limit_check_unknown(self):
context = self.get_admin_context()
- with testtools.ExpectedException(exceptions.QuotaResourceUnknown):
- self.quota.limit_check(context, 'tenant_id', unknown=0)
+ self.assertRaisesRegex(
+ exceptions.QuotaResourceUnknown,
+ "'unknown' is not a valid quota resource.",
+ self.quota.limit_check,
+ context, 'tenant_id', unknown=0
+ )
- with testtools.ExpectedException(exceptions.QuotaResourceUnknown):
- self.quota.limit_check(context, 'tenant_id', unknown=0, zones=0)
+ self.assertRaisesRegex(
+ exceptions.QuotaResourceUnknown,
+ "'unknown' is not a valid quota resource.",
+ self.quota.limit_check,
+ context, 'tenant_id', unknown=0, zones=0
+ )
def test_limit_check_under(self):
context = self.get_admin_context()
@@ -80,25 +86,27 @@ class QuotaTestCase(tests.TestCase):
def test_limit_check_at(self):
context = self.get_admin_context()
- with testtools.ExpectedException(exceptions.OverQuota):
- self.quota.limit_check(context, 'tenant_id',
- zones=cfg.CONF.quota_zones + 1)
+ self.assertRaisesRegex(
+ exceptions.OverQuota, 'Quota exceeded for zones\\.',
+ self.quota.limit_check,
+ context, 'tenant_id', zones=cfg.CONF.quota_zones + 1
+ )
- with testtools.ExpectedException(exceptions.OverQuota):
- self.quota.limit_check(
- context,
- 'tenant_id',
- zone_records=cfg.CONF.quota_zone_records + 1)
+ self.assertRaisesRegex(
+ exceptions.OverQuota, 'Quota exceeded for zone_records\\.',
+ self.quota.limit_check,
+ context, 'tenant_id', zone_records=cfg.CONF.quota_zone_records + 1
+ )
def test_limit_check_unlimited(self):
context = self.get_admin_context()
self.quota.get_quotas = mock.Mock()
ret = {
- 'zones': -1,
- 'zone_recordsets': -1,
- 'zone_records': -1,
- 'recordset_records': -1,
- 'api_export_size': -1,
+ 'zones': -1,
+ 'zone_recordsets': -1,
+ 'zone_records': -1,
+ 'recordset_records': -1,
+ 'api_export_size': -1,
}
self.quota.get_quotas.return_value = ret
self.quota.limit_check(context, 'tenant_id', zones=99999)
@@ -111,42 +119,82 @@ class QuotaTestCase(tests.TestCase):
context = self.get_admin_context()
self.quota.get_quotas = mock.Mock()
ret = {
- 'zones': 0,
- 'zone_recordsets': 0,
- 'zone_records': 0,
- 'recordset_records': 0,
- 'api_export_size': 0,
+ 'zones': 0,
+ 'zone_recordsets': 0,
+ 'zone_records': 0,
+ 'recordset_records': 0,
+ 'api_export_size': 0,
}
self.quota.get_quotas.return_value = ret
- with testtools.ExpectedException(exceptions.OverQuota):
- self.quota.limit_check(context, 'tenant_id', zones=1)
- with testtools.ExpectedException(exceptions.OverQuota):
- self.quota.limit_check(context, 'tenant_id', zone_recordsets=1)
- with testtools.ExpectedException(exceptions.OverQuota):
- self.quota.limit_check(context, 'tenant_id', zone_records=1)
- with testtools.ExpectedException(exceptions.OverQuota):
- self.quota.limit_check(context, 'tenant_id',
- recordset_records=1)
- with testtools.ExpectedException(exceptions.OverQuota):
- self.quota.limit_check(context, 'tenant_id', api_export_size=1)
+
+ self.assertRaisesRegex(
+ exceptions.OverQuota, 'Quota exceeded for zones\\.',
+ self.quota.limit_check,
+ context, 'tenant_id', zones=1
+ )
+
+ self.assertRaisesRegex(
+ exceptions.OverQuota, 'Quota exceeded for zone_recordsets\\.',
+ self.quota.limit_check,
+ context, 'tenant_id', zone_recordsets=1
+ )
+
+ self.assertRaisesRegex(
+ exceptions.OverQuota, 'Quota exceeded for zone_records\\.',
+ self.quota.limit_check,
+ context, 'tenant_id', zone_records=1
+ )
+
+ self.assertRaisesRegex(
+ exceptions.OverQuota,
+ 'Quota exceeded for recordset_records\\.',
+ self.quota.limit_check,
+ context, 'tenant_id', recordset_records=1
+ )
+
+ self.assertRaisesRegex(
+ exceptions.OverQuota, 'Quota exceeded for api_export_size\\.',
+ self.quota.limit_check,
+ context, 'tenant_id', api_export_size=1
+ )
def test_limit_check_over(self):
context = self.get_admin_context()
- with testtools.ExpectedException(exceptions.OverQuota):
- self.quota.limit_check(context, 'tenant_id', zones=99999)
-
- with testtools.ExpectedException(exceptions.OverQuota):
- self.quota.limit_check(context, 'tenant_id', zone_records=99999)
-
- with testtools.ExpectedException(exceptions.OverQuota):
- self.quota.limit_check(context, 'tenant_id', zones=99999,
- zone_records=99999)
-
- with testtools.ExpectedException(exceptions.OverQuota):
- self.quota.limit_check(context, 'tenant_id', zones=99999,
- zone_records=0)
-
- with testtools.ExpectedException(exceptions.OverQuota):
- self.quota.limit_check(context, 'tenant_id', zones=0,
- zone_records=99999)
+ self.assertRaisesRegex(
+ exceptions.OverQuota, 'Quota exceeded for zones\\.',
+ self.quota.limit_check,
+ context, 'tenant_id', zones=99999
+ )
+
+ self.assertRaisesRegex(
+ exceptions.OverQuota, 'Quota exceeded for zone_records\\.',
+ self.quota.limit_check,
+ context, 'tenant_id', zone_records=99999
+ )
+
+ self.assertRaisesRegex(
+ exceptions.OverQuota, 'Quota exceeded for zones, zone_records\\.',
+ self.quota.limit_check,
+ context, 'tenant_id', zones=99999, zone_records=99999
+ )
+
+ self.assertRaisesRegex(
+ exceptions.OverQuota,
+ 'Quota exceeded for zones, zone_records, zone_recordsets\\.',
+ self.quota.limit_check,
+ context, 'tenant_id', zones=99999, zone_records=99999,
+ zone_recordsets=99999
+ )
+
+ self.assertRaisesRegex(
+ exceptions.OverQuota, 'Quota exceeded for zones\\.',
+ self.quota.limit_check,
+ context, 'tenant_id', zones=99999, zone_records=0
+ )
+
+ self.assertRaisesRegex(
+ exceptions.OverQuota, 'Quota exceeded for zone_records\\.',
+ self.quota.limit_check,
+ context, 'tenant_id', zones=0, zone_records=99999
+ )
diff --git a/designate/tests/unit/backend/test_agent.py b/designate/tests/unit/backend/test_agent.py
index d1e585bf..408544d8 100644
--- a/designate/tests/unit/backend/test_agent.py
+++ b/designate/tests/unit/backend/test_agent.py
@@ -14,11 +14,13 @@
from unittest import mock
import dns
+import dns.query
import dns.rdataclass
import dns.rdatatype
import designate.backend.agent as agent
import designate.backend.private_codes as pcodes
+from designate import dnsutils
from designate import exceptions
from designate import objects
from designate import tests
@@ -130,7 +132,7 @@ class AgentBackendTestCase(tests.TestCase):
def test_make_and_send_dns_message_bad_response(self):
self.backend._make_dns_message = mock.Mock(return_value='')
self.backend._send_dns_message = mock.Mock(
- return_value=agent.dns_query.BadResponse())
+ return_value=dns.query.BadResponse())
out = self.backend._make_and_send_dns_message('h', 123, 1, 2, 3, 4, 5)
@@ -176,50 +178,16 @@ class AgentBackendTestCase(tests.TestCase):
self.assertEqual((response, 0), out)
- @mock.patch.object(agent.dns_query, 'tcp')
- @mock.patch.object(agent.dns_query, 'udp')
- def test_send_dns_message(self, mock_udp, mock_tcp):
+ @mock.patch.object(dnsutils, 'get_ip_address')
+ @mock.patch.object(dns.query, 'tcp')
+ @mock.patch.object(dns.query, 'udp')
+ def test_send_dns_message(self, mock_udp, mock_tcp, mock_get_ip_address):
mock_udp.return_value = 'mock udp resp'
+ mock_get_ip_address.return_value = '10.0.1.39'
- out = self.backend._send_dns_message('msg', 'host', 123, 1)
+ out = self.backend._send_dns_message('msg', '10.0.1.39', 123, 1)
- self.assertFalse(agent.dns_query.tcp.called)
- agent.dns_query.udp.assert_called_with('msg', 'host', port=123,
- timeout=1)
+ self.assertFalse(mock_tcp.called)
+ mock_udp.assert_called_with('msg', '10.0.1.39', port=123,
+ timeout=1)
self.assertEqual('mock udp resp', out)
-
- @mock.patch.object(agent.dns_query, 'tcp')
- @mock.patch.object(agent.dns_query, 'udp')
- def test_send_dns_message_timeout(self, mock_udp, mock_tcp):
- mock_udp.side_effect = dns.exception.Timeout
-
- out = self.backend._send_dns_message('msg', 'host', 123, 1)
-
- agent.dns_query.udp.assert_called_with('msg', 'host', port=123,
- timeout=1)
- self.assertIsInstance(out, dns.exception.Timeout)
-
- @mock.patch.object(agent.dns_query, 'tcp')
- @mock.patch.object(agent.dns_query, 'udp')
- def test_send_dns_message_bad_response(self, mock_udp, mock_tcp):
- mock_udp.side_effect = agent.dns_query.BadResponse
-
- out = self.backend._send_dns_message('msg', 'host', 123, 1)
-
- agent.dns_query.udp.assert_called_with('msg', 'host', port=123,
- timeout=1)
- self.assertIsInstance(out, agent.dns_query.BadResponse)
-
- @mock.patch.object(agent.dns_query, 'tcp')
- @mock.patch.object(agent.dns_query, 'udp')
- def test_send_dns_message_tcp(self, mock_udp, mock_tcp):
- self.CONF.set_override('all_tcp', True, 'service:mdns')
-
- mock_tcp.return_value = 'mock tcp resp'
-
- out = self.backend._send_dns_message('msg', 'host', 123, 1)
-
- self.assertFalse(agent.dns_query.udp.called)
- agent.dns_query.tcp.assert_called_with('msg', 'host', port=123,
- timeout=1)
- self.assertEqual('mock tcp resp', out)
diff --git a/designate/tests/unit/backend/test_bind9.py b/designate/tests/unit/backend/test_bind9.py
index c1189e84..272569ae 100644
--- a/designate/tests/unit/backend/test_bind9.py
+++ b/designate/tests/unit/backend/test_bind9.py
@@ -80,6 +80,15 @@ class Bind9BackendTestCase(designate.tests.TestCase):
)
@mock.patch.object(impl_bind9.Bind9Backend, '_execute_rndc')
+ def test_get_zone(self, mock_execute):
+ with fixtures.random_seed(0):
+ self.backend.get_zone(self.admin_context, self.zone)
+
+ mock_execute.assert_called_with(
+ ['showzone', 'example.com ']
+ )
+
+ @mock.patch.object(impl_bind9.Bind9Backend, '_execute_rndc')
def test_create_zone_with_view(self, mock_execute):
self.target['options'].append(
{'key': 'view', 'value': 'guest'},
diff --git a/designate/tests/unit/mdns/test_notify.py b/designate/tests/unit/mdns/test_notify.py
index 68b47467..45c82338 100644
--- a/designate/tests/unit/mdns/test_notify.py
+++ b/designate/tests/unit/mdns/test_notify.py
@@ -20,6 +20,7 @@ import dns
import dns.rdataclass
import dns.rdatatype
+from designate import dnsutils
import designate.mdns.notify as notify
import designate.tests
from designate.tests.unit import RoObject
@@ -130,12 +131,11 @@ class MdnsNotifyTest(designate.tests.TestCase):
self.assertEqual(('ERROR', 310, 0), out)
@mock.patch('time.sleep')
- def test_make_and_send_dns_message_timeout(self, mock_sleep):
+ @mock.patch.object(dnsutils, 'send_dns_message')
+ def test_make_and_send_dns_message_timeout(self, mock_send_dns_message,
+ mock_sleep):
zone = RoObject(name='zn')
- self.notify._make_dns_message = mock.Mock(return_value='')
- self.notify._send_dns_message = mock.Mock(
- side_effect=dns.exception.Timeout
- )
+ mock_send_dns_message.side_effect = dns.exception.Timeout
out = self.notify._make_and_send_dns_message(
zone, 'host', 123, 1, 2, 3
@@ -143,12 +143,12 @@ class MdnsNotifyTest(designate.tests.TestCase):
self.assertEqual((None, 3), out)
- def test_make_and_send_dns_message_bad_response(self):
+ @mock.patch.object(dnsutils, 'send_dns_message')
+ def test_make_and_send_dns_message_bad_response(self,
+ mock_send_dns_message):
zone = RoObject(name='zn')
self.notify._make_dns_message = mock.Mock(return_value='')
- self.notify._send_dns_message = mock.Mock(
- side_effect=notify.dns_query.BadResponse
- )
+ mock_send_dns_message.side_effect = notify.dns_query.BadResponse
out = self.notify._make_and_send_dns_message(
zone, 'host', 123, 1, 2, 3
@@ -157,15 +157,14 @@ class MdnsNotifyTest(designate.tests.TestCase):
self.assertEqual((None, 1), out)
@mock.patch('time.sleep')
- def test_make_and_send_dns_message_eagain(self, mock_sleep):
+ @mock.patch.object(dnsutils, 'send_dns_message')
+ def test_make_and_send_dns_message_eagain(self, mock_send_dns_message,
+ mock_sleep):
# bug #1558096
zone = RoObject(name='zn')
- self.notify._make_dns_message = mock.Mock(return_value='')
socket_error = socket.error()
socket_error.errno = socket.errno.EAGAIN
- self.notify._send_dns_message = mock.Mock(
- side_effect=socket_error
- )
+ mock_send_dns_message.side_effect = socket_error
out = self.notify._make_and_send_dns_message(
zone, 'host', 123, 1, 2, 3
@@ -173,15 +172,15 @@ class MdnsNotifyTest(designate.tests.TestCase):
self.assertEqual((None, 3), out)
- def test_make_and_send_dns_message_econnrefused(self):
+ @mock.patch.object(dnsutils, 'send_dns_message')
+ def test_make_and_send_dns_message_econnrefused(self,
+ mock_send_dns_message):
# bug #1558096
zone = RoObject(name='zn')
- self.notify._make_dns_message = mock.Mock(return_value='')
socket_error = socket.error()
socket_error.errno = socket.errno.ECONNREFUSED
# socket errors other than EAGAIN should raise
- self.notify._send_dns_message = mock.Mock(
- side_effect=socket_error)
+ mock_send_dns_message.side_effect = socket_error
self.assertRaises(
socket.error,
@@ -189,11 +188,11 @@ class MdnsNotifyTest(designate.tests.TestCase):
zone, 'host', 123, 1, 2, 3
)
- def test_make_and_send_dns_message_nxdomain(self):
+ @mock.patch.object(dnsutils, 'send_dns_message')
+ def test_make_and_send_dns_message_nxdomain(self, mock_send_dns_message):
zone = RoObject(name='zn')
- self.notify._make_dns_message = mock.Mock(return_value='')
response = RoObject(rcode=mock.Mock(return_value=dns.rcode.NXDOMAIN))
- self.notify._send_dns_message = mock.Mock(return_value=response)
+ mock_send_dns_message.return_value = response
out = self.notify._make_and_send_dns_message(
zone, 'host', 123, 1, 2, 3
@@ -201,17 +200,17 @@ class MdnsNotifyTest(designate.tests.TestCase):
self.assertEqual((response, 1), out)
- def test_make_and_send_dns_message_missing_AA_flags(self):
+ @mock.patch.object(dnsutils, 'send_dns_message')
+ def test_make_and_send_dns_message_missing_AA_flags(self,
+ mock_send_dns_message):
zone = RoObject(name='zn')
- self.notify._make_dns_message = mock.Mock(return_value='')
-
response = RoObject(
rcode=mock.Mock(return_value=dns.rcode.NOERROR),
# rcode is NOERROR but (flags & dns.flags.AA) gives 0
flags=0,
answer=['answer'],
)
- self.notify._send_dns_message = mock.Mock(return_value=response)
+ mock_send_dns_message.return_value = response
out = self.notify._make_and_send_dns_message(
zone, 'host', 123, 1, 2, 3
@@ -219,9 +218,10 @@ class MdnsNotifyTest(designate.tests.TestCase):
self.assertEqual((None, 1), out)
- def test_make_and_send_dns_message_error_flags(self):
+ @mock.patch.object(dnsutils, 'send_dns_message')
+ def test_make_and_send_dns_message_error_flags(self,
+ mock_send_dns_message):
zone = RoObject(name='zn')
- self.notify._make_dns_message = mock.Mock(return_value='')
response = RoObject(
rcode=mock.Mock(return_value=dns.rcode.NOERROR),
# rcode is NOERROR but flags are not NOERROR
@@ -229,7 +229,7 @@ class MdnsNotifyTest(designate.tests.TestCase):
ednsflags=321,
answer=['answer'],
)
- self.notify._send_dns_message = mock.Mock(return_value=response)
+ mock_send_dns_message.return_value = response
out = self.notify._make_and_send_dns_message(
zone, 'host', 123, 1, 2, 3
@@ -266,23 +266,3 @@ class MdnsNotifyTest(designate.tests.TestCase):
';AUTHORITY',
';ADDITIONAL',
], txt)
-
- @mock.patch.object(notify.dns_query, 'udp')
- def test_send_udp_dns_message(self, mock_udp):
- self.CONF.set_override('all_tcp', False, 'service:mdns')
-
- self.notify._send_dns_message('msg', '192.0.2.1', 1234, 1)
-
- mock_udp.assert_called_with(
- 'msg', '192.0.2.1', port=1234, timeout=1
- )
-
- @mock.patch.object(notify.dns_query, 'tcp')
- def test_send_tcp_dns_message(self, mock_tcp):
- self.CONF.set_override('all_tcp', True, 'service:mdns')
-
- self.notify._send_dns_message('msg', '192.0.2.1', 1234, 1)
-
- mock_tcp.assert_called_with(
- 'msg', '192.0.2.1', port=1234, timeout=1
- )
diff --git a/designate/tests/unit/objects/test_caa_object.py b/designate/tests/unit/objects/test_caa_object.py
index 877ffb25..023944bf 100644
--- a/designate/tests/unit/objects/test_caa_object.py
+++ b/designate/tests/unit/objects/test_caa_object.py
@@ -72,7 +72,7 @@ class CAARecordTest(oslotest.base.BaseTestCase):
caa_record = objects.CAA()
self.assertRaisesRegex(
ValueError,
- 'Domain abc. does not match',
+ 'Domain abc. is invalid',
caa_record._from_string, '0 issue abc.'
)
@@ -80,7 +80,7 @@ class CAARecordTest(oslotest.base.BaseTestCase):
caa_record = objects.CAA()
self.assertRaisesRegex(
ValueError,
- 'def is not valid key-value pair',
+ 'def is not a valid key-value pair',
caa_record._from_string, '0 issue abc;def'
)
@@ -98,7 +98,7 @@ class CAARecordTest(oslotest.base.BaseTestCase):
caa_record = objects.CAA()
self.assertRaisesRegex(
ValueError,
- 'Domain example.net. does not match',
+ 'Domain example.net. is invalid',
caa_record._from_string, '0 iodef mailto:me@example.net.'
)
@@ -116,7 +116,7 @@ class CAARecordTest(oslotest.base.BaseTestCase):
caa_record = objects.CAA()
self.assertRaisesRegex(
ValueError,
- 'Domain example.net. does not match',
+ 'Domain example.net. is invalid',
caa_record._from_string, '0 iodef https://example.net./'
)
@@ -124,6 +124,6 @@ class CAARecordTest(oslotest.base.BaseTestCase):
caa_record = objects.CAA()
self.assertRaisesRegex(
ValueError,
- 'https:// is not valid URL',
+ 'https:// is not a valid URL',
caa_record._from_string, '0 iodef https://'
)
diff --git a/designate/tests/unit/test_central/test_basic.py b/designate/tests/unit/test_central/test_basic.py
index 688ca340..d3bb6b41 100644
--- a/designate/tests/unit/test_central/test_basic.py
+++ b/designate/tests/unit/test_central/test_basic.py
@@ -256,6 +256,7 @@ class CentralBasic(TestCase):
'set_rules',
'init',
'check',
+ 'enforce_new_defaults',
])
designate.central.service.quota = mock.NonCallableMock(spec_set=[
@@ -788,13 +789,13 @@ class CentralZoneTestCase(CentralBasic):
def test_add_ns_creation(self):
self.service._create_ns = mock.Mock()
- self.service.find_recordsets = mock.Mock(
- return_value=[]
+ self.service.find_recordset = mock.Mock(
+ side_effect=exceptions.RecordSetNotFound()
)
self.service._add_ns(
self.context,
- RoObject(id=CentralZoneTestCase.zone__id),
+ RoObject(name='foo', id=CentralZoneTestCase.zone__id),
RoObject(name='bar')
)
ctx, zone, records = self.service._create_ns.call_args[0]
@@ -803,16 +804,15 @@ class CentralZoneTestCase(CentralBasic):
def test_add_ns(self):
self.service._update_recordset_in_storage = mock.Mock()
- recordsets = [
- RoObject(records=objects.RecordList.from_list([]), managed=True)
- ]
- self.service.find_recordsets = mock.Mock(
- return_value=recordsets
+ self.service.find_recordset = mock.Mock(
+ return_value=RoObject(
+ records=objects.RecordList.from_list([]), managed=True
+ )
)
self.service._add_ns(
self.context,
- RoObject(id=CentralZoneTestCase.zone__id),
+ RoObject(name='foo', id=CentralZoneTestCase.zone__id),
RoObject(name='bar')
)
ctx, zone, rset = \
@@ -821,29 +821,6 @@ class CentralZoneTestCase(CentralBasic):
self.assertTrue(rset.records[0].managed)
self.assertEqual('bar', rset.records[0].data.name)
- def test_add_ns_with_other_ns_rs(self):
- self.service._update_recordset_in_storage = mock.Mock()
-
- recordsets = [
- RoObject(records=objects.RecordList.from_list([]), managed=True),
- RoObject(records=objects.RecordList.from_list([]), managed=False)
- ]
-
- self.service.find_recordsets = mock.Mock(
- return_value=recordsets
- )
-
- self.service._add_ns(
- self.context,
- RoObject(id=CentralZoneTestCase.zone__id),
- RoObject(name='bar')
- )
- ctx, zone, rset = \
- self.service._update_recordset_in_storage.call_args[0]
- self.assertEqual(1, len(rset.records))
- self.assertTrue(rset.records[0].managed)
- self.assertEqual('bar', rset.records[0].data.name)
-
def test_create_zone_no_servers(self):
self.service._enforce_zone_quota = mock.Mock()
self.service._is_valid_zone_name = mock.Mock()
@@ -932,7 +909,7 @@ class CentralZoneTestCase(CentralBasic):
n, ctx, target = designate.central.service.policy.check.call_args[0]
self.assertEqual(CentralZoneTestCase.zone__id, target['zone_id'])
self.assertEqual('foo', target['zone_name'])
- self.assertEqual('2', target['tenant_id'])
+ self.assertEqual('2', target['project_id'])
def test_get_zone_servers(self):
self.service.storage.get_zone.return_value = RoObject(
@@ -995,6 +972,7 @@ class CentralZoneTestCase(CentralBasic):
'set_rules',
'init',
'check',
+ 'enforce_new_defaults',
])
self.context.abandon = True
self.service.storage.count_zones.return_value = 0
@@ -1187,7 +1165,7 @@ class CentralZoneTestCase(CentralBasic):
'zone_id': CentralZoneTestCase.zone__id_2,
'zone_name': 'example.org.',
'recordset_id': CentralZoneTestCase.recordset__id,
- 'tenant_id': '2'}, target)
+ 'project_id': '2'}, target)
def test_find_recordsets(self):
self.context = mock.Mock()
@@ -1196,7 +1174,7 @@ class CentralZoneTestCase(CentralBasic):
self.assertTrue(self.service.storage.find_recordsets.called)
n, ctx, target = designate.central.service.policy.check.call_args[0]
self.assertEqual('find_recordsets', n)
- self.assertEqual({'tenant_id': 't'}, target)
+ self.assertEqual({'project_id': 't'}, target)
def test_find_recordset(self):
self.context = mock.Mock()
@@ -1205,7 +1183,7 @@ class CentralZoneTestCase(CentralBasic):
self.assertTrue(self.service.storage.find_recordset.called)
n, ctx, target = designate.central.service.policy.check.call_args[0]
self.assertEqual('find_recordset', n)
- self.assertEqual({'tenant_id': 't'}, target)
+ self.assertEqual({'project_id': 't'}, target)
def test_update_recordset_fail_on_changes(self):
self.service.storage.get_zone.return_value = RoObject()
@@ -1298,7 +1276,7 @@ class CentralZoneTestCase(CentralBasic):
'zone_name': 'example.org.',
'zone_type': 'foo',
'recordset_id': '9c85d9b0-1e9d-4e99-aede-a06664f1af2e',
- 'tenant_id': '2'}, target)
+ 'project_id': '2'}, target)
def test__update_recordset_in_storage(self):
recordset = mock.Mock()
@@ -1532,7 +1510,7 @@ class CentralZoneTestCase(CentralBasic):
self.service.count_recordsets(self.context)
n, ctx, target = designate.central.service.policy.check.call_args[0]
self.assertEqual('count_recordsets', n)
- self.assertEqual({'tenant_id': None}, target)
+ self.assertEqual({'project_id': None}, target)
self.assertEqual(
{},
self.service.storage.count_recordsets.call_args[0][1]
@@ -1587,7 +1565,7 @@ class CentralZoneTestCase(CentralBasic):
'zone_type': 'foo',
'recordset_id': CentralZoneTestCase.recordset__id,
'recordset_name': 'rs',
- 'tenant_id': '2'}, target)
+ 'project_id': '2'}, target)
def test_create_record_worker(self):
self._test_create_record()
@@ -1689,7 +1667,7 @@ class CentralZoneTestCase(CentralBasic):
'record_id': CentralZoneTestCase.record__id,
'recordset_id': CentralZoneTestCase.recordset__id_2,
'recordset_name': 'foo',
- 'tenant_id': 2}, target)
+ 'project_id': 2}, target)
def test_update_record_fail_on_changes(self):
self.service.storage.get_zone.return_value = RoObject(
@@ -1789,7 +1767,7 @@ class CentralZoneTestCase(CentralBasic):
'record_id': 'abc12a-1e9d-4e99-aede-a06664f1af2e',
'recordset_id': 'abc12a-1e9d-4e99-aede-a06664f1af2e',
'recordset_name': 'rsn',
- 'tenant_id': 'tid'}, target)
+ 'project_id': 'tid'}, target)
def test__update_record_in_storage(self):
self.service._update_zone_in_storage = mock.Mock()
@@ -1893,7 +1871,7 @@ class CentralZoneTestCase(CentralBasic):
'record_id': CentralZoneTestCase.record__id_2,
'recordset_id': CentralZoneTestCase.recordset__id_2,
'recordset_name': 'rsn',
- 'tenant_id': 'tid'}, target)
+ 'project_id': 'tid'}, target)
def test_delete_record_in_storage(self):
self.service._delete_record_in_storage(
@@ -1911,7 +1889,7 @@ class CentralZoneTestCase(CentralBasic):
self.service.count_records(self.context)
t, ctx, target = designate.central.service.policy.check.call_args[0]
self.assertEqual('count_records', t)
- self.assertEqual({'tenant_id': None}, target)
+ self.assertEqual({'project_id': None}, target)
def test_sync_zones(self):
self.service._sync_zone = mock.Mock()
@@ -1938,7 +1916,7 @@ class CentralZoneTestCase(CentralBasic):
t, ctx, target = designate.central.service.policy.check.call_args[0]
self.assertEqual('diagnostics_sync_zone', t)
- self.assertEqual({'tenant_id': 'tid',
+ self.assertEqual({'project_id': 'tid',
'zone_id': CentralZoneTestCase.zone__id,
'zone_name': 'n'}, target)
@@ -1965,7 +1943,7 @@ class CentralZoneTestCase(CentralBasic):
'record_id': CentralZoneTestCase.record__id,
'recordset_id': CentralZoneTestCase.recordset__id,
'recordset_name': 'n',
- 'tenant_id': 'tid'}, target)
+ 'project_id': 'tid'}, target)
def test_ping(self):
self.service.storage.ping.return_value = True
@@ -2118,7 +2096,7 @@ class CentralZoneExportTests(CentralBasic):
n, ctx, target = designate.central.service.policy.check.call_args[0]
# Check arguments to policy
- self.assertEqual('t', target['tenant_id'])
+ self.assertEqual('t', target['project_id'])
# Check output
self.assertEqual(CentralZoneTestCase.zone__id, out.zone_id)
diff --git a/designate/tests/unit/test_dnsutils.py b/designate/tests/unit/test_dnsutils.py
index 8345b8ef..eac016fc 100644
--- a/designate/tests/unit/test_dnsutils.py
+++ b/designate/tests/unit/test_dnsutils.py
@@ -23,6 +23,7 @@ import dns.rcode
import dns.rdatatype
import dns.zone
import eventlet
+from oslo_config import cfg
import oslotest.base
from dns import zone as dnszone
@@ -31,6 +32,8 @@ from designate import dnsutils
from designate import exceptions
from designate import objects
+CONF = cfg.CONF
+
SAMPLES = {
("cname.example.com.", "CNAME"): {
"ttl": 10800,
@@ -320,3 +323,19 @@ class TestDoAfxr(oslotest.base.BaseTestCase):
self.assertTrue(mock_xfr.called)
self.assertTrue(mock_from_xfr.called)
+
+ @mock.patch.object(dns.query, 'udp')
+ def test_send_udp_dns_message(self, mock_udp):
+ CONF.set_override('all_tcp', False, 'service:mdns')
+ dnsutils.send_dns_message('msg', '192.0.2.1', 1234, 1)
+ mock_udp.assert_called_with(
+ 'msg', '192.0.2.1', port=1234, timeout=1
+ )
+
+ @mock.patch.object(dns.query, 'tcp')
+ def test_send_tcp_dns_message(self, mock_tcp):
+ CONF.set_override('all_tcp', True, 'service:mdns')
+ dnsutils.send_dns_message('msg', '192.0.2.1', 1234, 1)
+ mock_tcp.assert_called_with(
+ 'msg', '192.0.2.1', port=1234, timeout=1
+ )
diff --git a/designate/tests/unit/workers/test_base_task.py b/designate/tests/unit/workers/test_base_task.py
index 9b37b52e..f419f4f4 100644
--- a/designate/tests/unit/workers/test_base_task.py
+++ b/designate/tests/unit/workers/test_base_task.py
@@ -14,17 +14,105 @@
# License for the specific language governing permissions and limitations
# under the License.mport threading
import oslotest.base
+from unittest import mock
+from designate import exceptions
+from designate import objects
from designate.worker.tasks import base
class TestTask(oslotest.base.BaseTestCase):
def setUp(self):
super(TestTask, self).setUp()
+ self.context = mock.Mock()
self.task = base.Task(None)
+ self.storage = self.task._storage = mock.Mock()
def test_constructor(self):
self.assertTrue(self.task)
def test_call(self):
self.assertRaises(NotImplementedError, self.task)
+
+ def test_current_action_is_valid(self):
+ self.storage.get_zone = mock.Mock(
+ return_value=objects.Zone(action='UPDATE')
+ )
+ self.assertTrue(
+ self.task.is_current_action_valid(
+ self.context, 'UPDATE', objects.Zone(action='UPDATE'))
+ )
+
+ self.storage.get_zone = mock.Mock(
+ return_value=objects.Zone(action='CREATE')
+ )
+ self.assertTrue(
+ self.task.is_current_action_valid(
+ self.context, 'CREATE', objects.Zone(action='CREATE'))
+ )
+
+ self.storage.get_zone = mock.Mock(
+ return_value=objects.Zone(action='UPDATE')
+ )
+ self.assertTrue(
+ self.task.is_current_action_valid(
+ self.context, 'CREATE', objects.Zone(action='CREATE'))
+ )
+
+ self.storage.get_zone = mock.Mock(
+ return_value=objects.Zone(action='DELETE')
+ )
+ self.assertTrue(
+ self.task.is_current_action_valid(
+ self.context, 'DELETE', objects.Zone(action='DELETE'))
+ )
+
+ def test_current_action_delete_always_valid(self):
+ self.assertTrue(
+ self.task.is_current_action_valid(
+ self.context, 'DELETE', None)
+ )
+
+ def test_current_action_bad_storage_always_valid(self):
+ self.storage.get_zone = mock.Mock(
+ side_effect=exceptions.DesignateException()
+ )
+ self.assertTrue(
+ self.task.is_current_action_valid(
+ self.context, 'CREATE', objects.Zone(action='CREATE'))
+ )
+
+ def test_current_action_is_not_valid_none(self):
+ self.storage.get_zone = mock.Mock(
+ return_value=objects.Zone(action='NONE')
+ )
+ self.assertFalse(
+ self.task.is_current_action_valid(
+ self.context, 'UPDATE', objects.Zone(action='UPDATE'))
+ )
+
+ def test_current_action_is_not_valid_deleted(self):
+ self.storage.get_zone = mock.Mock(
+ return_value=objects.Zone(action='DELETE')
+ )
+ self.assertFalse(
+ self.task.is_current_action_valid(
+ self.context, 'UPDATE', objects.Zone(action='UPDATE'))
+ )
+
+ def test_current_action_is_not_found(self):
+ self.storage.get_zone = mock.Mock(
+ side_effect=exceptions.ZoneNotFound()
+ )
+ self.assertTrue(
+ self.task.is_current_action_valid(
+ self.context, 'CREATE', objects.Zone(action='CREATE'))
+ )
+
+ self.storage.get_zone = mock.Mock(
+ side_effect=exceptions.ZoneNotFound()
+ )
+ self.assertFalse(
+ self.task.is_current_action_valid(
+ self.context, 'UPDATE', objects.Zone(action='UPDATE'))
+ )
diff --git a/designate/tests/unit/workers/test_zone_tasks.py b/designate/tests/unit/workers/test_zone_tasks.py
index 25b9c41a..47eb7d27 100644
--- a/designate/tests/unit/workers/test_zone_tasks.py
+++ b/designate/tests/unit/workers/test_zone_tasks.py
@@ -20,11 +20,11 @@ import oslotest.base
from oslo_config import cfg
from oslo_config import fixture as cfg_fixture
+from designate import dnsutils
from designate import exceptions
from designate import objects
from designate.tests.unit import utils
from designate.worker import processing
-from designate.worker import utils as wutils
from designate.worker.tasks import zone
CONF = cfg.CONF
@@ -167,7 +167,7 @@ class TestZoneActionOnTarget(oslotest.base.BaseTestCase):
self.context = mock.Mock()
self.executor = mock.Mock()
- @mock.patch.object(wutils, 'notify')
+ @mock.patch.object(dnsutils, 'notify')
def test_call_create(self, mock_notify):
self.zone = objects.Zone(name='example.org.', action='CREATE')
self.actor = zone.ZoneActionOnTarget(
@@ -185,7 +185,7 @@ class TestZoneActionOnTarget(oslotest.base.BaseTestCase):
port=53
)
- @mock.patch.object(wutils, 'notify')
+ @mock.patch.object(dnsutils, 'notify')
def test_call_update(self, mock_notify):
self.zone = objects.Zone(name='example.org.', action='UPDATE')
self.actor = zone.ZoneActionOnTarget(
@@ -203,7 +203,7 @@ class TestZoneActionOnTarget(oslotest.base.BaseTestCase):
port=53
)
- @mock.patch.object(wutils, 'notify')
+ @mock.patch.object(dnsutils, 'notify')
def test_call_delete(self, mock_notify):
self.zone = objects.Zone(name='example.org.', action='DELETE')
self.actor = zone.ZoneActionOnTarget(
@@ -217,7 +217,7 @@ class TestZoneActionOnTarget(oslotest.base.BaseTestCase):
mock_notify.assert_not_called()
- @mock.patch.object(wutils, 'notify')
+ @mock.patch.object(dnsutils, 'notify')
@mock.patch('time.sleep', mock.Mock())
def test_call_exception_raised(self, mock_notify):
self.backend.create_zone.side_effect = exceptions.BadRequest()
@@ -250,7 +250,7 @@ class TestSendNotify(oslotest.base.BaseTestCase):
self.executor = mock.Mock()
- @mock.patch.object(wutils, 'notify')
+ @mock.patch.object(dnsutils, 'notify')
def test_call_notify(self, mock_notify):
self.zone = objects.Zone(name='example.org.')
self.actor = zone.SendNotify(
@@ -267,7 +267,7 @@ class TestSendNotify(oslotest.base.BaseTestCase):
port=53
)
- @mock.patch.object(wutils, 'notify')
+ @mock.patch.object(dnsutils, 'notify')
def test_call_notify_timeout(self, mock_notify):
mock_notify.side_effect = dns.exception.Timeout()
self.zone = objects.Zone(name='example.org.')
@@ -282,7 +282,7 @@ class TestSendNotify(oslotest.base.BaseTestCase):
self.actor
)
- @mock.patch.object(wutils, 'notify')
+ @mock.patch.object(dnsutils, 'notify')
def test_call_dont_notify(self, mock_notify):
CONF.set_override('notify', False, 'service:worker')
@@ -668,11 +668,11 @@ class TestPollForZone(oslotest.base.BaseTestCase):
self.task._max_retries = 3
self.task._retry_interval = 2
- @mock.patch.object(zone.wutils, 'get_serial', mock.Mock(return_value=10))
+ @mock.patch.object(dnsutils, 'get_serial', mock.Mock(return_value=10))
def test_get_serial(self):
self.assertEqual(10, self.task._get_serial())
- zone.wutils.get_serial.assert_called_with(
+ dnsutils.get_serial.assert_called_with(
'example.org.',
'ns.example.org',
port=53
diff --git a/designate/worker/README.md b/designate/worker/README.md
index 9c5d9920..47da1934 100644
--- a/designate/worker/README.md
+++ b/designate/worker/README.md
@@ -29,7 +29,7 @@ class SendNotify(base.Task):
port = int(self.target.options.get('port'))
try:
- wutils.notify(self.zone.name, host, port=port)
+ dnsutils.notify(self.zone.name, host, port=port)
return True
except Exception:
return False
diff --git a/designate/worker/tasks/base.py b/designate/worker/tasks/base.py
index b6959391..5c3c8294 100644
--- a/designate/worker/tasks/base.py
+++ b/designate/worker/tasks/base.py
@@ -18,6 +18,7 @@ from oslo_config import cfg
from oslo_log import log as logging
from designate.central import rpcapi as central_rpcapi
+from designate import exceptions
from designate import quota
from designate import storage
from designate import utils
@@ -139,5 +140,52 @@ class Task(TaskConfig):
self._worker_api = worker_rpcapi.WorkerAPI.get_instance()
return self._worker_api
+ def is_current_action_valid(self, context, action, zone):
+ """Is our current action still valid?"""
+
+ # We always allow for DELETE operations.
+ if action == 'DELETE':
+ return True
+
+ try:
+ zone = self.storage.get_zone(context, zone.id)
+
+ # If the zone is either in a DELETE or NONE state,
+ # we don't need to continue with the current action.
+ if zone.action in ['DELETE', 'NONE']:
+ LOG.info(
+ 'Failed to %(action)s zone_name=%(zone_name)s '
+ 'zone_id=%(zone_id)s action state has changed '
+ 'to %(current_action)s, not retrying action',
+ {
+ 'action': action,
+ 'zone_name': zone.name,
+ 'zone_id': zone.id,
+ 'current_action': zone.action,
+ }
+ )
+ return False
+ except exceptions.ZoneNotFound:
+ if action != 'CREATE':
+ LOG.info(
+ 'Failed to %(action)s zone_name=%(zone_name)s '
+ 'zone_id=%(zone_id)s Error=ZoneNotFound',
+ {
+ 'action': action,
+ 'zone_name': zone.name,
+ 'zone_id': zone.id,
+ }
+ )
+ return False
+ except Exception as e:
+ LOG.warning(
+ 'Error trying to get zone action. Error=%(error)s',
+ {
+ 'error': str(e),
+ }
+ )
+
+ return True
+
def __call__(self):
raise NotImplementedError
diff --git a/designate/worker/tasks/zone.py b/designate/worker/tasks/zone.py
index 3189de27..6b18b693 100644
--- a/designate/worker/tasks/zone.py
+++ b/designate/worker/tasks/zone.py
@@ -20,10 +20,10 @@ import dns
from oslo_config import cfg
from oslo_log import log as logging
-from designate.worker import utils as wutils
-from designate.worker.tasks import base
+from designate import dnsutils
from designate import exceptions
from designate import utils
+from designate.worker.tasks import base
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
@@ -88,17 +88,21 @@ class ZoneActionOnTarget(base.Task):
self.action, self.zone.name, self.target)
return True
except Exception as e:
- LOG.info('Failed to %(action)s zone %(zone)s on '
- 'target %(target)s on attempt %(attempt)d, '
- 'Error: %(error)s.',
- {
- 'action': self.action,
- 'zone': self.zone.name,
- 'target': self.target.id,
- 'attempt': retry + 1,
- 'error': str(e)
- })
- time.sleep(self.retry_interval)
+ LOG.info(
+ 'Failed to %(action)s zone_name=%(zone_name)s '
+ 'zone_id=%(zone_id)s on target=%(target)s on '
+ 'attempt=%(attempt)d Error=%(error)s',
+ {
+ 'action': self.action,
+ 'zone_name': self.zone.name,
+ 'zone_id': self.zone.id,
+ 'target': self.target,
+ 'attempt': retry + 1,
+ 'error': str(e),
+ }
+ )
+
+ time.sleep(self.retry_interval)
return False
@@ -124,7 +128,7 @@ class SendNotify(base.Task):
port = int(self.target.options.get('port'))
try:
- wutils.notify(self.zone.name, host, port=port)
+ dnsutils.notify(self.zone.name, host, port=port)
LOG.debug('Sent NOTIFY to %(host)s:%(port)s for zone %(zone)s',
{
'host': host,
@@ -311,7 +315,7 @@ class PollForZone(base.Task):
self.ns = ns
def _get_serial(self):
- return wutils.get_serial(
+ return dnsutils.get_serial(
self.zone.name,
self.ns.host,
port=self.ns.port
@@ -404,6 +408,10 @@ class ZonePoller(base.Task, ThresholdMixin):
{'zone': self.zone.name, 'n': retry + 1})
time.sleep(retry_interval)
+ if not self.is_current_action_valid(self.context, self.zone.action,
+ self.zone):
+ break
+
return query_result
def _on_failure(self, error_status):
diff --git a/designate/worker/utils.py b/designate/worker/utils.py
deleted file mode 100644
index f82d5432..00000000
--- a/designate/worker/utils.py
+++ /dev/null
@@ -1,82 +0,0 @@
-# Copyright 2016 Rackspace Inc.
-#
-# Author: Tim Simmons <tim.simmons@rackspace>
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.mport threading
-import dns
-import dns.exception
-import dns.query
-from oslo_config import cfg
-from oslo_log import log as logging
-
-LOG = logging.getLogger(__name__)
-CONF = cfg.CONF
-
-
-def prepare_msg(zone_name, rdatatype=dns.rdatatype.SOA, notify=False):
- """
- Do the needful to set up a dns packet with dnspython
- """
- dns_message = dns.message.make_query(zone_name, rdatatype)
- if notify:
- dns_message.set_opcode(dns.opcode.NOTIFY)
- else:
- dns_message.set_opcode(dns.opcode.QUERY)
- return dns_message
-
-
-def dig(zone_name, host, rdatatype, port=53):
- """
- Set up and send a regular dns query, datatype configurable
- """
- query = prepare_msg(zone_name, rdatatype=rdatatype)
-
- return send_dns_msg(query, host, port=port)
-
-
-def notify(zone_name, host, port=53):
- """
- Set up a notify packet and send it
- """
- msg = prepare_msg(zone_name, notify=True)
-
- return send_dns_msg(msg, host, port=port)
-
-
-def send_dns_msg(dns_message, host, port=53):
- """
- Send the dns message and return the response
-
- :return: dns.Message of the response to the dns query
- """
- # This can raise some exceptions, but we'll catch them elsewhere
- if not CONF['service:mdns'].all_tcp:
- return dns.query.udp(
- dns_message, host, port=port, timeout=10)
- else:
- return dns.query.tcp(
- dns_message, host, port=port, timeout=10)
-
-
-def get_serial(zone_name, host, port=53):
- """
- Possibly raises dns.exception.Timeout or dns.query.BadResponse.
- Possibly returns 0 if, e.g., the answer section is empty.
- """
- resp = dig(zone_name, host, dns.rdatatype.SOA, port=port)
- if not resp.answer:
- return 0
- rdataset = resp.answer[0].to_rdataset()
- if not rdataset:
- return 0
- return rdataset[0].serial
diff --git a/releasenotes/notes/Fix-to-address-denylist-invalid-patterns-not-being-checked-ec1f1316ccc6cb1d.yaml b/releasenotes/notes/Fix-to-address-denylist-invalid-patterns-not-being-checked-ec1f1316ccc6cb1d.yaml
new file mode 100644
index 00000000..43a3ca94
--- /dev/null
+++ b/releasenotes/notes/Fix-to-address-denylist-invalid-patterns-not-being-checked-ec1f1316ccc6cb1d.yaml
@@ -0,0 +1,16 @@
+---
+fixes:
+ - |
+ Fixes `bug 1934252`_ which ignored invalid denylist patterns. The fix
+ entailed checking the pattern string via regular expression compiler and
+ testing for zero length.
+
+ Previously you could create a blacklist/denylist entry using a string that cannot
+ be used either as a regex or as a zone name, for example:
+ ``patterns = ['', '#(*&^%$%$#@$']``
+
+ In addition, the server will return a 400 BadRequest response to an
+ invalid pattern.
+
+ .. _Bug 1934252: https://bugs.launchpad.net/designate/+bug/1934252
+
diff --git a/releasenotes/notes/Fix-update-zone-create-zone-ada1fd81de479492.yaml b/releasenotes/notes/Fix-update-zone-create-zone-ada1fd81de479492.yaml
new file mode 100644
index 00000000..600ec937
--- /dev/null
+++ b/releasenotes/notes/Fix-update-zone-create-zone-ada1fd81de479492.yaml
@@ -0,0 +1,4 @@
+---
+fixes:
+ - |
+ Fixed an issue where new BIND9 pool instances may fail on zone update.
diff --git a/releasenotes/notes/Support-scoped-tokens-6b7d6052a258cd11.yaml b/releasenotes/notes/Support-scoped-tokens-6b7d6052a258cd11.yaml
new file mode 100644
index 00000000..3571dbb3
--- /dev/null
+++ b/releasenotes/notes/Support-scoped-tokens-6b7d6052a258cd11.yaml
@@ -0,0 +1,4 @@
+---
+features:
+ - |
+ Adds support for keystone default roles and scoped tokens.